From 76f0e60fffc87ffc15f47db8b6c8840201f5192a Mon Sep 17 00:00:00 2001
From: mxochicale
Date: Sun, 25 Feb 2024 16:17:44 +0000
Subject: [PATCH] adds playground/yolo9 #59

* adds readme and demo python script;
* updates pyVEs/aiVE;
* .gitignore yolov9-c.pt (98.37M);
* adds opencv/examples/python/webcam; decreasing the resolution improves FPS
---
 .gitignore                              |  4 ++
 opencv/examples/python/webcam/script.py | 24 ++++
 playground/yolo/README.md               | 12 ++++
 playground/yolo/demoyolo9.py            | 82 +++++++++++++++++++++++++
 pyVEs/aiVE.yml                          | 31 +++++-----
 5 files changed, 139 insertions(+), 14 deletions(-)
 create mode 100644 opencv/examples/python/webcam/script.py
 create mode 100644 playground/yolo/README.md
 create mode 100644 playground/yolo/demoyolo9.py

diff --git a/.gitignore b/.gitignore
index ca85d0a..6f282fa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,6 +35,10 @@ checkpoints/
 **/VGG18_dropout_earlystopping_training_history.pkl
 **/VGG18_dropout_training_history_lr_scheduler.pkl
 
+## ignores other models
+yolov9-c.pt
+
+
 ##VTK
 **/Frog
 FullHead.mhd
diff --git a/opencv/examples/python/webcam/script.py b/opencv/examples/python/webcam/script.py
new file mode 100644
index 0000000..86c39d0
--- /dev/null
+++ b/opencv/examples/python/webcam/script.py
@@ -0,0 +1,24 @@
+# Minimal webcam capture loop, based on the OpenCV tutorial:
+# https://docs.opencv.org/4.x/dd/d43/tutorial_py_video_display.html
+import cv2 as cv
+
+cap = cv.VideoCapture(0)
+if not cap.isOpened():
+    print("Cannot open camera")
+    exit()
+while True:
+    # Capture frame-by-frame
+    ret, frame = cap.read()
+    # if the frame is read correctly, ret is True
+    if not ret:
+        print("Can't receive frame (stream end?). Exiting ...")
+        break
+    # Our operations on the frame come here
+    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
+    # Display the resulting frame
+    cv.imshow('frame', gray)
+    if cv.waitKey(1) == ord('q'):
+        break
+# When everything is done, release the capture
+cap.release()
+cv.destroyAllWindows()
diff --git a/playground/yolo/README.md b/playground/yolo/README.md
new file mode 100644
index 0000000..c6ff41e
--- /dev/null
+++ b/playground/yolo/README.md
@@ -0,0 +1,12 @@
+# YOLO
+
+## YOLOv9
+```
+mamba activate aiVE
+wget https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-c.pt # 98.37M, 2.76MB/s in 37s
+python demoyolo9.py
+```
+
+## References
+- https://github.com/kadirnar/yolov9-pip
+- https://github.com/WongKinYiu/yolov9
diff --git a/playground/yolo/demoyolo9.py b/playground/yolo/demoyolo9.py
new file mode 100644
index 0000000..50bb4dd
--- /dev/null
+++ b/playground/yolo/demoyolo9.py
@@ -0,0 +1,82 @@
+import numpy as np
+import yolov9
+import cv2
+import time
+
+def generate_colors(n):
+    # generate n evenly spaced colours, one per class
+    rgb_values = []
+    r, g, b = 0, 50, 100
+    step = 256 / n
+    for _ in range(n):
+        r += step
+        g += step
+        b += step
+        r = int(r) % 256
+        g = int(g) % 256
+        b = int(b) % 256
+        rgb_values.append((r, g, b))
+    return rgb_values
+
+model = yolov9.load(
+    "yolov9-c.pt",
+    device="cpu",
+)
+
+# set model parameters
+model.conf = 0.25  # NMS confidence threshold
+model.iou = 0.45  # NMS IoU threshold
+model.classes = None  # (optional list) filter by class
+# print(model.names, model.names[1])  # category names
+colors = generate_colors(len(model.names))
+
+# set camera capture:
+camera_id = 0
+#frameWidth = 640
+#frameHeight = 480
+frameWidth = 160  # 640/4
+frameHeight = 120  # 480/4
+#frameWidth = 80  # 640/8
+#frameHeight = 60  # 480/8
+
+cap = cv2.VideoCapture(camera_id)
+cap.set(cv2.CAP_PROP_FRAME_WIDTH, frameWidth)
+cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frameHeight)
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    ## perform inference and time it
+    t = time.time()
+    #results = model(frame)
+    results = model(frame, size=frameWidth)
+    t = time.time() - t
+
+    # parse results
+    predictions = results.pred[0]
+    boxes = predictions[:, :4]  # x1, y1, x2, y2
+    scores = predictions[:, 4]
+    categories = predictions[:, 5]
+    bboxes = np.array(boxes, dtype="int")
+    classes = np.array(categories, dtype="int")
+
+    ## draw detection bounding boxes on the image
+    for cls, bbox in zip(classes, bboxes):
+        (x, y, x2, y2) = bbox
+        cv2.rectangle(frame, (x, y), (x2, y2), colors[cls], 2)
+        cv2.putText(frame, model.names[cls], (x, y - 5),
+                    cv2.FONT_HERSHEY_PLAIN, 2, colors[cls], 2)
+    cv2.putText(frame, f"FPS: {1 / t:.1f}", (10, 30),
+                cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)
+
+    # results.show()
+    cv2.imshow("Img", frame)
+
+    key = cv2.waitKey(1)
+    if key == 27:  # Esc quits
+        break
+
+cap.release()
+cv2.destroyAllWindows()
diff --git a/pyVEs/aiVE.yml b/pyVEs/aiVE.yml
index 33bc898..674ad74 100644
--- a/pyVEs/aiVE.yml
+++ b/pyVEs/aiVE.yml
@@ -1,12 +1,11 @@
 ## USAGE
-## cd $HOME/repositories/code/conda/create-virtual-environments
-## conda update --all ## UPDATE ALL base PACKAGES
-## conda update -n base -c defaults conda ## UPDATE CONDA
-## conda list -n aiVE # show list of installed packages
-## conda env create -f aiVE.yml ## INSTALL
-## conda env update -f aiVE.yml --prune ## UPDATE
-## conda activate aiVE ## ACTIVATE
-## conda remove -n aiVE --all ## REMOVE
+## LIST MAMBA ENVS:    mamba list -n *VE # show list of installed packages
+## UPDATE MAMBA:       mamba update -n base mamba
+## INSTALL MAMBA ENV:  mamba env create -f *VE.yml
+## UPDATE MAMBA ENV:   mamba env update -f *VE.yml --prune
+## ACTIVATE MAMBA ENV: mamba activate *VE
+## REMOVE MAMBA ENV:   mamba remove -n *VE --all
+## mamba env update --prune -n *VE -f *VE.yml
 
 ## QUICK TEST OF THE AVAILABILITY OF CUDA
 ## conda activate aiVE
@@ -23,7 +22,8 @@ channels:
   #- huggingface # datasets
   #- anaconda # seaborn; pandas; scikit-learn; jupyter
 dependencies:
-  - python=3.10.*
+  #- python=3.10.*
+  - python=3.11.*
   #- pytorch=1.13.* #=1.11.*
   #- torchvision #=0.12.*
   #- cudatoolkit #=11.*.*
 # pip install torch torchvision networkx
##<<<
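
Note: the checkpoint can also be sanity-checked on a single still image, without a webcam. The following is a minimal sketch only, reusing the same yolov9-pip calls that demoyolo9.py above relies on (`yolov9.load`, `model(...)`, `results.pred[0]`); the image path `sample.jpg` is a hypothetical placeholder.

```python
import cv2
import yolov9

# load the same checkpoint the demo uses (downloaded via the README's wget step)
model = yolov9.load("yolov9-c.pt", device="cpu")
model.conf = 0.25  # NMS confidence threshold
model.iou = 0.45   # NMS IoU threshold

img = cv2.imread("sample.jpg")  # hypothetical sample image path
results = model(img, size=640)

# results.pred[0]: one row per detection -> x1, y1, x2, y2, score, class
for x1, y1, x2, y2, score, cls in results.pred[0].tolist():
    name = model.names[int(cls)]
    print(f"{name}: {score:.2f} at ({int(x1)}, {int(y1)}, {int(x2)}, {int(y2)})")
```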