adds playground/yolo9 #59
* adds readme and demo python script; * updates for pyVEs/aiVE; * .gitignore yolov9-c.pt #98.37M; adds opencv/examples/python/webcam; decreasing resolution improves FPS
mxochicale committed Feb 25, 2024
1 parent afe9191 commit 76f0e60
Showing 5 changed files with 139 additions and 14 deletions.
4 changes: 4 additions & 0 deletions .gitignore
@@ -35,6 +35,10 @@ checkpoints/
**/VGG18_dropout_earlystopping_training_history.pkl
**/VGG18_dropout_training_history_lr_scheduler.pkl

## ignores other models
yolov9-c.pt


##VTK
**/Frog
FullHead.mhd
24 changes: 24 additions & 0 deletions opencv/examples/python/webcam/script.py
@@ -0,0 +1,24 @@
# https://docs.opencv.org/4.x/dd/d43/tutorial_py_video_display.html
import numpy as np
import cv2 as cv

cap = cv.VideoCapture(0)
if not cap.isOpened():
    print("Cannot open camera")
    exit()
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # if frame is read correctly ret is True
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    # Our operations on the frame come here
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv.imshow('frame', gray)
    if cv.waitKey(1) == ord('q'):
        break
# When everything is done, release the capture
cap.release()
cv.destroyAllWindows()
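
Not part of the commit: a minimal sketch of the point in the commit message that decreasing the capture resolution improves FPS, assuming a webcam that honours (or rounds) the requested size; the 160x120 request and the 100-frame burst are just illustrative values.

```
# Sketch only: request a lower capture resolution and time a short burst of
# frames; the camera may round the requested size, so read it back to check.
import time
import cv2 as cv

cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 160)   # 640/4
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 120)  # 480/4
print("actual size:",
      cap.get(cv.CAP_PROP_FRAME_WIDTH), "x", cap.get(cv.CAP_PROP_FRAME_HEIGHT))

n, t0 = 0, time.time()
while n < 100:
    ret, frame = cap.read()
    if not ret:
        break
    n += 1
print("approx FPS:", n / (time.time() - t0))
cap.release()
```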


12 changes: 12 additions & 0 deletions playground/yolo/README.md
@@ -0,0 +1,12 @@
# YOLO

## YOLOv9
```
mamba activate aiVE
wget https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-c.pt #98.37M 2.76MB/s in 37s
python demoyolo9.py
```

## References
- https://github.com/kadirnar/yolov9-pip
- https://github.com/WongKinYiu/yolov9
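
Not part of the committed README: a minimal sketch of the inference call that `demoyolo9.py` (below) builds around, run on a single image rather than the webcam; `image.jpg` and the output filename are placeholder names.

```
# Sketch only: single-image inference with yolov9-pip, mirroring the calls
# used in demoyolo9.py; "image.jpg" is a placeholder path.
import cv2
import numpy as np
import yolov9

model = yolov9.load("yolov9-c.pt", device="cpu")
model.conf = 0.25

img = cv2.imread("image.jpg")
results = model(img)
pred = results.pred[0]                        # one row per detection
bboxes = np.array(pred[:, :4], dtype="int")   # x1, y1, x2, y2
scores = pred[:, 4]
classes = np.array(pred[:, 5], dtype="int")

for cls, score, (x, y, x2, y2) in zip(classes, scores, bboxes):
    label = f"{model.names[cls]} {float(score):.2f}"
    cv2.rectangle(img, (x, y), (x2, y2), (0, 255, 0), 2)
    cv2.putText(img, label, (x, y - 5), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)

cv2.imwrite("image_detections.jpg", img)
```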
82 changes: 82 additions & 0 deletions playground/yolo/demoyolo9.py
@@ -0,0 +1,82 @@
import numpy as np
import yolov9
import cv2
import time

def generate_colors(n):
    rgb_values = []
    r, g, b = 0, 50, 100
    step = 256 / n
    for _ in range(n):
        r += step
        g += step
        b += step
        r = int(r) % 256
        g = int(g) % 256
        b = int(b) % 256
        rgb_values.append((r, g, b))
    return rgb_values

model = yolov9.load(
    "yolov9-c.pt",
    device="cpu",
)

# set model parameters
model.conf = 0.25  # NMS confidence threshold
model.iou = 0.45  # NMS IoU threshold
model.classes = None  # (optional list) filter by class
# print(model.names, model.names[1])  # category names
colors = generate_colors(len(model.names))

# set camera capture:
camera_id = 0
#frameWidth = 640
#frameHeight = 480
frameWidth = 160  # 640/4
frameHeight = 120  # 480/4
#frameWidth = 80  # 640/8
#frameHeight = 60  # 480/8

cap = cv2.VideoCapture(camera_id)
cap.set(3, frameWidth)   # 3 == cv2.CAP_PROP_FRAME_WIDTH
cap.set(4, frameHeight)  # 4 == cv2.CAP_PROP_FRAME_HEIGHT

while True:
    ret, frame = cap.read()
    if not ret:
        break

    ## perform inference
    t = time.time()
    #results = model(frame)
    results = model(frame, size=frameWidth)
    t = time.time() - t

    # parse results
    predictions = results.pred[0]
    boxes = predictions[:, :4]  # x1, y1, x2, y2
    scores = predictions[:, 4]
    categories = predictions[:, 5]
    bboxes = np.array(boxes, dtype="int")
    classes = np.array(categories, dtype="int")

    ## show detection bounding boxes on the image
    for cls, bbox in zip(classes, bboxes):
        (x, y, x2, y2) = bbox
        cv2.rectangle(frame, (x, y), (x2, y2), colors[cls], 2)
        cv2.putText(frame, model.names[cls], (x, y - 5),
                    cv2.FONT_HERSHEY_PLAIN, 2, colors[cls], 2)
    cv2.putText(frame, "FPS: " + str(1 / t), (10, 30),
                cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)

    # results.show()
    cv2.imshow("Img", frame)

    key = cv2.waitKey(1)
    if key == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()
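
One more sketch, not part of the commit: `demoyolo9.py` leaves `model.classes = None`; setting it to a list of class indices restricts detections to those classes before drawing (index 0 is "person" in the COCO names that yolov9-c.pt is trained on, which is worth confirming via `model.names`).

```
# Sketch only: filter detections by class at the model level instead of in
# the drawing loop; check model.names for the index-to-name mapping.
import yolov9

model = yolov9.load("yolov9-c.pt", device="cpu")
model.conf = 0.25
model.classes = [0]  # e.g. keep only class 0 ("person" in COCO)
print(model.names[0])
```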

31 changes: 17 additions & 14 deletions pyVEs/aiVE.yml
@@ -1,12 +1,11 @@
## USAGE
## cd $HOME/repositories/code/conda/create-virtual-environments
## conda update --all ## UPDATE ALL base PACKAGES
## conda update -n base -c defaults conda ## UPDATE CONDA
## conda list -n aiVE # show list of installed packages
## conda env create -f aiVE.yml ## INSTALL
## conda env update -f aiVE.yml --prune ## UPDATE
## conda activate aiVE ## ACTIVATE
## conda remove -n aiVE --all ## REMOVE
## LIST MAMBA ENVS: mamba list -n *VE # show list of installed packages
## UPDATE MAMBA: mamba update -n base mamba
## INSTALL MAMBA ENV: mamba env create -f *VE.yml
## UPDATE MAMBA ENV: mamba env update -f *VE.yml --prune
## ACTIVATE MAMBA ENV: mamba activate *VE
## REMOVE MAMBA ENV: mamba remove -n *VE --all
## mamba env update --prune -n *VE -f eVE.yml

## QUICK TEST OF THE AVAILABILITY OF CUDA
## conda activate aiVE
@@ -23,7 +22,8 @@ channels:
#- huggingface # datasets
#- anaconda # seaborn; pandas; scikit-learn; jupyter
dependencies:
- python=3.10.*
#- python=3.10.*
- python=3.11.*
#- pytorch=1.13.* #=1.11.*
#- torchvision #=0.12.*
#- cudatoolkit #=11.*.*
Expand All @@ -32,11 +32,14 @@ dependencies:
# pip install torch torchvision networkx ##<<<<FOR TORCH 1.13 MAKE USE OF THE TERMINAL
- matplotlib
- numpy
- notebook
- jupyter
- jupyter_contrib_nbextensions
- pillow
- av
- yolov9pip
### VERSIONS of opencv-contrib-python-headless https://pypi.org/project/opencv-contrib-python-headless/#history
#- opencv-contrib-python-headless
- opencv-python
#- notebook
#- jupyter
#- pillow
#- av
#- opencv-python
#- einops #https://github.com/arogozhnikov/einops
#- seaborn

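A minimal sketch of the kind of check the yml header refers to ("QUICK TEST OF THE AVAILABILITY OF CUDA"), assuming torch ends up installed inside aiVE via pip as the commented dependency lines suggest.

```
# Sketch only: confirm whether PyTorch can see a CUDA device inside aiVE.
import torch

print("torch:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("device:", torch.cuda.get_device_name(0))
```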