cleanup detector code
denniswittich committed Oct 24, 2023
1 parent 863abe4 commit 2085834
Showing 5 changed files with 108 additions and 96 deletions.
28 changes: 28 additions & 0 deletions detector/.vscode/settings.json
@@ -0,0 +1,28 @@
{
"files.watcherExclude": {
"**/.git/objects/**": true,
"**/.git/subtree-cache/**": true
},
"editor.defaultFormatter": "esbenp.prettier-vscode",
"python.analysis.typeCheckingMode": "basic",
"editor.formatOnSave": true,
"editor.minimap.enabled": false,
"autopep8.args": ["--max-line-length=120", "--experimental"],
"pylint.args": [
"--disable=C0103", // Invalid name (e.g., variable/function/class naming conventions)
"--disable=C0111", // Missing docstring (in function/class/method)
"--disable=C0114", // Missing module docstring
"--disable=C0301", // Line too long (exceeds character limit)
"--disable=W0718", // Catching too general exception
"--disable=W0719", // Rraising too general exception
"--disable=W1203", // Use % formatting in logging functions and pass the % parameters as arguments
"--disable=W1514", // Using open without explicitly specifying an encoding
"--generated-members=numpy.* ,torch.*,cv2.*" // Required because pylint doesn't recognize numpy and torch methods
],
"[python]": {
"editor.defaultFormatter": "ms-python.autopep8",
"editor.codeActionsOnSave": {
"source.organizeImports": true
}
}
}
42 changes: 8 additions & 34 deletions detector/detector.code-workspace
@@ -12,43 +12,17 @@
"path": "../../learning_loop_node"
}
],
"settings": {
"editor.formatOnSave": true,
"editor.defaultFormatter": "esbenp.prettier-vscode",
"python.formatting.provider": "autopep8",
"[python]": {
"editor.defaultFormatter": "ms-python.python"
},
"python.formatting.autopep8Args": ["--max-line-length", "120"],
"python.testing.pytestArgs": ["."],
"python.testing.unittestEnabled": false,
"python.testing.nosetestsEnabled": false,
"python.testing.pytestEnabled": false,
"python.pythonPath": "/opt/conda/bin/python3",
"[json]": {
"editor.defaultFormatter": "vscode.json-language-features"
}
},
"extensions": {
"recommendations": [
"ms-python.vscode-pylance",
"ms-python.python",
"himanoa.python-autopep8",
"esbenp.prettier-vscode",
"littlefoxteam.vscode-python-test-adapter"
]
},
"launch": {
"version": "0.2.0",
"configurations": [
{
"name": "Uvicorn/FastAPI",
"type": "python",
"request": "attach",
"connect": {
"port": 5678
}
}
"littlefoxteam.vscode-python-test-adapter",
"mhutchie.git-graph",
"ms-python.autopep8",
"ms-python.isort",
"ms-python.mypy-type-checker",
"ms-python.pylint",
"ms-python.python",
"ms-python.vscode-pylance"
]
}
}
74 changes: 39 additions & 35 deletions detector/yolov5.py
@@ -2,16 +2,16 @@
Original from https://github.com/wang-xinyu/tensorrtx/blob/7b79de466c7ac2fcf179e65c2fa4718107f236f9/yolov5/yolov5_det_trt.py
MIT License
"""

import os
import threading
import time
from typing import Dict, List
from collections import namedtuple

import cv2
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
from collections import namedtuple

CONF_THRESH = 0.2
IOU_THRESHOLD = 0.4
@@ -57,7 +57,7 @@ def __init__(self, engine_file_path: str):
bindings = []

for binding in engine:
print('bingding:', binding, engine.get_binding_shape(binding))
print('binding:', binding, engine.get_binding_shape(binding))
size = trt.volume(engine.get_binding_shape(binding))
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
@@ -100,14 +100,16 @@ def infer(self, image_raw):
cuda_outputs = self.cuda_outputs
bindings = self.bindings
# Do image preprocess
input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_raw)
input_image, image_raw, origin_h, origin_w = self.preprocess_image(
image_raw)
# Copy input image to host buffer
np.copyto(host_inputs[0], input_image.ravel())
start = time.time()
# Transfer input data to the GPU.
cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)
# Run inference.
context.execute_async(batch_size=self.batch_size, bindings=bindings, stream_handle=stream.handle)
context.execute_async(batch_size=self.batch_size,
bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)
# Synchronize the stream
@@ -121,21 +123,18 @@ def infer(self, image_raw):
detections = []
Detection = namedtuple('Detection', 'x y w h category probability')
result_boxes, result_scores, result_classid = self.post_process(output[0:LEN_ALL_RESULT], origin_h, origin_w)
for j in range(len(result_boxes)):
x, y, br_x, br_y = result_boxes[j]
for j, box in enumerate(result_boxes):
x, y, br_x, br_y = box
w = br_x - x
h = br_y - y
detections.append(Detection(
int(x), int(y), int(w), int(h),
int(result_classid[j]), round(float(result_scores[j]), 2))
)

detections.append(Detection(int(x), int(y), int(w), int(h),
int(result_classid[j]), round(float(result_scores[j]), 2)))
return detections, end - start

def destroy(self):
# Remove any context from the top of the context stack, deactivating it.
self.ctx.pop()

def get_raw_image_zeros(self, image_path_batch=None):
"""
description: Ready data for warmup
@@ -156,7 +155,7 @@ def preprocess_image(self, raw_bgr_image):
w: original width
"""
image_raw = raw_bgr_image
h, w, c = image_raw.shape
h, w, _ = image_raw.shape
image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)
# Calculate width and height and paddings
r_w = self.input_w / w
@@ -173,21 +172,19 @@ def preprocess_image(self, raw_bgr_image):
tx1 = int((self.input_w - tw) / 2)
tx2 = self.input_w - tw - tx1
ty1 = ty2 = 0

# Resize the image with long side while maintaining ratio
image = cv2.resize(image, (tw, th))
# Pad the short side with (128,128,128)
image = cv2.copyMakeBorder(
image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, None, (128, 128, 128)
)
image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, None, (128, 128, 128))
image = image.astype(np.float32)
# Normalize to [0,1]
image /= 255.0
# HWC to CHW format:
image = np.transpose(image, [2, 0, 1])
# CHW to NCHW format
image = np.expand_dims(image, axis=0)
image /= 255.0 # Normalize to [0,1]
image = np.transpose(image, [2, 0, 1]) # HWC to CHW format:
image = np.expand_dims(image, axis=0) # CHW to NCHW format
# Convert the image to row-major order, also known as "C order":
image = np.ascontiguousarray(image)

return image, image_raw, h, w

def xywh2xyxy(self, origin_h, origin_w, x):
@@ -206,12 +203,16 @@ def xywh2xyxy(self, origin_h, origin_w, x):
if r_h > r_w:
y[:, 0] = x[:, 0] - x[:, 2] / 2
y[:, 2] = x[:, 0] + x[:, 2] / 2
y[:, 1] = x[:, 1] - x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2
y[:, 3] = x[:, 1] + x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2
y[:, 1] = x[:, 1] - x[:, 3] / 2 - \
(self.input_h - r_w * origin_h) / 2
y[:, 3] = x[:, 1] + x[:, 3] / 2 - \
(self.input_h - r_w * origin_h) / 2
y /= r_w
else:
y[:, 0] = x[:, 0] - x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2
y[:, 2] = x[:, 0] + x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2
y[:, 0] = x[:, 0] - x[:, 2] / 2 - \
(self.input_w - r_h * origin_w) / 2
y[:, 2] = x[:, 0] + x[:, 2] / 2 - \
(self.input_w - r_h * origin_w) / 2
y[:, 1] = x[:, 1] - x[:, 3] / 2
y[:, 3] = x[:, 1] + x[:, 3] / 2
y /= r_h
@@ -230,13 +231,13 @@ def post_process(self, output, origin_h, origin_w):
result_scores: final scores, a numpy array, each element is the score corresponding to its box
result_classid: final classid, a numpy array, each element is the classid corresponding to its box
"""
# Get the num of boxes detected
num = int(output[0])
# Reshape to a two dimentional ndarray
pred = np.reshape(output[1:], (-1, LEN_ONE_RESULT))[:num, :]

num = int(output[0]) # Get the num of boxes detected
pred = np.reshape(output[1:], (-1, LEN_ONE_RESULT))[:num, :] # Reshape to a 2D ndarray
pred = pred[:, :6]
# Do nms
boxes = self.non_max_suppression(pred, origin_h, origin_w, conf_thres=CONF_THRESH, nms_thres=IOU_THRESHOLD)
boxes = self.non_max_suppression(
pred, origin_h, origin_w, conf_thres=CONF_THRESH, nms_thres=IOU_THRESHOLD)
result_boxes = boxes[:, :4] if len(boxes) else np.array([])
result_scores = boxes[:, 4] if len(boxes) else np.array([])
result_classid = boxes[:, 5] if len(boxes) else np.array([])
@@ -270,7 +271,7 @@ def bbox_iou(self, box1, box2, x1y1x2y2=True):
inter_rect_y2 = np.minimum(b1_y2, b2_y2)
# Intersection area
inter_area = np.clip(inter_rect_x2 - inter_rect_x1 + 1, 0, None) * \
np.clip(inter_rect_y2 - inter_rect_y1 + 1, 0, None)
np.clip(inter_rect_y2 - inter_rect_y1 + 1, 0, None)
# Union Area
b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
@@ -308,7 +309,8 @@ def non_max_suppression(self, prediction, origin_h, origin_w, conf_thres=0.5, nm
# Perform non-maximum suppression
keep_boxes = []
while boxes.shape[0]:
large_overlap = self.bbox_iou(np.expand_dims(boxes[0, :4], 0), boxes[:, :4]) > nms_thres
large_overlap = self.bbox_iou(np.expand_dims(
boxes[0, :4], 0), boxes[:, :4]) > nms_thres
label_match = boxes[0, -1] == boxes[:, -1]
# Indices of boxes with lower confidence scores, large IOUs and matching labels
invalid = large_overlap & label_match
@@ -317,11 +319,13 @@ def non_max_suppression(self, prediction, origin_h, origin_w, conf_thres=0.5, nm
boxes = np.stack(keep_boxes, 0) if len(keep_boxes) else np.array([])
return boxes


class warmUpThread(threading.Thread):
def __init__(self, yolov5_wrapper):
threading.Thread.__init__(self)
self.yolov5_wrapper = yolov5_wrapper

def run(self):
_, use_time = self.yolov5_wrapper.infer(self.yolov5_wrapper.get_raw_image_zeros())
_, use_time = self.yolov5_wrapper.infer(
self.yolov5_wrapper.get_raw_image_zeros())
print('warm_up time->{:.2f}ms'.format(use_time * 1000))
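
For orientation, a minimal usage sketch of the refactored wrapper (a sketch only: the plugin-library path is the one loaded in yolov5_detector.py below, while the engine path and test image are placeholders):

import ctypes

import cv2

import yolov5

# Load the TensorRT plugin library before creating the engine wrapper
# (same path that Yolov5Detector uses).
ctypes.CDLL('/tensorrtx/yolov5/build/libmyplugins.so')
wrapper = yolov5.YoLov5TRT('/path/to/model.engine')  # placeholder engine path
try:
    # Warm up once on a zeroed image, as Yolov5Detector.init() does.
    warmup = yolov5.warmUpThread(wrapper)
    warmup.start()
    warmup.join()
    image = cv2.imread('example.jpg')  # placeholder BGR input image
    detections, seconds = wrapper.infer(image)
    for d in detections:  # Detection namedtuple: x, y, w, h, category, probability
        print(d.category, d.probability, d.x, d.y, d.w, d.h)
finally:
    wrapper.destroy()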
56 changes: 29 additions & 27 deletions detector/yolov5_detector.py
@@ -1,38 +1,41 @@
from typing import Any, List
from learning_loop_node import ModelInformation, Detector
from learning_loop_node.detector import Detections, BoxDetection, PointDetection
from learning_loop_node.data_classes import Category, CategoryType
import ctypes
import logging
import os
import subprocess
import re
import yolov5
import ctypes
import subprocess
import time
from typing import List, Optional

import cv2
import numpy as np
import time
from learning_loop_node.data_classes import (BoxDetection, CategoryType,
Detections, PointDetection)
from learning_loop_node.detector.detector_logic import DetectorLogic

import yolov5


class Yolov5Detector(Detector):
class Yolov5Detector(DetectorLogic):

def __init__(self) -> None:
super().__init__('yolov5_wts')
self.yolov5: Optional[yolov5.YoLov5TRT] = None

def init(self, model_info: ModelInformation):
self.model_info = model_info
engine_file = self._create_engine(
model_info.resolution,
len(model_info.categories),
f'{model_info.model_root_path}/model.wts'
)
def init(self) -> None:
resolution = self.model_info.resolution
assert resolution is not None
engine_file = self._create_engine(resolution,
len(self.model_info.categories),
f'{self.model_info.model_root_path}/model.wts')
ctypes.CDLL('/tensorrtx/yolov5/build/libmyplugins.so')
self.yolov5 = yolov5.YoLov5TRT(engine_file)
for i in range(3):
for _ in range(3):
warmup = yolov5.warmUpThread(self.yolov5)
warmup.start()
warmup.join()

def evaluate(self, image: List[np.uint8]) -> Detections:
assert self.yolov5 is not None, 'init() must be called first'
detections = Detections()
try:
t = time.time()
@@ -45,21 +48,17 @@ def evaluate(self, image: List[np.uint8]) -> Detections:
if w <= 2 or h <= 2: # skip very small boxes.
skipped_detections.append((category.name, detection))
continue

if category.type == CategoryType.Box:
detections.box_detections.append(BoxDetection(
category.name, x, y, w, h, self.model_info.version, probability
))
category.name, x, y, w, h, self.model_info.version, probability))
elif category.type == CategoryType.Point:
cx, cy = (np.average([x, x + w]), np.average([y, y + h]))
detections.point_detections.append(PointDetection(
category.name, int(cx), int(cy), self.model_info.version, probability
))
category.name, int(cx), int(cy), self.model_info.version, probability))
if skipped_detections:
log_msg = '\n'.join([str(d) for d in skipped_detections])
logging.warning(
f'Removed very small detections from inference result (count={len(skipped_detections)}): \n{log_msg}')
except Exception as e:
logging.warning(f'Removed {len(skipped_detections)} small detections from result: \n{log_msg}')
except Exception:
logging.exception('inference failed')
return detections

@@ -71,6 +70,7 @@ def _create_engine(self, resolution: int, cat_count: int, wts_file: str) -> str:

# NOTE cmake and initial building is done in the Dockerfile (to speed things up)
os.chdir('/tensorrtx/yolov5/build')

# Adapt resolution
with open('../src/config.h', 'r+') as f:
content = f.read()
@@ -79,8 +79,10 @@ def _create_engine(self, resolution: int, cat_count: int, wts_file: str) -> str:
f.seek(0)
f.truncate()
f.write(content)
subprocess.run('make -j6 -Wno-deprecated-declarations', shell=True)

subprocess.run('make -j6 -Wno-deprecated-declarations', shell=True, check=True)
logging.warning('currently we assume a Yolov5 s6 model;\
parameterization of the variant (s, s6, m, m6, ...) still needs to be done')
subprocess.run(f'./yolov5_det -s {wts_file} {engine_file} s6', shell=True) # TODO parameterize variant "s6"
# TODO parameterize variant "s6"
subprocess.run(f'./yolov5_det -s {wts_file} {engine_file} s6', shell=True, check=True)
return engine_file
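
A rough end-to-end driver sketch for the detector class (illustrative only: in normal operation the learning_loop_node runtime populates model_info and drives init() and evaluate() itself, so calling them directly here is an assumption about how the pieces fit together):

import cv2

from yolov5_detector import Yolov5Detector

detector = Yolov5Detector()
# Assumption: detector.model_info has already been set by the node framework;
# init() reads resolution, categories and model_root_path from it.
detector.init()

image = cv2.imread('example.jpg')  # placeholder test image
detections = detector.evaluate(image)
print(len(detections.box_detections), 'boxes,', len(detections.point_detections), 'points')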
4 changes: 4 additions & 0 deletions trainer/trainer.code-workspace
@@ -11,6 +11,10 @@
"name": "yoloyv5_node",
"path": "../"
}
{
"name": "detector",
"path": "../detector"
}
],
"settings": {
"workbench.colorCustomizations": {
