
Commit

Add main files
palmpalmpalm authored Jan 30, 2022
1 parent 015694b commit 29d9e38
Showing 63 changed files with 4,707 additions and 0 deletions.
1 change: 1 addition & 0 deletions models/__init__.py
@@ -0,0 +1 @@

Binary file added models/__pycache__/__init__.cpython-36.pyc
Binary file added models/__pycache__/__init__.cpython-37.pyc
Binary file added models/__pycache__/__init__.cpython-38.pyc
Binary file added models/__pycache__/__init__.cpython-39.pyc
Binary file added models/__pycache__/models.cpython-36.pyc
Binary file added models/__pycache__/models.cpython-37.pyc
Binary file added models/__pycache__/models.cpython-38.pyc
Binary file added models/__pycache__/models.cpython-39.pyc
68 changes: 68 additions & 0 deletions models/export.py
@@ -0,0 +1,68 @@
import argparse

import torch

from utils.google_utils import attempt_download

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='./yolov4.pt', help='weights path')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    opt = parser.parse_args()
    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
    print(opt)

    # Input
    img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size(1,3,320,192) iDetection

    # Load PyTorch model
    attempt_download(opt.weights)
    model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
    model.eval()
    model.model[-1].export = True  # set Detect() layer export=True
    y = model(img)  # dry run

    # TorchScript export
    try:
        print('\nStarting TorchScript export with torch %s...' % torch.__version__)
        f = opt.weights.replace('.pt', '.torchscript.pt')  # filename
        ts = torch.jit.trace(model, img)
        ts.save(f)
        print('TorchScript export success, saved as %s' % f)
    except Exception as e:
        print('TorchScript export failure: %s' % e)

    # ONNX export
    try:
        import onnx

        print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
        f = opt.weights.replace('.pt', '.onnx')  # filename
        model.fuse()  # only for ONNX
        torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
                          output_names=['classes', 'boxes'] if y is None else ['output'])

        # Checks
        onnx_model = onnx.load(f)  # load onnx model
        onnx.checker.check_model(onnx_model)  # check onnx model
        print(onnx.helper.printable_graph(onnx_model.graph))  # print a human-readable model
        print('ONNX export success, saved as %s' % f)
    except Exception as e:
        print('ONNX export failure: %s' % e)

    # CoreML export
    try:
        import coremltools as ct

        print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
        # convert model from torchscript and apply pixel scaling as per detect.py
        model = ct.convert(ts, inputs=[ct.ImageType(name='images', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
        f = opt.weights.replace('.pt', '.mlmodel')  # filename
        model.save(f)
        print('CoreML export success, saved as %s' % f)
    except Exception as e:
        print('CoreML export failure: %s' % e)

    # Finish
    print('\nExport complete. Visualize with https://github.com/lutzroeder/netron.')
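For reference, a minimal sketch of consuming the exported files — assuming the default ./yolov4.pt weights name, and assuming onnxruntime is installed (it is not in this commit's requirements.txt):

import numpy as np
import torch

# reload the TorchScript artifact and run a dummy forward pass
ts = torch.jit.load('yolov4.torchscript.pt')
y = ts(torch.zeros((1, 3, 640, 640)))

# run the ONNX artifact through onnxruntime, using the exported input name
import onnxruntime as ort
sess = ort.InferenceSession('yolov4.onnx')
out = sess.run(None, {'images': np.zeros((1, 3, 640, 640), np.float32)})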
761 changes: 761 additions & 0 deletions models/models.py

Large diffs are not rendered by default.

220 changes: 220 additions & 0 deletions object_detection.py
@@ -0,0 +1,220 @@

import time
import cv2
import torch
# import torch.backends.cudnn as cudnn
from numpy import random
import numpy as np

from utils.datasets import letterbox
from utils.general import non_max_suppression, scale_coords, xyxy2xywh
from utils.plots import plot_one_box
from utils.torch_utils import select_device, time_synchronized

from models.models import *


# paths and inference settings
CONFIG_PATH = 'config/'
WEIGHTS_PATH = CONFIG_PATH + 'yolor_p6.pt'
NAMES_PATH = CONFIG_PATH + 'coco.names'
DEVICE = "cpu"
CFG_PATH = CONFIG_PATH + 'yolor_p6.cfg'
IMAGE_SIZE = 1280


class ObjectDetection:

    def __init__(self):
        self.device = select_device(DEVICE)
        # half precision only supported on CUDA
        self.half = self.device.type != 'cpu'

        # load model (append .cuda() here if you want to run on CUDA)
        self.model = Darknet(CFG_PATH, IMAGE_SIZE)
        self.model.load_state_dict(torch.load(WEIGHTS_PATH, map_location=self.device)['model'])
        self.model.to(DEVICE).eval()

        if self.half:
            self.model.half()

        # get class names and a fixed box color
        self.names = self.load_classes(NAMES_PATH)
        self.color = [255, 0, 0]

    def load_classes(self, path):
        # loads a *.names file at 'path'
        with open(path, 'r') as f:
            names = f.read().split('\n')
        # filter removes empty strings (such as the last line)
        return list(filter(None, names))
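    # load_classes splits on newlines and drops empty entries, so the
    # config/coco.names file is expected to hold one class name per line.
    # Sketch (assuming the standard 80-class COCO names file):
    #   ObjectDetection().names[:3]  ->  ['person', 'bicycle', 'car']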

    def detect(self, input_image):

        # Run inference
        t0 = time.time()
        img = torch.zeros((1, 3, IMAGE_SIZE, IMAGE_SIZE), device=self.device)  # init img
        # warm-up run (only needed on GPU)
        _ = self.model(img.half() if self.half else img) if self.device.type != 'cpu' else None

        # Padded resize
        img = letterbox(input_image, new_shape=IMAGE_SIZE, auto_size=64)[0]

        # Convert BGR to RGB, HWC to CHW
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        print("receiving image with shape {}".format(img.shape))

        img = torch.from_numpy(img).to(DEVICE)
        # uint8 to fp16/32
        img = img.half() if self.half else img.float()
        # 0 - 255 to 0.0 - 1.0
        img /= 255.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        print("Running inference ...")
        pred = self.model(img)[0]

        # Apply NMS
        pred = non_max_suppression(
            pred, conf_thres=0.4, iou_thres=0.5, classes=None, agnostic=False)

        print("found {} object(s)".format(sum(len(d) for d in pred if d is not None)))

        # build the log string
        s = ""
        s += '%gx%g ' % img.shape[2:]

        # Process detections
        for i, det in enumerate(pred):
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(
                    img.shape[2:], det[:, :4], input_image.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, self.names[int(c)])  # add to string

                # Draw results on the input image
                for *xyxy, conf, cls in det:
                    label = '%s %.2f' % (self.names[int(cls)], conf)
                    plot_one_box(xyxy, input_image, label=label,
                                 color=self.color, line_thickness=3)

        # Print time (inference + NMS)
        print('{}Done. {:.3} s'.format(s, time.time() - t0))

        return input_image
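    # Note: detect() draws boxes on input_image in place and returns the same
    # array; use get_bbox() below if you only need the coordinates.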

    def get_bbox(self, input_image):

        # object bbox list; each entry is [x1, y1, x2, y2, class_name]
        bbox_list = []

        # Run inference
        t0 = time.time()
        img = torch.zeros((1, 3, IMAGE_SIZE, IMAGE_SIZE), device=self.device)  # init img
        # warm-up run (only needed on GPU)
        _ = self.model(img.half() if self.half else img) if self.device.type != 'cpu' else None

        # Padded resize
        img = letterbox(input_image, new_shape=IMAGE_SIZE, auto_size=64)[0]

        # Convert BGR to RGB, HWC to CHW
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        print("receiving image with shape {}".format(img.shape))

        img = torch.from_numpy(img).to(DEVICE)
        # uint8 to fp16/32
        img = img.half() if self.half else img.float()
        # 0 - 255 to 0.0 - 1.0
        img /= 255.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        print("Running inference ...")
        pred = self.model(img)[0]

        # Apply NMS
        pred = non_max_suppression(
            pred, conf_thres=0.4, iou_thres=0.5, classes=None, agnostic=False)

        print("found {} object(s)".format(sum(len(d) for d in pred if d is not None)))

        # build the log string
        s = ""
        s += '%gx%g ' % img.shape[2:]

        # Process detections
        for i, det in enumerate(pred):
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(
                    img.shape[2:], det[:, :4], input_image.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, self.names[int(c)])  # add to string

                # Collect results
                for *xyxy, conf, cls in det:
                    temp = []
                    for ts in xyxy:
                        temp.append(ts.item())
                    bbox = list(np.array(temp).astype(int))
                    bbox.append(self.names[int(cls)])
                    bbox_list.append(bbox)

        # Print time (inference + NMS)
        print('{}Done. {:.3} s'.format(s, time.time() - t0))

        return bbox_list

    # format the bbox list for MediaPipe-style consumers:
    # [x1, y1, x2, y2, name] -> [name, (x, y, w, h), False]
    def format_bbox(self, bbox_list):
        format_bboxs = []
        for bbox in bbox_list:
            format_bboxs.append([bbox[4], tuple([bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]), False])
        return format_bboxs

def main():
    # create model
    OD = ObjectDetection()

    # load our input image and grab its spatial dimensions
    img = cv2.imread("./test1.jpg")
    cv2.imshow('test1', img)

    # preprocess image
    npimg = np.array(img)
    image = npimg.copy()
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    with torch.no_grad():
        # get the annotated image
        res = OD.detect(image)

        # get bboxes of the objects in the image
        bboxs = OD.get_bbox(image)

    # show output
    image = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
    cv2.imshow('yolor_test1', image)
    cv2.waitKey(0)


if __name__ == '__main__':
    main()
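For reference, a minimal sketch wiring get_bbox into format_bbox — the box values below are made-up illustrations, and the import assumes this file is importable as object_detection:

import cv2
import torch

from object_detection import ObjectDetection

od = ObjectDetection()
frame = cv2.imread('./test1.jpg')
with torch.no_grad():
    boxes = od.get_bbox(frame)       # e.g. [[100, 50, 300, 200, 'person']]
tracked = od.format_bbox(boxes)      # -> [['person', (100, 50, 200, 150), False]]
                                     # (x, y, w, h) with w = x2 - x1, h = y2 - y1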
28 changes: 28 additions & 0 deletions requirements.txt
@@ -0,0 +1,28 @@
# pip install -qr requirements.txt

# base ----------------------------------------
Cython
matplotlib>=3.2.2
numpy>=1.18.5
opencv-python>=4.1.2
Pillow
PyYAML>=5.3.1
scipy>=1.4.1
tensorboard>=1.5
tqdm>=4.41.0

# logging -------------------------------------
# wandb

# plotting ------------------------------------
seaborn>=0.11.0
pandas

# export --------------------------------------
# coremltools>=4.1
# onnx>=1.8.1
# scikit-learn==0.19.2 # for coreml quantization

# extras --------------------------------------
thop # FLOPs computation
pycocotools==2.0.0 # COCO mAP
Binary file added test1.jpg
1 change: 1 addition & 0 deletions utils/__init__.py
@@ -0,0 +1 @@

Binary file added utils/__pycache__/__init__.cpython-36.pyc
Binary file added utils/__pycache__/__init__.cpython-37.pyc
Binary file added utils/__pycache__/__init__.cpython-38.pyc
Binary file added utils/__pycache__/__init__.cpython-39.pyc
Binary file added utils/__pycache__/datasets.cpython-36.pyc
Binary file added utils/__pycache__/datasets.cpython-37.pyc
Binary file added utils/__pycache__/datasets.cpython-38.pyc
Binary file added utils/__pycache__/datasets.cpython-39.pyc
Binary file added utils/__pycache__/general.cpython-36.pyc
Binary file added utils/__pycache__/general.cpython-37.pyc
Binary file added utils/__pycache__/general.cpython-38.pyc
Binary file added utils/__pycache__/general.cpython-39.pyc
Binary file added utils/__pycache__/google_utils.cpython-36.pyc
Binary file added utils/__pycache__/google_utils.cpython-37.pyc
Binary file added utils/__pycache__/google_utils.cpython-38.pyc
Binary file added utils/__pycache__/google_utils.cpython-39.pyc
Binary file added utils/__pycache__/layers.cpython-36.pyc
Binary file added utils/__pycache__/layers.cpython-37.pyc
Binary file added utils/__pycache__/layers.cpython-38.pyc
Binary file added utils/__pycache__/layers.cpython-39.pyc
Binary file added utils/__pycache__/loss.cpython-38.pyc
Binary file added utils/__pycache__/metrics.cpython-36.pyc
Binary file added utils/__pycache__/metrics.cpython-37.pyc
Binary file added utils/__pycache__/metrics.cpython-38.pyc
Binary file added utils/__pycache__/metrics.cpython-39.pyc
Binary file added utils/__pycache__/parse_config.cpython-36.pyc
Binary file added utils/__pycache__/parse_config.cpython-37.pyc
Binary file added utils/__pycache__/parse_config.cpython-38.pyc
Binary file added utils/__pycache__/parse_config.cpython-39.pyc
Binary file added utils/__pycache__/plots.cpython-36.pyc
Binary file added utils/__pycache__/plots.cpython-37.pyc
Binary file added utils/__pycache__/plots.cpython-38.pyc
Binary file added utils/__pycache__/plots.cpython-39.pyc
Binary file added utils/__pycache__/torch_utils.cpython-36.pyc
Binary file added utils/__pycache__/torch_utils.cpython-37.pyc
Binary file added utils/__pycache__/torch_utils.cpython-38.pyc
Binary file added utils/__pycache__/torch_utils.cpython-39.pyc
72 changes: 72 additions & 0 deletions utils/activations.py
@@ -0,0 +1,72 @@
# Activation functions

import torch
import torch.nn as nn
import torch.nn.functional as F


# Swish https://arxiv.org/pdf/1905.02244.pdf ---------------------------------------------------------------------------
class Swish(nn.Module):
    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)


class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
    @staticmethod
    def forward(x):
        # return x * F.hardsigmoid(x)  # for torchscript and CoreML
        return x * F.hardtanh(x + 3, 0., 6.) / 6.  # for torchscript, CoreML and ONNX


class MemoryEfficientSwish(nn.Module):
    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x * torch.sigmoid(x)

        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            return grad_output * (sx * (1 + x * (1 - sx)))

    def forward(self, x):
        return self.F.apply(x)


# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
class Mish(nn.Module):
    @staticmethod
    def forward(x):
        return x * F.softplus(x).tanh()


class MemoryEfficientMish(nn.Module):
    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))

        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            fx = F.softplus(x).tanh()
            return grad_output * (fx + x * sx * (1 - fx * fx))

    def forward(self, x):
        return self.F.apply(x)


# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
class FReLU(nn.Module):
    def __init__(self, c1, k=3):  # ch_in, kernel
        super().__init__()
        self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1)
        self.bn = nn.BatchNorm2d(c1)

    def forward(self, x):
        return torch.max(x, self.bn(self.conv(x)))
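For reference, a short self-test sketch for these modules — assuming the file is importable as utils.activations; the checks themselves are standard PyTorch (nn.Hardswish requires torch >= 1.6):

import torch
import torch.nn as nn

from utils.activations import Hardswish, MemoryEfficientMish, FReLU

# the export-friendly Hardswish matches the built-in numerically
x = torch.linspace(-6., 6., 121)
assert torch.allclose(Hardswish()(x), nn.Hardswish()(x), atol=1e-6)

# the hand-written Mish backward agrees with finite differences
xd = torch.randn(8, dtype=torch.double, requires_grad=True)
assert torch.autograd.gradcheck(MemoryEfficientMish.F.apply, (xd,))

# FReLU is depthwise, so c1 must equal the input channel count
m = FReLU(c1=16)
assert m(torch.randn(2, 16, 32, 32)).shape == (2, 16, 32, 32)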
