Add ONNX support for models.py
ersheng-ai committed Jun 15, 2020
1 parent 4d298fa commit da35f12
Showing 5 changed files with 280 additions and 26 deletions.
56 changes: 44 additions & 12 deletions README.md
@@ -16,16 +16,16 @@ A minimal PyTorch implementation of YOLOv4.

```
├── README.md
-├── dataset.py            dataset
-├── demo.py               demo to run pytorch --> tool/darknet2pytorch
-├── darknet2onnx.py       tool to convert into onnx --> tool/darknet2pytorch
-├── demo_onnx.py          demo to run the converted onnx model
-├── models.py             model for pytorch
-├── train.py              train models.py
-├── cfg.py                cfg.py for train
-├── cfg                   cfg --> darknet2pytorch
+├── dataset.py            dataset
+├── demo.py               demo to run pytorch --> tool/darknet2pytorch
+├── demo_darknet2onnx.py  tool to convert into onnx --> tool/darknet2pytorch
+├── demo_pytorch2onnx.py  tool to convert into onnx
+├── models.py             model for pytorch
+├── train.py              train models.py
+├── cfg.py                cfg.py for train
+├── cfg                   cfg --> darknet2pytorch
├── data
├── weight                --> darknet2pytorch
├── tool
│   ├── camera.py           a demo camera
│   ├── coco_annotation.py  coco dataset generator
@@ -93,6 +93,8 @@ python models.py <num_classes> <weightfile> <imgfile> <namefile(optional)>

# 3. Darknet2ONNX (Evolving)

- **This script converts the official pretrained darknet model into ONNX**

- **Recommended PyTorch version: 1.4.0**

- **Install onnxruntime**
@@ -104,15 +106,45 @@ python models.py <num_classes> <weightfile> <imgfile> <namefile(optional)>
- **Run python script to generate onnx model and run the demo**

```sh
-python demo_onnx.py <cfgFile> <weightFile> <imageFile> <batchSize>
+python demo_darknet2onnx.py <cfgFile> <weightFile> <imageFile> <batchSize>
```

This script generates two ONNX models:

- One for running the demo (batch_size=1)
- One with the batch size you specified (batch_size=batchSize)
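For example (the cfg, weights, and image names below are placeholders for your own files):

```sh
python demo_darknet2onnx.py cfg/yolov4.cfg yolov4.weights dog.jpg 8
```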

-# 4. ONNX2TensorRT (Evolving)
+# 4. Pytorch2ONNX (Evolving)

- **This script converts your trained PyTorch model into ONNX**

- **Recommended PyTorch version: 1.4.0**

- **Install onnxruntime**

```sh
pip install onnxruntime
```

- **Run python script to generate onnx model and run the demo**

```sh
python demo_pytorch2onnx.py <weight_file> <image_path> <batch_size> <n_classes> <IN_IMAGE_H> <IN_IMAGE_W>
```

For example:

```sh
python demo_pytorch2onnx.py yolov4.pth dog.jpg 8 80 416 416
```

This script generates two ONNX models:

- One for running the demo (batch_size=1)
- One with the batch size you specified (batch_size=batch_size)
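With the example above, the two generated files would be `yolov4_1_3_416_416.onnx` (for the demo) and `yolov4_8_3_416_416.onnx` (the requested batch size), following the `yolov4_<batch_size>_3_<H>_<W>.onnx` naming used by `transform_to_onnx`.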


# 5. ONNX2TensorRT (Evolving)

- **Recommended TensorRT versions: 7.0, 7.1**

Expand All @@ -132,7 +164,7 @@ python models.py <num_classes> <weightfile> <imgfile> <namefile(optional)>
- Note2: Extra NMS operations are needed for the TensorRT output. This demo uses TianXiaomo's NMS code from `tool/utils.py`.
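For reference, a typical conversion command looks like the following sketch, using standard trtexec flags (the ONNX and engine file names are placeholders):

```sh
trtexec --onnx=yolov4_1_3_416_416.onnx --explicitBatch --saveEngine=yolov4.trt
```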
-# 5. ONNX2Tensorflow
+# 6. ONNX2Tensorflow
- **First: Conversion to ONNX**
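A minimal sketch of that conversion, assuming the standard onnx-tensorflow backend (`onnx-tf`) rather than any repository-specific tooling:

```python
# Hypothetical sketch: convert an exported ONNX file to a TensorFlow graph
# with the onnx-tensorflow backend (pip install onnx-tf).
import onnx
from onnx_tf.backend import prepare

onnx_model = onnx.load("yolov4_1_3_416_416.onnx")  # file name is a placeholder
tf_rep = prepare(onnx_model)          # wrap the ONNX graph in a TF backend rep
tf_rep.export_graph("yolov4.pb")      # write the TensorFlow graph to disk
```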
92 changes: 92 additions & 0 deletions demo_darknet2onnx.py
@@ -0,0 +1,92 @@
import sys

import cv2
import numpy as np
import onnxruntime

from tool.utils import *
from tool.darknet2onnx import *


def main(cfg_file, weight_file, image_path, batch_size):
    # Export an ONNX model with the requested batch size
    transform_to_onnx(cfg_file, weight_file, batch_size)
    # Export a second ONNX model with batch size 1 for the demo
    onnx_path_demo = transform_to_onnx(cfg_file, weight_file, 1)

    session = onnxruntime.InferenceSession(onnx_path_demo)
    print("The model expects input shape: ", session.get_inputs()[0].shape)

    image_src = cv2.imread(image_path)
    detect(session, image_src)


def detect(session, image_src):
    IN_IMAGE_H = session.get_inputs()[0].shape[2]
    IN_IMAGE_W = session.get_inputs()[0].shape[3]

    # Input: resize, BGR -> RGB, HWC -> NCHW, scale to [0, 1]
    resized = cv2.resize(image_src, (IN_IMAGE_W, IN_IMAGE_H), interpolation=cv2.INTER_LINEAR)
    img_in = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32)
    img_in = np.expand_dims(img_in, axis=0)
    img_in /= 255.0
    print("Shape of the network input: ", img_in.shape)

    # Compute
    input_name = session.get_inputs()[0].name
    outputs = session.run(None, {input_name: img_in})

    # Regroup the six flat outputs into [boxes, confidences] pairs,
    # one pair per YOLO output scale
    outputs = [
        [outputs[0], outputs[1]],
        [outputs[2], outputs[3]],
        [outputs[4], outputs[5]]
    ]

    num_classes = 80
    boxes = post_processing(img_in, 0.5, num_classes, 0.4, outputs)

    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/names'

    class_names = load_class_names(namesfile)
    plot_boxes_cv2(image_src, boxes, savename='predictions_onnx.jpg', class_names=class_names)


if __name__ == '__main__':
    print("Converting to onnx and running demo ...")
    if len(sys.argv) == 5:
        cfg_file = sys.argv[1]
        weight_file = sys.argv[2]
        image_path = sys.argv[3]
        batch_size = int(sys.argv[4])
        main(cfg_file, weight_file, image_path, batch_size)
    else:
        print('Please run this way:\n')
        print('  python demo_darknet2onnx.py <cfgFile> <weightFile> <imageFile> <batchSize>')
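The `transform_to_onnx` helper imported from `tool/darknet2onnx` is not shown above. A minimal sketch of what such a helper could look like, assuming the `Darknet` class from `tool/darknet2pytorch` with a `load_weights()` method and `width`/`height` attributes parsed from the cfg:

```python
# Hedged sketch of tool/darknet2onnx.transform_to_onnx (not part of the
# diff shown here). Assumes tool/darknet2pytorch.Darknet.
import torch
from tool.darknet2pytorch import Darknet


def transform_to_onnx(cfg_file, weight_file, batch_size):
    model = Darknet(cfg_file)          # build the network from the darknet cfg
    model.load_weights(weight_file)    # load the pretrained darknet weights

    # Dummy input used to trace the network during export
    x = torch.randn((batch_size, 3, model.height, model.width))

    onnx_file_name = "yolov4_{}_3_{}_{}.onnx".format(batch_size, model.height, model.width)
    torch.onnx.export(model,
                      x,
                      onnx_file_name,
                      export_params=True,
                      opset_version=11,
                      do_constant_folding=True)
    return onnx_file_name
```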
72 changes: 72 additions & 0 deletions demo_pytorch2onnx.py
@@ -0,0 +1,72 @@
import sys

import cv2
import onnxruntime
import torch

from models import Yolov4
from demo_darknet2onnx import detect


def transform_to_onnx(weight_file, batch_size, n_classes, IN_IMAGE_H, IN_IMAGE_W):
    model = Yolov4(n_classes=n_classes, inference=True)

    # Load the trained weights on CPU; the export below also runs on CPU
    pretrained_dict = torch.load(weight_file, map_location=torch.device('cpu'))
    model.load_state_dict(pretrained_dict)

    # Dummy input used to trace the model during export
    x = torch.randn((batch_size, 3, IN_IMAGE_H, IN_IMAGE_W))

    onnx_file_name = "yolov4_{}_3_{}_{}.onnx".format(batch_size, IN_IMAGE_H, IN_IMAGE_W)

    # Export the model
    print('Export the onnx model ...')
    torch.onnx.export(model,
                      x,
                      onnx_file_name,
                      export_params=True,
                      opset_version=11,
                      do_constant_folding=True,
                      dynamic_axes=None)

    print('Onnx model exporting done')
    return onnx_file_name


def main(weight_file, image_path, batch_size, n_classes, IN_IMAGE_H, IN_IMAGE_W):
    # Export an ONNX model with the requested batch size
    transform_to_onnx(weight_file, batch_size, n_classes, IN_IMAGE_H, IN_IMAGE_W)
    # Export a second ONNX model with batch size 1 for the demo
    onnx_path_demo = transform_to_onnx(weight_file, 1, n_classes, IN_IMAGE_H, IN_IMAGE_W)

    session = onnxruntime.InferenceSession(onnx_path_demo)
    print("The model expects input shape: ", session.get_inputs()[0].shape)

    image_src = cv2.imread(image_path)
    detect(session, image_src)


if __name__ == '__main__':
    print("Converting to onnx and running demo ...")
    if len(sys.argv) == 7:
        weight_file = sys.argv[1]
        image_path = sys.argv[2]
        batch_size = int(sys.argv[3])
        n_classes = int(sys.argv[4])
        IN_IMAGE_H = int(sys.argv[5])
        IN_IMAGE_W = int(sys.argv[6])
        main(weight_file, image_path, batch_size, n_classes, IN_IMAGE_H, IN_IMAGE_W)
    else:
        print('Please run this way:\n')
        print('  python demo_pytorch2onnx.py <weight_file> <image_path> <batch_size> <n_classes> <IN_IMAGE_H> <IN_IMAGE_W>')
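Either way, the exported file can be sanity-checked independently of the demo. A quick check along these lines (the file name assumes batch_size=1 with a 416x416 input):

```python
# Validate the exported graph and run a dummy forward pass through onnxruntime.
import numpy as np
import onnx
import onnxruntime

onnx_model = onnx.load("yolov4_1_3_416_416.onnx")
onnx.checker.check_model(onnx_model)   # raises if the graph is malformed

session = onnxruntime.InferenceSession("yolov4_1_3_416_416.onnx")
dummy = np.random.randn(1, 3, 416, 416).astype(np.float32)
outputs = session.run(None, {session.get_inputs()[0].name: dummy})
print([o.shape for o in outputs])
```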
39 changes: 25 additions & 14 deletions models.py
@@ -17,10 +17,19 @@ class Upsample(nn.Module):
    def __init__(self):
        super(Upsample, self).__init__()

-    def forward(self, x, target_size):
+    def forward(self, x, target_size, inference=False):
        assert (x.data.dim() == 4)
-        _, _, H, W = target_size
-        return F.interpolate(x, size=(H, W), mode='nearest')
+        _, _, tH, tW = target_size
+
+        if inference:
+            B = x.data.size(0)
+            C = x.data.size(1)
+            H = x.data.size(2)
+            W = x.data.size(3)
+
+            return x.view(B, C, H, 1, W, 1).expand(B, C, H, tH // H, W, tW // W).contiguous().view(B, C, tH, tW)
+        else:
+            return F.interpolate(x, size=(tH, tW), mode='nearest')


class Conv_Bn_Activation(nn.Module):
@@ -224,8 +233,10 @@ def forward(self, input):


class Neck(nn.Module):
-    def __init__(self):
+    def __init__(self, inference=False):
        super().__init__()
+        self.inference = inference
+
        self.conv1 = Conv_Bn_Activation(1024, 512, 1, 1, 'leaky')
        self.conv2 = Conv_Bn_Activation(512, 1024, 3, 1, 'leaky')
        self.conv3 = Conv_Bn_Activation(1024, 512, 1, 1, 'leaky')
@@ -262,7 +273,7 @@ def __init__(self):
        self.conv19 = Conv_Bn_Activation(128, 256, 3, 1, 'leaky')
        self.conv20 = Conv_Bn_Activation(256, 128, 1, 1, 'leaky')

-    def forward(self, input, downsample4, downsample3):
+    def forward(self, input, downsample4, downsample3, inference=False):
        x1 = self.conv1(input)
        x2 = self.conv2(x1)
        x3 = self.conv3(x2)
@@ -277,7 +288,7 @@ def forward(self, input, downsample4, downsample3):
        x6 = self.conv6(x5)
        x7 = self.conv7(x6)
        # UP
-        up = self.upsample1(x7, downsample4.size())
+        up = self.upsample1(x7, downsample4.size(), self.inference)
        # R 85
        x8 = self.conv8(downsample4)
        # R -1 -3
@@ -291,7 +302,7 @@ def forward(self, input, downsample4, downsample3):
        x14 = self.conv14(x13)

        # UP
-        up = self.upsample2(x14, downsample3.size())
+        up = self.upsample2(x14, downsample3.size(), self.inference)
        # R 54
        x15 = self.conv15(downsample3)
        # R -1 -3
@@ -306,9 +317,9 @@ def forward(self, input, downsample4, downsample3):


class Yolov4Head(nn.Module):
-    def __init__(self, output_ch, yolo_layer_included=False):
+    def __init__(self, output_ch, inference=False):
        super().__init__()
-        self.yolo_layer_included = yolo_layer_included
+        self.inference = inference

        self.conv1 = Conv_Bn_Activation(128, 256, 3, 1, 'leaky')
        self.conv2 = Conv_Bn_Activation(256, output_ch, 1, 1, 'linear', bn=False, bias=True)
@@ -380,7 +391,7 @@ def forward(self, input1, input2, input3):
        x17 = self.conv17(x16)
        x18 = self.conv18(x17)

-        if self.yolo_layer_included:
+        if self.inference:
            y1 = self.yolo1(x2)
            y2 = self.yolo2(x10)
            y3 = self.yolo3(x18)
@@ -393,7 +404,7 @@


class Yolov4(nn.Module):
-    def __init__(self, yolov4conv137weight=None, n_classes=80, yolo_layer_included=False):
+    def __init__(self, yolov4conv137weight=None, n_classes=80, inference=False):
        super().__init__()

        output_ch = (4 + 1 + n_classes) * 3
@@ -405,7 +416,7 @@ def __init__(self, yolov4conv137weight=None, n_classes=80, yolo_layer_included=False):
        self.down4 = DownSample4()
        self.down5 = DownSample5()
        # neck
-        self.neek = Neck()
+        self.neek = Neck(inference)
        # yolov4conv137
        if yolov4conv137weight:
            _model = nn.Sequential(self.down1, self.down2, self.down3, self.down4, self.down5, self.neek)
@@ -419,7 +430,7 @@ def __init__(self, yolov4conv137weight=None, n_classes=80, yolo_layer_included=False):
            _model.load_state_dict(model_dict)

        # head
-        self.head = Yolov4Head(output_ch, yolo_layer_included)
+        self.head = Yolov4Head(output_ch, inference)


    def forward(self, input):
@@ -453,7 +464,7 @@ def forward(self, input):
        print('Usage: ')
        print('  python models.py num_classes weightfile imgfile namefile')

-    model = Yolov4(n_classes=n_classes, yolo_layer_included=True)
+    model = Yolov4(n_classes=n_classes, inference=True)

    pretrained_dict = torch.load(weightfile, map_location=torch.device('cuda'))
    model.load_state_dict(pretrained_dict)
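A note on the `Upsample` change above: in inference mode the view/expand chain reproduces nearest-neighbour upsampling without `F.interpolate`, so the exported graph avoids an ONNX Resize/Upsample node, a common source of export and runtime incompatibilities. A standalone check of the equivalence (assuming the target size is an integer multiple of the input size, as with YOLOv4's 2x upsamples):

```python
# Check that the view/expand upsample used in Upsample.forward matches
# F.interpolate(mode='nearest') for integer scale factors.
import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 13, 13)
B, C, H, W = x.shape
tH, tW = 2 * H, 2 * W

up = x.view(B, C, H, 1, W, 1).expand(B, C, H, tH // H, W, tW // W)
up = up.contiguous().view(B, C, tH, tW)

assert torch.equal(up, F.interpolate(x, size=(tH, tW), mode='nearest'))
print("view/expand upsample matches F.interpolate(mode='nearest')")
```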