
Commit ae37b2d
Fix ONNX inference code (ultralytics#1928)
SahilChachra authored Apr 11, 2022
1 parent c2c113e commit ae37b2d
Showing 1 changed file with 4 additions and 2 deletions.
models/common.py (6 changes: 4 additions & 2 deletions)
@@ -314,9 +314,11 @@ def __init__(self, weights='yolov3.pt', device=None, dnn=True):
             net = cv2.dnn.readNetFromONNX(w)
         elif onnx:  # ONNX Runtime
             LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
-            check_requirements(('onnx', 'onnxruntime-gpu' if torch.has_cuda else 'onnxruntime'))
+            cuda = torch.cuda.is_available()
+            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
             import onnxruntime
-            session = onnxruntime.InferenceSession(w, None)
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
+            session = onnxruntime.InferenceSession(w, providers=providers)
         else:  # TensorFlow model (TFLite, pb, saved_model)
             import tensorflow as tf
             if pb:  # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
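The change has two parts: torch.has_cuda only reports whether PyTorch was built with CUDA support, whereas torch.cuda.is_available() checks whether a GPU is actually usable at runtime; and the session is now created with an explicit providers list instead of the old positional None. Below is a minimal standalone sketch of the same pattern, not code from this repository; 'model.onnx' and the (1, 3, 640, 640) input shape are placeholder assumptions (the default YOLOv3 export shape).

    # Minimal sketch: pick ONNX Runtime execution providers by CUDA availability.
    # 'model.onnx' is a placeholder path; (1, 3, 640, 640) assumes a default export.
    import numpy as np
    import onnxruntime
    import torch

    cuda = torch.cuda.is_available()
    # Keep CPUExecutionProvider in the list so the session still loads without a GPU.
    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
    session = onnxruntime.InferenceSession('model.onnx', providers=providers)

    # Run one forward pass with a dummy image tensor.
    dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)
    input_name = session.get_inputs()[0].name
    outputs = session.run(None, {input_name: dummy})
    print(session.get_providers(), [o.shape for o in outputs])

Listing both providers matters: onnxruntime tries them in order, so the same code runs on GPU machines and falls back to CPU on machines without one.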
