forked from ML-Cai/LaneDetector
-
Notifications
You must be signed in to change notification settings - Fork 0
/
test_edge_tpu.py
101 lines (81 loc) · 3.89 KB
/
test_edge_tpu.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import sys
import os
import numpy as np
import json
import cv2
import time
# from tflite_runtime.interpreter import Interpreter
# from tflite_runtime.interpreter import load_delegate
import tensorflow as tf
from tqdm import tqdm
import atexit
# --------------------------------------------------------------------------------------------------------------
def preprocess_image(image_path, input_size):
    """
    Load an image from disk and resize it to the model's input size.

    Args:
        image_path: Path to the image file.
        input_size: Target (width, height) tuple passed to cv2.resize.

    Returns:
        The resized image as returned by OpenCV (BGR, uint8).

    Raises:
        FileNotFoundError: If the file is missing or cannot be decoded.
    """
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread signals failure by returning None instead of raising;
        # fail loudly here rather than letting cv2.resize crash cryptically.
        raise FileNotFoundError(f"Could not read image: {image_path}")
    image = cv2.resize(image, input_size)
    # NOTE(review): no normalization happens here; dtype-dependent scaling
    # is performed by the caller (tflite_image_test).
    return image
def del_interpreter(interpreter):
    """
    atexit callback that drops its reference to the TFLite interpreter.

    Registered at program start so the interpreter reference held by the
    callback is released during interpreter shutdown (teardown-order
    workaround for the Edge TPU delegate).
    """
    del interpreter
def tflite_image_test(tflite_model_quant_file, folder_path, with_post_process=True):
    """
    Run the quantized TFLite model on every image in a folder via the Edge
    TPU delegate and report per-frame and average inference timings.

    Args:
        tflite_model_quant_file: Path to the compiled .tflite model file.
        folder_path: Directory containing .png/.jpg/.jpeg test images.
        with_post_process: Currently unused; kept for interface compatibility.
    """
    # Load the model onto the Edge TPU (Windows delegate library).
    interpreter = tf.lite.Interpreter(
        model_path=str(tflite_model_quant_file),
        experimental_delegates=[tf.lite.experimental.load_delegate("edgetpu.dll")])
    # Keep a reference alive until program exit; releasing the interpreter
    # in the wrong order relative to the delegate can crash at shutdown.
    atexit.register(del_interpreter, interpreter)
    # interpreter = Interpreter(model_path=str(tflite_model_quant_file),
    #                           experimental_delegates=[load_delegate('libedgetpu.so.1')])
    interpreter.allocate_tensors()

    # Model input information; shape assumed to be [1, height, width, 3].
    input_details = interpreter.get_input_details()
    input_size = input_details[0]['shape'][1:3]  # (height, width)

    frame_count = 0
    total_inference_time = 0.0
    dirs = os.listdir(folder_path)
    for image_name in tqdm(dirs, desc="Processing images", total=len(dirs)):
        if not image_name.lower().endswith(('.png', '.jpg', '.jpeg')):
            continue
        image_path = os.path.join(folder_path, image_name)
        image = preprocess_image(image_path, (input_size[1], input_size[0]))  # width, height

        # Prepare input data to match the model's expected dtype.
        if input_details[0]['dtype'] == np.uint8:
            # cv2 already yields uint8 pixels in [0, 255]; the previous
            # `np.uint8(image * 255)` overflowed and wrapped values mod 256.
            input_data = image.astype(np.uint8)
        else:
            input_data = image.astype(np.float32) / 255.0

        # Time the inference only (preprocessing excluded); the original
        # code started the timer twice.
        start_time = time.time()
        interpreter.set_tensor(input_details[0]['index'],
                               np.expand_dims(input_data, axis=0))
        interpreter.invoke()
        inference_time = time.time() - start_time
        print(f"Inference time: {inference_time} seconds")
        total_inference_time += inference_time
        frame_count += 1

    if frame_count == 0:
        # Empty folder (or no matching extensions): avoid ZeroDivisionError.
        print("No images processed; nothing to report.")
        return
    avg_fps = frame_count / total_inference_time
    print(f"Average FPS: {avg_fps}")
    print(f"Average inference time per frame: {total_inference_time / frame_count} seconds")
# --------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Read model/run configuration.
    with open('add_ins/cvat_config2.json', 'r') as inf:
        config = json.load(inf)

    net_input_img_size = config["model_info"]["input_image_size"]
    x_anchors = config["model_info"]["x_anchors"]
    y_anchors = config["model_info"]["y_anchors"]
    max_lane_count = config["model_info"]["max_lane_count"]
    checkpoint_path = config["model_info"]["checkpoint_path"]
    tflite_model_name = config["model_info"]["tflite_model_name"]

    if not os.path.exists(tflite_model_name):
        # NOTE(review): the referenced script name "generate_tflite_nidel.py"
        # looks like a typo of "model" — confirm against the repo before
        # changing it, since the message must match the real filename.
        print("tflite model doesn't exist, please run \"generate_tflite_nidel.py\" first to convert tflite model.")
        # Exit non-zero: this is a failure condition, not a clean exit.
        sys.exit(1)

    # Path to the test image set.
    images = "C:/Users/inf21034/source/IMG_ROOTS/1280x960_CVATROOT/test_set/2023-10-02-12-59-12"
    # Alternative (WSL) path:
    # "/mnt/c/Users/inf21034/source/IMG_ROOTS/1280x960_CVATROOT/test_set"
    tflite_image_test(tflite_model_name, images)