# yoloface.py
import argparse
import os
import sys

import cv2
# utils_yolo provides IMG_WIDTH, IMG_HEIGHT, CONF_THRESHOLD, NMS_THRESHOLD,
# get_outputs_names() and post_process() used below.
from utils_yolo import *
#####################################################################
parser = argparse.ArgumentParser()
parser.add_argument('--model-cfg', type=str, default='./cfg/yolov3-face.cfg',
                    help='path to config file')
parser.add_argument('--model-weights', type=str,
                    default='./model-weights/yolov3-wider_16000.weights',
                    help='path to weights of model')
parser.add_argument('--image', type=str, default='',
                    help='path to image file')
parser.add_argument('--video', type=str, default='',
                    help='path to video file')
parser.add_argument('--src', type=int, default=0,
                    help='source index of the camera')
parser.add_argument('--output-dir', type=str, default='outputs/',
                    help='path to the output directory')
args = parser.parse_args()
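# Example invocations (the sample file paths are illustrative, not shipped
# with the repository):
#   python yoloface.py --image ./samples/test.jpg --output-dir outputs/
#   python yoloface.py --video ./samples/subway.mp4
#   python yoloface.py --src 0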
#####################################################################
# print the arguments
print('----- info -----')
print('[i] The config file: ', args.model_cfg)
print('[i] The model weights file: ', args.model_weights)
print('[i] Path to image file: ', args.image)
print('[i] Path to video file: ', args.video)
print('###########################################################\n')
if not os.path.exists(args.output_dir):
    print('==> Creating the {} directory...'.format(args.output_dir))
    os.makedirs(args.output_dir)
else:
    print('==> The {} directory already exists, skipping...'.format(args.output_dir))
net = cv2.dnn.readNetFromDarknet(args.model_cfg, args.model_weights)
# net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
# net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
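# Note: the CUDA backend/target selected below requires an OpenCV build with
# CUDA support; on machines without it, use the OPENCV/CPU pair commented
# out above instead.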
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
def detect_faces(frame, showFrame=True):
    """Run the YOLOv3 face detector on a single frame and return the face boxes."""
    if frame is None:
        print('[i] ==> Done processing!!!')
        return []

    # Create a 4D blob from the frame (pixel values scaled to [0, 1], resized
    # to the network input size, BGR swapped to RGB).
    blob = cv2.dnn.blobFromImage(image=frame, scalefactor=1 / 255,
                                 size=(IMG_WIDTH, IMG_HEIGHT),
                                 mean=[0, 0, 0], swapRB=True, crop=False)

    # Set the input to the network
    net.setInput(blob)

    # Run the forward pass to get the output of the output layers
    outs = net.forward(get_outputs_names(net))

    # Remove the bounding boxes with low confidence
    faces = post_process(frame, outs, CONF_THRESHOLD, NMS_THRESHOLD)

    if showFrame:
        wind_name = 'face detection using YOLOv3'
        cv2.namedWindow(wind_name, cv2.WINDOW_NORMAL)
        cv2.imshow(wind_name, frame)
        key = cv2.waitKey(1)
        if key & 0xff == 27 or key & 0xff == ord('q'):
            print('[i] ==> Interrupted by user!')
            cv2.destroyAllWindows()

    return faces
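
# ---------------------------------------------------------------------------
# Illustrative driver loop -- a minimal sketch, not part of the original file.
# It shows how the parsed --image / --video / --src arguments could feed
# frames into detect_faces(); it assumes post_process() returns a list of
# boxes and uses only standard cv2.VideoCapture calls.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    if args.image:
        cap = cv2.VideoCapture(args.image)
    elif args.video:
        cap = cv2.VideoCapture(args.video)
    else:
        cap = cv2.VideoCapture(args.src)

    while True:
        has_frame, frame = cap.read()
        if not has_frame:
            print('[i] ==> Done processing!!!')
            break
        faces = detect_faces(frame)
        print('[i] # detected faces: {}'.format(len(faces)))
        # ESC or 'q' stops the loop
        if cv2.waitKey(1) & 0xff in (27, ord('q')):
            print('[i] ==> Interrupted by user!')
            break

    cap.release()
    cv2.destroyAllWindows()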