# USAGE
# python face_parts_detection.py --shape-predictor shape_predictor_68_face_landmarks.dat --image images/example_01.jpg
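# NOTE: the pre-trained 68-point model can be downloaded from dlib
# (http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2) and must be
# decompressed before use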
# import the necessary packages
from imutils import face_utils
import numpy as np
import argparse
import imutils
import dlib
import cv2
from matplotlib import pyplot as plt  # used only by the commented-out matplotlib saving code below
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
help="path to facial landmark predictor")
ap.add_argument("-i", "--image", required=True,
help="path to input image")
args = vars(ap.parse_args())
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# load the input image, resize it, and convert it to grayscale
image = cv2.imread(args["image"])
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale image
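# the second argument (1) is the number of times dlib upsamples the image
# before running the detector; upsampling helps find smaller faces at the
# cost of speed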
rects = detector(gray, 1)
# loop over the face detections
for (n, rect) in enumerate(rects):
    # determine the facial landmarks for the face region, then
    # convert the landmark (x, y)-coordinates to a NumPy array
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)
    # loop over the face parts individually
    for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
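        # FACIAL_LANDMARKS_IDXS is an ordered mapping from a part name (e.g.
        # "mouth", "right_eye", "nose", "jaw") to the (start, end) slice of the
        # 68 landmark indices belonging to that part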
        # clone the original image so we can draw on it, then
        # display the name of the face part on the image
        clone = image.copy()
        cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
            0.7, (0, 0, 255), 2)
        # loop over the subset of facial landmarks, drawing the
        # specific face part
        for (x, y) in shape[i:j]:
            cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)
        # extract the ROI of the face region as a separate image
        (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
        roi = image[y:y + h, x:x + w]
        roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)
        # show the particular face part
        cv2.imshow("ROI", roi)
        cv2.imshow("Image", clone)
        cv2.waitKey(0)
    # visualize all facial landmarks with a transparent overlay
    output = face_utils.visualize_facial_landmarks(image, shape)
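    # visualize_facial_landmarks returns a copy of the image with each face
    # part drawn as a semi-transparent colored region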
cv2.imshow("Output", output)
cv2.waitKey(0)
# Alternative (commented out): save the results with matplotlib to the
# "results/" directory instead of displaying them with cv2.imshow. The first
# block belongs inside the inner face-part loop, the second inside the outer
# face loop (n is the face index from enumerate above).
#
# # show the particular face part
# plt.imshow(roi)
# plt.xticks([]); plt.yticks([])
# plt.imshow(clone)
# plt.xticks([]); plt.yticks([])
# plt.show()
# fname = "results/" + "face" + str(n) + "_" + name + ".png"
# plt.savefig(fname)
#
# # visualize all facial landmarks with a transparent overlay
# output = face_utils.visualize_facial_landmarks(image, shape)
# plt.imshow(output)
# plt.xticks([]); plt.yticks([])
# fname = "results/" + "face" + "_" + "overlay.png"
# plt.savefig(fname)
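# NOTE: the matplotlib variant above assumes a "results/" directory already
# exists; if it is re-enabled, a (hypothetical) guard near the top of the
# script would create it, e.g.:
#   import os
#   os.makedirs("results", exist_ok=True)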