forked from Er-AI-GK/oneAPI-Sign-Language-Gesture-Translator
data_collection.py
import os
import numpy as np
import cv2
import mediapipe as mp
from itertools import product
from my_functions import *
import keyboard

# Gestures to record, sequences per gesture, and frames per sequence.
actions = np.array(['a', 'b'])
sequences = 30
frames = 10

# Root folder where the collected keypoint arrays are stored.
PATH = os.path.join('data')
# Create one folder per (action, sequence) pair, e.g. data/a/0, data/a/1, ...
for action, sequence in product(actions, range(sequences)):
    os.makedirs(os.path.join(PATH, action, str(sequence)), exist_ok=True)
# Open the default webcam.
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Cannot access camera.")
    exit()
# Collect `frames` frames for every (action, sequence) pair using MediaPipe Holistic.
with mp.solutions.holistic.Holistic(min_detection_confidence=0.75, min_tracking_confidence=0.75) as holistic:
    for action, sequence, frame in product(actions, range(sequences), range(frames)):
        if frame == 0:
            # Pause before the first frame of each sequence until Space is pressed.
            while True:
                if keyboard.is_pressed(' '):
                    break
                _, image = cap.read()
                results = image_process(image, holistic)
                draw_landmarks(image, results)
                cv2.putText(image, 'Recording data for the "{}". Sequence number {}.'.format(action, sequence),
                            (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
                cv2.putText(image, 'Pause.', (20, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
                cv2.putText(image, 'Press "Space" when you are ready.', (20, 450), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
                cv2.imshow('Camera', image)
                cv2.waitKey(1)

                # Stop if the preview window has been closed.
                if cv2.getWindowProperty('Camera', cv2.WND_PROP_VISIBLE) < 1:
                    break
        else:
            _, image = cap.read()
            results = image_process(image, holistic)
            draw_landmarks(image, results)
            cv2.putText(image, 'Recording data for the "{}". Sequence number {}.'.format(action, sequence),
                        (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
            cv2.imshow('Camera', image)
            cv2.waitKey(1)

            # Stop if the preview window has been closed.
            if cv2.getWindowProperty('Camera', cv2.WND_PROP_VISIBLE) < 1:
                break

        # Extract the landmark keypoints for this frame and save them as a .npy file.
        keypoints = keypoint_extraction(results)
        frame_path = os.path.join(PATH, action, str(sequence), str(frame))
        np.save(frame_path, keypoints)

cap.release()
cv2.destroyAllWindows()
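
The script relies on three helpers pulled in by the wildcard import from `my_functions` — `image_process`, `draw_landmarks`, and `keypoint_extraction` — which are not defined in this file. Below is a minimal sketch of what they presumably look like, inferred only from how they are called above; the actual module in the repository may differ.

```python
# Hypothetical sketch of the my_functions helpers (assumptions, not the repo's code).
import cv2
import numpy as np
import mediapipe as mp

def image_process(image, model):
    # Assumed: convert the BGR frame to RGB, run the MediaPipe Holistic model,
    # and return its results object.
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_rgb.flags.writeable = False  # hint to MediaPipe that the buffer is read-only
    return model.process(image_rgb)

def draw_landmarks(image, results):
    # Assumed: overlay the detected hand landmarks on the frame in place.
    mp.solutions.drawing_utils.draw_landmarks(
        image, results.left_hand_landmarks, mp.solutions.holistic.HAND_CONNECTIONS)
    mp.solutions.drawing_utils.draw_landmarks(
        image, results.right_hand_landmarks, mp.solutions.holistic.HAND_CONNECTIONS)

def keypoint_extraction(results):
    # Assumed: flatten left- and right-hand landmark coordinates into one 1-D
    # array (zeros when a hand is not detected) so each frame saves as a
    # fixed-length feature vector via np.save.
    lh = (np.array([[p.x, p.y, p.z] for p in results.left_hand_landmarks.landmark]).flatten()
          if results.left_hand_landmarks else np.zeros(21 * 3))
    rh = (np.array([[p.x, p.y, p.z] for p in results.right_hand_landmarks.landmark]).flatten()
          if results.right_hand_landmarks else np.zeros(21 * 3))
    return np.concatenate([lh, rh])
```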