"""Multi-camera fall detection using OpenPifPaf pose estimation."""
import argparse
import copy
import logging

import torch
import torch.multiprocessing as mp

import openpifpaf

from algorithms import alg2_sequential, extract_keypoints_parallel
from default_params import DEFAULT_CONSEC_FRAMES

# CUDA tensors can only be shared across processes started with 'spawn'.
# set_start_method() raises RuntimeError if the start method has already
# been set, which is safe to ignore.
try:
    mp.set_start_method('spawn')
except RuntimeError:
    pass
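

# Pipeline sketch (from the process wiring in begin() below): one
# extract_keypoints_parallel process per camera pushes pose keypoints through
# a Queue to a single alg2_sequential process that runs fall detection.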
class FallDetector:
    """Runs multi-process fall detection on one or two camera streams."""

    def __init__(self, t=DEFAULT_CONSEC_FRAMES):
        # Length of the sliding window of frames used by the detection algorithm.
        self.consecutive_frames = t
        self.args = self.cli()

    def cli(self):
        """Parse command-line arguments and configure OpenPifPaf."""
        parser = argparse.ArgumentParser(
            description=__doc__,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        )
        openpifpaf.network.Factory.cli(parser)
        openpifpaf.decoder.cli(parser)
        parser.add_argument('--resolution', default=0.4, type=float,
                            help=('Resolution prescale factor from 640x480. '
                                  'Will be rounded to multiples of 16.'))
        parser.add_argument('--resize', default=None, type=str,
                            help=('Force input image resize. '
                                  'Example: WIDTHxHEIGHT.'))
        parser.add_argument('--num_cams', default=1, type=int,
                            help='Number of cameras.')
        parser.add_argument('--video', default=None, type=str,
                            help=('Path to the video file. '
                                  'For single-video fall detection (--num_cams=1), '
                                  'save your video as abc.xyz and set --video=abc.xyz. '
                                  'For two-video fall detection (--num_cams=2), '
                                  'save your videos as abc1.xyz and abc2.xyz '
                                  'and set --video=abc.xyz.'))
        parser.add_argument('--debug', default=False, action='store_true',
                            help='Debug messages and autoreload.')
        parser.add_argument('--disable_cuda', default=False, action='store_true',
                            help='Disable CUDA support and run on the CPU.')
        vis_args = parser.add_argument_group('Visualisation')
        vis_args.add_argument('--plot_graph', default=False, action='store_true',
                              help='Plot the graph of features extracted from pose keypoints.')
        vis_args.add_argument('--joints', default=True, action='store_true',
                              help='Draw joint keypoints on the output video.')
        vis_args.add_argument('--skeleton', default=True, action='store_true',
                              help='Draw the skeleton on the output video.')
        vis_args.add_argument('--coco_points', default=False, action='store_true',
                              help='Visualise the COCO points of the human pose.')
        vis_args.add_argument('--save_output', default=False, action='store_true',
                              help=('Save the result in a video file. Output videos are '
                                    'saved in the same directory as the input videos, '
                                    'with "out" prepended to the file name.'))
        vis_args.add_argument('--fps', default=18, type=int,
                              help='FPS for the output video.')
        # vis_args.add_argument('--out-path', default='result.avi', type=str,
        #                       help='Save the output video at the path specified. .avi file format.')
        args = parser.parse_args()
        # Configure logging verbosity from --debug.
        logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

        args.force_complete_pose = True
        args.instance_threshold = 0.2
        args.seed_threshold = 0.5

        # Select the compute device: CUDA when available and not disabled.
        args.device = torch.device('cpu')
        args.pin_memory = False
        if not args.disable_cuda and torch.cuda.is_available():
            args.device = torch.device('cuda')
            args.pin_memory = True

        # Fall back to a default OpenPifPaf checkpoint when none is specified.
        if args.checkpoint is None:
            args.checkpoint = 'shufflenetv2k16w'
        openpifpaf.decoder.configure(args)
        openpifpaf.network.Factory.configure(args)
        return args

    def begin(self):
        """Spawn the per-camera keypoint extractors and the detection process."""
        print('Starting...')
        e = mp.Event()
        queues = [mp.Queue() for _ in range(self.args.num_cams)]
        # Shared frame counters; the two camera processes receive them in
        # opposite order (see extract_keypoints_parallel).
        counter1 = mp.Value('i', 0)
        counter2 = mp.Value('i', 0)
        # Each camera process gets its own copy of the parsed arguments.
        argss = [copy.deepcopy(self.args) for _ in range(self.args.num_cams)]
        if self.args.num_cams == 1:
            if self.args.video is None:
                argss[0].video = 0  # default to the first webcam
            process1 = mp.Process(target=extract_keypoints_parallel,
                                  args=(queues[0], argss[0], counter1, counter2,
                                        self.consecutive_frames, e))
            process1.start()
            if self.args.coco_points:
                # COCO-point visualisation needs no detection process.
                process1.join()
            else:
                process2 = mp.Process(target=alg2_sequential,
                                      args=(queues, argss,
                                            self.consecutive_frames, e))
                process2.start()
                process1.join()
        elif self.args.num_cams == 2:
            if self.args.video is None:
                argss[0].video = 0
                argss[1].video = 1
            else:
                try:
                    # Derive the per-camera file names abc1.xyz and abc2.xyz
                    # from the --video value abc.xyz.
                    vid_name = self.args.video.split('.')
                    argss[0].video = '.'.join(vid_name[:-1]) + '1.' + vid_name[-1]
                    argss[1].video = '.'.join(vid_name[:-1]) + '2.' + vid_name[-1]
                    print('Video 1:', argss[0].video)
                    print('Video 2:', argss[1].video)
                except Exception:
                    print('Error: argument --video not properly set')
                    print('For two-video fall detection (--num_cams=2), save your '
                          'videos as abc1.xyz and abc2.xyz and set --video=abc.xyz')
                    return
            process1_1 = mp.Process(target=extract_keypoints_parallel,
                                    args=(queues[0], argss[0], counter1, counter2,
                                          self.consecutive_frames, e))
            process1_2 = mp.Process(target=extract_keypoints_parallel,
                                    args=(queues[1], argss[1], counter2, counter1,
                                          self.consecutive_frames, e))
            process1_1.start()
            process1_2.start()
            if self.args.coco_points:
                process1_1.join()
                process1_2.join()
            else:
                process2 = mp.Process(target=alg2_sequential,
                                      args=(queues, argss,
                                            self.consecutive_frames, e))
                process2.start()
                process1_1.join()
                process1_2.join()
        else:
            print('More than 2 cameras are currently not supported')
            return

        if not self.args.coco_points:
            process2.join()
        print('Exiting...')


if __name__ == "__main__":
    f = FallDetector()
    f.begin()
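
# Example invocations (file names here are placeholders):
#   python fall_detector.py                               # default webcam
#   python fall_detector.py --video=abc.mp4 --save_output
#   python fall_detector.py --num_cams=2 --video=abc.mp4  # expects abc1.mp4 and abc2.mp4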