detect_single_threaded.py
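"""Single-threaded hand detection on a camera or video stream.

Loads a frozen TensorFlow inference graph through detector_utils, reads
frames with OpenCV, runs hand detection on each frame, and draws bounding
boxes (optionally with an FPS overlay) on the output.

Example invocations (flag values are illustrative, not required):
    python detect_single_threaded.py
    python detect_single_threaded.py -src 0 -wd 640 -ht 360 -sth 0.4
"""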
from utils import detector_utils as detector_utils
import cv2
import tensorflow as tf
import datetime
import argparse

# Load the frozen hand-detection graph and open a TensorFlow session once,
# up front, so the main loop only pays the per-frame inference cost.
detection_graph, sess = detector_utils.load_inference_graph()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-sth',
        '--scorethreshold',
        dest='score_thresh',
        type=float,
        default=0.2,
        help='Score threshold for displaying bounding boxes')
    parser.add_argument(
        '-fps',
        '--fps',
        dest='fps',
        type=int,
        default=1,
        help='Show FPS on detection/display visualization')
    parser.add_argument(
        '-src',
        '--source',
        dest='video_source',
        default=0,
        help='Device index of the camera.')
    parser.add_argument(
        '-wd',
        '--width',
        dest='width',
        type=int,
        default=320,
        help='Width of the frames in the video stream.')
    parser.add_argument(
        '-ht',
        '--height',
        dest='height',
        type=int,
        default=180,
        help='Height of the frames in the video stream.')
    parser.add_argument(
        '-ds',
        '--display',
        dest='display',
        type=int,
        default=1,
        help='Display the detected images using OpenCV. This reduces FPS.')
    # The two options below are unused in this single-threaded script; they
    # are kept for command-line compatibility with the multi-threaded version.
    parser.add_argument(
        '-num-w',
        '--num-workers',
        dest='num_workers',
        type=int,
        default=4,
        help='Number of workers.')
    parser.add_argument(
        '-q-size',
        '--queue-size',
        dest='queue_size',
        type=int,
        default=5,
        help='Size of the queue.')
    args = parser.parse_args()
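    # Sketch (not in the original script): a value passed via -src arrives as
    # a string, so a numeric camera index such as "0" would otherwise reach
    # cv2.VideoCapture as a file path and fail to open. Convert purely digit
    # strings back to ints; leave real file paths untouched.
    if isinstance(args.video_source, str) and args.video_source.isdigit():
        args.video_source = int(args.video_source)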
    cap = cv2.VideoCapture(args.video_source)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.height)

    start_time = datetime.datetime.now()
    num_frames = 0
    # Read back the actual frame size; the camera may not honor the request.
    im_width, im_height = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                           cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Max number of hands we want to detect/track.
    num_hands_detect = 2

    cv2.namedWindow('Single-Threaded Detection', cv2.WINDOW_NORMAL)
    while True:
        ret, image_np = cap.read()
        if not ret:
            print("Error reading frame from video source")
            break
        # image_np = cv2.flip(image_np, 1)
        # The detector expects RGB input, while OpenCV captures frames in BGR.
        image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)

        # Actual detection. detect_objects expands the frame to the shape the
        # model expects, [1, None, None, 3], and returns the bounding-box
        # coordinates (boxes) and the confidence for each box (scores).
        # Hint: boxes are returned for every candidate detection; a hand
        # counts as found when its score exceeds the score threshold.
        boxes, scores = detector_utils.detect_objects(image_np,
                                                      detection_graph, sess)

        # Draw bounding boxes on the frame.
        detector_utils.draw_box_on_image(num_hands_detect, args.score_thresh,
                                         scores, boxes, im_width, im_height,
                                         image_np)

        # Calculate frames per second (FPS).
        num_frames += 1
        elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
        fps = num_frames / elapsed_time
        if args.display > 0:
            # Overlay the FPS counter on the frame.
            if args.fps > 0:
                detector_utils.draw_fps_on_image("FPS : " + str(int(fps)),
                                                 image_np)
            # Convert back to BGR for display; cv2.imshow expects BGR.
            cv2.imshow('Single-Threaded Detection',
                       cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        else:
            print("frames processed: ", num_frames, "elapsed time: ",
                  elapsed_time, "fps: ", str(int(fps)))

    cap.release()
    cv2.destroyAllWindows()
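# To benchmark detection throughput without the display overhead, disable the
# display; the loop then prints a running FPS to stdout instead:
#   python detect_single_threaded.py -ds 0
# Note: with -ds 0 there is no 'q'-to-quit handler, so stop with Ctrl+C.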