#!/usr/bin/env python
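"""Line follower for a DJI Tello drone (with webcam and still-image test modes).

Per frame: threshold the image with an HSV color mask, locate the largest
blob of track, turn its centroid into a lateral offset for left/right
translation, and split the mask into vertical areas that act as virtual
line sensors to choose a yaw speed.
"""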
import cv2
import numpy as np
from djitellopy import tello
from pick_color_mask import pick_color_mask, get_frame
from utils import countdown, process_frame, reduce_frame
# parameters
THRESHOLD = .80   # fraction of pixels in an area needed to flag it as 1
TRANS_GAIN = .33  # translation gain; higher is more sensitive
SENSORS = 3       # number of vertical areas used for track sensing
ROTA_VALS = [-25, -15, 0, 15, 25]  # yaw speeds per sensor pattern (5 entries for SENSORS = 3)
FWD_SPEED = 15    # default forward speed
video_source = "WEBCAM"  # one of "STATIC", "WEBCAM", "DRONE" (see __main__)
# Function definitions
def thresholding(frame, lower_threshold, upper_threshold):
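    """Return a binary mask of the pixels whose HSV values fall within
    [lower_threshold, upper_threshold]."""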
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(frame_hsv, lower_threshold, upper_threshold)
return mask
def get_lateral_offset(mask, frame, decorate_frame=True):
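    """Locate the largest contour in the mask and return the x coordinate
    of its bounding-box center (0 when nothing is detected) along with the
    frame, optionally decorated with the contour, centroid and offset arrow."""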
GREEN = (0.0, 255.0, 0.0)
PINK = (255.0, 0.0, 255.0)
RED = (0.0, 0.0, 255.0)
    cx = 0  # centroid x coordinate; stays 0 when no contour is found
contours, hierarchy = cv2.findContours(
mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
if len(contours) != 0:
largest_area_detected = max(contours, key=cv2.contourArea)
x_r, y_r, w_r, h_r = cv2.boundingRect(largest_area_detected)
cx = x_r + w_r//2
cy = y_r + h_r//2
if decorate_frame:
# draw boundary of track detected
            cv2.drawContours(frame, [largest_area_detected], -1, PINK, 3)
# draw centroid
cv2.circle(frame, (cx, cy), 5, GREEN, cv2.FILLED)
# draw bounding rectangle
# cv2.rectangle(frame, (x_r, y_r), (x_r+w_r, y_r+h_r), RED, 2)
h, w, c = frame.shape
# draw arrow
            cv2.arrowedLine(frame, (w // 2, h // 2), (cx, h // 2), GREEN, 1)
print("Offset:", w//2-cx)
return cx, frame
def get_sensor_output(mask, SENSORS):
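    """Split the mask into SENSORS vertical strips and return a list with
    one binary entry per strip: 1 when the strip contains enough track
    pixels (see THRESHOLD), else 0."""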
    # Note: hsplit requires the mask width to be divisible by SENSORS
    areas = np.hsplit(mask, SENSORS)
    # each split area is already frame_width // SENSORS wide, so its
    # pixel count is simply rows * cols
    pix_total = areas[0].shape[0] * areas[0].shape[1]
sens_out = []
for i, area in enumerate(areas):
pix_count = cv2.countNonZero(area)
if pix_count > THRESHOLD * pix_total:
sens_out.append(1)
win_title = str(i)+": 1"
else:
sens_out.append(0)
win_title = str(i)+": 0"
# display a window per area
# cv2.imshow(win_title, area)
print("Sensors:", sens_out)
return sens_out
def compute_commands(sens_out, cx):
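    """Map the centroid offset to a left/right speed and the sensor
    pattern to a yaw speed from ROTA_VALS; returns the 4-tuple
    (left_right, fwd_speed, up_down, yaw) used by Tello.send_rc_control."""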
    # TRANS_GAIN, ROTA_VALS, FWD_SPEED and frame_width are module-level
    # names that are only read here, so no global statement is needed
# translation
left_right = (cx - frame_width/2) * TRANS_GAIN
left_right = int(np.clip(left_right, -10, 10))
    if -2 < left_right < 2:  # deadband to suppress jitter around center
        left_right = 0
# rotation
fwd_speed = FWD_SPEED
if sens_out == [1, 0, 0]:
yaw = ROTA_VALS[0]
elif sens_out == [1, 1, 0]:
yaw = ROTA_VALS[1]
elif sens_out == [0, 1, 0]:
yaw = ROTA_VALS[2]
elif sens_out == [0, 1, 1]:
yaw = ROTA_VALS[3]
elif sens_out == [0, 0, 1]:
yaw = ROTA_VALS[4]
    elif sens_out == [1, 1, 1]:  # track spans all sensors: keep straight
        yaw = 0
    else:  # track lost or ambiguous pattern: stop forward motion
        yaw = 0
        fwd_speed = 0
print(f"Command: ({left_right}, {fwd_speed}, 0, {yaw})")
return left_right, fwd_speed, 0, yaw
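# Worked example with the parameters above and frame_width = 360:
# a track blob centered at cx = 300 with sensor pattern [0, 0, 1] gives
#   left_right = int(clip((300 - 180) * 0.33, -10, 10)) = 10
#   yaw = ROTA_VALS[4] = 25
# i.e. translate right at 10 while yawing right at 25, forward at FWD_SPEED.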
if __name__ == "__main__":
# Initialization of variables
    # reduced frame size used for processing; frame_width must be
    # divisible by SENSORS for np.hsplit in get_sensor_output
    frame_width = 360
    frame_height = 240
# video_source = "STATIC" for testing on still photos
# video_source = "WEBCAM" for testing on stream from video source
# video_source = "DRONE" for testing on Tello drone
# Initialization
    if video_source == "WEBCAM":
        # list the available cameras with:
        # $ v4l2-ctl --list-devices
        # external USB camera: cv2.VideoCapture('/dev/video2')
        # integrated laptop camera: index 0
        video_stream = cv2.VideoCapture(0)
        video_link = video_stream
elif video_source == "DRONE":
drone = tello.Tello()
drone.connect()
# print(f"Battery: {drone.get_battery()}%")
drone.streamon()
video_link = drone
elif video_source == "STATIC":
IMAGE_PATH = './assets/data/IMG_7733_xs.PNG'
video_link = IMAGE_PATH
    else:
        raise SystemExit(f"Video source '{video_source}' not recognized.")
    # color thresholds
    # TODO: store the values in an XML/env file, read them back, and run
    # color calibration only on request or when stored params are absent
    # HSV presets picked manually; uncomment one pair to use it:
    # no filter:
    # lower_threshold = [0, 0, 0]
    # upper_threshold = [179, 255, 255]
    # Cossio:
    # lower_threshold = [85, 13, 177]
    # upper_threshold = [108, 97, 223]
    # Durruti:
    # lower_threshold = [25, 10, 179]
    # upper_threshold = [117, 54, 255]
    # Durruti from Tello, at night:
    # lower_threshold = [94, 0, 189]
    # upper_threshold = [179, 255, 255]
    # test sheet:
    lower_threshold = [115, 70, 0]
    upper_threshold = [179, 255, 255]
# Call pick_color_mask to fine-tune initial values
lower_threshold, upper_threshold = pick_color_mask(
video_source, video_link, (frame_width, frame_height),
lower_threshold, upper_threshold)
# display final values
print("lower_threshold = ", lower_threshold)
print("upper_threshold = ", upper_threshold)
if video_source == "DRONE":
countdown(5)
drone.takeoff()
# main loop
while True:
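        # per-frame pipeline: grab -> resize -> color mask -> lateral
        # offset (translation) -> virtual sensors (rotation) -> rc command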
# get frame
frame = get_frame(video_source, video_link)
        # resize to the fixed processing resolution (this does not
        # preserve the aspect ratio)
        frame = cv2.resize(frame, (frame_width, frame_height),
                           interpolation=cv2.INTER_AREA)
# frame = reduce_frame(frame, (frame_width, frame_height))
mask = thresholding(frame, lower_threshold, upper_threshold)
cx, frame = get_lateral_offset(mask, frame) # for translation
sens_out = get_sensor_output(mask, SENSORS) # for rotation
left_right, fwd_speed, up_down, yaw = compute_commands(sens_out, cx)
text_bat = "NA"
if video_source == "DRONE":
drone.send_rc_control(left_right, fwd_speed, up_down, yaw)
text_bat = drone.get_battery()
# show images
frame = process_frame(frame, text_bat)
cv2.imshow("Output", frame)
# cv2.imshow("Mask", mask)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if video_source == "DRONE":
        drone.land()
drone.streamoff()
drone.end()
cv2.destroyAllWindows()