"""Second step of our approach: create contours and translations for DAVIS images.
Contour points are extracted from DAVIS annotations using OpenCV.
Translations are computed based on Lucas Kanade optical flow implemented in OpenCV.
"""
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.preprocessing import normalize

import src.config as cfg
from src.vis_utils import extract_longest_contour, load_gray_img
# Parameters for Lucas-Kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
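# winSize is the side length of the patch tracked around each contour point,
# maxLevel the number of additional pyramid levels, and criteria stops the
# iterative search after 10 iterations or once the update is below 0.03 px.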
def get_normalized_translations(contour_0, contour_1):
    """Computes the translation between contour_0 and contour_1 and normalizes it.

    If there is only one contour point, the break_it flag is set instead.

    Parameters
    ----------
    contour_0 : ndarray
        Array containing points from contour_0
    contour_1 : ndarray
        Array containing points from contour_1

    Returns
    -------
    translations : ndarray
        Array containing the normalized translations
    break_it : bool
        Flag indicating whether there is only one contour point
    """
    break_it = False
    contour_0 = np.squeeze(contour_0)
    contour_1 = np.squeeze(contour_1)
    translations = contour_1 - contour_0
    if translations.ndim > 1:
        translations = normalize(translations)
    else:
        break_it = True
    return translations, break_it
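# Illustrative sketch (not part of the pipeline): contours in OpenCV's
# (N, 1, 2) layout; normalize() rescales each translation row to unit length.
#   c0 = np.array([[[0, 0]], [[10, 0]]])
#   c1 = np.array([[[3, 4]], [[10, 5]]])
#   get_normalized_translations(c0, c1)
#   # -> (array([[0.6, 0.8], [0. , 1. ]]), False)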
def save_annotation_with_contour_and_translation(annotation, contour, translation, path):
    """Saves the annotation plotted with contour and translation.

    Parameters
    ----------
    annotation : ndarray
        Array containing the DAVIS annotation
    contour : ndarray
        Array containing the extracted contour
    translation : ndarray
        Array containing the translation
    path : str
        Path to store the whole figure
    """
    plt.figure(figsize=(15, 10))
    # Plot contour
    contour = np.squeeze(contour)
    plt.scatter(contour[:, 0], contour[:, 1])
    # Plot annotation
    plt.imshow(annotation)
    # Plot translation
    for c, t in zip(contour, translation):
        plt.arrow(c[0], c[1],
                  t[0], t[1],
                  width=1, color='r')
    # Save image
    plt.savefig(path, bbox_inches=0)
    plt.close()
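# Sketch of a debugging call (hypothetical file names), in the spirit of the
# commented-out call at the end of create_contours_and_translations below:
#   annotation = cv2.imread('00000.png')
#   save_annotation_with_contour_and_translation(annotation, contour_0,
#                                                translation_0_1_final, '00000_debug.png')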
def find_closest(contour_point_1, contour_1_real):
    """Finds the closest point to contour_point_1 in contour_1_real.

    Parameters
    ----------
    contour_point_1 : ndarray
        Array containing a single contour point
    contour_1_real : ndarray
        Array containing contour points extracted using cv2

    Returns
    -------
    contour_point_1_final : ndarray
        Closest point in contour_1_real
    """
    contour_1_real = np.squeeze(contour_1_real, axis=1)
    dist = cdist(contour_point_1, contour_1_real, metric='euclidean')
    arg_contour_point_1_final = np.argmin(dist)
    contour_point_1_final = contour_1_real[arg_contour_point_1_final]
    return contour_point_1_final
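# Sketch: the query point is 2-D with shape (1, 2) as required by cdist, while
# the real contour keeps OpenCV's (N, 1, 2) layout.
#   p = np.array([[2.0, 2.0]])
#   real = np.array([[[0, 0]], [[3, 3]], [[10, 10]]], dtype=float)
#   find_closest(p, real)  # -> array([3., 3.])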
def match_points(contour_1, contour_1_real):
    """Projects contour points onto real contour points.

    See create_contours_and_translations(...) for a detailed explanation.

    Parameters
    ----------
    contour_1 : ndarray
        Array containing contour points computed with optical flow
    contour_1_real : ndarray
        Array containing contour points extracted using cv2

    Returns
    -------
    contour_1_final : ndarray
        Array containing the matched contour points
    """
    rows_1, _, _ = contour_1.shape
    contour_1_final = np.zeros((rows_1, 2))
    for x in range(rows_1):
        contour_1_final[x] = find_closest(contour_1[x], contour_1_real)
    return contour_1_final
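# Sketch: flow-predicted points in (N, 1, 2) layout are snapped to the nearest
# point of the real contour.
#   pred = np.array([[[1.2, 0.9]], [[9.0, 9.4]]])
#   real = np.array([[[0, 0]], [[1, 1]], [[10, 10]]], dtype=float)
#   match_points(pred, real)  # -> array([[ 1.,  1.], [10., 10.]])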
def create_contours_and_translations(annotations_folders_path, contours_folders_path,
                                     translations_folders_path,
                                     closing_kernel_size):
    """Creates contours and translations for DAVIS images.

    Contour points are extracted from DAVIS annotations using OpenCV.
    Translations are computed based on Lucas-Kanade optical flow as implemented in OpenCV.

    Parameters
    ----------
    annotations_folders_path : str
        Path to the augmented DAVIS annotations
    contours_folders_path : str
        Path to where contours should be stored
    translations_folders_path : str
        Path to where translations should be stored
    closing_kernel_size : int
        Size of the closing kernel
    """
    # Get list of sequences
    sequences = os.listdir(annotations_folders_path)
    sequences.sort()
    # Iterate through sequences
    for i, sequence in enumerate(sequences):
        print('#{}: {}'.format(i, sequence))
        # If val sequence, augmentation_count = 1
        if sequence not in cfg.TRAIN_SEQUENCES:
            augmentation_count = 1
        else:
            augmentation_count = cfg.AUGMENTATION_COUNT + 1
        # Iterate through augmentations
        for j in range(augmentation_count):
            j = str(j)
            print('\t{} #{}'.format('Augmentation', j))
            # Create folder to save contours
            contours_folder_path = os.path.join(contours_folders_path, sequence, j)
            if not os.path.exists(contours_folder_path):
                os.makedirs(contours_folder_path)
            # Create folder to save translations
            translations_folder_path = os.path.join(translations_folders_path, sequence, j)
            if not os.path.exists(translations_folder_path):
                os.makedirs(translations_folder_path)
            # Get list of frames if augmentation for this sequence exists
            if os.path.exists(os.path.join(annotations_folders_path, sequence, j)):
                frames = os.listdir(os.path.join(annotations_folders_path, sequence, j))
                if '.ipynb_checkpoints' in frames:
                    frames.remove('.ipynb_checkpoints')
                frames.sort()
                # Iterate through frames
                for k, frame in enumerate(frames):
                    # print('\t\t#{}: {}'.format(k, frame))
                    # Stop here as the annotations are completely black
                    if sequence == 'bmx-bumps' and frame == '00059.png':
                        break
                    if sequence == 'surf' and frame == '00053.png':
                        break
                    # Get paths to the two frames
                    annotation_0_path = os.path.join(annotations_folders_path, sequence, j, frame)
                    try:
                        annotation_1_path = os.path.join(annotations_folders_path, sequence, j, frames[k+1])
                    # Break if frame_0 is the last frame
                    except IndexError:
                        break
                    # Load frames as gray images
                    annotation_0_gray = load_gray_img(annotation_0_path)
                    annotation_1_gray = load_gray_img(annotation_1_path)
                    # Extract the longest contour and save it
                    contour_0 = extract_longest_contour(annotation_0_gray, closing_kernel_size,
                                                        cv2.CHAIN_APPROX_TC89_KCOS)
                    np.save(os.path.join(contours_folder_path, frame[:5]), contour_0)
                    # Calculate optical flow to get contour_1
                    contour_1, st, err = cv2.calcOpticalFlowPyrLK(annotation_0_gray, annotation_1_gray,
                                                                  contour_0, None, **lk_params)
                    # Compute the translation between contour_0 and contour_1 and normalize it,
                    # as the raw translation is sometimes very long.
                    # If the translation contains just one point, stop for this augmentation.
                    translation_0_1_normalized, break_it = get_normalized_translations(contour_0, contour_1)
                    if break_it:
                        break
                    # Update contour_1 using contour_0 + normalized translations. Note that contour_1 does
                    # not necessarily lie on the real contour, which is why we project the points of
                    # contour_1 onto the real contour_1 in the next steps.
                    contour_1 = np.add(np.squeeze(contour_0), translation_0_1_normalized)
                    contour_1 = np.expand_dims(contour_1, axis=1)
                    # Extract the real contour_1
                    contour_1_real = extract_longest_contour(annotation_1_gray, closing_kernel_size,
                                                             cv2.CHAIN_APPROX_NONE)
                    # Project contour_1 onto contour_1_real
                    contour_1_final = match_points(contour_1, contour_1_real)
                    # Compute the real translation and save it
                    translation_0_1_final = contour_1_final - np.squeeze(contour_0)
                    np.save(os.path.join(translations_folder_path, frame[:5]), translation_0_1_final)
                    # Save annotation with contour and translation (debugging, kept commented out)
                    # annotation = cv2.imread(os.path.join(annotations_folders_path, sequence, j, frame))
                    # save_annotation_with_contour_and_translation(annotation, contour_0,
                    #     translation_0_1_final, os.path.join(translations_folder_path, frame[:5] + '.png'))
if __name__ == "__main__":
create_contours_and_translations(cfg.ANNOTATIONS_AUGMENTED_FOLDERS_PATH,
cfg.CONTOURS_FOLDERS_PATH,
cfg.TRANSLATIONS_FOLDERS_PATH,
cfg.CLOSING_KERNEL_SIZE)
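# Presumed invocation (not stated in the file): run from the repository root so
# that the `src` package imports resolve, e.g.
#   python 02_contours_and_translations.py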