-
Notifications
You must be signed in to change notification settings - Fork 7
/
Copy pathKNNFeaturesExtractor.py
165 lines (109 loc) · 6.07 KB
/
KNNFeaturesExtractor.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
# This program reads all the training images specified using variables DATA_LOCS, TRAINING_LABELS
# and finds and saves the grid feature set in csv file. This will be used for training of KNN
import cv2, numpy as np, time, math
import silatra
from math import ceil
import os.path
def segment(src_img):
    """Build a binary skin mask for *src_img* using a fixed YCrCb colour range.

    The input BGR frame is smoothed (Gaussian then median blur), converted to
    YCrCb, thresholded against a hard-coded skin-tone range, then cleaned up
    with morphological opening and closing.

    Returns:
        A single-channel uint8 mask — white (255) where skin was detected,
        black (0) elsewhere.
    """
    import cv2
    from numpy import array, uint8

    # Smooth the frame to suppress sensor noise before colour thresholding.
    smoothed = cv2.medianBlur(cv2.GaussianBlur(src_img, (5, 5), 0), 5)
    ycrcb = cv2.cvtColor(smoothed, cv2.COLOR_BGR2YCrCb)

    # Hard-coded skin-tone bounds in YCrCb space.
    skin_lo = array([0, 137, 100], uint8)
    skin_hi = array([255, 200, 150], uint8)
    skin_mask = cv2.inRange(ycrcb, skin_lo, skin_hi)

    # Opening removes small speckles; closing fills small holes in the mask.
    kernel_open = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    kernel_close = cv2.getStructuringElement(cv2.MORPH_CROSS, (7, 7))
    skin_mask = cv2.morphologyEx(skin_mask, cv2.MORPH_OPEN, kernel_open)
    skin_mask = cv2.morphologyEx(skin_mask, cv2.MORPH_CLOSE, kernel_close)
    return skin_mask
# ---------------------------------------------------------------------------
# Feature-extraction driver.
# Walks every <DATA_LOC>/<label>/<i>.png training image, segments the hand,
# crops its bounding box, splits the crop into a grid, and writes one CSV row
# per image: grid[0]*grid[1] per-cell area fractions followed by the label.
# ---------------------------------------------------------------------------
start = time.time()

grid = (10, 10)  # feature grid resolution: (rows, columns)

print('Labels: ', end='\r')
print([str(i) for i in range(10)] + [chr(ord('a') + i) for i in range(26)]
      + ['Cup_Closed', 'Cup_Open', 'Sun_Up', 'ThumbsUp'])

total_images_parsed = 0

# Gesture Signs data should be stored in 1 csv file. Example: gestures_pregenerated_sample.csv
# DATA_LOCS = ['../Dataset/Hand_Poses_Dataset/Gesture_Signs/']
# TRAINING_LABELS = ['Leader_L','Apple_Finger','Cup_Closed','Cup_Open','ThumbsUp','Sun_Up','Fist','OpenPalmHori','That_Is_Good_Circle','That_Is_Good_Point']
# Digits and letters data should be stored together in 1 csv file. Example: silatra_signs_pregenerated_sample
DATA_LOCS = ['../Dataset/Hand_Poses_Dataset/Digits/']
TRAINING_LABELS = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
# DATA_LOCS = ['../Dataset/Hand_Poses_Dataset/Letters/']
# TRAINING_LABELS = [chr(ord('a')+i) for i in range(26)]


def _find_contours(binary_img):
    """Return the contour list of *binary_img* on both OpenCV 3 and 4.

    cv2.findContours returns (image, contours, hierarchy) in OpenCV 3 but
    (contours, hierarchy) in OpenCV 4; the contour list is always the
    second-from-last element, so index [-2] works for either version.
    """
    return cv2.findContours(binary_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]


def _grid_features(hand, rows, cols):
    """Split binary crop *hand* into a rows x cols grid of cells and return,
    per cell, the area of the cell's first contour normalised by cell area.

    Cells containing no contour contribute 0.0. The result is a list of
    *cols* lists of *rows* floats, matching the CSV writing order below.
    """
    HEIGHT, WIDTH = hand.shape
    # Cell size. NOTE(review): the original code divided height by the column
    # count and width by the row count; with the square 10x10 grid used here
    # the result is identical, and that pairing is preserved unchanged.
    cell_h, cell_w = float(HEIGHT / cols), float(WIDTH / rows)
    features = [[0.0 for _ in range(rows)] for _ in range(cols)]
    for column in range(1, cols + 1):
        for row in range(1, rows + 1):
            fragment = hand[ceil((column - 1) * cell_h):min(ceil(column * cell_h), HEIGHT),
                            ceil((row - 1) * cell_w):min(ceil(row * cell_w), WIDTH)]
            cell_contours = _find_contours(fragment)
            try:
                area = cv2.contourArea(cell_contours[0])
            except IndexError:
                area = 0.0  # empty cell: no contour found
            features[column - 1][row - 1] = float(area / (cell_h * cell_w))
    return features


# NOTE: deliberately an incorrect temporary file so as to prevent ruining the
# pregenerated sample file. Set the mode to 'a' (append) or 'w' (write)
# depending on the need.
# with open('./TempFeatureSetFiles/silatra_digits_letters.csv', 'a') as dump_file:
with open('./TempFeatureSetFiles/silatra_gesture_signs.csv', 'w') as dump_file:
    for DATA_LOC in DATA_LOCS:
        for label in TRAINING_LABELS:
            for img_no in range(1, 1200 + 1):
                try:
                    img_path = DATA_LOC + str(label) + "/" + str(img_no) + '.png'
                    if not os.path.isfile(img_path):
                        # Skip gaps: images may have been manually deleted as outliers.
                        continue
                    print(' ' * 120 + '\rProcessing image: %3d, Label = %s, From Location: %s'
                          % (img_no, label, img_path), end='\r')
                    image = cv2.imread(img_path)
                    # imread returns None for unreadable files; the old
                    # `image.shape[0] == 0` check crashed on that case.
                    if image is None or image.shape[0] == 0:
                        continue
                    mask = segment(image)
                    _, thresh = cv2.threshold(mask, 127, 255, 0)
                    contours = _find_contours(thresh)
                    if not contours:
                        continue  # no hand detected in this image
                    # The largest contour is taken to be the hand.
                    hand_contour = max(contours, key=cv2.contourArea)
                    x, y, w, h = cv2.boundingRect(hand_contour)
                    hand = thresh[y:y + h, x:x + w]
                    data = _grid_features(hand, grid[0], grid[1])
                    # One CSV row: all cell features (column-major), then the label.
                    to_write_data = ','.join(str(data[column][row])
                                             for column in range(grid[1])
                                             for row in range(grid[0]))
                    to_write_data += ',' + str(label) + '\n'
                    dump_file.write(to_write_data)
                    total_images_parsed += 1
                except Exception as e:
                    # Best-effort: report the failure and move on to the next image.
                    print(e)
                    continue

total = (time.time() - start)
print(' ' * 160 + '\rTotal time required = %3.3fs' % (total))
print('Total images parsed: %d' % (total_images_parsed))