security.py
# Imports
from __future__ import print_function
# Google API client imports (Drive upload helpers; unused in this file)
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
from httplib2 import Http
from oauth2client import file, client, tools
import cv2, time, pandas
from datetime import datetime
import face_recognition as fr
from twilio.rest import Client
from VoiceIt import *
import pyaudio
import tkinter as Tk
import pyttsx  # on Python 3, install and import pyttsx3 instead
import wave

## VoiceIt developer ID
myVoiceIt = VoiceIt("")
# Your Account SID from twilio.com/console
account_sid = ""
# Your Auth Token from twilio.com/console
auth_token = ""
twilio_client = Client(account_sid, auth_token)

first_frame = None
status_list = [None, None]
times = []
## DataFrame recording motion start/end times over the whole video
df = pandas.DataFrame(columns=["Start", "End"])
video = cv2.VideoCapture(0)
engine = pyttsx.init()
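# Pipeline overview: motion detection runs continuously on the webcam feed;
# any detected face is checked against the known encodings below; a matched
# face must then pass VoiceIt voice authentication, while an unknown face or
# a failed authentication triggers an SMS alert through Twilio.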
# Load sample pictures of the known person and learn to recognize them
image_1 = fr.load_image_file("image1.jpg")
image_2 = fr.load_image_file("image2.jpg")
image_3 = fr.load_image_file("image3.jpg")
image_4 = fr.load_image_file("image4.jpg")
image_5 = fr.load_image_file("image5.jpg")
image_6 = fr.load_image_file("image6.jpg")
## Encode the face found in each sample image (Ashwanth)
face_encoding_1 = fr.face_encodings(image_1)[0]
face_encoding_2 = fr.face_encodings(image_2)[0]
face_encoding_3 = fr.face_encodings(image_3)[0]
face_encoding_4 = fr.face_encodings(image_4)[0]
face_encoding_5 = fr.face_encodings(image_5)[0]
face_encoding_6 = fr.face_encodings(image_6)[0]
# Initialize face-recognition state
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
## compare_faces() returns an array of True/False values telling whether an
## unknown face matched each encoding in known_faces
known_faces = [
    face_encoding_1,
    face_encoding_2,
    face_encoding_3,
    face_encoding_4,
    face_encoding_5,
    face_encoding_6,
]
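# Optional tightening (a sketch, not used below): face_distance() returns a
# continuous distance per known encoding, so a stricter threshold than
# compare_faces' default tolerance of 0.6 could be applied instead:
#   distances = fr.face_distance(known_faces, face_encoding)
#   is_match = min(distances) < 0.5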
## Recorder records audio from the microphone and writes it to a WAV file
class Recorder(object):
    '''A recorder class for recording audio to a WAV file.

    Records in mono by default.
    '''

    def __init__(self, channels=1, rate=44100, frames_per_buffer=1024):
        self.channels = channels
        self.rate = rate
        self.frames_per_buffer = frames_per_buffer

    def open(self, fname, mode='wb'):
        return RecordingFile(fname, mode, self.channels, self.rate,
                             self.frames_per_buffer)


class RecordingFile(object):
    def __init__(self, fname, mode, channels,
                 rate, frames_per_buffer):
        self.fname = fname
        self.mode = mode
        self.channels = channels
        self.rate = rate
        self.frames_per_buffer = frames_per_buffer
        self._pa = pyaudio.PyAudio()
        self.wavefile = self._prepare_file(self.fname, self.mode)
        self._stream = None

    def __enter__(self):
        return self

    def __exit__(self, exception, value, traceback):
        self.close()

    def record(self, duration):
        # Use a stream with no callback function in blocking mode
        self._stream = self._pa.open(format=pyaudio.paInt16,
                                     channels=self.channels,
                                     rate=self.rate,
                                     input=True,
                                     frames_per_buffer=self.frames_per_buffer)
        for _ in range(int(self.rate / self.frames_per_buffer * duration)):
            audio = self._stream.read(self.frames_per_buffer)
            self.wavefile.writeframes(audio)
        return None

    def start_recording(self):
        # Use a stream with a callback in non-blocking mode
        self._stream = self._pa.open(format=pyaudio.paInt16,
                                     channels=self.channels,
                                     rate=self.rate,
                                     input=True,
                                     frames_per_buffer=self.frames_per_buffer,
                                     stream_callback=self.get_callback())
        self._stream.start_stream()
        return self

    def stop_recording(self):
        self._stream.stop_stream()
        return self

    def get_callback(self):
        def callback(in_data, frame_count, time_info, status):
            self.wavefile.writeframes(in_data)
            return in_data, pyaudio.paContinue
        return callback

    def close(self):
        self._stream.close()
        self._pa.terminate()
        self.wavefile.close()

    def _prepare_file(self, fname, mode='wb'):
        wavefile = wave.open(fname, mode)
        wavefile.setnchannels(self.channels)
        wavefile.setsampwidth(self._pa.get_sample_size(pyaudio.paInt16))
        wavefile.setframerate(self.rate)
        return wavefile
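# Non-blocking usage sketch (not used below; the caller must wait while the
# callback writes frames):
#   with Recorder(channels=1).open('clip.wav', 'wb') as recfile:
#       recfile.start_recording()
#       time.sleep(3.0)
#       recfile.stop_recording()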
## Prompt for the catch phrase via text-to-speech, then record the attempt to
## the file that is sent to VoiceIt ('nameoffile.wav' is the placeholder name
## passed to myVoiceIt.authentication() below; the two must match)
def voicephrase():
    engine.say('Say the catch phrase NEVER FORGET TOMORROW IS A NEW DAY in')
    engine.say('3')
    engine.say('2')
    engine.say('1')
    engine.runAndWait()
    with rec.open('nameoffile.wav', 'wb') as recfile:
        recfile.record(duration=5.0)

## Record the catch phrase once at startup and save it as a .wav voiceprint
print('Say the catch phrase')
rec = Recorder(channels=2)
with rec.open('Ashwanth-voicePrint.wav', 'wb') as recfile:
    recfile.record(duration=5.0)

## Text-to-speech prompt for face recognition
engine.say('Please show your face in front of the camera in')
engine.say('3')
engine.say('2')
engine.say('1')
engine.runAndWait()

run_once = 0
show_once = 0
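# Assumption: the VoiceIt user referenced below ("VoiceIt_userid" and
# "VoiceIt_password" are placeholders) was already enrolled with recordings
# of the same catch phrase through the VoiceIt developer console or API;
# authentication can only succeed against an enrolled voiceprint.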
# Motion detection runs in the background
while True:
    # Read one frame from the webcam
    check, frame = video.read()
    status = 0
    # Frame preprocessing: grayscale plus Gaussian blur to suppress noise
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)  # 21x21 kernel; 0 lets OpenCV derive the standard deviation
    if first_frame is None:
        first_frame = gray
        continue
    # Difference against the first frame, then threshold and dilate
    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
    # OpenCV 3 returns (image, contours, hierarchy); on OpenCV 4 use
    # cnts, _ = cv2.findContours(...) instead
    (_, cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        # Ignore small contours; anything larger counts as motion
        if cv2.contourArea(contour) < 10000:
            continue
        status = 1
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    # Work on a quarter-size copy of the frame to speed up face recognition
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = fr.face_locations(small_frame)
        face_encodings = fr.face_encodings(small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # Compare against every known encoding; matchA holds one
            # True/False per entry in known_faces
            matchA = fr.compare_faces(known_faces, face_encoding)
            name = "Unknown"
            ## If the face matches any known encoding of person A
            if True in matchA:
                # Label the face as A
                name = "A"
                # Prompt for and record the catch phrase
                voicephrase()
                # Send the recording to the VoiceIt API to authenticate the voice
                response = myVoiceIt.authentication("VoiceIt_userid", "VoiceIt_password", "nameoffile.wav", "en-US")
                print(response)
                # If the first attempt fails
                if 'failed' in response:
                    # Allow two more attempts
                    number_of_guesses = 0
                    while number_of_guesses < 2:
                        # Repeat voice authentication
                        engine.say('Authentication Failed. Voice Not Detected. Please say the catch phrase again')
                        engine.runAndWait()
                        voicephrase()
                        response = myVoiceIt.authentication("VoiceIt_userid", "VoiceIt_password", "nameoffile.wav", "en-US")
                        print(response)
                        # Count the attempt
                        number_of_guesses = number_of_guesses + 1
                        # Success after a failed attempt
                        if 'success' in response:
                            engine.say('Authentication Successful. Voice Detected. You can enter the house.')
                            engine.runAndWait()
                            print('Authentication Successful. Voice Detected. You can enter the house.')
                            exit()
                    # Authentication failed three times in total
                    engine.say('Authentication Failed three times. You cannot enter the house.')
                    engine.runAndWait()
                    # Call the Twilio API to send a text message to the linked phone number
                    message = twilio_client.api.account.messages.create(
                        to="<enter your phone number>",
                        from_="<enter your phone number>",
                        body="Alert! An unknown person is trying to enter the building")
                    print('Authentication Failed. Voice Not Detected.')
                    exit()
                # First attempt succeeded
                if 'success' in response:
                    engine.say('Authentication Successful. Voice Detected. You can enter the house.')
                    engine.runAndWait()
                    print(response)
                    print('Authentication Successful. Voice Detected. You can enter the house.')
                    exit()
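            # The 'success' / 'failed' checks above assume the VoiceIt wrapper
            # returns its raw JSON response as a string, so substring tests
            # work. In total, three attempts (one initial try plus two retries)
            # are allowed before the SMS alert fires and the script exits.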
            ## If the face is unknown, alert once
            if run_once == 0:
                if name == "Unknown":
                    print('Unknown')
                    engine.say('Unknown face detected. Please move away from the camera')
                    engine.runAndWait()
                    # Send a text message to the linked phone number
                    message = twilio_client.api.account.messages.create(
                        to="<enter your phone number>",
                        from_="<enter your phone number>",
                        body="Alert! An unknown person has failed face recognition")
                    run_once = 1
            face_names.append(name)
    process_this_frame = not process_this_frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale locations back up, since detection ran on a quarter-size frame
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with the name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    # Record motion state transitions (0 -> 1 = motion started, 1 -> 0 = motion stopped)
    status_list.append(status)
    status_list = status_list[-2:]
    if status_list[-1] == 1 and status_list[-2] == 0:
        times.append(datetime.now())
    if status_list[-1] == 0 and status_list[-2] == 1:
        times.append(datetime.now())
    cv2.imshow("Color Frame", frame)
    key = cv2.waitKey(1)
    if key == ord('q'):
        # Close any open motion interval before quitting
        if status == 1:
            times.append(datetime.now())
        break

print(status_list)
print(times)
# Pair the timestamps up as start/end rows
for i in range(0, len(times), 2):
    df = df.append({"Start": times[i], "End": times[i + 1]}, ignore_index=True)  # DataFrame.append was removed in pandas 2.0; use pandas.concat on newer versions
# Write the recorded motion intervals to a CSV file
df.to_csv("times.csv")
video.release()
cv2.destroyAllWindows()
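# Output sketch: times.csv holds one row per motion interval, with pandas'
# default integer index plus "Start" and "End" datetime columns, e.g.
#   ,Start,End
#   0,2019-01-01 12:00:01.000000,2019-01-01 12:00:07.500000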