-
Notifications
You must be signed in to change notification settings - Fork 1
/
dev_parseframes.py
177 lines (133 loc) · 5.65 KB
/
dev_parseframes.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
import cv2
import time
import sys
import numpy as np
from pathlib import Path
import matplotlib
class VideoFramesProcessor:
    """Read a video file and aggregate motion masks one second at a time.

    For every second of video time, the frames belonging to that second are
    run through a MOG2 background subtractor; the foreground masks are
    averaged and written to ``outputDir`` as ``image<epochSecond>x.jpg``.
    """

    def __init__(self, filepath):
        """Validate ``filepath`` and open it with OpenCV.

        Exits the process (nonzero status) if the path is empty or the file
        does not exist.
        """
        self.filepath = filepath
        self.abs_filepath = ''
        self.filepathObj = None
        self.outputDir = './output'
        self.video = None        # cv2.VideoCapture, set in checkFileMetaData
        self.video_fps = 0
        self.video_frames = 0
        # Epoch second the video is assumed to start at; override via
        # setStartTimeEpoch. Hard-coded sample value kept from the original.
        self.startTimeEpoch = 1518513474  # int(time.time())
        self.frameProcessorData = {}  # lazily initialised per-run OpenCV state
        self.checkFileExists()
        self.checkFileMetaData()

    def setOutputDir(self, outputDir):
        """Set the directory the aggregated JPEGs are written to."""
        self.outputDir = outputDir

    def setStartTimeEpoch(self, startTimeEpoch):
        """Set the epoch second corresponding to video time zero."""
        self.startTimeEpoch = startTimeEpoch

    def checkFileExists(self):
        """Verify ``self.filepath`` points to an existing file.

        On success caches the absolute path and returns True. On failure the
        process exits with status 1 (was exit(0): error paths should report
        failure to the shell), so callers never observe a False return.
        """
        if not self.filepath:
            print("Filepath was empty or not specified.")
            sys.exit(1)
        self.filepathObj = Path(self.filepath)
        if self.filepathObj.is_file():
            print("File Exists. Processing \n")
            self.abs_filepath = str(self.filepathObj.absolute())
            return True
        print("File does not exist. Exiting.")
        sys.exit(1)

    def checkFileMetaData(self):
        """Open the video capture and cache its FPS and total frame count."""
        self.video = cv2.VideoCapture(self.abs_filepath)
        self.video_fps = self.video.get(cv2.CAP_PROP_FPS)
        # CAP_PROP_* getters return floats; keep the frame count integral so
        # the end-of-file comparison in processFile is exact.
        self.video_frames = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))
        print("Video Path: ", self.abs_filepath)
        print("Video FPS: ", self.video_fps)
        print("Video Frames: ", self.video_frames)

    def processFile(self):
        """Iterate all frames, grouping them by epoch second, and flush each
        group through processFramesWithinMillisecond."""
        # Rewind in case the capture has been read before.
        self.video.set(cv2.CAP_PROP_POS_FRAMES, 0)
        framesBuffer = []
        lastEpoch = -1
        startTime = time.time()
        frameProcessorTime = 0.0
        failedReads = 0
        while True:
            # read() may return False while buffering, not only at EOF, so a
            # single failed read must not end the loop — but bail out after
            # many consecutive failures to avoid spinning forever on a broken
            # or empty file (the original could loop indefinitely here).
            flag, frame = self.video.read()
            if not flag:
                failedReads += 1
                if failedReads > 1000:
                    print("Giving up after repeated failed reads.")
                    break
                continue
            failedReads = 0
            currentFramePos = self.video.get(cv2.CAP_PROP_POS_FRAMES)
            frameVideoSec = int(self.video.get(cv2.CAP_PROP_POS_MSEC) / 1000)
            frameEpochSec = self.startTimeEpoch + frameVideoSec
            # Crossing into a new second: flush the buffered frames of the
            # previous second before starting the new group.
            if frameEpochSec != lastEpoch and framesBuffer:
                a = time.time()
                self.processFramesWithinMillisecond(framesBuffer, lastEpoch)
                frameProcessorTime += time.time() - a
                print("Frameprocessorlooptime", time.time() - a)
                framesBuffer = []
            framesBuffer.append(frame)
            lastEpoch = frameEpochSec
            # After read(), CAP_PROP_POS_FRAMES is already the index of the
            # *next* frame, so the file is exhausted once it reaches the
            # frame count. (The original compared pos+1, which triggered one
            # frame early and skipped the final frame.)
            if int(currentFramePos) >= self.video_frames:
                # Flush the last, partial second.
                self.processFramesWithinMillisecond(framesBuffer, frameEpochSec)
                framesBuffer = []
                endTime = time.time()
                print("Total Processing FPS", self.video_frames / (endTime - startTime))
                print("Total Time", endTime - startTime)
                print("Total Time In FrameProcessor", frameProcessorTime)
                break

    def processFramesWithinMillisecond(self, frames, frameEpochSec):
        """Average the MOG2 foreground masks of ``frames`` and write the
        result to ``<outputDir>/image<frameEpochSec>x.jpg``.

        NOTE(review): despite the name, ``frames`` is the group of frames
        falling within one SECOND of video time (see processFile); the name
        is kept for backward compatibility.
        """
        if not frames:
            # Nothing to aggregate; also avoids a division by zero below.
            return
        startTime = time.time()
        if not self.frameProcessorData:
            # Lazily build the per-run OpenCV state on first use.
            self.frameProcessorData = {
                'fgbg': cv2.createBackgroundSubtractorMOG2(
                    history=1, varThreshold=100, detectShadows=True),
                'kernel': cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)),
                'firstFrame': True,
                'detectionVigor': 40,  # 20-80
            }
        # Single-channel accumulator with the frame's height and width.
        msecAggregate = np.zeros(frames[0].shape[:2])
        for frame in frames:
            if self.frameProcessorData['firstFrame']:
                # Warm up the subtractor on the very first frame: applying it
                # twice makes it diff against itself so nothing is detected.
                fgmask = self.frameProcessorData['fgbg'].apply(frame, None, 0.01)
                fgmask = self.frameProcessorData['fgbg'].apply(frame, None, 0.01)
                self.frameProcessorData['firstFrame'] = False
            else:
                fgmask = self.frameProcessorData['fgbg'].apply(frame, None, 0.01)
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE,
                                      self.frameProcessorData['kernel'])
            msecAggregate = msecAggregate + fgmask
        print("Frame aggregator", time.time() - startTime)
        # Decision: use averaging rather than min/max range-bind (MMRB);
        # MMRB can still be derived on demand from the stored averages.
        msecAggregate2 = msecAggregate / len(frames)
        cv2.imwrite(self.outputDir + "/image" + str(frameEpochSec) + "x.jpg",
                    msecAggregate2)
        print("Processing ", len(frames), "frames at epoch:", frameEpochSec)
if __name__ == '__main__':
    # CLI entry point: the first positional argument is the video file path.
    args = sys.argv
    if len(args) <= 1:
        print("Filepath was empty or not specified.")
    else:
        video_processor = VideoFramesProcessor(args[1])
        video_processor.processFile()
# # Start default camera
# video = cv2.VideoCapture("./camera_trim.mp4");
# fps = video.get(cv2.CAP_PROP_FPS)
# print(fps ,time.time())
# video.release()
# video.get(cv2.CAP_PROP_POS_MSEC)