-
Notifications
You must be signed in to change notification settings - Fork 0
/
main_ui.py
231 lines (195 loc) · 7.94 KB
/
main_ui.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
import cv2
import numpy as np
import pyautogui as pg
import subprocess
import threading
import math
import os
from pathlib import Path
import pyaudio
from pydub import AudioSegment
from videorecord import ScreenRecorder_QT
from audiorecord import AudioRecorder_QT
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import (
QMainWindow,
QApplication,
QPushButton,
QLCDNumber,
QDoubleSpinBox,
QSpinBox,
)
from PyQt5.QtCore import QTimer, QThreadPool
from PyQt5 import uic
import time
import sys
def timing_adjust(vrec, arec):
    """Return how many seconds the audio must be delayed to line up with video.

    Both recorder objects expose an ``elapsed_time`` attribute measured in
    seconds. A non-negative difference (video ran longer) is returned as the
    delay; a negative one is reported and clamped to 0.
    """
    delay = vrec.elapsed_time - arec.elapsed_time
    if delay >= 0:
        print("Adjust timing of audio by delaying (s)", delay)
        return delay
    print("WARNING: Your recorded audio time is shorter than video time.")
    return 0
def insert_silent(timing_adjustment, aoutput_name):
    """Prepend ``timing_adjustment`` seconds of silence to a WAV file in place.

    Used to delay the audio track so it starts in sync with the video.
    """
    # pydub works in milliseconds; round up so we never under-delay
    pad_ms = math.ceil(timing_adjustment * 1000)
    padding = AudioSegment.silent(duration=pad_ms)
    recorded = AudioSegment.from_wav(aoutput_name)
    # Concatenate silence + recording and overwrite the original file
    (padding + recorded).export(aoutput_name, format="wav")
def video_audio_merge(moutput_name, aoutput_name, voutput_name):
    """Mux the recorded audio and video files into the final output file.

    Args:
        moutput_name: destination path for the merged file (user-chosen).
        aoutput_name: path of the temporary WAV audio file.
        voutput_name: path of the temporary video file.

    Passes the paths as an argument list (no shell) so that paths containing
    spaces — e.g. a user-chosen save location — reach ffmpeg intact, and no
    shell metacharacters are interpreted.
    """
    cmd = [
        "ffmpeg.exe",
        "-y",
        "-i", aoutput_name,
        "-i", voutput_name,
        "-pix_fmt", "yuv420p",
        moutput_name,
    ]
    try:
        subprocess.call(cmd)
    except FileNotFoundError:
        # Keep the best-effort behavior of the old shell invocation:
        # report a missing ffmpeg instead of crashing the UI.
        print("ERROR: ffmpeg.exe not found; cannot merge audio and video.")
def video_convert(moutput_name, voutput_name):
    """Transcode the silent recording to the user-chosen output file.

    Args:
        moutput_name: destination path (user-chosen, may contain spaces).
        voutput_name: path of the temporary video file.

    Uses an argument list (no shell) so paths with spaces are passed to
    ffmpeg as single arguments instead of being split by the shell.
    """
    cmd = ["ffmpeg.exe", "-y", "-i", voutput_name, "-pix_fmt", "yuv420p", moutput_name]
    try:
        subprocess.call(cmd)
    except FileNotFoundError:
        # Report a missing ffmpeg instead of crashing the UI.
        print("ERROR: ffmpeg.exe not found; cannot convert video.")
def time_converter(t):
    """Format a tick count as "H:MM:SS.t" for the LCD display.

    The UI timer fires every 100 ms, so *t* counts tenths of a second
    (the original name ``millis`` was misleading).

    Fix: zero-pad minutes and seconds so 61.5 s renders "0:01:01.5"
    instead of the ambiguous "0:1:1.5".
    """
    tenths = int(t)
    seconds = (tenths // 10) % 60
    minutes = (tenths // 600) % 60
    hours = tenths // 36000
    return "{}:{:02d}:{:02d}.{}".format(hours, minutes, seconds, tenths % 10)
def encode_video(set_fps, recorded_fps, voutput_name):
    """Re-encode the capture from its measured frame rate to the requested one.

    Args:
        set_fps: target output frame rate chosen in the UI.
        recorded_fps: frame rate actually achieved during capture.
        voutput_name: path of the temporary video file (replaced in place).

    Fixes over the original:
    - argument-list subprocess call, so paths with spaces survive;
    - the original deleted the source video *before* checking that ffmpeg
      produced output, losing the recording on any ffmpeg failure. Now the
      swap only happens when re-encoding succeeded.
    """
    voutput_name = str(voutput_name)
    tmp_name = "re_" + voutput_name
    cmd = [
        "ffmpeg.exe", "-y",
        "-r", str(recorded_fps),
        "-i", voutput_name,
        "-pix_fmt", "yuv420p",
        "-r", str(set_fps),
        tmp_name,
    ]
    try:
        ret = subprocess.call(cmd)
    except FileNotFoundError:
        print("ERROR: ffmpeg.exe not found; keeping the original video.")
        return
    if ret == 0 and os.path.exists(tmp_name):
        # Atomic replace: removes the old file and renames in one step.
        os.replace(tmp_name, voutput_name)
    else:
        print("WARNING: re-encoding failed; keeping the original video.")
class UI(QMainWindow):
    """Main window of the screen/audio recorder.

    Loads the widget layout from ``recorder.ui``, wires the buttons to the
    video (``ScreenRecorder_QT``) and audio (``AudioRecorder_QT``) worker
    threads, and drives the elapsed-time LCD display.

    Fix over the original: cancelling the save dialog no longer hands an
    empty output path to ffmpeg (see ``clickedsaveBtn``).
    """

    def __init__(self):
        super(UI, self).__init__()
        uic.loadUi("recorder.ui", self)
        # Look up the widgets declared in the .ui (XML) file by object name
        self.timer_label = self.findChild(QLCDNumber, "lcdNumber")
        self.record_button = self.findChild(QPushButton, "recordpushButton")
        self.stop_button = self.findChild(QPushButton, "stoppushButton")
        self.save_button = self.findChild(QPushButton, "savepushButton")
        self.fps_spinbox = self.findChild(QDoubleSpinBox, "fpsdoubleSpinBox")
        self.index_spinbox = self.findChild(QSpinBox, "indexspinBox")
        # Connect widget signals to their handlers
        self.record_button.clicked.connect(self.clickedrecordBtn)
        self.stop_button.clicked.connect(self.clickedstopBtn)
        self.save_button.clicked.connect(self.clickedsaveBtn)
        self.fps_spinbox.valueChanged.connect(self.valuechangefps)
        self.index_spinbox.valueChanged.connect(self.valuechangeindex)
        # Initial button state: only recording can be started
        self.record_button.setEnabled(True)
        self.stop_button.setEnabled(False)
        self.save_button.setEnabled(False)
        # Temporary output video name and type
        self.voutput_name = "video.mp4"
        # Temporary output audio name and type
        self.aoutput_name = "audio.wav"
        self.fps = float(self.fps_spinbox.value())
        self.device_index = int(self.index_spinbox.value())
        # Thread pool that runs the two recorder workers concurrently
        self.threadpool = QThreadPool()
        self.components()
        self.audio_device_show()
        self.show()

    def audio_device_show(self):
        """Print every audio input device index/name so the user can pick one."""
        p = pyaudio.PyAudio()
        print("Number of device: ", p.get_device_count())
        for i in range(p.get_device_count()):
            device_info = p.get_device_info_by_index(i)
            print(
                "Index: {} has device name: {}".format(
                    device_info["index"], device_info["name"]
                )
            )

    def components(self):
        """Initialize the tick counter and start the 100 ms UI timer."""
        self.count = 0          # elapsed time in tenths of a second
        self.flag = False       # True while a recording is in progress
        self.timer_label.display(self.count)
        timer = QTimer(self)
        timer.timeout.connect(self.showTime)
        timer.start(100)  # every 0.1 s

    def showTime(self):
        """Timer callback: advance and redraw the clock while recording."""
        if self.flag:
            self.count += 1
            t = time_converter(self.count)
            self.timer_label.display(t)

    def clickedrecordBtn(self):
        """Start a fresh video+audio recording on the thread pool."""
        self.flag = True
        self.setWindowOpacity(0.5)  # make window transparent during recording
        self.record_button.setEnabled(False)  # turn off record button
        self.save_button.setEnabled(False)    # turn off save button
        self.count = 0
        # Make a new record - start video and audio recording
        self.threadpool.clear()
        # Create screen recorder object
        self.vrec = ScreenRecorder_QT(output_name=self.voutput_name, fps=self.fps)
        # Create audio recorder object
        self.arec = AudioRecorder_QT(
            output_name=self.aoutput_name,
            input_device_index=self.device_index,
            fps=self.fps,
        )
        self.threadpool.start(self.vrec)
        self.threadpool.start(self.arec)
        self.stop_button.setEnabled(True)  # turn on stop button

    def clickedstopBtn(self):
        """Signal both recorder workers to finish and re-enable the UI."""
        self.flag = False
        self.setWindowOpacity(1.0)  # make window visible again after recording
        # Stop video and audio recording: workers poll quit_flag and exit
        self.arec.quit_flag = True
        self.vrec.quit_flag = True
        self.threadpool.clear()
        self.record_button.setEnabled(True)   # turn on record button
        self.stop_button.setEnabled(False)    # turn off stop button
        self.save_button.setEnabled(True)     # turn on save button

    def clickedsaveBtn(self):
        """Ask for a destination, then re-encode/merge the temporary files."""
        self.flag = False
        self.setWindowOpacity(1.0)  # make window visible again after recording
        self.record_button.setEnabled(False)  # turn off record button
        self.stop_button.setEnabled(False)    # turn off stop button
        self.save_button.setEnabled(False)    # turn off save button
        self.count = 0
        self.timer_label.display(self.count)
        # Save to intended directory and input file name
        dialog = QtWidgets.QFileDialog()
        pathsave_custom = dialog.getSaveFileName(
            None, "Select destination folder and file name", "./", "mp4 files (*.mp4)"
        )[0]
        # BUG FIX: getSaveFileName returns "" when the user cancels the
        # dialog; the original passed that empty path straight to ffmpeg.
        # Bail out and let the user retry instead.
        if not pathsave_custom:
            print("Save cancelled.")
            self.record_button.setEnabled(True)
            self.save_button.setEnabled(True)
            return
        # Re-encode video according to measured video length
        if Path(self.voutput_name).is_file():
            encode_video(self.vrec.fps, self.vrec.recorded_fps, self.voutput_name)
        # Merge video and audio
        if len(self.arec.audio_frames) != 0 and Path(self.aoutput_name).is_file() and Path(self.voutput_name).is_file():
            timing_adjustment = timing_adjust(self.vrec, self.arec)
            insert_silent(timing_adjustment, self.aoutput_name)
            print("Merge video and audio......")
            video_audio_merge(pathsave_custom, self.aoutput_name, self.voutput_name)
        elif Path(self.voutput_name).is_file():
            print("No sound recorded. Transfer video......")
            video_convert(pathsave_custom, self.voutput_name)
        else:
            print("No sound and no video recorded.")
        self.record_button.setEnabled(True)   # turn on record button
        self.stop_button.setEnabled(False)    # turn off stop button
        self.save_button.setEnabled(False)    # turn off save button

    def valuechangefps(self):
        """Keep the capture fps in sync with the spin box."""
        self.fps = float(self.fps_spinbox.value())

    def valuechangeindex(self):
        """Keep the audio input device index in sync with the spin box."""
        self.device_index = int(self.index_spinbox.value())
def main():
    """Create the Qt application, show the recorder UI, and run the event loop."""
    app = QApplication(sys.argv)
    _window = UI()  # keep a reference so the window is not garbage-collected
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()