Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions production/.idea/inspectionProfiles/Project_Default.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 7 additions & 0 deletions production/.idea/misc.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 8 additions & 0 deletions production/.idea/modules.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

11 changes: 11 additions & 0 deletions production/.idea/production.iml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

391 changes: 391 additions & 0 deletions production/.idea/workspace.xml

Large diffs are not rendered by default.

19 changes: 16 additions & 3 deletions production/listener.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
from multiprocessing.dummy import Queue, Process, Value
from aubio import notes, onset, tempo
from structures import Note, Chord
import wave

"""
1 beat in bpm is 1/4 of musical beat
Expand Down Expand Up @@ -38,6 +39,10 @@ def run_queue_in(listener):
input=True,
frames_per_buffer=buffer_size)

listener.wavefile.setnchannels(n_channels)
listener.wavefile.setsampwidth(p.get_sample_size(pyaudio_format))
listener.wavefile.setframerate(sample_rate / 2)

notes_o = notes("default", win_s, hop_s, sample_rate)
onset_o = onset("default", win_s, hop_s, sample_rate)
temp_o = aubio.tempo("specdiff", win_s, hop_s, sample_rate)
Expand All @@ -49,9 +54,14 @@ def run_queue_in(listener):
prev_time = 0
while (listener.runing.value):
# read data from audio input
audiobuffer = stream.read(buffer_size, exception_on_overflow = False)
audiobuffer = stream.read(buffer_size, exception_on_overflow=False)
samples = np.fromstring(audiobuffer, dtype=np.float32)

##voice recording
converted_samples = np.int16((samples + 1) * (2 ** 16))
listener.wavefile.writeframes(bytes(converted_samples))
##finish of voice recording

if (onset_o(samples)):
last_onset = onset_o.get_last_ms()
if (temp_o(samples)):
Expand All @@ -66,11 +76,13 @@ def run_queue_in(listener):
prev_time = last_onset

class Listener:
def __init__(self, queue=Queue(), runing=Value('i', False), tempo=Value('i', default_tempo), deadline=Value('f', max_time)):
def __init__(self, queue=Queue(), runing=Value('i', False), tempo=Value('i', default_tempo),
deadline=Value('f', max_time), name_of_voice="your_voice.wav"):
self.queue_in = queue
self.runing = runing
self.tempo = tempo
self.deadline = deadline
self.deadline = deadline
self.wavefile = wave.open(name_of_voice, "w")

def run(self):
self.runing.value = True
Expand All @@ -81,6 +93,7 @@ def stop(self):
self.runing.value = False
self.process.join()
self.queue_in = Queue()
self.wavefile.close()

def get(self):
if self.queue_in.empty() is False:
Expand Down
69 changes: 69 additions & 0 deletions production/tmp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@

Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Что это за файл?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Отдалённо напоминает юнит-тесты.

"""Record ~300 hops of microphone audio to a WAV file while detecting
note onsets with aubio and writing each detected note (plus its octave
double) into a MIDI file via mido."""

import pyaudio
import numpy as np
import aubio

from mido import Message, MidiFile, MidiTrack
import wave
from aubio import notes, onset, tempo


# Audio analysis parameters.
sample_rate = 44100
win_s = 2048           # FFT window size for aubio
hop_s = win_s // 4     # hop between analysis frames
buffer_size = hop_s    # read one hop of samples per stream.read()


p = pyaudio.PyAudio()
# open microphone input stream (mono float32)
pyaudio_format = pyaudio.paFloat32
n_channels = 1
stream = p.open(format=pyaudio_format,
                channels=n_channels,
                rate=sample_rate,
                input=True,
                frames_per_buffer=buffer_size)

# Open the WAV in binary mode ("wb", not "w") and record at the true
# capture rate: writing sample_rate / 2 here made playback half speed.
wavefile = wave.open("your_voice.wav", "wb")
wavefile.setnchannels(n_channels)
wavefile.setsampwidth(p.get_sample_size(pyaudio_format))
wavefile.setframerate(sample_rate)

notes_o = notes("default", win_s, hop_s, sample_rate)
onset_o = onset("default", win_s, hop_s, sample_rate)

max_frames = 300       # renamed from `max`, which shadowed the builtin
frame_count = 0
last_onset = 0         # timestamp (ms) of the most recent onset

mid = MidiFile()
tracks = []
num = 0                # index of the next MIDI track to create
try:
    while frame_count < max_frames:
        # read one hop of raw float32 bytes from the audio input
        audiobuffer = stream.read(buffer_size, exception_on_overflow=False)
        # np.frombuffer replaces the deprecated np.fromstring
        samples = np.frombuffer(audiobuffer, dtype=np.float32)

        # Convert [-1.0, 1.0] floats to 16-bit PCM.  The previous
        # (samples + 1) * 2**16 mapping overflowed int16 (max 32767)
        # and produced distorted output.
        pcm = np.int16(np.clip(samples, -1.0, 1.0) * 32767)
        wavefile.writeframes(pcm.tobytes())
        frame_count += 1

        if onset_o(samples):
            last_onset = int(onset_o.get_last_ms())
        new_note = notes_o(samples)
        if new_note[0] != 0:
            # one MIDI track per detected note: the note itself plus
            # the same note one octave up (wrapped into MIDI range)
            tracks.append(MidiTrack())
            mid.tracks.append(tracks[num])
            tracks[num].append(Message('program_change', program=33, time=10))
            nn = int(new_note[0])
            duration = int(new_note[1])
            tracks[num].append(Message('note_on', note=nn, velocity=124, time=last_onset))
            tracks[num].append(Message('note_off', note=nn, velocity=0, time=duration))
            tracks[num].append(Message('note_on', note=(nn + 12) % 128, velocity=124, time=last_onset))
            tracks[num].append(Message('note_off', note=(nn + 12) % 128, velocity=0, time=duration))
            num += 1
        print(frame_count)
finally:
    # release the audio device and flush the WAV even on interrupt/error
    stream.stop_stream()
    stream.close()
    p.terminate()
    wavefile.close()

mid.save('zmey.midi')
Binary file added production/your_voice.wav
Binary file not shown.
Binary file added production/zmey.midi
Binary file not shown.