Commit 0a522a6

Upload code

YashSharma authored Apr 28, 2023
1 parent 2defb79 commit 0a522a6
Showing 4 changed files with 678 additions and 0 deletions.
382 changes: 382 additions & 0 deletions Interpolation.ipynb
@@ -0,0 +1,382 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import librosa\n",
"import numpy as np\n",
"from datasets import load_dataset\n",
"from IPython.display import Audio\n",
"from librosa.beat import beat_track\n",
"from diffusers import DiffusionPipeline\n",
"import pandas as pd\n",
"from diffusers import Mel\n",
"import scipy.io.wavfile\n",
"import random"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Load Model"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
"generator = torch.Generator(device=device)"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# Load the pretrained audio diffusion pipeline - https://github.com/teticio/audio-diffusion\n",
"audio_diffusion = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256').to(device)\n",
"mel = audio_diffusion.mel\n",
"sample_rate = mel.get_sample_rate()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Randomly Generate Data Pairs"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"df = pd.read_csv('/scratch/ys5hd/Riffusion/music/UrbanSound8K/metadata/UrbanSound8K_train.csv')\n",
"df = df[['slice_file_name', 'fsID', 'fold', 'classID']]"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3\n",
"2\n",
"1\n",
"0\n",
"9\n",
"8\n",
"5\n",
"4\n",
"7\n",
"6\n"
]
}
],
"source": [
"# For each class, sample 100 clips and pair each with another clip of the same\n",
"# class drawn from a different source recording (fsID)\n",
"sample_df = []\n",
"for cls in df['classID'].unique():\n",
"    print(cls)\n",
"    smp1 = df.loc[df['classID']==cls].reset_index(drop=True)\n",
"    smp2 = smp1.iloc[random.sample(list(range(smp1.shape[0])), 100)].reset_index(drop=True)\n",
"    smp2.columns = [x+'_1' for x in smp2.columns]\n",
"    smp2['slice_file_name_2'] = ''\n",
"\n",
"    # pick a partner clip of the same class but from a different recording\n",
"    for index, row in smp2.iterrows():\n",
"        smp2.loc[index, 'slice_file_name_2'] = \\\n",
"            random.sample(list(smp1.loc[smp1['fsID']!=row['fsID_1'], 'slice_file_name'].values), 1)[0]\n",
"\n",
"    # attach the partner clip's metadata (columns suffixed with _2)\n",
"    smp1.columns = [x+'_2' for x in smp1.columns]\n",
"    smp2 = pd.merge(smp1, smp2, on='slice_file_name_2')\n",
"\n",
"    sample_df.append(smp2)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"sample_df = pd.concat(sample_df)\n",
"sample_df.to_csv('sample_aug.csv', index=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Interpolate"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"def interpolate_audio(fname1, fname2, TARGET_PATH):\n",
"    # Spectrogram converter matching the 256x256 resolution of the pretrained model\n",
"    mel = Mel(x_res=256,\n",
"              y_res=256,\n",
"              hop_length=256,\n",
"              sample_rate=22050,\n",
"              n_fft=2048,\n",
"              n_iter=32)\n",
"\n",
"    # Convert the first slice of each audio file into a mel spectrogram image\n",
"    mel.load_audio(fname1)\n",
"    image = mel.audio_slice_to_image(0)\n",
"\n",
"    mel.load_audio(fname2)\n",
"    image2 = mel.audio_slice_to_image(0)\n",
"\n",
"    # Encode both spectrograms into the DDIM noise space\n",
"    noise = audio_diffusion.encode([image])\n",
"    noise2 = audio_diffusion.encode([image2])\n",
"\n",
"    # Spherically interpolate the two latents and decode back to audio\n",
"    alpha = 0.5 #@param {type:\"slider\", min:0, max:1, step:0.1}\n",
"    output = audio_diffusion(\n",
"        noise=audio_diffusion.slerp(noise, noise2, alpha),\n",
"        generator=generator)\n",
"    audio = output.audios[0, 0]\n",
"\n",
"    scipy.io.wavfile.write(TARGET_PATH, sample_rate, audio[:len(mel.audio)])\n",
"\n",
"    return\n",
"\n",
"# display(Audio(mel.image_to_audio(image), rate=sample_rate))\n",
"# display(Audio(mel.image_to_audio(image2), rate=sample_rate))\n",
"# display(Audio(audio[:len(mel.audio)], rate=sample_rate))"
]
},
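{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `audio_diffusion.slerp` call above blends the two encoded noise tensors by spherical linear interpolation rather than a plain weighted average, which keeps the blended latent close to the typical norm of Gaussian noise:\n",
"\n",
"$$\\mathrm{slerp}(x_0, x_1, \\alpha) = \\frac{\\sin((1-\\alpha)\\theta)}{\\sin\\theta}\\,x_0 + \\frac{\\sin(\\alpha\\theta)}{\\sin\\theta}\\,x_1, \\qquad \\theta = \\arccos\\left(\\frac{x_0 \\cdot x_1}{\\lVert x_0\\rVert \\, \\lVert x_1\\rVert}\\right)$$\n",
"\n",
"The cell below is only an illustrative sketch of this idea; `slerp_sketch` is not part of the pipeline, which ships its own `slerp`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch only: spherical linear interpolation between two latent\n",
"# tensors; the pipeline uses its own slerp implementation above.\n",
"def slerp_sketch(x0, x1, alpha):\n",
"    theta = torch.acos(torch.sum(x0 * x1) / (torch.norm(x0) * torch.norm(x1)))\n",
"    return (torch.sin((1 - alpha) * theta) * x0 + torch.sin(alpha * theta) * x1) / torch.sin(theta)\n",
"\n",
"# e.g. slerp_sketch(torch.randn(16), torch.randn(16), 0.5)"
]
},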
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Iterate through sample_df and generate a new interpolated audio clip for each pair\n",
"\n",
"TARGET_PATH = '/scratch/ys5hd/Riffusion/music/UrbanSound8K/audio/fold11/'\n",
"PATH = '/scratch/ys5hd/Riffusion/music/UrbanSound8K/audio/fold'\n",
"\n",
"for index, row in sample_df.iterrows():\n",
"    fname1 = PATH+str(row['fold_2'])+'/'+row['slice_file_name_2']\n",
"    fname2 = PATH+str(row['fold_1'])+'/'+row['slice_file_name_1']\n",
"\n",
"    interpolate_audio(fname1, fname2, \\\n",
"                      TARGET_PATH+row['slice_file_name_2'][:-4]+'_'+row['slice_file_name_1'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Generate New Training Metadata File for UrbanSound8K"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"- We add the generated audio clips to the training dataset as an additional fold (fold 11); the sketch below shows one way a synthetic fold can be handled downstream"
]
},
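{
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell below is only an illustrative sketch, not part of the original pipeline. Assuming the augmented metadata file `UrbanSound8K_train_aug.csv` written at the end of this notebook, it shows one way to build leave-one-fold-out splits in which the synthetic fold 11 is always available for training but never used for validation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch only (assumes UrbanSound8K_train_aug.csv has already been written by the\n",
"# final cell of this notebook): leave-one-fold-out splits where the synthetic\n",
"# fold 11 stays in every training portion and is never used for validation.\n",
"aug = pd.read_csv('/scratch/ys5hd/Riffusion/music/UrbanSound8K/metadata/UrbanSound8K_train_aug.csv')\n",
"real_folds = sorted(f for f in aug['fold'].unique() if f != 11)\n",
"for val_fold in real_folds:\n",
"    train_meta = aug.loc[aug['fold'] != val_fold]  # real folds + synthetic fold 11\n",
"    val_meta = aug.loc[aug['fold'] == val_fold]    # real clips only\n",
"    print(val_fold, train_meta.shape[0], val_meta.shape[0])"
]
},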
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
"df = pd.read_csv('/scratch/ys5hd/Riffusion/music/UrbanSound8K/metadata/UrbanSound8K_train.csv')"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>slice_file_name</th>\n",
" <th>fsID</th>\n",
" <th>start</th>\n",
" <th>end</th>\n",
" <th>salience</th>\n",
" <th>fold</th>\n",
" <th>classID</th>\n",
" <th>class</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>100032-3-0-0.wav</td>\n",
" <td>100032</td>\n",
" <td>0.0</td>\n",
" <td>0.317551</td>\n",
" <td>1</td>\n",
" <td>5</td>\n",
" <td>3</td>\n",
" <td>dog_bark</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>100263-2-0-117.wav</td>\n",
" <td>100263</td>\n",
" <td>58.5</td>\n",
" <td>62.500000</td>\n",
" <td>1</td>\n",
" <td>5</td>\n",
" <td>2</td>\n",
" <td>children_playing</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>100263-2-0-121.wav</td>\n",
" <td>100263</td>\n",
" <td>60.5</td>\n",
" <td>64.500000</td>\n",
" <td>1</td>\n",
" <td>5</td>\n",
" <td>2</td>\n",
" <td>children_playing</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>100263-2-0-126.wav</td>\n",
" <td>100263</td>\n",
" <td>63.0</td>\n",
" <td>67.000000</td>\n",
" <td>1</td>\n",
" <td>5</td>\n",
" <td>2</td>\n",
" <td>children_playing</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>100263-2-0-137.wav</td>\n",
" <td>100263</td>\n",
" <td>68.5</td>\n",
" <td>72.500000</td>\n",
" <td>1</td>\n",
" <td>5</td>\n",
" <td>2</td>\n",
" <td>children_playing</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" slice_file_name fsID start end salience fold classID \\\n",
"0 100032-3-0-0.wav 100032 0.0 0.317551 1 5 3 \n",
"1 100263-2-0-117.wav 100263 58.5 62.500000 1 5 2 \n",
"2 100263-2-0-121.wav 100263 60.5 64.500000 1 5 2 \n",
"3 100263-2-0-126.wav 100263 63.0 67.000000 1 5 2 \n",
"4 100263-2-0-137.wav 100263 68.5 72.500000 1 5 2 \n",
"\n",
" class \n",
"0 dog_bark \n",
"1 children_playing \n",
"2 children_playing \n",
"3 children_playing \n",
"4 children_playing "
]
},
"execution_count": 35,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {},
"outputs": [],
"source": [
"# File names must match those produced by interpolate_audio: <name2 stem>_<name1>\n",
"sample_df['slice_file_name'] = sample_df['slice_file_name_2'].str[:-4]+'_'+sample_df['slice_file_name_1']"
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {},
"outputs": [],
"source": [
"# Relabel the generated clips as a new fold (11); .copy() avoids a SettingWithCopyWarning\n",
"sdf = sample_df[['slice_file_name', 'classID_1', 'fold_1']].copy()\n",
"sdf.columns = ['slice_file_name', 'classID', 'fold']\n",
"sdf['fold'] = 11"
]
},
{
"cell_type": "code",
"execution_count": 51,
"metadata": {},
"outputs": [],
"source": [
"pd.concat([df, sdf]).to_csv('/scratch/ys5hd/Riffusion/music/UrbanSound8K/metadata/UrbanSound8K_train_aug.csv', index=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "riffusion",
"language": "python",
"name": "riffusion"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 4
}