From be4bf0ebfb910d62263f6aba4230323ec698e009 Mon Sep 17 00:00:00 2001
From: Aidan Pine
Date: Tue, 26 Nov 2024 23:59:51 +0000
Subject: [PATCH 1/7] chore: update submodule

---
 everyvoice/model/feature_prediction/FastSpeech2_lightning | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/everyvoice/model/feature_prediction/FastSpeech2_lightning b/everyvoice/model/feature_prediction/FastSpeech2_lightning
index 73b7d4b0..8d2ebc3a 160000
--- a/everyvoice/model/feature_prediction/FastSpeech2_lightning
+++ b/everyvoice/model/feature_prediction/FastSpeech2_lightning
@@ -1 +1 @@
-Subproject commit 73b7d4b0475c1709bbe819a8b960ffbac9d5fa08
+Subproject commit 8d2ebc3a6bee5dc15bac8aeef32d69ed782593bd

From d0f4bf17374d261effb651efff0337185083fb63 Mon Sep 17 00:00:00 2001
From: Aidan Pine
Date: Wed, 27 Nov 2024 00:00:22 +0000
Subject: [PATCH 2/7] fix(demo): return filepath audio

---
 everyvoice/demo/app.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/everyvoice/demo/app.py b/everyvoice/demo/app.py
index f56bc7cd..1501d37a 100644
--- a/everyvoice/demo/app.py
+++ b/everyvoice/demo/app.py
@@ -37,6 +37,7 @@ def synthesize_audio(
     language,
     speaker,
     output_format,
+    style_reference,
     text_to_spec_model,
     vocoder_model,
     vocoder_config,
@@ -71,6 +72,7 @@
         raise gr.Error("Speaker is not selected. Please select an output format.")
     config, device, predictions, callbacks = synthesize_helper(
         model=text_to_spec_model,
+        style_reference=style_reference,
         vocoder_model=vocoder_model,
         vocoder_config=vocoder_config,
         texts=[text],
@@ -289,6 +291,11 @@ def create_demo_app(
             interactive=interactive_output,
             label="Output Format",
         )
+        if model.config.model.use_global_style_token_module:
+            with gr.Row():
+                style_reference = gr.Audio(type="filepath")
+        else:
+            style_reference = None
         btn = gr.Button("Synthesize")
     with gr.Column():
         out_audio = gr.Audio(format="wav")
@@ -300,7 +307,7 @@ def create_demo_app(
         outputs = [out_audio, out_file]
     btn.click(
         synthesize_audio_preset,
-        inputs=[inp_text, inp_slider, inp_lang, inp_speak, output_format],
-        outputs=outputs,
+        inputs=[inp_text, inp_slider, inp_lang, inp_speak, output_format, style_reference],
+        outputs=[out_audio],
     )
     return demo

From 86f6d198bc391be35730286dac9b97270ab8dffc Mon Sep 17 00:00:00 2001
From: Aidan Pine
Date: Wed, 27 Nov 2024 00:11:22 +0000
Subject: [PATCH 3/7] fix: only include the style reference input if the model
 supports it

---
 everyvoice/demo/app.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/everyvoice/demo/app.py b/everyvoice/demo/app.py
index 1501d37a..819e6a31 100644
--- a/everyvoice/demo/app.py
+++ b/everyvoice/demo/app.py
@@ -294,8 +294,6 @@ def create_demo_app(
         if model.config.model.use_global_style_token_module:
             with gr.Row():
                 style_reference = gr.Audio(type="filepath")
-        else:
-            style_reference = None
         btn = gr.Button("Synthesize")
     with gr.Column():
         out_audio = gr.Audio(format="wav")
@@ -305,9 +303,17 @@ def create_demo_app(
     else:
         out_file = gr.File(label="File Output")
         outputs = [out_audio, out_file]
+    inputs = [inp_text, inp_slider, inp_lang, inp_speak, output_format]
+    # Only include the style reference input if the model supports it
+    if model.config.model.use_global_style_token_module:
+        inputs.append(style_reference)
+    else:
+        synthesize_audio_preset = partial(
+            synthesize_audio_preset, style_reference=None
+        )
     btn.click(
         synthesize_audio_preset,
-        inputs=[inp_text, inp_slider, inp_lang, inp_speak, output_format, style_reference],
+        inputs=inputs,
         outputs=[out_audio],
     )
     return demo

From 09a0ef7ee49788f6bfd066d68f019a9f1abb487f Mon Sep 17 00:00:00 2001
From: Aidan Pine
Date: Tue, 14 Jan 2025 12:48:41 -0800
Subject: [PATCH 4/7] fix(ci): fix mypy reference

---
 everyvoice/demo/app.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/everyvoice/demo/app.py b/everyvoice/demo/app.py
index 819e6a31..31468b61 100644
--- a/everyvoice/demo/app.py
+++ b/everyvoice/demo/app.py
@@ -306,7 +306,7 @@ def create_demo_app(
     inputs = [inp_text, inp_slider, inp_lang, inp_speak, output_format]
     # Only include the style reference input if the model supports it
     if model.config.model.use_global_style_token_module:
-        inputs.append(style_reference)
+        inputs.append(style_reference)  # type: ignore
     else:
         synthesize_audio_preset = partial(
             synthesize_audio_preset, style_reference=None

From 308565f193698108b5cae03f3fb79aee895979af Mon Sep 17 00:00:00 2001
From: Aidan Pine
Date: Tue, 14 Jan 2025 13:07:19 -0800
Subject: [PATCH 5/7] chore: bump version to 0.3 due to gst module and update
 schemas

---
 .../.schema/everyvoice-aligner-0.3.json       |  850 ++++++
 .../.schema/everyvoice-shared-data-0.3.json   |  233 ++
 .../.schema/everyvoice-shared-text-0.3.json   |  137 +
 .../.schema/everyvoice-spec-to-wav-0.3.json   |  846 ++++++
 .../.schema/everyvoice-text-to-spec-0.3.json  | 1122 +++++++
 .../.schema/everyvoice-text-to-wav-0.3.json   | 2579 +++++++++++++++++
 everyvoice/_version.py                        |    2 +-
 7 files changed, 5768 insertions(+), 1 deletion(-)
 create mode 100644 everyvoice/.schema/everyvoice-aligner-0.3.json
 create mode 100644 everyvoice/.schema/everyvoice-shared-data-0.3.json
 create mode 100644 everyvoice/.schema/everyvoice-shared-text-0.3.json
 create mode 100644 everyvoice/.schema/everyvoice-spec-to-wav-0.3.json
 create mode 100644 everyvoice/.schema/everyvoice-text-to-spec-0.3.json
 create mode 100644 everyvoice/.schema/everyvoice-text-to-wav-0.3.json

diff --git a/everyvoice/.schema/everyvoice-aligner-0.3.json b/everyvoice/.schema/everyvoice-aligner-0.3.json
new file mode 100644
index 00000000..6b9fc6ed
--- /dev/null
+++ b/everyvoice/.schema/everyvoice-aligner-0.3.json
@@ -0,0 +1,850 @@
+{
+  "$defs": {
+    "AdamOptimizer": {
+      "$schema": "http://json-schema.org/draft-07/schema#",
+      "additionalProperties": false,
+      "properties": {
+        "learning_rate": {
+          "default": 0.0001,
+          "description": "The initial learning rate to use",
+          "title": "Learning Rate",
+          "type": "number"
+        },
+        "eps": {
+          "default": 1e-08,
+          "description": "Advanced. The value of optimizer constant Epsilon, used for numerical stability.",
+          "title": "Eps",
+          "type": "number"
+        },
+        "weight_decay": {
+          "default": 0.01,
+          "title": "Weight Decay",
+          "type": "number"
+        },
+        "betas": {
+          "default": [
+            0.9,
+            0.98
+          ],
+          "description": "Advanced. The values of the Adam Optimizer beta coefficients.",
+          "maxItems": 2,
+          "minItems": 2,
+          "prefixItems": [
+            {
+              "type": "number"
+            },
+            {
+              "type": "number"
+            }
+          ],
+          "title": "Betas",
+          "type": "array"
+        },
+        "name": {
+          "default": "adam",
+          "description": "The name of the optimizer to use.",
+          "title": "Name",
+          "type": "string"
+        }
+      },
+      "title": "AdamOptimizer",
+      "type": "object"
+    },
+    "AdamWOptimizer": {
+      "$schema": "http://json-schema.org/draft-07/schema#",
+      "additionalProperties": false,
+      "properties": {
+        "learning_rate": {
+          "default": 0.0001,
+          "description": "The initial learning rate to use",
+          "title": "Learning Rate",
+          "type": "number"
+        },
+        "eps": {
+          "default": 1e-08,
+          "description": "Advanced. 
The value of optimizer constant Epsilon, used for numerical stability.", + "title": "Eps", + "type": "number" + }, + "weight_decay": { + "default": 0.01, + "title": "Weight Decay", + "type": "number" + }, + "betas": { + "default": [ + 0.9, + 0.98 + ], + "description": "Advanced. The values of the AdamW Optimizer beta coefficients.", + "maxItems": 2, + "minItems": 2, + "prefixItems": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "title": "Betas", + "type": "array" + }, + "name": { + "default": "adamw", + "description": "The name of the optimizer to use.", + "title": "Name", + "type": "string" + } + }, + "title": "AdamWOptimizer", + "type": "object" + }, + "AudioConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "min_audio_length": { + "default": 0.4, + "description": "The minimum length of an audio sample in seconds. Audio shorter than this will be ignored during preprocessing.", + "title": "Min Audio Length", + "type": "number" + }, + "max_audio_length": { + "default": 11.0, + "description": "The maximum length of an audio sample in seconds. Audio longer than this will be ignored during preprocessing. Increasing the max_audio_length will result in larger memory usage. If you are running out of memory, consider lowering the max_audio_length.", + "title": "Max Audio Length", + "type": "number" + }, + "max_wav_value": { + "default": 32767.0, + "description": "Advanced. The maximum value allowed to be in your wav files. For 16-bit audio, this should be (2**16)/2 - 1.", + "title": "Max Wav Value", + "type": "number" + }, + "input_sampling_rate": { + "default": 22050, + "description": "The sampling rate describes the number of samples per second of audio. The 'input_sampling_rate' is with respect to your vocoder, or spec-to-wav model. This means that the spectrograms predicted by your text-to-spec model will also be calculated from audio at this sampling rate. If you change this value, your audio will automatically be re-sampled during preprocessing.", + "title": "Input Sampling Rate", + "type": "integer" + }, + "output_sampling_rate": { + "default": 22050, + "description": "Advanced. The sampling rate describes the number of samples per second of audio. The 'output_sampling_rate' is with respect to your vocoder, or spec-to-wav model. This means that the wav files generated by your vocoder or spec-to-wav model will be at this sampling rate. If you change this value, you will also need to change the upsample rates in your vocoder. Your audio will automatically be re-sampled during preprocessing.", + "title": "Output Sampling Rate", + "type": "integer" + }, + "alignment_sampling_rate": { + "default": 22050, + "description": "Advanced. The sampling rate describes the number of samples per second of audio. The 'alignment_sampling_rate' describes the sampling rate used when training an alignment model. If you change this value, your audio will automatically be re-sampled during preprocessing.", + "title": "Alignment Sampling Rate", + "type": "integer" + }, + "target_bit_depth": { + "default": 16, + "description": "Advanced. This is the bit depth of each sample in your audio files.", + "title": "Target Bit Depth", + "type": "integer" + }, + "n_fft": { + "default": 1024, + "description": "Advanced. This is the number of bins used by the Fast Fourier Transform (FFT).", + "title": "FFT Size", + "type": "integer" + }, + "fft_window_size": { + "default": 1024, + "description": "Advanced. 
This is the window size used by the Fast Fourier Transform (FFT).", + "title": "FFT Window Size", + "type": "integer" + }, + "fft_hop_size": { + "default": 256, + "description": "Advanced. This is the hop size for calculating the Short-Time Fourier Transform (STFT) which calculates a sequence of spectrograms from a single audio file. Another way of putting it is that the hop size is equal to the amount of non-intersecting samples from the audio in each spectrogram.", + "title": "FFT Hop Size", + "type": "integer" + }, + "f_min": { + "default": 0, + "description": "Advanced. This is the minimum frequency for the lowest frequency bin when calculating the spectrogram.", + "title": "Minimum Frequency", + "type": "integer" + }, + "f_max": { + "default": 8000, + "description": "Advanced. This is the maximum frequency for the highest frequency bin when calculating the spectrogram.", + "title": "Maximum Frequency", + "type": "integer" + }, + "n_mels": { + "default": 80, + "description": "Advanced. This is the number of filters in the Mel-scale spaced filterbank.", + "title": "Number of Mel bins", + "type": "integer" + }, + "spec_type": { + "anyOf": [ + { + "$ref": "#/$defs/AudioSpecTypeEnum" + }, + { + "type": "string" + } + ], + "default": "mel-librosa", + "description": "Advanced. Defines how to calculate the spectrogram. 'mel' uses the TorchAudio implementation for a Mel spectrogram. 'mel-librosa' uses Librosa's implementation. 'linear' calculates a non-Mel linear spectrogram and 'raw' calculates a complex-valued spectrogram. 'linear' and 'raw' are not currently supported by EveryVoice. We recommend using 'mel-librosa'.", + "title": "Spec Type" + }, + "vocoder_segment_size": { + "default": 8192, + "description": "Advanced. The vocoder, or spec-to-wav model is trained by sampling random fixed-size sections of the audio. 
This value specifies the number of samples in those sections.", + "title": "Vocoder Segment Size", + "type": "integer" + } + }, + "title": "AudioConfig", + "type": "object" + }, + "AudioSpecTypeEnum": { + "enum": [ + "mel", + "mel-librosa", + "linear", + "raw" + ], + "title": "AudioSpecTypeEnum", + "type": "string" + }, + "ContactInformation": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact_name": { + "description": "The name of the contact person or organization responsible for answering questions related to this model.", + "title": "Contact Name", + "type": "string" + }, + "contact_email": { + "description": "The email address of the contact person or organization responsible for answering questions related to this model.", + "format": "email", + "title": "Contact Email", + "type": "string" + } + }, + "required": [ + "contact_name", + "contact_email" + ], + "title": "ContactInformation", + "type": "object" + }, + "DFAlignerExtractionMethod": { + "enum": [ + "beam", + "dijkstra" + ], + "title": "DFAlignerExtractionMethod", + "type": "string" + }, + "DFAlignerModelConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "target_text_representation_level": { + "allOf": [ + { + "$ref": "#/$defs/TargetTrainingTextRepresentationLevel" + } + ], + "default": "characters" + }, + "lstm_dim": { + "default": 512, + "description": "The number of dimensions in the LSTM layers.", + "title": "Lstm Dim", + "type": "integer" + }, + "conv_dim": { + "default": 512, + "description": "The number of dimensions in the convolutional layers.", + "title": "Conv Dim", + "type": "integer" + } + }, + "title": "DFAlignerModelConfig", + "type": "object" + }, + "DFAlignerTrainingConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "batch_size": { + "default": 16, + "description": "The number of samples to include in each batch when training. If you are running out of memory, consider lowering your batch_size.", + "title": "Batch Size", + "type": "integer" + }, + "save_top_k_ckpts": { + "default": 5, + "description": "The number of checkpoints to save.", + "title": "Save Top K Ckpts", + "type": "integer" + }, + "ckpt_steps": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The interval (in steps) for saving a checkpoint. By default checkpoints are saved every epoch using the 'ckpt_epochs' hyperparameter", + "title": "Ckpt Steps" + }, + "ckpt_epochs": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "The interval (in epochs) for saving a checkpoint. You can also save checkpoints after n steps by using 'ckpt_steps'", + "title": "Ckpt Epochs" + }, + "val_check_interval": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 500, + "description": "How often to check the validation set. Pass a float in the range [0.0, 1.0] to check after a fraction of the training epoch. Pass an int to check after a fixed number of training batches.", + "title": "Val Check Interval" + }, + "check_val_every_n_epoch": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Run validation after every n epochs. 
Defaults to 1, but if you have a small dataset you should change this to be larger to speed up training", + "title": "Check Val Every N Epoch" + }, + "max_epochs": { + "default": 1000, + "description": "Stop training after this many epochs", + "title": "Max Epochs", + "type": "integer" + }, + "max_steps": { + "default": 100000, + "description": "Stop training after this many steps", + "title": "Max Steps", + "type": "integer" + }, + "finetune_checkpoint": { + "anyOf": [ + { + "format": "path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Automatically resume training from a checkpoint loaded from this path.", + "title": "Finetune Checkpoint" + }, + "training_filelist": { + "default": "path/to/your/preprocessed/training_filelist.psv", + "description": "The path to a filelist containing samples belonging to your training set.", + "format": "path", + "title": "Training Filelist", + "type": "string" + }, + "validation_filelist": { + "default": "path/to/your/preprocessed/validation_filelist.psv", + "description": "The path to a filelist containing samples belonging to your validation set.", + "format": "path", + "title": "Validation Filelist", + "type": "string" + }, + "filelist_loader": { + "description": "Advanced. The function to use to load the filelist.", + "title": "Filelist Loader", + "type": "string" + }, + "logger": { + "allOf": [ + { + "$ref": "#/$defs/LoggerConfig" + } + ], + "description": "The configuration for the logger." + }, + "val_data_workers": { + "default": 0, + "description": "The number of CPU workers to use when loading data during validation.", + "title": "Val Data Workers", + "type": "integer" + }, + "train_data_workers": { + "default": 4, + "description": "The number of CPU workers to use when loading data during training.", + "title": "Train Data Workers", + "type": "integer" + }, + "optimizer": { + "anyOf": [ + { + "$ref": "#/$defs/AdamOptimizer" + }, + { + "$ref": "#/$defs/AdamWOptimizer" + } + ], + "description": "Optimizer configuration settings.", + "title": "Optimizer" + }, + "binned_sampler": { + "default": true, + "description": "Use a binned length sampler", + "title": "Binned Sampler", + "type": "boolean" + }, + "plot_steps": { + "default": 1000, + "description": "The maximum number of steps to plot", + "title": "Plot Steps", + "type": "integer" + }, + "extraction_method": { + "allOf": [ + { + "$ref": "#/$defs/DFAlignerExtractionMethod" + } + ], + "default": "dijkstra", + "description": "The alignment extraction algorithm to use. 'beam' will be quicker but possibly less accurate than 'dijkstra'" + } + }, + "title": "DFAlignerTrainingConfig", + "type": "object" + }, + "Dataset": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "label": { + "default": "YourDataSet", + "description": "A label for the source of data", + "title": "Label", + "type": "string" + }, + "permissions_obtained": { + "default": false, + "description": "An attestation that permission has been obtained to use this data. You may not use EveryVoice to build a TTS system with data that you do not have permission to use and there are serious possible consequences for doing so. Finding data online does not constitute permission. 
The speaker should be aware and consent to their data being used in this way.", + "title": "Permissions Obtained", + "type": "boolean" + }, + "data_dir": { + "default": "/please/create/a/path/to/your/dataset/data", + "description": "The path to the directory with your audio files.", + "format": "path", + "title": "Data Dir", + "type": "string" + }, + "filelist": { + "default": "/please/create/a/path/to/your/dataset/filelist", + "description": "The path to your dataset's filelist.", + "format": "path", + "title": "Filelist", + "type": "string" + }, + "filelist_loader": { + "description": "Advanced. The file-loader function to use to load your dataset's filelist.", + "title": "Filelist Loader", + "type": "string" + }, + "sox_effects": { + "default": [ + [ + "channels", + "1" + ] + ], + "description": "Advanced. A list of SoX effects to apply to your audio prior to preprocessing. Run python -c 'import torchaudio; print(torchaudio.sox_effects.effect_names())' to see a list of supported effects.", + "items": {}, + "title": "Sox Effects", + "type": "array" + } + }, + "title": "Dataset", + "type": "object" + }, + "LoggerConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "description": "The logger configures all the information needed for where to store your experiment's logs and checkpoints.\nThe structure of your logs will then be:\n / / \n will be generated by calling each time the LoggerConfig is constructed.", + "properties": { + "name": { + "default": "BaseExperiment", + "description": "The name of the experiment. The structure of your logs will be / / .", + "title": "Experiment Name", + "type": "string" + }, + "save_dir": { + "default": "logs_and_checkpoints", + "description": "The directory to save your checkpoints and logs to.", + "format": "path", + "title": "Save Dir", + "type": "string" + }, + "sub_dir_callable": { + "description": "The function that generates a string to call your runs - by default this is a timestamp. The structure of your logs will be / / where is a timestamp.", + "title": "Sub Dir Callable", + "type": "string" + }, + "version": { + "default": "base", + "description": "The version of your experiment. The structure of your logs will be / / .", + "title": "Version", + "type": "string" + } + }, + "title": "LoggerConfig", + "type": "object" + }, + "PreprocessingConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "dataset": { + "default": "YourDataSet", + "description": "The name of the dataset.", + "title": "Dataset", + "type": "string" + }, + "train_split": { + "default": 0.9, + "description": "The amount of the dataset to use for training. The rest will be used as validation. Hold some of the validation set out for a test set if you are performing experiments.", + "maximum": 1.0, + "minimum": 0.0, + "title": "Train Split", + "type": "number" + }, + "dataset_split_seed": { + "default": 1234, + "description": "The seed to use when splitting the dataset into train and validation sets.", + "title": "Dataset Split Seed", + "type": "integer" + }, + "save_dir": { + "default": "preprocessed/YourDataSet", + "description": "The directory to save preprocessed files to.", + "format": "path", + "title": "Save Dir", + "type": "string" + }, + "audio": { + "allOf": [ + { + "$ref": "#/$defs/AudioConfig" + } + ], + "description": "Configuration settings for audio." 
+ }, + "path_to_audio_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path to an audio configuration file.", + "title": "Path To Audio Config File" + }, + "source_data": { + "description": "A list of datasets.", + "items": { + "$ref": "#/$defs/Dataset" + }, + "title": "Source Data", + "type": "array" + } + }, + "title": "PreprocessingConfig", + "type": "object" + }, + "Punctuation": { + "properties": { + "exclamations": { + "default": [ + "!", + "\u00a1" + ], + "description": "Exclamation punctuation symbols used in your datasets. Replaces these symbols with internally.", + "items": { + "type": "string" + }, + "title": "Exclamations", + "type": "array" + }, + "question_symbols": { + "default": [ + "?", + "\u00bf" + ], + "description": "Question/interrogative punctuation symbols used in your datasets. Replaces these symbols with internally.", + "items": { + "type": "string" + }, + "title": "Question Symbols", + "type": "array" + }, + "quotemarks": { + "default": [ + "\"", + "'", + "\u201c", + "\u201d", + "\u00ab", + "\u00bb" + ], + "description": "Quotemark punctuation symbols used in your datasets. Replaces these symbols with internally.", + "items": { + "type": "string" + }, + "title": "Quotemarks", + "type": "array" + }, + "big_breaks": { + "default": [ + ".", + ":", + ";" + ], + "description": "Punctuation symbols indicating a 'big break' used in your datasets. Replaces these symbols with internally.", + "items": { + "type": "string" + }, + "title": "Big Breaks", + "type": "array" + }, + "small_breaks": { + "default": [ + ",", + "-", + "\u2014" + ], + "description": "Punctuation symbols indicating a 'small break' used in your datasets. Replaces these symbols with internally.", + "items": { + "type": "string" + }, + "title": "Small Breaks", + "type": "array" + }, + "ellipsis": { + "default": [ + "\u2026" + ], + "description": "Punctuation symbols indicating an ellipsis used in your datasets. Replaces these symbols with internally.", + "items": { + "type": "string" + }, + "title": "Ellipsis", + "type": "array" + } + }, + "title": "Punctuation", + "type": "object" + }, + "Symbols": { + "additionalProperties": true, + "properties": { + "silence": { + "default": [ + "" + ], + "description": "The symbol(s) used to indicate silence.", + "items": { + "type": "string" + }, + "title": "Silence", + "type": "array" + }, + "punctuation": { + "allOf": [ + { + "$ref": "#/$defs/Punctuation" + } + ], + "description": "EveryVoice will combine punctuation and normalize it into a set of five permissible types of punctuation to help tractable training." 
+ } + }, + "title": "Symbols", + "type": "object" + }, + "TargetTrainingTextRepresentationLevel": { + "enum": [ + "characters", + "phones", + "phonological_features" + ], + "title": "TargetTrainingTextRepresentationLevel", + "type": "string" + }, + "TextConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "symbols": { + "$ref": "#/$defs/Symbols" + }, + "to_replace": { + "additionalProperties": { + "type": "string" + }, + "default": {}, + "title": "To Replace", + "type": "object" + }, + "cleaners": { + "items": { + "type": "string" + }, + "title": "Cleaners", + "type": "array" + } + }, + "title": "TextConfig", + "type": "object" + } + }, + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact": { + "allOf": [ + { + "$ref": "#/$defs/ContactInformation" + } + ], + "description": "EveryVoice requires a contact name and email to help prevent misuse. Please read our Guide to understand more about the importance of misuse prevention with TTS." + }, + "VERSION": { + "default": "1.0", + "title": "Version", + "type": "string" + }, + "model": { + "allOf": [ + { + "$ref": "#/$defs/DFAlignerModelConfig" + } + ], + "description": "The model configuration settings." + }, + "path_to_model_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a preprocessing configuration file.", + "title": "Path To Model Config File" + }, + "training": { + "allOf": [ + { + "$ref": "#/$defs/DFAlignerTrainingConfig" + } + ], + "description": "The training configuration hyperparameters." + }, + "path_to_training_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a preprocessing configuration file.", + "title": "Path To Training Config File" + }, + "preprocessing": { + "allOf": [ + { + "$ref": "#/$defs/PreprocessingConfig" + } + ], + "description": "The preprocessing configuration, including information about audio settings." + }, + "path_to_preprocessing_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a preprocessing configuration file.", + "title": "Path To Preprocessing Config File" + }, + "text": { + "$ref": "#/$defs/TextConfig" + }, + "path_to_text_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Path To Text Config File" + } + }, + "required": [ + "contact" + ], + "title": "DFAlignerConfig", + "type": "object" +} diff --git a/everyvoice/.schema/everyvoice-shared-data-0.3.json b/everyvoice/.schema/everyvoice-shared-data-0.3.json new file mode 100644 index 00000000..52d3240b --- /dev/null +++ b/everyvoice/.schema/everyvoice-shared-data-0.3.json @@ -0,0 +1,233 @@ +{ + "$defs": { + "AudioConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "min_audio_length": { + "default": 0.4, + "description": "The minimum length of an audio sample in seconds. Audio shorter than this will be ignored during preprocessing.", + "title": "Min Audio Length", + "type": "number" + }, + "max_audio_length": { + "default": 11.0, + "description": "The maximum length of an audio sample in seconds. 
Audio longer than this will be ignored during preprocessing. Increasing the max_audio_length will result in larger memory usage. If you are running out of memory, consider lowering the max_audio_length.", + "title": "Max Audio Length", + "type": "number" + }, + "max_wav_value": { + "default": 32767.0, + "description": "Advanced. The maximum value allowed to be in your wav files. For 16-bit audio, this should be (2**16)/2 - 1.", + "title": "Max Wav Value", + "type": "number" + }, + "input_sampling_rate": { + "default": 22050, + "description": "The sampling rate describes the number of samples per second of audio. The 'input_sampling_rate' is with respect to your vocoder, or spec-to-wav model. This means that the spectrograms predicted by your text-to-spec model will also be calculated from audio at this sampling rate. If you change this value, your audio will automatically be re-sampled during preprocessing.", + "title": "Input Sampling Rate", + "type": "integer" + }, + "output_sampling_rate": { + "default": 22050, + "description": "Advanced. The sampling rate describes the number of samples per second of audio. The 'output_sampling_rate' is with respect to your vocoder, or spec-to-wav model. This means that the wav files generated by your vocoder or spec-to-wav model will be at this sampling rate. If you change this value, you will also need to change the upsample rates in your vocoder. Your audio will automatically be re-sampled during preprocessing.", + "title": "Output Sampling Rate", + "type": "integer" + }, + "alignment_sampling_rate": { + "default": 22050, + "description": "Advanced. The sampling rate describes the number of samples per second of audio. The 'alignment_sampling_rate' describes the sampling rate used when training an alignment model. If you change this value, your audio will automatically be re-sampled during preprocessing.", + "title": "Alignment Sampling Rate", + "type": "integer" + }, + "target_bit_depth": { + "default": 16, + "description": "Advanced. This is the bit depth of each sample in your audio files.", + "title": "Target Bit Depth", + "type": "integer" + }, + "n_fft": { + "default": 1024, + "description": "Advanced. This is the number of bins used by the Fast Fourier Transform (FFT).", + "title": "FFT Size", + "type": "integer" + }, + "fft_window_size": { + "default": 1024, + "description": "Advanced. This is the window size used by the Fast Fourier Transform (FFT).", + "title": "FFT Window Size", + "type": "integer" + }, + "fft_hop_size": { + "default": 256, + "description": "Advanced. This is the hop size for calculating the Short-Time Fourier Transform (STFT) which calculates a sequence of spectrograms from a single audio file. Another way of putting it is that the hop size is equal to the amount of non-intersecting samples from the audio in each spectrogram.", + "title": "FFT Hop Size", + "type": "integer" + }, + "f_min": { + "default": 0, + "description": "Advanced. This is the minimum frequency for the lowest frequency bin when calculating the spectrogram.", + "title": "Minimum Frequency", + "type": "integer" + }, + "f_max": { + "default": 8000, + "description": "Advanced. This is the maximum frequency for the highest frequency bin when calculating the spectrogram.", + "title": "Maximum Frequency", + "type": "integer" + }, + "n_mels": { + "default": 80, + "description": "Advanced. 
This is the number of filters in the Mel-scale spaced filterbank.", + "title": "Number of Mel bins", + "type": "integer" + }, + "spec_type": { + "anyOf": [ + { + "$ref": "#/$defs/AudioSpecTypeEnum" + }, + { + "type": "string" + } + ], + "default": "mel-librosa", + "description": "Advanced. Defines how to calculate the spectrogram. 'mel' uses the TorchAudio implementation for a Mel spectrogram. 'mel-librosa' uses Librosa's implementation. 'linear' calculates a non-Mel linear spectrogram and 'raw' calculates a complex-valued spectrogram. 'linear' and 'raw' are not currently supported by EveryVoice. We recommend using 'mel-librosa'.", + "title": "Spec Type" + }, + "vocoder_segment_size": { + "default": 8192, + "description": "Advanced. The vocoder, or spec-to-wav model is trained by sampling random fixed-size sections of the audio. This value specifies the number of samples in those sections.", + "title": "Vocoder Segment Size", + "type": "integer" + } + }, + "title": "AudioConfig", + "type": "object" + }, + "AudioSpecTypeEnum": { + "enum": [ + "mel", + "mel-librosa", + "linear", + "raw" + ], + "title": "AudioSpecTypeEnum", + "type": "string" + }, + "Dataset": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "label": { + "default": "YourDataSet", + "description": "A label for the source of data", + "title": "Label", + "type": "string" + }, + "permissions_obtained": { + "default": false, + "description": "An attestation that permission has been obtained to use this data. You may not use EveryVoice to build a TTS system with data that you do not have permission to use and there are serious possible consequences for doing so. Finding data online does not constitute permission. The speaker should be aware and consent to their data being used in this way.", + "title": "Permissions Obtained", + "type": "boolean" + }, + "data_dir": { + "default": "/please/create/a/path/to/your/dataset/data", + "description": "The path to the directory with your audio files.", + "format": "path", + "title": "Data Dir", + "type": "string" + }, + "filelist": { + "default": "/please/create/a/path/to/your/dataset/filelist", + "description": "The path to your dataset's filelist.", + "format": "path", + "title": "Filelist", + "type": "string" + }, + "filelist_loader": { + "description": "Advanced. The file-loader function to use to load your dataset's filelist.", + "title": "Filelist Loader", + "type": "string" + }, + "sox_effects": { + "default": [ + [ + "channels", + "1" + ] + ], + "description": "Advanced. A list of SoX effects to apply to your audio prior to preprocessing. Run python -c 'import torchaudio; print(torchaudio.sox_effects.effect_names())' to see a list of supported effects.", + "items": {}, + "title": "Sox Effects", + "type": "array" + } + }, + "title": "Dataset", + "type": "object" + } + }, + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "dataset": { + "default": "YourDataSet", + "description": "The name of the dataset.", + "title": "Dataset", + "type": "string" + }, + "train_split": { + "default": 0.9, + "description": "The amount of the dataset to use for training. The rest will be used as validation. 
Hold some of the validation set out for a test set if you are performing experiments.", + "maximum": 1.0, + "minimum": 0.0, + "title": "Train Split", + "type": "number" + }, + "dataset_split_seed": { + "default": 1234, + "description": "The seed to use when splitting the dataset into train and validation sets.", + "title": "Dataset Split Seed", + "type": "integer" + }, + "save_dir": { + "default": "preprocessed/YourDataSet", + "description": "The directory to save preprocessed files to.", + "format": "path", + "title": "Save Dir", + "type": "string" + }, + "audio": { + "allOf": [ + { + "$ref": "#/$defs/AudioConfig" + } + ], + "description": "Configuration settings for audio." + }, + "path_to_audio_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path to an audio configuration file.", + "title": "Path To Audio Config File" + }, + "source_data": { + "description": "A list of datasets.", + "items": { + "$ref": "#/$defs/Dataset" + }, + "title": "Source Data", + "type": "array" + } + }, + "title": "PreprocessingConfig", + "type": "object" +} diff --git a/everyvoice/.schema/everyvoice-shared-text-0.3.json b/everyvoice/.schema/everyvoice-shared-text-0.3.json new file mode 100644 index 00000000..0bf1a6f2 --- /dev/null +++ b/everyvoice/.schema/everyvoice-shared-text-0.3.json @@ -0,0 +1,137 @@ +{ + "$defs": { + "Punctuation": { + "properties": { + "exclamations": { + "default": [ + "!", + "\u00a1" + ], + "description": "Exclamation punctuation symbols used in your datasets. Replaces these symbols with internally.", + "items": { + "type": "string" + }, + "title": "Exclamations", + "type": "array" + }, + "question_symbols": { + "default": [ + "?", + "\u00bf" + ], + "description": "Question/interrogative punctuation symbols used in your datasets. Replaces these symbols with internally.", + "items": { + "type": "string" + }, + "title": "Question Symbols", + "type": "array" + }, + "quotemarks": { + "default": [ + "\"", + "'", + "\u201c", + "\u201d", + "\u00ab", + "\u00bb" + ], + "description": "Quotemark punctuation symbols used in your datasets. Replaces these symbols with internally.", + "items": { + "type": "string" + }, + "title": "Quotemarks", + "type": "array" + }, + "big_breaks": { + "default": [ + ".", + ":", + ";" + ], + "description": "Punctuation symbols indicating a 'big break' used in your datasets. Replaces these symbols with internally.", + "items": { + "type": "string" + }, + "title": "Big Breaks", + "type": "array" + }, + "small_breaks": { + "default": [ + ",", + "-", + "\u2014" + ], + "description": "Punctuation symbols indicating a 'small break' used in your datasets. Replaces these symbols with internally.", + "items": { + "type": "string" + }, + "title": "Small Breaks", + "type": "array" + }, + "ellipsis": { + "default": [ + "\u2026" + ], + "description": "Punctuation symbols indicating an ellipsis used in your datasets. 
Replaces these symbols with internally.", + "items": { + "type": "string" + }, + "title": "Ellipsis", + "type": "array" + } + }, + "title": "Punctuation", + "type": "object" + }, + "Symbols": { + "additionalProperties": true, + "properties": { + "silence": { + "default": [ + "" + ], + "description": "The symbol(s) used to indicate silence.", + "items": { + "type": "string" + }, + "title": "Silence", + "type": "array" + }, + "punctuation": { + "allOf": [ + { + "$ref": "#/$defs/Punctuation" + } + ], + "description": "EveryVoice will combine punctuation and normalize it into a set of five permissible types of punctuation to help tractable training." + } + }, + "title": "Symbols", + "type": "object" + } + }, + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "symbols": { + "$ref": "#/$defs/Symbols" + }, + "to_replace": { + "additionalProperties": { + "type": "string" + }, + "default": {}, + "title": "To Replace", + "type": "object" + }, + "cleaners": { + "items": { + "type": "string" + }, + "title": "Cleaners", + "type": "array" + } + }, + "title": "TextConfig", + "type": "object" +} diff --git a/everyvoice/.schema/everyvoice-spec-to-wav-0.3.json b/everyvoice/.schema/everyvoice-spec-to-wav-0.3.json new file mode 100644 index 00000000..246c2276 --- /dev/null +++ b/everyvoice/.schema/everyvoice-spec-to-wav-0.3.json @@ -0,0 +1,846 @@ +{ + "$defs": { + "AdamOptimizer": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "learning_rate": { + "default": 0.0001, + "description": "The initial learning rate to use", + "title": "Learning Rate", + "type": "number" + }, + "eps": { + "default": 1e-08, + "description": "Advanced. The value of optimizer constant Epsilon, used for numerical stability.", + "title": "Eps", + "type": "number" + }, + "weight_decay": { + "default": 0.01, + "title": "Weight Decay", + "type": "number" + }, + "betas": { + "default": [ + 0.9, + 0.98 + ], + "description": "Advanced. The values of the Adam Optimizer beta coefficients.", + "maxItems": 2, + "minItems": 2, + "prefixItems": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "title": "Betas", + "type": "array" + }, + "name": { + "default": "adam", + "description": "The name of the optimizer to use.", + "title": "Name", + "type": "string" + } + }, + "title": "AdamOptimizer", + "type": "object" + }, + "AdamWOptimizer": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "learning_rate": { + "default": 0.0001, + "description": "The initial learning rate to use", + "title": "Learning Rate", + "type": "number" + }, + "eps": { + "default": 1e-08, + "description": "Advanced. The value of optimizer constant Epsilon, used for numerical stability.", + "title": "Eps", + "type": "number" + }, + "weight_decay": { + "default": 0.01, + "title": "Weight Decay", + "type": "number" + }, + "betas": { + "default": [ + 0.9, + 0.98 + ], + "description": "Advanced. 
The values of the AdamW Optimizer beta coefficients.", + "maxItems": 2, + "minItems": 2, + "prefixItems": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "title": "Betas", + "type": "array" + }, + "name": { + "default": "adamw", + "description": "The name of the optimizer to use.", + "title": "Name", + "type": "string" + } + }, + "title": "AdamWOptimizer", + "type": "object" + }, + "AudioConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "min_audio_length": { + "default": 0.4, + "description": "The minimum length of an audio sample in seconds. Audio shorter than this will be ignored during preprocessing.", + "title": "Min Audio Length", + "type": "number" + }, + "max_audio_length": { + "default": 11.0, + "description": "The maximum length of an audio sample in seconds. Audio longer than this will be ignored during preprocessing. Increasing the max_audio_length will result in larger memory usage. If you are running out of memory, consider lowering the max_audio_length.", + "title": "Max Audio Length", + "type": "number" + }, + "max_wav_value": { + "default": 32767.0, + "description": "Advanced. The maximum value allowed to be in your wav files. For 16-bit audio, this should be (2**16)/2 - 1.", + "title": "Max Wav Value", + "type": "number" + }, + "input_sampling_rate": { + "default": 22050, + "description": "The sampling rate describes the number of samples per second of audio. The 'input_sampling_rate' is with respect to your vocoder, or spec-to-wav model. This means that the spectrograms predicted by your text-to-spec model will also be calculated from audio at this sampling rate. If you change this value, your audio will automatically be re-sampled during preprocessing.", + "title": "Input Sampling Rate", + "type": "integer" + }, + "output_sampling_rate": { + "default": 22050, + "description": "Advanced. The sampling rate describes the number of samples per second of audio. The 'output_sampling_rate' is with respect to your vocoder, or spec-to-wav model. This means that the wav files generated by your vocoder or spec-to-wav model will be at this sampling rate. If you change this value, you will also need to change the upsample rates in your vocoder. Your audio will automatically be re-sampled during preprocessing.", + "title": "Output Sampling Rate", + "type": "integer" + }, + "alignment_sampling_rate": { + "default": 22050, + "description": "Advanced. The sampling rate describes the number of samples per second of audio. The 'alignment_sampling_rate' describes the sampling rate used when training an alignment model. If you change this value, your audio will automatically be re-sampled during preprocessing.", + "title": "Alignment Sampling Rate", + "type": "integer" + }, + "target_bit_depth": { + "default": 16, + "description": "Advanced. This is the bit depth of each sample in your audio files.", + "title": "Target Bit Depth", + "type": "integer" + }, + "n_fft": { + "default": 1024, + "description": "Advanced. This is the number of bins used by the Fast Fourier Transform (FFT).", + "title": "FFT Size", + "type": "integer" + }, + "fft_window_size": { + "default": 1024, + "description": "Advanced. This is the window size used by the Fast Fourier Transform (FFT).", + "title": "FFT Window Size", + "type": "integer" + }, + "fft_hop_size": { + "default": 256, + "description": "Advanced. 
This is the hop size for calculating the Short-Time Fourier Transform (STFT) which calculates a sequence of spectrograms from a single audio file. Another way of putting it is that the hop size is equal to the amount of non-intersecting samples from the audio in each spectrogram.", + "title": "FFT Hop Size", + "type": "integer" + }, + "f_min": { + "default": 0, + "description": "Advanced. This is the minimum frequency for the lowest frequency bin when calculating the spectrogram.", + "title": "Minimum Frequency", + "type": "integer" + }, + "f_max": { + "default": 8000, + "description": "Advanced. This is the maximum frequency for the highest frequency bin when calculating the spectrogram.", + "title": "Maximum Frequency", + "type": "integer" + }, + "n_mels": { + "default": 80, + "description": "Advanced. This is the number of filters in the Mel-scale spaced filterbank.", + "title": "Number of Mel bins", + "type": "integer" + }, + "spec_type": { + "anyOf": [ + { + "$ref": "#/$defs/AudioSpecTypeEnum" + }, + { + "type": "string" + } + ], + "default": "mel-librosa", + "description": "Advanced. Defines how to calculate the spectrogram. 'mel' uses the TorchAudio implementation for a Mel spectrogram. 'mel-librosa' uses Librosa's implementation. 'linear' calculates a non-Mel linear spectrogram and 'raw' calculates a complex-valued spectrogram. 'linear' and 'raw' are not currently supported by EveryVoice. We recommend using 'mel-librosa'.", + "title": "Spec Type" + }, + "vocoder_segment_size": { + "default": 8192, + "description": "Advanced. The vocoder, or spec-to-wav model is trained by sampling random fixed-size sections of the audio. This value specifies the number of samples in those sections.", + "title": "Vocoder Segment Size", + "type": "integer" + } + }, + "title": "AudioConfig", + "type": "object" + }, + "AudioSpecTypeEnum": { + "enum": [ + "mel", + "mel-librosa", + "linear", + "raw" + ], + "title": "AudioSpecTypeEnum", + "type": "string" + }, + "ContactInformation": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact_name": { + "description": "The name of the contact person or organization responsible for answering questions related to this model.", + "title": "Contact Name", + "type": "string" + }, + "contact_email": { + "description": "The email address of the contact person or organization responsible for answering questions related to this model.", + "format": "email", + "title": "Contact Email", + "type": "string" + } + }, + "required": [ + "contact_name", + "contact_email" + ], + "title": "ContactInformation", + "type": "object" + }, + "Dataset": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "label": { + "default": "YourDataSet", + "description": "A label for the source of data", + "title": "Label", + "type": "string" + }, + "permissions_obtained": { + "default": false, + "description": "An attestation that permission has been obtained to use this data. You may not use EveryVoice to build a TTS system with data that you do not have permission to use and there are serious possible consequences for doing so. Finding data online does not constitute permission. 
The speaker should be aware and consent to their data being used in this way.", + "title": "Permissions Obtained", + "type": "boolean" + }, + "data_dir": { + "default": "/please/create/a/path/to/your/dataset/data", + "description": "The path to the directory with your audio files.", + "format": "path", + "title": "Data Dir", + "type": "string" + }, + "filelist": { + "default": "/please/create/a/path/to/your/dataset/filelist", + "description": "The path to your dataset's filelist.", + "format": "path", + "title": "Filelist", + "type": "string" + }, + "filelist_loader": { + "description": "Advanced. The file-loader function to use to load your dataset's filelist.", + "title": "Filelist Loader", + "type": "string" + }, + "sox_effects": { + "default": [ + [ + "channels", + "1" + ] + ], + "description": "Advanced. A list of SoX effects to apply to your audio prior to preprocessing. Run python -c 'import torchaudio; print(torchaudio.sox_effects.effect_names())' to see a list of supported effects.", + "items": {}, + "title": "Sox Effects", + "type": "array" + } + }, + "title": "Dataset", + "type": "object" + }, + "HiFiGANModelConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "resblock": { + "allOf": [ + { + "$ref": "#/$defs/HiFiGANResblock" + } + ], + "default": "1", + "description": "Which resblock to use. See Kong et. al. 2020: https://arxiv.org/abs/2010.05646" + }, + "upsample_rates": { + "default": [ + 8, + 8, + 2, + 2 + ], + "description": "The stride of each convolutional layer in the upsampling module.", + "items": { + "type": "integer" + }, + "title": "Upsample Rates", + "type": "array" + }, + "upsample_kernel_sizes": { + "default": [ + 16, + 16, + 4, + 4 + ], + "description": "The kernel size of each convolutional layer in the upsampling module.", + "items": { + "type": "integer" + }, + "title": "Upsample Kernel Sizes", + "type": "array" + }, + "upsample_initial_channel": { + "default": 512, + "description": "The number of dimensions to project the Mel inputs to before being passed to the resblock.", + "title": "Upsample Initial Channel", + "type": "integer" + }, + "resblock_kernel_sizes": { + "default": [ + 3, + 7, + 11 + ], + "description": "The kernel size of each convolutional layer in the resblock.", + "items": { + "type": "integer" + }, + "title": "Resblock Kernel Sizes", + "type": "array" + }, + "resblock_dilation_sizes": { + "default": [ + [ + 1, + 3, + 5 + ], + [ + 1, + 3, + 5 + ], + [ + 1, + 3, + 5 + ] + ], + "description": "The dilations of each convolution in each layer of the resblock.", + "items": { + "items": { + "type": "integer" + }, + "type": "array" + }, + "title": "Resblock Dilation Sizes", + "type": "array" + }, + "activation_function": { + "description": "The activation function to use.", + "title": "Activation Function", + "type": "string" + }, + "istft_layer": { + "default": false, + "description": "Whether to predict phase and magnitude values and use an inverse Short-Time Fourier Transform instead of predicting a waveform directly. See Kaneko et. al. 
2022: https://arxiv.org/abs/2203.02395", + "title": "Istft Layer", + "type": "boolean" + }, + "msd_layers": { + "default": 3, + "description": "The number of layers to use in the Multi-Scale Discriminator.", + "title": "Msd Layers", + "type": "integer" + }, + "mpd_layers": { + "default": [ + 2, + 3, + 5, + 7, + 11 + ], + "description": "The size of each layer in the Multi-Period Discriminator.", + "items": { + "type": "integer" + }, + "title": "Mpd Layers", + "type": "array" + } + }, + "title": "HiFiGANModelConfig", + "type": "object" + }, + "HiFiGANResblock": { + "enum": [ + "1", + "2" + ], + "title": "HiFiGANResblock", + "type": "string" + }, + "HiFiGANTrainTypes": { + "enum": [ + "original", + "wgan" + ], + "title": "HiFiGANTrainTypes", + "type": "string" + }, + "HiFiGANTrainingConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "batch_size": { + "default": 16, + "description": "The number of samples to include in each batch when training. If you are running out of memory, consider lowering your batch_size.", + "title": "Batch Size", + "type": "integer" + }, + "save_top_k_ckpts": { + "default": 5, + "description": "The number of checkpoints to save.", + "title": "Save Top K Ckpts", + "type": "integer" + }, + "ckpt_steps": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The interval (in steps) for saving a checkpoint. By default checkpoints are saved every epoch using the 'ckpt_epochs' hyperparameter", + "title": "Ckpt Steps" + }, + "ckpt_epochs": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "The interval (in epochs) for saving a checkpoint. You can also save checkpoints after n steps by using 'ckpt_steps'", + "title": "Ckpt Epochs" + }, + "val_check_interval": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 500, + "description": "How often to check the validation set. Pass a float in the range [0.0, 1.0] to check after a fraction of the training epoch. Pass an int to check after a fixed number of training batches.", + "title": "Val Check Interval" + }, + "check_val_every_n_epoch": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Run validation after every n epochs. 
Defaults to 1, but if you have a small dataset you should change this to be larger to speed up training", + "title": "Check Val Every N Epoch" + }, + "max_epochs": { + "default": 1000, + "description": "Stop training after this many epochs", + "title": "Max Epochs", + "type": "integer" + }, + "max_steps": { + "default": 100000, + "description": "Stop training after this many steps", + "title": "Max Steps", + "type": "integer" + }, + "finetune_checkpoint": { + "anyOf": [ + { + "format": "path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Automatically resume training from a checkpoint loaded from this path.", + "title": "Finetune Checkpoint" + }, + "training_filelist": { + "default": "path/to/your/preprocessed/training_filelist.psv", + "description": "The path to a filelist containing samples belonging to your training set.", + "format": "path", + "title": "Training Filelist", + "type": "string" + }, + "validation_filelist": { + "default": "path/to/your/preprocessed/validation_filelist.psv", + "description": "The path to a filelist containing samples belonging to your validation set.", + "format": "path", + "title": "Validation Filelist", + "type": "string" + }, + "filelist_loader": { + "description": "Advanced. The function to use to load the filelist.", + "title": "Filelist Loader", + "type": "string" + }, + "logger": { + "allOf": [ + { + "$ref": "#/$defs/LoggerConfig" + } + ], + "description": "The configuration for the logger." + }, + "val_data_workers": { + "default": 0, + "description": "The number of CPU workers to use when loading data during validation.", + "title": "Val Data Workers", + "type": "integer" + }, + "train_data_workers": { + "default": 4, + "description": "The number of CPU workers to use when loading data during training.", + "title": "Train Data Workers", + "type": "integer" + }, + "generator_warmup_steps": { + "default": 0, + "description": "The number of steps to run through before activating the discriminators.", + "title": "Generator Warmup Steps", + "type": "integer" + }, + "gan_type": { + "allOf": [ + { + "$ref": "#/$defs/HiFiGANTrainTypes" + } + ], + "default": "original", + "description": "The type of GAN to use. Can be set to either 'original' for a vanilla GAN, or 'wgan' for a Wasserstein GAN that clips gradients." + }, + "optimizer": { + "anyOf": [ + { + "$ref": "#/$defs/AdamOptimizer" + }, + { + "$ref": "#/$defs/AdamWOptimizer" + }, + { + "$ref": "#/$defs/RMSOptimizer" + } + ], + "description": "Configuration settings for the optimizer.", + "title": "Optimizer" + }, + "wgan_clip_value": { + "default": 0.01, + "description": "The gradient clip value when gan_type='wgan'.", + "title": "Wgan Clip Value", + "type": "number" + }, + "use_weighted_sampler": { + "default": false, + "description": "Whether to use a sampler which oversamples from the minority language or speaker class for balanced training.", + "title": "Use Weighted Sampler", + "type": "boolean" + }, + "finetune": { + "default": false, + "description": "Whether to read spectrograms from 'preprocessed/synthesized_spec' instead of 'preprocessed/spec'. 
This is used when finetuning a pretrained spec-to-wav (vocoder) model using the outputs of a trained text-to-spec (feature prediction network) model.", + "title": "Finetune", + "type": "boolean" + } + }, + "title": "HiFiGANTrainingConfig", + "type": "object" + }, + "LoggerConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "description": "The logger configures all the information needed for where to store your experiment's logs and checkpoints.\nThe structure of your logs will then be:\nname / version / sub_dir\nsub_dir will be generated by calling sub_dir_callable each time the LoggerConfig is constructed.", + "properties": { + "name": { + "default": "BaseExperiment", + "description": "The name of the experiment. The structure of your logs will be name / version / sub_dir.", + "title": "Experiment Name", + "type": "string" + }, + "save_dir": { + "default": "logs_and_checkpoints", + "description": "The directory to save your checkpoints and logs to.", + "format": "path", + "title": "Save Dir", + "type": "string" + }, + "sub_dir_callable": { + "description": "The function that generates a string to call your runs - by default this is a timestamp. The structure of your logs will be name / version / sub_dir, where sub_dir is a timestamp.", + "title": "Sub Dir Callable", + "type": "string" + }, + "version": { + "default": "base", + "description": "The version of your experiment. The structure of your logs will be name / version / sub_dir.", + "title": "Version", + "type": "string" + } + }, + "title": "LoggerConfig", + "type": "object" + }, + "PreprocessingConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "dataset": { + "default": "YourDataSet", + "description": "The name of the dataset.", + "title": "Dataset", + "type": "string" + }, + "train_split": { + "default": 0.9, + "description": "The proportion of the dataset to use for training. The rest will be used as validation. Hold some of the validation set out for a test set if you are performing experiments.", + "maximum": 1.0, + "minimum": 0.0, + "title": "Train Split", + "type": "number" + }, + "dataset_split_seed": { + "default": 1234, + "description": "The seed to use when splitting the dataset into train and validation sets.", + "title": "Dataset Split Seed", + "type": "integer" + }, + "save_dir": { + "default": "preprocessed/YourDataSet", + "description": "The directory to save preprocessed files to.", + "format": "path", + "title": "Save Dir", + "type": "string" + }, + "audio": { + "allOf": [ + { + "$ref": "#/$defs/AudioConfig" + } + ], + "description": "Configuration settings for audio." + }, + "path_to_audio_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path to an audio configuration file.", + "title": "Path To Audio Config File" + }, + "source_data": { + "description": "A list of datasets.", + "items": { + "$ref": "#/$defs/Dataset" + }, + "title": "Source Data", + "type": "array" + } + }, + "title": "PreprocessingConfig", + "type": "object" + }, + "RMSOptimizer": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "learning_rate": { + "default": 0.0001, + "description": "The initial learning rate to use", + "title": "Learning Rate", + "type": "number" + }, + "eps": { + "default": 1e-08, + "description": "Advanced. 
The value of optimizer constant Epsilon, used for numerical stability.", + "title": "Eps", + "type": "number" + }, + "weight_decay": { + "default": 0.01, + "title": "Weight Decay", + "type": "number" + }, + "alpha": { + "default": 0.99, + "description": "Advanced. The value of RMSProp optimizer alpha smoothing constant.", + "title": "Alpha", + "type": "number" + }, + "name": { + "default": "rms", + "description": "The name of the optimizer to use.", + "title": "Name", + "type": "string" + } + }, + "title": "RMSOptimizer", + "type": "object" + } + }, + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact": { + "allOf": [ + { + "$ref": "#/$defs/ContactInformation" + } + ], + "description": "EveryVoice requires a contact name and email to help prevent misuse. Please read our Guide to understand more about the importance of misuse prevention with TTS." + }, + "VERSION": { + "default": "1.0", + "title": "Version", + "type": "string" + }, + "model": { + "allOf": [ + { + "$ref": "#/$defs/HiFiGANModelConfig" + } + ], + "description": "The model configuration settings." + }, + "path_to_model_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a model configuration file.", + "title": "Path To Model Config File" + }, + "training": { + "allOf": [ + { + "$ref": "#/$defs/HiFiGANTrainingConfig" + } + ], + "description": "The training configuration hyperparameters." + }, + "path_to_training_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a training configuration file.", + "title": "Path To Training Config File" + }, + "preprocessing": { + "allOf": [ + { + "$ref": "#/$defs/PreprocessingConfig" + } + ], + "description": "The preprocessing configuration, including information about audio settings." + }, + "path_to_preprocessing_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a preprocessing configuration file.", + "title": "Path To Preprocessing Config File" + } + }, + "required": [ + "contact" + ], + "title": "HiFiGANConfig", + "type": "object" +} diff --git a/everyvoice/.schema/everyvoice-text-to-spec-0.3.json b/everyvoice/.schema/everyvoice-text-to-spec-0.3.json new file mode 100644 index 00000000..2785a682 --- /dev/null +++ b/everyvoice/.schema/everyvoice-text-to-spec-0.3.json @@ -0,0 +1,1122 @@ +{ + "$defs": { + "AudioConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "min_audio_length": { + "default": 0.4, + "description": "The minimum length of an audio sample in seconds. Audio shorter than this will be ignored during preprocessing.", + "title": "Min Audio Length", + "type": "number" + }, + "max_audio_length": { + "default": 11.0, + "description": "The maximum length of an audio sample in seconds. Audio longer than this will be ignored during preprocessing. Increasing the max_audio_length will result in larger memory usage. If you are running out of memory, consider lowering the max_audio_length.", + "title": "Max Audio Length", + "type": "number" + }, + "max_wav_value": { + "default": 32767.0, + "description": "Advanced. The maximum value allowed to be in your wav files. 
For 16-bit audio, this should be (2**16)/2 - 1.", + "title": "Max Wav Value", + "type": "number" + }, + "input_sampling_rate": { + "default": 22050, + "description": "The sampling rate describes the number of samples per second of audio. The 'input_sampling_rate' is with respect to your vocoder, or spec-to-wav model. This means that the spectrograms predicted by your text-to-spec model will also be calculated from audio at this sampling rate. If you change this value, your audio will automatically be re-sampled during preprocessing.", + "title": "Input Sampling Rate", + "type": "integer" + }, + "output_sampling_rate": { + "default": 22050, + "description": "Advanced. The sampling rate describes the number of samples per second of audio. The 'output_sampling_rate' is with respect to your vocoder, or spec-to-wav model. This means that the wav files generated by your vocoder or spec-to-wav model will be at this sampling rate. If you change this value, you will also need to change the upsample rates in your vocoder. Your audio will automatically be re-sampled during preprocessing.", + "title": "Output Sampling Rate", + "type": "integer" + }, + "alignment_sampling_rate": { + "default": 22050, + "description": "Advanced. The sampling rate describes the number of samples per second of audio. The 'alignment_sampling_rate' describes the sampling rate used when training an alignment model. If you change this value, your audio will automatically be re-sampled during preprocessing.", + "title": "Alignment Sampling Rate", + "type": "integer" + }, + "target_bit_depth": { + "default": 16, + "description": "Advanced. This is the bit depth of each sample in your audio files.", + "title": "Target Bit Depth", + "type": "integer" + }, + "n_fft": { + "default": 1024, + "description": "Advanced. This is the number of bins used by the Fast Fourier Transform (FFT).", + "title": "FFT Size", + "type": "integer" + }, + "fft_window_size": { + "default": 1024, + "description": "Advanced. This is the window size used by the Fast Fourier Transform (FFT).", + "title": "FFT Window Size", + "type": "integer" + }, + "fft_hop_size": { + "default": 256, + "description": "Advanced. This is the hop size for calculating the Short-Time Fourier Transform (STFT) which calculates a sequence of spectrograms from a single audio file. Another way of putting it is that the hop size is equal to the amount of non-intersecting samples from the audio in each spectrogram.", + "title": "FFT Hop Size", + "type": "integer" + }, + "f_min": { + "default": 0, + "description": "Advanced. This is the minimum frequency for the lowest frequency bin when calculating the spectrogram.", + "title": "Minimum Frequency", + "type": "integer" + }, + "f_max": { + "default": 8000, + "description": "Advanced. This is the maximum frequency for the highest frequency bin when calculating the spectrogram.", + "title": "Maximum Frequency", + "type": "integer" + }, + "n_mels": { + "default": 80, + "description": "Advanced. This is the number of filters in the Mel-scale spaced filterbank.", + "title": "Number of Mel bins", + "type": "integer" + }, + "spec_type": { + "anyOf": [ + { + "$ref": "#/$defs/AudioSpecTypeEnum" + }, + { + "type": "string" + } + ], + "default": "mel-librosa", + "description": "Advanced. Defines how to calculate the spectrogram. 'mel' uses the TorchAudio implementation for a Mel spectrogram. 'mel-librosa' uses Librosa's implementation. 'linear' calculates a non-Mel linear spectrogram and 'raw' calculates a complex-valued spectrogram. 
'linear' and 'raw' are not currently supported by EveryVoice. We recommend using 'mel-librosa'.", + "title": "Spec Type" + }, + "vocoder_segment_size": { + "default": 8192, + "description": "Advanced. The vocoder, or spec-to-wav model is trained by sampling random fixed-size sections of the audio. This value specifies the number of samples in those sections.", + "title": "Vocoder Segment Size", + "type": "integer" + } + }, + "title": "AudioConfig", + "type": "object" + }, + "AudioSpecTypeEnum": { + "enum": [ + "mel", + "mel-librosa", + "linear", + "raw" + ], + "title": "AudioSpecTypeEnum", + "type": "string" + }, + "ConformerConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "layers": { + "default": 4, + "description": "The number of layers in the Conformer.", + "title": "Layers", + "type": "integer" + }, + "heads": { + "default": 2, + "description": "The number of heads in the multi-headed attention modules.", + "title": "Heads", + "type": "integer" + }, + "input_dim": { + "default": 256, + "description": "The number of hidden dimensions in the input. The input_dim value declared in the encoder and decoder modules must match the input_dim value declared in each variance predictor module.", + "title": "Input Dim", + "type": "integer" + }, + "feedforward_dim": { + "default": 1024, + "description": "The number of dimensions in the feedforward layers.", + "title": "Feedforward Dim", + "type": "integer" + }, + "conv_kernel_size": { + "default": 9, + "description": "The size of the kernel in each convolutional layer of the Conformer.", + "title": "Conv Kernel Size", + "type": "integer" + }, + "dropout": { + "default": 0.2, + "description": "The amount of dropout to apply.", + "title": "Dropout", + "type": "number" + } + }, + "title": "ConformerConfig", + "type": "object" + }, + "ContactInformation": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact_name": { + "description": "The name of the contact person or organization responsible for answering questions related to this model.", + "title": "Contact Name", + "type": "string" + }, + "contact_email": { + "description": "The email address of the contact person or organization responsible for answering questions related to this model.", + "format": "email", + "title": "Contact Email", + "type": "string" + } + }, + "required": [ + "contact_name", + "contact_email" + ], + "title": "ContactInformation", + "type": "object" + }, + "Dataset": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "label": { + "default": "YourDataSet", + "description": "A label for the source of data", + "title": "Label", + "type": "string" + }, + "permissions_obtained": { + "default": false, + "description": "An attestation that permission has been obtained to use this data. You may not use EveryVoice to build a TTS system with data that you do not have permission to use and there are serious possible consequences for doing so. Finding data online does not constitute permission. 
The speaker should be aware and consent to their data being used in this way.", + "title": "Permissions Obtained", + "type": "boolean" + }, + "data_dir": { + "default": "/please/create/a/path/to/your/dataset/data", + "description": "The path to the directory with your audio files.", + "format": "path", + "title": "Data Dir", + "type": "string" + }, + "filelist": { + "default": "/please/create/a/path/to/your/dataset/filelist", + "description": "The path to your dataset's filelist.", + "format": "path", + "title": "Filelist", + "type": "string" + }, + "filelist_loader": { + "description": "Advanced. The file-loader function to use to load your dataset's filelist.", + "title": "Filelist Loader", + "type": "string" + }, + "sox_effects": { + "default": [ + [ + "channels", + "1" + ] + ], + "description": "Advanced. A list of SoX effects to apply to your audio prior to preprocessing. Run python -c 'import torchaudio; print(torchaudio.sox_effects.effect_names())' to see a list of supported effects.", + "items": {}, + "title": "Sox Effects", + "type": "array" + } + }, + "title": "Dataset", + "type": "object" + }, + "FastSpeech2ModelConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "encoder": { + "allOf": [ + { + "$ref": "#/$defs/ConformerConfig" + } + ], + "description": "The configuration of the encoder module." + }, + "decoder": { + "allOf": [ + { + "$ref": "#/$defs/ConformerConfig" + } + ], + "description": "The configuration of the decoder module." + }, + "variance_predictors": { + "allOf": [ + { + "$ref": "#/$defs/VariancePredictors" + } + ], + "description": "Configuration for energy, duration, and pitch variance predictors." + }, + "target_text_representation_level": { + "allOf": [ + { + "$ref": "#/$defs/TargetTrainingTextRepresentationLevel" + } + ], + "default": "characters" + }, + "learn_alignment": { + "default": true, + "description": "Whether to jointly learn alignments using the monotonic alignment search module (see Badlani et al. 2021: https://arxiv.org/abs/2108.10447). If set to False, you will have to provide text/audio alignments separately before training a text-to-spec (feature prediction) model.", + "title": "Learn Alignment", + "type": "boolean" + }, + "use_global_style_token_module": { + "default": false, + "description": "Whether to use the Global Style Token (GST) module from Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End Speech Synthesis (https://arxiv.org/abs/1803.09017)", + "title": "Use Global Style Token Module", + "type": "boolean" + }, + "max_length": { + "default": 1000, + "description": "The maximum length (i.e. number of symbols) for text inputs.", + "title": "Max Length", + "type": "integer" + }, + "mel_loss": { + "allOf": [ + { + "$ref": "#/$defs/VarianceLossEnum" + } + ], + "default": "mse", + "description": "The loss function to use when calculating Mel spectrogram loss." + }, + "use_postnet": { + "default": true, + "description": "Whether to use a postnet module.", + "title": "Use Postnet", + "type": "boolean" + }, + "multilingual": { + "default": false, + "description": "Whether to train a multilingual model. For this to work, your filelist must contain a column/field for 'language' with values for each utterance.", + "title": "Multilingual", + "type": "boolean" + }, + "multispeaker": { + "default": false, + "description": "Whether to train a multispeaker model. 
For this to work, your filelist must contain a column/field for 'speaker' with values for each utterance.", + "title": "Multispeaker", + "type": "boolean" + } + }, + "title": "FastSpeech2ModelConfig", + "type": "object" + }, + "FastSpeech2TrainingConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "batch_size": { + "default": 16, + "description": "The number of samples to include in each batch when training. If you are running out of memory, consider lowering your batch_size.", + "title": "Batch Size", + "type": "integer" + }, + "save_top_k_ckpts": { + "default": 5, + "description": "The number of checkpoints to save.", + "title": "Save Top K Ckpts", + "type": "integer" + }, + "ckpt_steps": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The interval (in steps) for saving a checkpoint. By default checkpoints are saved every epoch using the 'ckpt_epochs' hyperparameter", + "title": "Ckpt Steps" + }, + "ckpt_epochs": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "The interval (in epochs) for saving a checkpoint. You can also save checkpoints after n steps by using 'ckpt_steps'", + "title": "Ckpt Epochs" + }, + "val_check_interval": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 500, + "description": "How often to check the validation set. Pass a float in the range [0.0, 1.0] to check after a fraction of the training epoch. Pass an int to check after a fixed number of training batches.", + "title": "Val Check Interval" + }, + "check_val_every_n_epoch": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Run validation after every n epochs. Defaults to 1, but if you have a small dataset you should change this to be larger to speed up training", + "title": "Check Val Every N Epoch" + }, + "max_epochs": { + "default": 1000, + "description": "Stop training after this many epochs", + "title": "Max Epochs", + "type": "integer" + }, + "max_steps": { + "default": 100000, + "description": "Stop training after this many steps", + "title": "Max Steps", + "type": "integer" + }, + "finetune_checkpoint": { + "anyOf": [ + { + "format": "path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Automatically resume training from a checkpoint loaded from this path.", + "title": "Finetune Checkpoint" + }, + "training_filelist": { + "default": "path/to/your/preprocessed/training_filelist.psv", + "description": "The path to a filelist containing samples belonging to your training set.", + "format": "path", + "title": "Training Filelist", + "type": "string" + }, + "validation_filelist": { + "default": "path/to/your/preprocessed/validation_filelist.psv", + "description": "The path to a filelist containing samples belonging to your validation set.", + "format": "path", + "title": "Validation Filelist", + "type": "string" + }, + "filelist_loader": { + "description": "Advanced. The function to use to load the filelist.", + "title": "Filelist Loader", + "type": "string" + }, + "logger": { + "allOf": [ + { + "$ref": "#/$defs/LoggerConfig" + } + ], + "description": "The configuration for the logger." 
+ }, + "val_data_workers": { + "default": 0, + "description": "The number of CPU workers to use when loading data during validation.", + "title": "Val Data Workers", + "type": "integer" + }, + "train_data_workers": { + "default": 4, + "description": "The number of CPU workers to use when loading data during training.", + "title": "Train Data Workers", + "type": "integer" + }, + "use_weighted_sampler": { + "default": false, + "description": "Whether to use a sampler which oversamples from the minority language or speaker class for balanced training.", + "title": "Use Weighted Sampler", + "type": "boolean" + }, + "optimizer": { + "allOf": [ + { + "$ref": "#/$defs/NoamOptimizer" + } + ], + "default": { + "learning_rate": 0.001, + "eps": 1e-08, + "weight_decay": 1e-06, + "betas": [ + 0.9, + 0.999 + ], + "name": "noam", + "warmup_steps": 1000 + }, + "description": "The optimizer to use during training." + }, + "vocoder_path": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Vocoder Path" + }, + "mel_loss_weight": { + "default": 1.0, + "description": "Multiply the spec loss by this weight", + "title": "Mel Loss Weight", + "type": "number" + }, + "postnet_loss_weight": { + "default": 1.0, + "description": "Multiply the postnet loss by this weight", + "title": "Postnet Loss Weight", + "type": "number" + }, + "pitch_loss_weight": { + "default": 0.1, + "description": "Multiply the pitch loss by this weight", + "title": "Pitch Loss Weight", + "type": "number" + }, + "energy_loss_weight": { + "default": 0.1, + "description": "Multiply the energy loss by this weight", + "title": "Energy Loss Weight", + "type": "number" + }, + "duration_loss_weight": { + "default": 0.1, + "description": "Multiply the duration loss by this weight", + "title": "Duration Loss Weight", + "type": "number" + }, + "attn_ctc_loss_weight": { + "default": 0.1, + "description": "Multiply the Attention CTC loss by this weight", + "title": "Attn Ctc Loss Weight", + "type": "number" + }, + "attn_bin_loss_weight": { + "default": 0.1, + "description": "Multiply the Attention Binarization loss by this weight", + "title": "Attn Bin Loss Weight", + "type": "number" + }, + "attn_bin_loss_warmup_epochs": { + "default": 100, + "description": "Scale the Attention Binarization loss by (current_epoch / attn_bin_loss_warmup_epochs) until the number of epochs defined by attn_bin_loss_warmup_epochs is reached.", + "minimum": 1, + "title": "Attn Bin Loss Warmup Epochs", + "type": "integer" + } + }, + "title": "FastSpeech2TrainingConfig", + "type": "object" + }, + "LoggerConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "description": "The logger configures all the information needed for where to store your experiment's logs and checkpoints.\nThe structure of your logs will then be:\nname / version / sub_dir\nsub_dir will be generated by calling sub_dir_callable each time the LoggerConfig is constructed.", + "properties": { + "name": { + "default": "BaseExperiment", + "description": "The name of the experiment. The structure of your logs will be name / version / sub_dir.", + "title": "Experiment Name", + "type": "string" + }, + "save_dir": { + "default": "logs_and_checkpoints", + "description": "The directory to save your checkpoints and logs to.", + "format": "path", + "title": "Save Dir", + "type": "string" + }, + "sub_dir_callable": { + "description": "The function that generates a string to call your runs - by default this is a timestamp. 
The structure of your logs will be name / version / sub_dir, where sub_dir is a timestamp.", + "title": "Sub Dir Callable", + "type": "string" + }, + "version": { + "default": "base", + "description": "The version of your experiment. The structure of your logs will be name / version / sub_dir.", + "title": "Version", + "type": "string" + } + }, + "title": "LoggerConfig", + "type": "object" + }, + "NoamOptimizer": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "learning_rate": { + "default": 0.0001, + "description": "The initial learning rate to use", + "title": "Learning Rate", + "type": "number" + }, + "eps": { + "default": 1e-08, + "description": "Advanced. The value of optimizer constant Epsilon, used for numerical stability.", + "title": "Eps", + "type": "number" + }, + "weight_decay": { + "default": 0.01, + "title": "Weight Decay", + "type": "number" + }, + "betas": { + "default": [ + 0.9, + 0.98 + ], + "description": "Advanced. The values of the Adam Optimizer beta coefficients.", + "maxItems": 2, + "minItems": 2, + "prefixItems": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "title": "Betas", + "type": "array" + }, + "name": { + "default": "noam", + "description": "The name of the optimizer to use.", + "title": "Name", + "type": "string" + }, + "warmup_steps": { + "default": 1000, + "description": "The number of steps to increase the learning rate before starting to decrease it.", + "title": "Warmup Steps", + "type": "integer" + } + }, + "title": "NoamOptimizer", + "type": "object" + }, + "PreprocessingConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "dataset": { + "default": "YourDataSet", + "description": "The name of the dataset.", + "title": "Dataset", + "type": "string" + }, + "train_split": { + "default": 0.9, + "description": "The proportion of the dataset to use for training. The rest will be used as validation. Hold some of the validation set out for a test set if you are performing experiments.", + "maximum": 1.0, + "minimum": 0.0, + "title": "Train Split", + "type": "number" + }, + "dataset_split_seed": { + "default": 1234, + "description": "The seed to use when splitting the dataset into train and validation sets.", + "title": "Dataset Split Seed", + "type": "integer" + }, + "save_dir": { + "default": "preprocessed/YourDataSet", + "description": "The directory to save preprocessed files to.", + "format": "path", + "title": "Save Dir", + "type": "string" + }, + "audio": { + "allOf": [ + { + "$ref": "#/$defs/AudioConfig" + } + ], + "description": "Configuration settings for audio." + }, + "path_to_audio_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path to an audio configuration file.", + "title": "Path To Audio Config File" + }, + "source_data": { + "description": "A list of datasets.", + "items": { + "$ref": "#/$defs/Dataset" + }, + "title": "Source Data", + "type": "array" + } + }, + "title": "PreprocessingConfig", + "type": "object" + }, + "Punctuation": { + "properties": { + "exclamations": { + "default": [ + "!", + "\u00a1" + ], + "description": "Exclamation punctuation symbols used in your datasets. 
Replaces these symbols internally with a single exclamation token.", + "items": { + "type": "string" + }, + "title": "Exclamations", + "type": "array" + }, + "question_symbols": { + "default": [ + "?", + "\u00bf" + ], + "description": "Question/interrogative punctuation symbols used in your datasets. Replaces these symbols internally with a single question token.", + "items": { + "type": "string" + }, + "title": "Question Symbols", + "type": "array" + }, + "quotemarks": { + "default": [ + "\"", + "'", + "\u201c", + "\u201d", + "\u00ab", + "\u00bb" + ], + "description": "Quotemark punctuation symbols used in your datasets. Replaces these symbols internally with a single quotation token.", + "items": { + "type": "string" + }, + "title": "Quotemarks", + "type": "array" + }, + "big_breaks": { + "default": [ + ".", + ":", + ";" + ], + "description": "Punctuation symbols indicating a 'big break' used in your datasets. Replaces these symbols internally with a single big-break token.", + "items": { + "type": "string" + }, + "title": "Big Breaks", + "type": "array" + }, + "small_breaks": { + "default": [ + ",", + "-", + "\u2014" + ], + "description": "Punctuation symbols indicating a 'small break' used in your datasets. Replaces these symbols internally with a single small-break token.", + "items": { + "type": "string" + }, + "title": "Small Breaks", + "type": "array" + }, + "ellipsis": { + "default": [ + "\u2026" + ], + "description": "Punctuation symbols indicating an ellipsis used in your datasets. Replaces these symbols internally with a single ellipsis token.", + "items": { + "type": "string" + }, + "title": "Ellipsis", + "type": "array" + } + }, + "title": "Punctuation", + "type": "object" + }, + "Symbols": { + "additionalProperties": true, + "properties": { + "silence": { + "default": [ + "" + ], + "description": "The symbol(s) used to indicate silence.", + "items": { + "type": "string" + }, + "title": "Silence", + "type": "array" + }, + "punctuation": { + "allOf": [ + { + "$ref": "#/$defs/Punctuation" + } + ], + "description": "EveryVoice will combine punctuation and normalize it into a set of five permissible types of punctuation to help keep training tractable." + } + }, + "title": "Symbols", + "type": "object" + }, + "TargetTrainingTextRepresentationLevel": { + "enum": [ + "characters", + "phones", + "phonological_features" + ], + "title": "TargetTrainingTextRepresentationLevel", + "type": "string" + }, + "TextConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "symbols": { + "$ref": "#/$defs/Symbols" + }, + "to_replace": { + "additionalProperties": { + "type": "string" + }, + "default": {}, + "title": "To Replace", + "type": "object" + }, + "cleaners": { + "items": { + "type": "string" + }, + "title": "Cleaners", + "type": "array" + } + }, + "title": "TextConfig", + "type": "object" + }, + "VarianceLevelEnum": { + "enum": [ + "phone", + "frame" + ], + "title": "VarianceLevelEnum", + "type": "string" + }, + "VarianceLossEnum": { + "enum": [ + "mse", + "mae" + ], + "title": "VarianceLossEnum", + "type": "string" + }, + "VariancePredictorBase": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "loss": { + "allOf": [ + { + "$ref": "#/$defs/VarianceLossEnum" + } + ], + "default": "mse", + "description": "The loss function to use when calculating variance loss. Either 'mse' or 'mae'." 
+ }, + "n_layers": { + "default": 5, + "description": "The number of layers in the variance predictor module.", + "title": "N Layers", + "type": "integer" + }, + "kernel_size": { + "default": 3, + "description": "The kernel size of each convolutional layer in the variance predictor module.", + "title": "Kernel Size", + "type": "integer" + }, + "dropout": { + "default": 0.5, + "description": "The amount of dropout to apply.", + "title": "Dropout", + "type": "number" + }, + "input_dim": { + "default": 256, + "description": "The number of hidden dimensions in the input. This must match the input_dim value declared in the encoder and decoder modules.", + "title": "Input Dim", + "type": "integer" + }, + "n_bins": { + "default": 256, + "description": "The number of bins to use in the variance predictor module.", + "title": "N Bins", + "type": "integer" + }, + "depthwise": { + "default": true, + "description": "Whether to use depthwise separable convolutions.", + "title": "Depthwise", + "type": "boolean" + } + }, + "title": "VariancePredictorBase", + "type": "object" + }, + "VariancePredictorConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "loss": { + "allOf": [ + { + "$ref": "#/$defs/VarianceLossEnum" + } + ], + "default": "mse", + "description": "The loss function to use when calculating variance loss. Either 'mse' or 'mae'." + }, + "n_layers": { + "default": 5, + "description": "The number of layers in the variance predictor module.", + "title": "N Layers", + "type": "integer" + }, + "kernel_size": { + "default": 3, + "description": "The kernel size of each convolutional layer in the variance predictor module.", + "title": "Kernel Size", + "type": "integer" + }, + "dropout": { + "default": 0.5, + "description": "The amount of dropout to apply.", + "title": "Dropout", + "type": "number" + }, + "input_dim": { + "default": 256, + "description": "The number of hidden dimensions in the input. This must match the input_dim value declared in the encoder and decoder modules.", + "title": "Input Dim", + "type": "integer" + }, + "n_bins": { + "default": 256, + "description": "The number of bins to use in the variance predictor module.", + "title": "N Bins", + "type": "integer" + }, + "depthwise": { + "default": true, + "description": "Whether to use depthwise separable convolutions.", + "title": "Depthwise", + "type": "boolean" + }, + "level": { + "allOf": [ + { + "$ref": "#/$defs/VarianceLevelEnum" + } + ], + "default": "phone", + "description": "The level for the variance predictor to use. 'frame' will make predictions at the frame level. 'phone' will average predictions across all frames in each phone." 
+ } + }, + "title": "VariancePredictorConfig", + "type": "object" + }, + "VariancePredictors": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "energy": { + "allOf": [ + { + "$ref": "#/$defs/VariancePredictorConfig" + } + ], + "description": "The variance predictor for energy" + }, + "duration": { + "allOf": [ + { + "$ref": "#/$defs/VariancePredictorBase" + } + ], + "description": "The variance predictor for duration" + }, + "pitch": { + "allOf": [ + { + "$ref": "#/$defs/VariancePredictorConfig" + } + ], + "description": "The variance predictor for pitch" + } + }, + "title": "VariancePredictors", + "type": "object" + } + }, + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact": { + "allOf": [ + { + "$ref": "#/$defs/ContactInformation" + } + ], + "description": "EveryVoice requires a contact name and email to help prevent misuse. Please read our Guide to understand more about the importance of misuse prevention with TTS." + }, + "VERSION": { + "default": "1.1", + "title": "Version", + "type": "string" + }, + "model": { + "allOf": [ + { + "$ref": "#/$defs/FastSpeech2ModelConfig" + } + ], + "description": "The model configuration settings." + }, + "path_to_model_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a model configuration file.", + "title": "Path To Model Config File" + }, + "training": { + "allOf": [ + { + "$ref": "#/$defs/FastSpeech2TrainingConfig" + } + ], + "description": "The training configuration hyperparameters." + }, + "path_to_training_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a training configuration file.", + "title": "Path To Training Config File" + }, + "preprocessing": { + "allOf": [ + { + "$ref": "#/$defs/PreprocessingConfig" + } + ], + "description": "The preprocessing configuration, including information about audio settings." + }, + "path_to_preprocessing_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a preprocessing configuration file.", + "title": "Path To Preprocessing Config File" + }, + "text": { + "allOf": [ + { + "$ref": "#/$defs/TextConfig" + } + ], + "description": "The text configuration." + }, + "path_to_text_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a text configuration file.", + "title": "Path To Text Config File" + } + }, + "required": [ + "contact" + ], + "title": "FastSpeech2Config", + "type": "object" +} diff --git a/everyvoice/.schema/everyvoice-text-to-wav-0.3.json b/everyvoice/.schema/everyvoice-text-to-wav-0.3.json new file mode 100644 index 00000000..0ffa2bdb --- /dev/null +++ b/everyvoice/.schema/everyvoice-text-to-wav-0.3.json @@ -0,0 +1,2579 @@ +{ + "$defs": { + "AdamOptimizer": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "learning_rate": { + "default": 0.0001, + "description": "The initial learning rate to use", + "title": "Learning Rate", + "type": "number" + }, + "eps": { + "default": 1e-08, + "description": "Advanced. 
The value of optimizer constant Epsilon, used for numerical stability.", + "title": "Eps", + "type": "number" + }, + "weight_decay": { + "default": 0.01, + "title": "Weight Decay", + "type": "number" + }, + "betas": { + "default": [ + 0.9, + 0.98 + ], + "description": "Advanced. The values of the Adam Optimizer beta coefficients.", + "maxItems": 2, + "minItems": 2, + "prefixItems": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "title": "Betas", + "type": "array" + }, + "name": { + "default": "adam", + "description": "The name of the optimizer to use.", + "title": "Name", + "type": "string" + } + }, + "title": "AdamOptimizer", + "type": "object" + }, + "AdamWOptimizer": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "learning_rate": { + "default": 0.0001, + "description": "The initial learning rate to use", + "title": "Learning Rate", + "type": "number" + }, + "eps": { + "default": 1e-08, + "description": "Advanced. The value of optimizer constant Epsilon, used for numerical stability.", + "title": "Eps", + "type": "number" + }, + "weight_decay": { + "default": 0.01, + "title": "Weight Decay", + "type": "number" + }, + "betas": { + "default": [ + 0.9, + 0.98 + ], + "description": "Advanced. The values of the AdamW Optimizer beta coefficients.", + "maxItems": 2, + "minItems": 2, + "prefixItems": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "title": "Betas", + "type": "array" + }, + "name": { + "default": "adamw", + "description": "The name of the optimizer to use.", + "title": "Name", + "type": "string" + } + }, + "title": "AdamWOptimizer", + "type": "object" + }, + "AlignerConfigNoContact": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact": { + "anyOf": [ + { + "$ref": "#/$defs/ContactInformation" + }, + { + "type": "null" + } + ], + "default": null + }, + "VERSION": { + "default": "1.0", + "title": "Version", + "type": "string" + }, + "model": { + "allOf": [ + { + "$ref": "#/$defs/DFAlignerModelConfig" + } + ], + "description": "The model configuration settings." + }, + "path_to_model_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a model configuration file.", + "title": "Path To Model Config File" + }, + "training": { + "allOf": [ + { + "$ref": "#/$defs/DFAlignerTrainingConfig" + } + ], + "description": "The training configuration hyperparameters." + }, + "path_to_training_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a training configuration file.", + "title": "Path To Training Config File" + }, + "preprocessing": { + "allOf": [ + { + "$ref": "#/$defs/PreprocessingConfig" + } + ], + "description": "The preprocessing configuration, including information about audio settings." 
+ }, + "path_to_preprocessing_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a preprocessing configuration file.", + "title": "Path To Preprocessing Config File" + }, + "text": { + "$ref": "#/$defs/TextConfig" + }, + "path_to_text_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Path To Text Config File" + } + }, + "title": "AlignerConfigNoContact", + "type": "object" + }, + "AudioConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "min_audio_length": { + "default": 0.4, + "description": "The minimum length of an audio sample in seconds. Audio shorter than this will be ignored during preprocessing.", + "title": "Min Audio Length", + "type": "number" + }, + "max_audio_length": { + "default": 11.0, + "description": "The maximum length of an audio sample in seconds. Audio longer than this will be ignored during preprocessing. Increasing the max_audio_length will result in larger memory usage. If you are running out of memory, consider lowering the max_audio_length.", + "title": "Max Audio Length", + "type": "number" + }, + "max_wav_value": { + "default": 32767.0, + "description": "Advanced. The maximum value allowed to be in your wav files. For 16-bit audio, this should be (2**16)/2 - 1.", + "title": "Max Wav Value", + "type": "number" + }, + "input_sampling_rate": { + "default": 22050, + "description": "The sampling rate describes the number of samples per second of audio. The 'input_sampling_rate' is with respect to your vocoder, or spec-to-wav model. This means that the spectrograms predicted by your text-to-spec model will also be calculated from audio at this sampling rate. If you change this value, your audio will automatically be re-sampled during preprocessing.", + "title": "Input Sampling Rate", + "type": "integer" + }, + "output_sampling_rate": { + "default": 22050, + "description": "Advanced. The sampling rate describes the number of samples per second of audio. The 'output_sampling_rate' is with respect to your vocoder, or spec-to-wav model. This means that the wav files generated by your vocoder or spec-to-wav model will be at this sampling rate. If you change this value, you will also need to change the upsample rates in your vocoder. Your audio will automatically be re-sampled during preprocessing.", + "title": "Output Sampling Rate", + "type": "integer" + }, + "alignment_sampling_rate": { + "default": 22050, + "description": "Advanced. The sampling rate describes the number of samples per second of audio. The 'alignment_sampling_rate' describes the sampling rate used when training an alignment model. If you change this value, your audio will automatically be re-sampled during preprocessing.", + "title": "Alignment Sampling Rate", + "type": "integer" + }, + "target_bit_depth": { + "default": 16, + "description": "Advanced. This is the bit depth of each sample in your audio files.", + "title": "Target Bit Depth", + "type": "integer" + }, + "n_fft": { + "default": 1024, + "description": "Advanced. This is the number of bins used by the Fast Fourier Transform (FFT).", + "title": "FFT Size", + "type": "integer" + }, + "fft_window_size": { + "default": 1024, + "description": "Advanced. 
This is the window size used by the Fast Fourier Transform (FFT).", + "title": "FFT Window Size", + "type": "integer" + }, + "fft_hop_size": { + "default": 256, + "description": "Advanced. This is the hop size for calculating the Short-Time Fourier Transform (STFT) which calculates a sequence of spectrograms from a single audio file. Another way of putting it is that the hop size is equal to the amount of non-intersecting samples from the audio in each spectrogram.", + "title": "FFT Hop Size", + "type": "integer" + }, + "f_min": { + "default": 0, + "description": "Advanced. This is the minimum frequency for the lowest frequency bin when calculating the spectrogram.", + "title": "Minimum Frequency", + "type": "integer" + }, + "f_max": { + "default": 8000, + "description": "Advanced. This is the maximum frequency for the highest frequency bin when calculating the spectrogram.", + "title": "Maximum Frequency", + "type": "integer" + }, + "n_mels": { + "default": 80, + "description": "Advanced. This is the number of filters in the Mel-scale spaced filterbank.", + "title": "Number of Mel bins", + "type": "integer" + }, + "spec_type": { + "anyOf": [ + { + "$ref": "#/$defs/AudioSpecTypeEnum" + }, + { + "type": "string" + } + ], + "default": "mel-librosa", + "description": "Advanced. Defines how to calculate the spectrogram. 'mel' uses the TorchAudio implementation for a Mel spectrogram. 'mel-librosa' uses Librosa's implementation. 'linear' calculates a non-Mel linear spectrogram and 'raw' calculates a complex-valued spectrogram. 'linear' and 'raw' are not currently supported by EveryVoice. We recommend using 'mel-librosa'.", + "title": "Spec Type" + }, + "vocoder_segment_size": { + "default": 8192, + "description": "Advanced. The vocoder, or spec-to-wav model is trained by sampling random fixed-size sections of the audio. This value specifies the number of samples in those sections.", + "title": "Vocoder Segment Size", + "type": "integer" + } + }, + "title": "AudioConfig", + "type": "object" + }, + "AudioSpecTypeEnum": { + "enum": [ + "mel", + "mel-librosa", + "linear", + "raw" + ], + "title": "AudioSpecTypeEnum", + "type": "string" + }, + "ConformerConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "layers": { + "default": 4, + "description": "The number of layers in the Conformer.", + "title": "Layers", + "type": "integer" + }, + "heads": { + "default": 2, + "description": "The number of heads in the multi-headed attention modules.", + "title": "Heads", + "type": "integer" + }, + "input_dim": { + "default": 256, + "description": "The number of hidden dimensions in the input. 
The input_dim value declared in the encoder and decoder modules must match the input_dim value declared in each variance predictor module.", + "title": "Input Dim", + "type": "integer" + }, + "feedforward_dim": { + "default": 1024, + "description": "The number of dimensions in the feedforward layers.", + "title": "Feedforward Dim", + "type": "integer" + }, + "conv_kernel_size": { + "default": 9, + "description": "The size of the kernel in each convolutional layer of the Conformer.", + "title": "Conv Kernel Size", + "type": "integer" + }, + "dropout": { + "default": 0.2, + "description": "The amount of dropout to apply.", + "title": "Dropout", + "type": "number" + } + }, + "title": "ConformerConfig", + "type": "object" + }, + "ContactInformation": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact_name": { + "description": "The name of the contact person or organization responsible for answering questions related to this model.", + "title": "Contact Name", + "type": "string" + }, + "contact_email": { + "description": "The email address of the contact person or organization responsible for answering questions related to this model.", + "format": "email", + "title": "Contact Email", + "type": "string" + } + }, + "required": [ + "contact_name", + "contact_email" + ], + "title": "ContactInformation", + "type": "object" + }, + "DFAlignerConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact": { + "allOf": [ + { + "$ref": "#/$defs/ContactInformation" + } + ], + "description": "EveryVoice requires a contact name and email to help prevent misuse. Please read our Guide to understand more about the importance of misuse prevention with TTS." + }, + "VERSION": { + "default": "1.0", + "title": "Version", + "type": "string" + }, + "model": { + "allOf": [ + { + "$ref": "#/$defs/DFAlignerModelConfig" + } + ], + "description": "The model configuration settings." + }, + "path_to_model_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a model configuration file.", + "title": "Path To Model Config File" + }, + "training": { + "allOf": [ + { + "$ref": "#/$defs/DFAlignerTrainingConfig" + } + ], + "description": "The training configuration hyperparameters." + }, + "path_to_training_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a training configuration file.", + "title": "Path To Training Config File" + }, + "preprocessing": { + "allOf": [ + { + "$ref": "#/$defs/PreprocessingConfig" + } + ], + "description": "The preprocessing configuration, including information about audio settings." 
+ }, + "path_to_preprocessing_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a preprocessing configuration file.", + "title": "Path To Preprocessing Config File" + }, + "text": { + "$ref": "#/$defs/TextConfig" + }, + "path_to_text_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Path To Text Config File" + } + }, + "required": [ + "contact" + ], + "title": "DFAlignerConfig", + "type": "object" + }, + "DFAlignerExtractionMethod": { + "enum": [ + "beam", + "dijkstra" + ], + "title": "DFAlignerExtractionMethod", + "type": "string" + }, + "DFAlignerModelConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "target_text_representation_level": { + "allOf": [ + { + "$ref": "#/$defs/TargetTrainingTextRepresentationLevel" + } + ], + "default": "characters" + }, + "lstm_dim": { + "default": 512, + "description": "The number of dimensions in the LSTM layers.", + "title": "Lstm Dim", + "type": "integer" + }, + "conv_dim": { + "default": 512, + "description": "The number of dimensions in the convolutional layers.", + "title": "Conv Dim", + "type": "integer" + } + }, + "title": "DFAlignerModelConfig", + "type": "object" + }, + "DFAlignerTrainingConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "batch_size": { + "default": 16, + "description": "The number of samples to include in each batch when training. If you are running out of memory, consider lowering your batch_size.", + "title": "Batch Size", + "type": "integer" + }, + "save_top_k_ckpts": { + "default": 5, + "description": "The number of checkpoints to save.", + "title": "Save Top K Ckpts", + "type": "integer" + }, + "ckpt_steps": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The interval (in steps) for saving a checkpoint. By default checkpoints are saved every epoch using the 'ckpt_epochs' hyperparameter", + "title": "Ckpt Steps" + }, + "ckpt_epochs": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "The interval (in epochs) for saving a checkpoint. You can also save checkpoints after n steps by using 'ckpt_steps'", + "title": "Ckpt Epochs" + }, + "val_check_interval": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 500, + "description": "How often to check the validation set. Pass a float in the range [0.0, 1.0] to check after a fraction of the training epoch. Pass an int to check after a fixed number of training batches.", + "title": "Val Check Interval" + }, + "check_val_every_n_epoch": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Run validation after every n epochs. 
Defaults to 1, but if you have a small dataset you should change this to be larger to speed up training", + "title": "Check Val Every N Epoch" + }, + "max_epochs": { + "default": 1000, + "description": "Stop training after this many epochs", + "title": "Max Epochs", + "type": "integer" + }, + "max_steps": { + "default": 100000, + "description": "Stop training after this many steps", + "title": "Max Steps", + "type": "integer" + }, + "finetune_checkpoint": { + "anyOf": [ + { + "format": "path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Automatically resume training from a checkpoint loaded from this path.", + "title": "Finetune Checkpoint" + }, + "training_filelist": { + "default": "path/to/your/preprocessed/training_filelist.psv", + "description": "The path to a filelist containing samples belonging to your training set.", + "format": "path", + "title": "Training Filelist", + "type": "string" + }, + "validation_filelist": { + "default": "path/to/your/preprocessed/validation_filelist.psv", + "description": "The path to a filelist containing samples belonging to your validation set.", + "format": "path", + "title": "Validation Filelist", + "type": "string" + }, + "filelist_loader": { + "description": "Advanced. The function to use to load the filelist.", + "title": "Filelist Loader", + "type": "string" + }, + "logger": { + "allOf": [ + { + "$ref": "#/$defs/LoggerConfig" + } + ], + "description": "The configuration for the logger." + }, + "val_data_workers": { + "default": 0, + "description": "The number of CPU workers to use when loading data during validation.", + "title": "Val Data Workers", + "type": "integer" + }, + "train_data_workers": { + "default": 4, + "description": "The number of CPU workers to use when loading data during training.", + "title": "Train Data Workers", + "type": "integer" + }, + "optimizer": { + "anyOf": [ + { + "$ref": "#/$defs/AdamOptimizer" + }, + { + "$ref": "#/$defs/AdamWOptimizer" + } + ], + "description": "Optimizer configuration settings.", + "title": "Optimizer" + }, + "binned_sampler": { + "default": true, + "description": "Use a binned length sampler", + "title": "Binned Sampler", + "type": "boolean" + }, + "plot_steps": { + "default": 1000, + "description": "The maximum number of steps to plot", + "title": "Plot Steps", + "type": "integer" + }, + "extraction_method": { + "allOf": [ + { + "$ref": "#/$defs/DFAlignerExtractionMethod" + } + ], + "default": "dijkstra", + "description": "The alignment extraction algorithm to use. 'beam' will be quicker but possibly less accurate than 'dijkstra'" + } + }, + "title": "DFAlignerTrainingConfig", + "type": "object" + }, + "Dataset": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "label": { + "default": "YourDataSet", + "description": "A label for the source of data", + "title": "Label", + "type": "string" + }, + "permissions_obtained": { + "default": false, + "description": "An attestation that permission has been obtained to use this data. You may not use EveryVoice to build a TTS system with data that you do not have permission to use and there are serious possible consequences for doing so. Finding data online does not constitute permission. 
The speaker should be aware and consent to their data being used in this way.", + "title": "Permissions Obtained", + "type": "boolean" + }, + "data_dir": { + "default": "/please/create/a/path/to/your/dataset/data", + "description": "The path to the directory with your audio files.", + "format": "path", + "title": "Data Dir", + "type": "string" + }, + "filelist": { + "default": "/please/create/a/path/to/your/dataset/filelist", + "description": "The path to your dataset's filelist.", + "format": "path", + "title": "Filelist", + "type": "string" + }, + "filelist_loader": { + "description": "Advanced. The file-loader function to use to load your dataset's filelist.", + "title": "Filelist Loader", + "type": "string" + }, + "sox_effects": { + "default": [ + [ + "channels", + "1" + ] + ], + "description": "Advanced. A list of SoX effects to apply to your audio prior to preprocessing. Run python -c 'import torchaudio; print(torchaudio.sox_effects.effect_names())' to see a list of supported effects.", + "items": {}, + "title": "Sox Effects", + "type": "array" + } + }, + "title": "Dataset", + "type": "object" + }, + "E2ETrainingConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "batch_size": { + "default": 16, + "description": "The number of samples to include in each batch when training. If you are running out of memory, consider lowering your batch_size.", + "title": "Batch Size", + "type": "integer" + }, + "save_top_k_ckpts": { + "default": 5, + "description": "The number of checkpoints to save.", + "title": "Save Top K Ckpts", + "type": "integer" + }, + "ckpt_steps": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The interval (in steps) for saving a checkpoint. By default checkpoints are saved every epoch using the 'ckpt_epochs' hyperparameter", + "title": "Ckpt Steps" + }, + "ckpt_epochs": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "The interval (in epochs) for saving a checkpoint. You can also save checkpoints after n steps by using 'ckpt_steps'", + "title": "Ckpt Epochs" + }, + "val_check_interval": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 500, + "description": "How often to check the validation set. Pass a float in the range [0.0, 1.0] to check after a fraction of the training epoch. Pass an int to check after a fixed number of training batches.", + "title": "Val Check Interval" + }, + "check_val_every_n_epoch": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Run validation after every n epochs. 
Defaults to 1, but if you have a small dataset you should change this to be larger to speed up training", + "title": "Check Val Every N Epoch" + }, + "max_epochs": { + "default": 1000, + "description": "Stop training after this many epochs", + "title": "Max Epochs", + "type": "integer" + }, + "max_steps": { + "default": 100000, + "description": "Stop training after this many steps", + "title": "Max Steps", + "type": "integer" + }, + "finetune_checkpoint": { + "anyOf": [ + { + "format": "path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Automatically resume training from a checkpoint loaded from this path.", + "title": "Finetune Checkpoint" + }, + "training_filelist": { + "default": "path/to/your/preprocessed/training_filelist.psv", + "description": "The path to a filelist containing samples belonging to your training set.", + "format": "path", + "title": "Training Filelist", + "type": "string" + }, + "validation_filelist": { + "default": "path/to/your/preprocessed/validation_filelist.psv", + "description": "The path to a filelist containing samples belonging to your validation set.", + "format": "path", + "title": "Validation Filelist", + "type": "string" + }, + "filelist_loader": { + "description": "Advanced. The function to use to load the filelist.", + "title": "Filelist Loader", + "type": "string" + }, + "logger": { + "allOf": [ + { + "$ref": "#/$defs/LoggerConfig" + } + ], + "description": "The configuration for the logger." + }, + "val_data_workers": { + "default": 0, + "description": "The number of CPU workers to use when loading data during validation.", + "title": "Val Data Workers", + "type": "integer" + }, + "train_data_workers": { + "default": 4, + "description": "The number of CPU workers to use when loading data during training.", + "title": "Train Data Workers", + "type": "integer" + }, + "feature_prediction_checkpoint": { + "anyOf": [ + { + "format": "path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Feature Prediction Checkpoint" + }, + "vocoder_checkpoint": { + "anyOf": [ + { + "format": "path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Vocoder Checkpoint" + } + }, + "title": "E2ETrainingConfig", + "type": "object" + }, + "FastSpeech2Config": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact": { + "allOf": [ + { + "$ref": "#/$defs/ContactInformation" + } + ], + "description": "EveryVoice requires a contact name and email to help prevent misuse. Please read our Guide to understand more about the importance of misuse prevention with TTS." + }, + "VERSION": { + "default": "1.1", + "title": "Version", + "type": "string" + }, + "model": { + "allOf": [ + { + "$ref": "#/$defs/FastSpeech2ModelConfig" + } + ], + "description": "The model configuration settings." + }, + "path_to_model_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a model configuration file.", + "title": "Path To Model Config File" + }, + "training": { + "allOf": [ + { + "$ref": "#/$defs/FastSpeech2TrainingConfig" + } + ], + "description": "The training configuration hyperparameters." 
+ },
      "path_to_training_config_file": {
        "anyOf": [
          {
            "format": "file-path",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "default": null,
        "description": "The path of a training configuration file.",
        "title": "Path To Training Config File"
      },
      "preprocessing": {
        "allOf": [
          {
            "$ref": "#/$defs/PreprocessingConfig"
          }
        ],
        "description": "The preprocessing configuration, including information about audio settings."
      },
      "path_to_preprocessing_config_file": {
        "anyOf": [
          {
            "format": "file-path",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "default": null,
        "description": "The path of a preprocessing configuration file.",
        "title": "Path To Preprocessing Config File"
      },
      "text": {
        "allOf": [
          {
            "$ref": "#/$defs/TextConfig"
          }
        ],
        "description": "The text configuration."
      },
      "path_to_text_config_file": {
        "anyOf": [
          {
            "format": "file-path",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "default": null,
        "description": "The path of a text configuration file.",
        "title": "Path To Text Config File"
      }
    },
    "required": [
      "contact"
    ],
    "title": "FastSpeech2Config",
    "type": "object"
  },
  "FastSpeech2ModelConfig": {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "additionalProperties": false,
    "properties": {
      "encoder": {
        "allOf": [
          {
            "$ref": "#/$defs/ConformerConfig"
          }
        ],
        "description": "The configuration of the encoder module."
      },
      "decoder": {
        "allOf": [
          {
            "$ref": "#/$defs/ConformerConfig"
          }
        ],
        "description": "The configuration of the decoder module."
      },
      "variance_predictors": {
        "allOf": [
          {
            "$ref": "#/$defs/VariancePredictors"
          }
        ],
        "description": "Configuration for energy, duration, and pitch variance predictors."
      },
      "target_text_representation_level": {
        "allOf": [
          {
            "$ref": "#/$defs/TargetTrainingTextRepresentationLevel"
          }
        ],
        "default": "characters"
      },
      "learn_alignment": {
        "default": true,
        "description": "Whether to jointly learn alignments using a monotonic alignment search module (see Badlani et al. 2021: https://arxiv.org/abs/2108.10447). If set to False, you will have to provide text/audio alignments separately before training a text-to-spec (feature prediction) model.",
        "title": "Learn Alignment",
        "type": "boolean"
      },
      "use_global_style_token_module": {
        "default": false,
        "description": "Whether to use the Global Style Token (GST) module from Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End Speech Synthesis (https://arxiv.org/abs/1803.09017)",
        "title": "Use Global Style Token Module",
        "type": "boolean"
      },
      "max_length": {
        "default": 1000,
        "description": "The maximum length (i.e. number of symbols) for text inputs.",
        "title": "Max Length",
        "type": "integer"
      },
      "mel_loss": {
        "allOf": [
          {
            "$ref": "#/$defs/VarianceLossEnum"
          }
        ],
        "default": "mse",
        "description": "The loss function to use when calculating Mel spectrogram loss."
      },
      "use_postnet": {
        "default": true,
        "description": "Whether to use a postnet module.",
        "title": "Use Postnet",
        "type": "boolean"
      },
      "multilingual": {
        "default": false,
        "description": "Whether to train a multilingual model. For this to work, your filelist must contain a column/field for 'language' with values for each utterance.",
        "title": "Multilingual",
        "type": "boolean"
      },
      "multispeaker": {
        "default": false,
        "description": "Whether to train a multispeaker model. 
For this to work, your filelist must contain a column/field for 'speaker' with values for each utterance.", + "title": "Multispeaker", + "type": "boolean" + } + }, + "title": "FastSpeech2ModelConfig", + "type": "object" + }, + "FastSpeech2TrainingConfig": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "batch_size": { + "default": 16, + "description": "The number of samples to include in each batch when training. If you are running out of memory, consider lowering your batch_size.", + "title": "Batch Size", + "type": "integer" + }, + "save_top_k_ckpts": { + "default": 5, + "description": "The number of checkpoints to save.", + "title": "Save Top K Ckpts", + "type": "integer" + }, + "ckpt_steps": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The interval (in steps) for saving a checkpoint. By default checkpoints are saved every epoch using the 'ckpt_epochs' hyperparameter", + "title": "Ckpt Steps" + }, + "ckpt_epochs": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "The interval (in epochs) for saving a checkpoint. You can also save checkpoints after n steps by using 'ckpt_steps'", + "title": "Ckpt Epochs" + }, + "val_check_interval": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 500, + "description": "How often to check the validation set. Pass a float in the range [0.0, 1.0] to check after a fraction of the training epoch. Pass an int to check after a fixed number of training batches.", + "title": "Val Check Interval" + }, + "check_val_every_n_epoch": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Run validation after every n epochs. Defaults to 1, but if you have a small dataset you should change this to be larger to speed up training", + "title": "Check Val Every N Epoch" + }, + "max_epochs": { + "default": 1000, + "description": "Stop training after this many epochs", + "title": "Max Epochs", + "type": "integer" + }, + "max_steps": { + "default": 100000, + "description": "Stop training after this many steps", + "title": "Max Steps", + "type": "integer" + }, + "finetune_checkpoint": { + "anyOf": [ + { + "format": "path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Automatically resume training from a checkpoint loaded from this path.", + "title": "Finetune Checkpoint" + }, + "training_filelist": { + "default": "path/to/your/preprocessed/training_filelist.psv", + "description": "The path to a filelist containing samples belonging to your training set.", + "format": "path", + "title": "Training Filelist", + "type": "string" + }, + "validation_filelist": { + "default": "path/to/your/preprocessed/validation_filelist.psv", + "description": "The path to a filelist containing samples belonging to your validation set.", + "format": "path", + "title": "Validation Filelist", + "type": "string" + }, + "filelist_loader": { + "description": "Advanced. The function to use to load the filelist.", + "title": "Filelist Loader", + "type": "string" + }, + "logger": { + "allOf": [ + { + "$ref": "#/$defs/LoggerConfig" + } + ], + "description": "The configuration for the logger." 
+ }, + "val_data_workers": { + "default": 0, + "description": "The number of CPU workers to use when loading data during validation.", + "title": "Val Data Workers", + "type": "integer" + }, + "train_data_workers": { + "default": 4, + "description": "The number of CPU workers to use when loading data during training.", + "title": "Train Data Workers", + "type": "integer" + }, + "use_weighted_sampler": { + "default": false, + "description": "Whether to use a sampler which oversamples from the minority language or speaker class for balanced training.", + "title": "Use Weighted Sampler", + "type": "boolean" + }, + "optimizer": { + "allOf": [ + { + "$ref": "#/$defs/NoamOptimizer" + } + ], + "default": { + "learning_rate": 0.001, + "eps": 1e-08, + "weight_decay": 1e-06, + "betas": [ + 0.9, + 0.999 + ], + "name": "noam", + "warmup_steps": 1000 + }, + "description": "The optimizer to use during training." + }, + "vocoder_path": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Vocoder Path" + }, + "mel_loss_weight": { + "default": 1.0, + "description": "Multiply the spec loss by this weight", + "title": "Mel Loss Weight", + "type": "number" + }, + "postnet_loss_weight": { + "default": 1.0, + "description": "Multiply the postnet loss by this weight", + "title": "Postnet Loss Weight", + "type": "number" + }, + "pitch_loss_weight": { + "default": 0.1, + "description": "Multiply the pitch loss by this weight", + "title": "Pitch Loss Weight", + "type": "number" + }, + "energy_loss_weight": { + "default": 0.1, + "description": "Multiply the energy loss by this weight", + "title": "Energy Loss Weight", + "type": "number" + }, + "duration_loss_weight": { + "default": 0.1, + "description": "Multiply the duration loss by this weight", + "title": "Duration Loss Weight", + "type": "number" + }, + "attn_ctc_loss_weight": { + "default": 0.1, + "description": "Multiply the Attention CTC loss by this weight", + "title": "Attn Ctc Loss Weight", + "type": "number" + }, + "attn_bin_loss_weight": { + "default": 0.1, + "description": "Multiply the Attention Binarization loss by this weight", + "title": "Attn Bin Loss Weight", + "type": "number" + }, + "attn_bin_loss_warmup_epochs": { + "default": 100, + "description": "Scale the Attention Binarization loss by (current_epoch / attn_bin_loss_warmup_epochs) until the number of epochs defined by attn_bin_loss_warmup_epochs is reached.", + "minimum": 1, + "title": "Attn Bin Loss Warmup Epochs", + "type": "integer" + } + }, + "title": "FastSpeech2TrainingConfig", + "type": "object" + }, + "FeaturePredictionConfigNoContact": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact": { + "anyOf": [ + { + "$ref": "#/$defs/ContactInformation" + }, + { + "type": "null" + } + ], + "default": null + }, + "VERSION": { + "default": "1.1", + "title": "Version", + "type": "string" + }, + "model": { + "allOf": [ + { + "$ref": "#/$defs/FastSpeech2ModelConfig" + } + ], + "description": "The model configuration settings." + }, + "path_to_model_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a model configuration file.", + "title": "Path To Model Config File" + }, + "training": { + "allOf": [ + { + "$ref": "#/$defs/FastSpeech2TrainingConfig" + } + ], + "description": "The training configuration hyperparameters." 
+ },
      "path_to_training_config_file": {
        "anyOf": [
          {
            "format": "file-path",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "default": null,
        "description": "The path of a training configuration file.",
        "title": "Path To Training Config File"
      },
      "preprocessing": {
        "allOf": [
          {
            "$ref": "#/$defs/PreprocessingConfig"
          }
        ],
        "description": "The preprocessing configuration, including information about audio settings."
      },
      "path_to_preprocessing_config_file": {
        "anyOf": [
          {
            "format": "file-path",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "default": null,
        "description": "The path of a preprocessing configuration file.",
        "title": "Path To Preprocessing Config File"
      },
      "text": {
        "allOf": [
          {
            "$ref": "#/$defs/TextConfig"
          }
        ],
        "description": "The text configuration."
      },
      "path_to_text_config_file": {
        "anyOf": [
          {
            "format": "file-path",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "default": null,
        "description": "The path of a text configuration file.",
        "title": "Path To Text Config File"
      }
    },
    "title": "FeaturePredictionConfigNoContact",
    "type": "object"
  },
  "HiFiGANConfig": {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "additionalProperties": false,
    "properties": {
      "contact": {
        "allOf": [
          {
            "$ref": "#/$defs/ContactInformation"
          }
        ],
        "description": "EveryVoice requires a contact name and email to help prevent misuse. Please read our Guide to understand more about the importance of misuse prevention with TTS."
      },
      "VERSION": {
        "default": "1.0",
        "title": "Version",
        "type": "string"
      },
      "model": {
        "allOf": [
          {
            "$ref": "#/$defs/HiFiGANModelConfig"
          }
        ],
        "description": "The model configuration settings."
      },
      "path_to_model_config_file": {
        "anyOf": [
          {
            "format": "file-path",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "default": null,
        "description": "The path of a model configuration file.",
        "title": "Path To Model Config File"
      },
      "training": {
        "allOf": [
          {
            "$ref": "#/$defs/HiFiGANTrainingConfig"
          }
        ],
        "description": "The training configuration hyperparameters."
      },
      "path_to_training_config_file": {
        "anyOf": [
          {
            "format": "file-path",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "default": null,
        "description": "The path of a training configuration file.",
        "title": "Path To Training Config File"
      },
      "preprocessing": {
        "allOf": [
          {
            "$ref": "#/$defs/PreprocessingConfig"
          }
        ],
        "description": "The preprocessing configuration, including information about audio settings."
      },
      "path_to_preprocessing_config_file": {
        "anyOf": [
          {
            "format": "file-path",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "default": null,
        "description": "The path of a preprocessing configuration file.",
        "title": "Path To Preprocessing Config File"
      }
    },
    "required": [
      "contact"
    ],
    "title": "HiFiGANConfig",
    "type": "object"
  },
  "HiFiGANModelConfig": {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "additionalProperties": false,
    "properties": {
      "resblock": {
        "allOf": [
          {
            "$ref": "#/$defs/HiFiGANResblock"
          }
        ],
        "default": "1",
        "description": "Which resblock to use. See Kong et al. 2020: https://arxiv.org/abs/2010.05646"
      },
      "upsample_rates": {
        "default": [
          8,
          8,
          2,
          2
        ],
        "description": "The stride of each convolutional layer in the upsampling module.",
        "items": {
          "type": "integer"
        },
        "title": "Upsample Rates",
        "type": "array"
      },
      "upsample_kernel_sizes": {
        "default": [
          16,
          16,
          4,
          4
        ],
        "description": "The kernel size of each convolutional layer in the upsampling module.",
        "items": {
          "type": "integer"
        },
        "title": "Upsample Kernel Sizes",
        "type": "array"
      },
      "upsample_initial_channel": {
        "default": 512,
        "description": "The number of dimensions to project the Mel inputs to before being passed to the resblock.",
        "title": "Upsample Initial Channel",
        "type": "integer"
      },
      "resblock_kernel_sizes": {
        "default": [
          3,
          7,
          11
        ],
        "description": "The kernel size of each convolutional layer in the resblock.",
        "items": {
          "type": "integer"
        },
        "title": "Resblock Kernel Sizes",
        "type": "array"
      },
      "resblock_dilation_sizes": {
        "default": [
          [
            1,
            3,
            5
          ],
          [
            1,
            3,
            5
          ],
          [
            1,
            3,
            5
          ]
        ],
        "description": "The dilations of each convolution in each layer of the resblock.",
        "items": {
          "items": {
            "type": "integer"
          },
          "type": "array"
        },
        "title": "Resblock Dilation Sizes",
        "type": "array"
      },
      "activation_function": {
        "description": "The activation function to use.",
        "title": "Activation Function",
        "type": "string"
      },
      "istft_layer": {
        "default": false,
        "description": "Whether to predict phase and magnitude values and use an inverse Short-Time Fourier Transform instead of predicting a waveform directly. See Kaneko et al. 2022: https://arxiv.org/abs/2203.02395",
        "title": "Istft Layer",
        "type": "boolean"
      },
      "msd_layers": {
        "default": 3,
        "description": "The number of layers to use in the Multi-Scale Discriminator.",
        "title": "Msd Layers",
        "type": "integer"
      },
      "mpd_layers": {
        "default": [
          2,
          3,
          5,
          7,
          11
        ],
        "description": "The size of each layer in the Multi-Period Discriminator.",
        "items": {
          "type": "integer"
        },
        "title": "Mpd Layers",
        "type": "array"
      }
    },
    "title": "HiFiGANModelConfig",
    "type": "object"
  },
  "HiFiGANResblock": {
    "enum": [
      "1",
      "2"
    ],
    "title": "HiFiGANResblock",
    "type": "string"
  },
  "HiFiGANTrainTypes": {
    "enum": [
      "original",
      "wgan"
    ],
    "title": "HiFiGANTrainTypes",
    "type": "string"
  },
  "HiFiGANTrainingConfig": {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "additionalProperties": false,
    "properties": {
      "batch_size": {
        "default": 16,
        "description": "The number of samples to include in each batch when training. If you are running out of memory, consider lowering your batch_size.",
        "title": "Batch Size",
        "type": "integer"
      },
      "save_top_k_ckpts": {
        "default": 5,
        "description": "The number of checkpoints to save.",
        "title": "Save Top K Ckpts",
        "type": "integer"
      },
      "ckpt_steps": {
        "anyOf": [
          {
            "minimum": 0,
            "type": "integer"
          },
          {
            "type": "null"
          }
        ],
        "default": null,
        "description": "The interval (in steps) for saving a checkpoint. By default checkpoints are saved every epoch using the 'ckpt_epochs' hyperparameter",
        "title": "Ckpt Steps"
      },
      "ckpt_epochs": {
        "anyOf": [
          {
            "minimum": 0,
            "type": "integer"
          },
          {
            "type": "null"
          }
        ],
        "default": 1,
        "description": "The interval (in epochs) for saving a checkpoint. 
You can also save checkpoints after n steps by using 'ckpt_steps'", + "title": "Ckpt Epochs" + }, + "val_check_interval": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 500, + "description": "How often to check the validation set. Pass a float in the range [0.0, 1.0] to check after a fraction of the training epoch. Pass an int to check after a fixed number of training batches.", + "title": "Val Check Interval" + }, + "check_val_every_n_epoch": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Run validation after every n epochs. Defaults to 1, but if you have a small dataset you should change this to be larger to speed up training", + "title": "Check Val Every N Epoch" + }, + "max_epochs": { + "default": 1000, + "description": "Stop training after this many epochs", + "title": "Max Epochs", + "type": "integer" + }, + "max_steps": { + "default": 100000, + "description": "Stop training after this many steps", + "title": "Max Steps", + "type": "integer" + }, + "finetune_checkpoint": { + "anyOf": [ + { + "format": "path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Automatically resume training from a checkpoint loaded from this path.", + "title": "Finetune Checkpoint" + }, + "training_filelist": { + "default": "path/to/your/preprocessed/training_filelist.psv", + "description": "The path to a filelist containing samples belonging to your training set.", + "format": "path", + "title": "Training Filelist", + "type": "string" + }, + "validation_filelist": { + "default": "path/to/your/preprocessed/validation_filelist.psv", + "description": "The path to a filelist containing samples belonging to your validation set.", + "format": "path", + "title": "Validation Filelist", + "type": "string" + }, + "filelist_loader": { + "description": "Advanced. The function to use to load the filelist.", + "title": "Filelist Loader", + "type": "string" + }, + "logger": { + "allOf": [ + { + "$ref": "#/$defs/LoggerConfig" + } + ], + "description": "The configuration for the logger." + }, + "val_data_workers": { + "default": 0, + "description": "The number of CPU workers to use when loading data during validation.", + "title": "Val Data Workers", + "type": "integer" + }, + "train_data_workers": { + "default": 4, + "description": "The number of CPU workers to use when loading data during training.", + "title": "Train Data Workers", + "type": "integer" + }, + "generator_warmup_steps": { + "default": 0, + "description": "The number of steps to run through before activating the discriminators.", + "title": "Generator Warmup Steps", + "type": "integer" + }, + "gan_type": { + "allOf": [ + { + "$ref": "#/$defs/HiFiGANTrainTypes" + } + ], + "default": "original", + "description": "The type of GAN to use. Can be set to either 'original' for a vanilla GAN, or 'wgan' for a Wasserstein GAN that clips gradients." 
+ },
      "optimizer": {
        "anyOf": [
          {
            "$ref": "#/$defs/AdamOptimizer"
          },
          {
            "$ref": "#/$defs/AdamWOptimizer"
          },
          {
            "$ref": "#/$defs/RMSOptimizer"
          }
        ],
        "description": "Configuration settings for the optimizer.",
        "title": "Optimizer"
      },
      "wgan_clip_value": {
        "default": 0.01,
        "description": "The gradient clip value when gan_type='wgan'.",
        "title": "Wgan Clip Value",
        "type": "number"
      },
      "use_weighted_sampler": {
        "default": false,
        "description": "Whether to use a sampler which oversamples from the minority language or speaker class for balanced training.",
        "title": "Use Weighted Sampler",
        "type": "boolean"
      },
      "finetune": {
        "default": false,
        "description": "Whether to read spectrograms from 'preprocessed/synthesized_spec' instead of 'preprocessed/spec'. This is used when finetuning a pretrained spec-to-wav (vocoder) model using the outputs of a trained text-to-spec (feature prediction network) model.",
        "title": "Finetune",
        "type": "boolean"
      }
    },
    "title": "HiFiGANTrainingConfig",
    "type": "object"
  },
  "LoggerConfig": {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "additionalProperties": false,
    "description": "The logger configures all the information needed for where to store your experiment's logs and checkpoints.\nThe structure of your logs will then be:\n<name> / <version> / <sub_dir>\n<sub_dir> will be generated by calling <sub_dir_callable> each time the LoggerConfig is constructed.",
    "properties": {
      "name": {
        "default": "BaseExperiment",
        "description": "The name of the experiment. The structure of your logs will be <name> / <version> / <sub_dir>.",
        "title": "Experiment Name",
        "type": "string"
      },
      "save_dir": {
        "default": "logs_and_checkpoints",
        "description": "The directory to save your checkpoints and logs to.",
        "format": "path",
        "title": "Save Dir",
        "type": "string"
      },
      "sub_dir_callable": {
        "description": "The function that generates a string to call your runs - by default this is a timestamp. The structure of your logs will be <name> / <version> / <sub_dir> where <sub_dir> is a timestamp.",
        "title": "Sub Dir Callable",
        "type": "string"
      },
      "version": {
        "default": "base",
        "description": "The version of your experiment. The structure of your logs will be <name> / <version> / <sub_dir>.",
        "title": "Version",
        "type": "string"
      }
    },
    "title": "LoggerConfig",
    "type": "object"
  },
  "NoamOptimizer": {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "additionalProperties": false,
    "properties": {
      "learning_rate": {
        "default": 0.0001,
        "description": "The initial learning rate to use",
        "title": "Learning Rate",
        "type": "number"
      },
      "eps": {
        "default": 1e-08,
        "description": "Advanced. The value of optimizer constant Epsilon, used for numerical stability.",
        "title": "Eps",
        "type": "number"
      },
      "weight_decay": {
        "default": 0.01,
        "title": "Weight Decay",
        "type": "number"
      },
      "betas": {
        "default": [
          0.9,
          0.98
        ],
        "description": "Advanced. The values of the Adam Optimizer beta coefficients.",
        "maxItems": 2,
        "minItems": 2,
        "prefixItems": [
          {
            "type": "number"
          },
          {
            "type": "number"
          }
        ],
        "title": "Betas",
        "type": "array"
      },
      "name": {
        "default": "noam",
        "description": "The name of the optimizer to use.",
        "title": "Name",
        "type": "string"
      },
      "warmup_steps": {
        "default": 1000,
        "description": "The number of steps to increase the learning rate before starting to decrease it.",
        "title": "Warmup Steps",
        "type": "integer"
      }
    },
    "title": "NoamOptimizer",
    "type": "object"
  },
  "PreprocessingConfig": {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "additionalProperties": false,
    "properties": {
      "dataset": {
        "default": "YourDataSet",
        "description": "The name of the dataset.",
        "title": "Dataset",
        "type": "string"
      },
      "train_split": {
        "default": 0.9,
        "description": "The amount of the dataset to use for training. The rest will be used as validation. Hold some of the validation set out for a test set if you are performing experiments.",
        "maximum": 1.0,
        "minimum": 0.0,
        "title": "Train Split",
        "type": "number"
      },
      "dataset_split_seed": {
        "default": 1234,
        "description": "The seed to use when splitting the dataset into train and validation sets.",
        "title": "Dataset Split Seed",
        "type": "integer"
      },
      "save_dir": {
        "default": "preprocessed/YourDataSet",
        "description": "The directory to save preprocessed files to.",
        "format": "path",
        "title": "Save Dir",
        "type": "string"
      },
      "audio": {
        "allOf": [
          {
            "$ref": "#/$defs/AudioConfig"
          }
        ],
        "description": "Configuration settings for audio."
      },
      "path_to_audio_config_file": {
        "anyOf": [
          {
            "format": "file-path",
            "type": "string"
          },
          {
            "type": "null"
          }
        ],
        "default": null,
        "description": "The path to an audio configuration file.",
        "title": "Path To Audio Config File"
      },
      "source_data": {
        "description": "A list of datasets.",
        "items": {
          "$ref": "#/$defs/Dataset"
        },
        "title": "Source Data",
        "type": "array"
      }
    },
    "title": "PreprocessingConfig",
    "type": "object"
  },
  "Punctuation": {
    "properties": {
      "exclamations": {
        "default": [
          "!",
          "\u00a1"
        ],
        "description": "Exclamation punctuation symbols used in your datasets. Replaces these symbols with an internal token.",
        "items": {
          "type": "string"
        },
        "title": "Exclamations",
        "type": "array"
      },
      "question_symbols": {
        "default": [
          "?",
          "\u00bf"
        ],
        "description": "Question/interrogative punctuation symbols used in your datasets. Replaces these symbols with an internal token.",
        "items": {
          "type": "string"
        },
        "title": "Question Symbols",
        "type": "array"
      },
      "quotemarks": {
        "default": [
          "\"",
          "'",
          "\u201c",
          "\u201d",
          "\u00ab",
          "\u00bb"
        ],
        "description": "Quotemark punctuation symbols used in your datasets. Replaces these symbols with an internal token.",
        "items": {
          "type": "string"
        },
        "title": "Quotemarks",
        "type": "array"
      },
      "big_breaks": {
        "default": [
          ".",
          ":",
          ";"
        ],
        "description": "Punctuation symbols indicating a 'big break' used in your datasets. Replaces these symbols with an internal token.",
        "items": {
          "type": "string"
        },
        "title": "Big Breaks",
        "type": "array"
      },
      "small_breaks": {
        "default": [
          ",",
          "-",
          "\u2014"
        ],
        "description": "Punctuation symbols indicating a 'small break' used in your datasets. Replaces these symbols with an internal token.",
        "items": {
          "type": "string"
        },
        "title": "Small Breaks",
        "type": "array"
      },
      "ellipsis": {
        "default": [
          "\u2026"
        ],
        "description": "Punctuation symbols indicating an ellipsis used in your datasets. Replaces these symbols with an internal token.",
        "items": {
          "type": "string"
        },
        "title": "Ellipsis",
        "type": "array"
      }
    },
    "title": "Punctuation",
    "type": "object"
  },
  "RMSOptimizer": {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "additionalProperties": false,
    "properties": {
      "learning_rate": {
        "default": 0.0001,
        "description": "The initial learning rate to use",
        "title": "Learning Rate",
        "type": "number"
      },
      "eps": {
        "default": 1e-08,
        "description": "Advanced. The value of optimizer constant Epsilon, used for numerical stability.",
        "title": "Eps",
        "type": "number"
      },
      "weight_decay": {
        "default": 0.01,
        "title": "Weight Decay",
        "type": "number"
      },
      "alpha": {
        "default": 0.99,
        "description": "Advanced. The value of the RMSProp optimizer's alpha smoothing constant.",
        "title": "Alpha",
        "type": "number"
      },
      "name": {
        "default": "rms",
        "description": "The name of the optimizer to use.",
        "title": "Name",
        "type": "string"
      }
    },
    "title": "RMSOptimizer",
    "type": "object"
  },
  "Symbols": {
    "additionalProperties": true,
    "properties": {
      "silence": {
        "default": [
          ""
        ],
        "description": "The symbol(s) used to indicate silence.",
        "items": {
          "type": "string"
        },
        "title": "Silence",
        "type": "array"
      },
      "punctuation": {
        "allOf": [
          {
            "$ref": "#/$defs/Punctuation"
          }
        ],
        "description": "EveryVoice will combine punctuation and normalize it into a set of five permissible types of punctuation to help keep training tractable."
      }
    },
    "title": "Symbols",
    "type": "object"
  },
  "TargetTrainingTextRepresentationLevel": {
    "enum": [
      "characters",
      "phones",
      "phonological_features"
    ],
    "title": "TargetTrainingTextRepresentationLevel",
    "type": "string"
  },
  "TextConfig": {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "additionalProperties": false,
    "properties": {
      "symbols": {
        "$ref": "#/$defs/Symbols"
      },
      "to_replace": {
        "additionalProperties": {
          "type": "string"
        },
        "default": {},
        "title": "To Replace",
        "type": "object"
      },
      "cleaners": {
        "items": {
          "type": "string"
        },
        "title": "Cleaners",
        "type": "array"
      }
    },
    "title": "TextConfig",
    "type": "object"
  },
  "VarianceLevelEnum": {
    "enum": [
      "phone",
      "frame"
    ],
    "title": "VarianceLevelEnum",
    "type": "string"
  },
  "VarianceLossEnum": {
    "enum": [
      "mse",
      "mae"
    ],
    "title": "VarianceLossEnum",
    "type": "string"
  },
  "VariancePredictorBase": {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "additionalProperties": false,
    "properties": {
      "loss": {
        "allOf": [
          {
            "$ref": "#/$defs/VarianceLossEnum"
          }
        ],
        "default": "mse",
        "description": "The loss function to use when calculating variance loss. Either 'mse' or 'mae'."
+ },
      "n_layers": {
        "default": 5,
        "description": "The number of layers in the variance predictor module.",
        "title": "N Layers",
        "type": "integer"
      },
      "kernel_size": {
        "default": 3,
        "description": "The kernel size of each convolutional layer in the variance predictor module.",
        "title": "Kernel Size",
        "type": "integer"
      },
      "dropout": {
        "default": 0.5,
        "description": "The amount of dropout to apply.",
        "title": "Dropout",
        "type": "number"
      },
      "input_dim": {
        "default": 256,
        "description": "The number of hidden dimensions in the input. This must match the input_dim value declared in the encoder and decoder modules.",
        "title": "Input Dim",
        "type": "integer"
      },
      "n_bins": {
        "default": 256,
        "description": "The number of bins to use in the variance predictor module.",
        "title": "N Bins",
        "type": "integer"
      },
      "depthwise": {
        "default": true,
        "description": "Whether to use depthwise separable convolutions.",
        "title": "Depthwise",
        "type": "boolean"
      }
    },
    "title": "VariancePredictorBase",
    "type": "object"
  },
  "VariancePredictorConfig": {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "additionalProperties": false,
    "properties": {
      "loss": {
        "allOf": [
          {
            "$ref": "#/$defs/VarianceLossEnum"
          }
        ],
        "default": "mse",
        "description": "The loss function to use when calculating variance loss. Either 'mse' or 'mae'."
      },
      "n_layers": {
        "default": 5,
        "description": "The number of layers in the variance predictor module.",
        "title": "N Layers",
        "type": "integer"
      },
      "kernel_size": {
        "default": 3,
        "description": "The kernel size of each convolutional layer in the variance predictor module.",
        "title": "Kernel Size",
        "type": "integer"
      },
      "dropout": {
        "default": 0.5,
        "description": "The amount of dropout to apply.",
        "title": "Dropout",
        "type": "number"
      },
      "input_dim": {
        "default": 256,
        "description": "The number of hidden dimensions in the input. This must match the input_dim value declared in the encoder and decoder modules.",
        "title": "Input Dim",
        "type": "integer"
      },
      "n_bins": {
        "default": 256,
        "description": "The number of bins to use in the variance predictor module.",
        "title": "N Bins",
        "type": "integer"
      },
      "depthwise": {
        "default": true,
        "description": "Whether to use depthwise separable convolutions.",
        "title": "Depthwise",
        "type": "boolean"
      },
      "level": {
        "allOf": [
          {
            "$ref": "#/$defs/VarianceLevelEnum"
          }
        ],
        "default": "phone",
        "description": "The level for the variance predictor to use. 'frame' will make predictions at the frame level. 'phone' will average predictions across all frames in each phone."
+ } + }, + "title": "VariancePredictorConfig", + "type": "object" + }, + "VariancePredictors": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "energy": { + "allOf": [ + { + "$ref": "#/$defs/VariancePredictorConfig" + } + ], + "description": "The variance predictor for energy" + }, + "duration": { + "allOf": [ + { + "$ref": "#/$defs/VariancePredictorBase" + } + ], + "description": "The variance predictor for duration" + }, + "pitch": { + "allOf": [ + { + "$ref": "#/$defs/VariancePredictorConfig" + } + ], + "description": "The variance predictor for pitch" + } + }, + "title": "VariancePredictors", + "type": "object" + }, + "VocoderConfigNoContact": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact": { + "anyOf": [ + { + "$ref": "#/$defs/ContactInformation" + }, + { + "type": "null" + } + ], + "default": null + }, + "VERSION": { + "default": "1.0", + "title": "Version", + "type": "string" + }, + "model": { + "allOf": [ + { + "$ref": "#/$defs/HiFiGANModelConfig" + } + ], + "description": "The model configuration settings." + }, + "path_to_model_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a model configuration file.", + "title": "Path To Model Config File" + }, + "training": { + "allOf": [ + { + "$ref": "#/$defs/HiFiGANTrainingConfig" + } + ], + "description": "The training configuration hyperparameters." + }, + "path_to_training_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a training configuration file.", + "title": "Path To Training Config File" + }, + "preprocessing": { + "allOf": [ + { + "$ref": "#/$defs/PreprocessingConfig" + } + ], + "description": "The preprocessing configuration, including information about audio settings." + }, + "path_to_preprocessing_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The path of a preprocessing configuration file.", + "title": "Path To Preprocessing Config File" + } + }, + "title": "VocoderConfigNoContact", + "type": "object" + } + }, + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "contact": { + "allOf": [ + { + "$ref": "#/$defs/ContactInformation" + } + ], + "description": "EveryVoice requires a contact name and email to help prevent misuse. Please read our Guide to understand more about the importance of misuse prevention with TTS." 
+ }, + "aligner": { + "anyOf": [ + { + "$ref": "#/$defs/DFAlignerConfig" + }, + { + "$ref": "#/$defs/AlignerConfigNoContact" + } + ], + "title": "Aligner" + }, + "path_to_aligner_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Path To Aligner Config File" + }, + "feature_prediction": { + "anyOf": [ + { + "$ref": "#/$defs/FastSpeech2Config" + }, + { + "$ref": "#/$defs/FeaturePredictionConfigNoContact" + } + ], + "title": "Feature Prediction" + }, + "path_to_feature_prediction_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Path To Feature Prediction Config File" + }, + "vocoder": { + "anyOf": [ + { + "$ref": "#/$defs/HiFiGANConfig" + }, + { + "$ref": "#/$defs/VocoderConfigNoContact" + } + ], + "title": "Vocoder" + }, + "path_to_vocoder_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Path To Vocoder Config File" + }, + "training": { + "$ref": "#/$defs/E2ETrainingConfig" + }, + "path_to_training_config_file": { + "anyOf": [ + { + "format": "file-path", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Path To Training Config File" + } + }, + "required": [ + "contact" + ], + "title": "EveryVoiceConfig", + "type": "object" +} diff --git a/everyvoice/_version.py b/everyvoice/_version.py index ec079d72..f36068aa 100644 --- a/everyvoice/_version.py +++ b/everyvoice/_version.py @@ -2,4 +2,4 @@ # [PEP 440 – Version Identification and Dependency Specification](https://peps.python.org/pep-0440/) # noqa: E501 # [Specifying Your Project’s Version](https://setuptools.pypa.io/en/latest/userguide/distribution.html) # noqa: E501 # [N!]N(.N)*[{a|b|rc}N][.postN][.devN] -VERSION = "0.2.0a1" +VERSION = "0.3.0" From ee455b3d85bf7dbc524da7c2d34940405dbc3709 Mon Sep 17 00:00:00 2001 From: Aidan Pine Date: Tue, 14 Jan 2025 13:18:43 -0800 Subject: [PATCH 6/7] fix(tests): allow model to be 1.0 or 1.1 --- everyvoice/tests/test_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/everyvoice/tests/test_model.py b/everyvoice/tests/test_model.py index fae447c5..738b0feb 100644 --- a/everyvoice/tests/test_model.py +++ b/everyvoice/tests/test_model.py @@ -404,7 +404,7 @@ def test_missing_model_version(self): torch.save(m, ckpt_fn) with mute_logger("everyvoice.config.text_config"): model = ModelType.load_from_checkpoint(ckpt_fn) - self.assertEqual(model._VERSION, "1.0") + self.assertIn(model._VERSION, ["1.0", "1.1"]) def test_newer_model_version(self): """ From 6b8cb165a98712e7347f8f0bd1b97486a65aef3a Mon Sep 17 00:00:00 2001 From: Aidan Pine Date: Mon, 20 Jan 2025 09:14:34 -0800 Subject: [PATCH 7/7] docs: add new fs2 license info to top-level license --- LICENSE | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/LICENSE b/LICENSE index 56489d71..8fce492d 100644 --- a/LICENSE +++ b/LICENSE @@ -105,6 +105,38 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +For everyvoice/model/feature_prediction/FastSpeech2_lightning/fs2/gst/attn.py: + +Copyright (c) 2019, Shigeki Karita. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +For everyvoice/model/feature_prediction/FastSpeech2_lightning/fs2/gst/model.py (sourced from ESPNet2): + +Copyright (c) 2020, Nagoya University (Tomoki Hayashi). All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + For files in everyvoice/model/aligner/DeepForcedAligner: