From a813186f6463b4d3f6d73460dd1a57b2e975b803 Mon Sep 17 00:00:00 2001
From: zr_jin
Date: Mon, 5 Feb 2024 12:47:52 +0800
Subject: [PATCH] minor fix for docstr and default param. (#1490)

* Update train.py and README.md
---
 README.md                        | 3 +++
 egs/aishell/ASR/whisper/train.py | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index cc817702b6..7700661667 100644
--- a/README.md
+++ b/README.md
@@ -74,6 +74,9 @@ The [LibriSpeech][librispeech] recipe supports the most comprehensive set of mod
   - LSTM-based Predictor
   - [Stateless Predictor](https://research.google/pubs/rnn-transducer-with-stateless-prediction-network/)
 
+#### Whisper
+  - [OpenAI Whisper](https://arxiv.org/abs/2212.04356) (We support fine-tuning on AISHELL-1.)
+
 If you are willing to contribute to icefall, please refer to
 [contributing](https://icefall.readthedocs.io/en/latest/contributing/index.html)
 for more details.
 
 We would like to highlight the performance of some of the recipes here.

diff --git a/egs/aishell/ASR/whisper/train.py b/egs/aishell/ASR/whisper/train.py
index d16793eb28..073b237137 100755
--- a/egs/aishell/ASR/whisper/train.py
+++ b/egs/aishell/ASR/whisper/train.py
@@ -19,7 +19,7 @@
 Usage:
 
 # fine-tuning with deepspeed zero stage 1
-torchrun --nproc-per-node 8 ./whisper/train.py \
+torchrun --nproc_per_node 8 ./whisper/train.py \
   --max-duration 200 \
   --exp-dir whisper/exp_large_v2 \
   --model-name large-v2 \
@@ -28,7 +28,7 @@
   --deepspeed_config ./whisper/ds_config_zero1.json
 
 # fine-tuning with ddp
-torchrun --nproc-per-node 8 ./whisper/train.py \
+torchrun --nproc_per_node 8 ./whisper/train.py \
   --max-duration 200 \
   --exp-dir whisper/exp_medium \
   --manifest-dir data/fbank_whisper \
@@ -136,7 +136,7 @@ def get_parser():
     parser.add_argument(
         "--exp-dir",
         type=str,
-        default="pruned_transducer_stateless7/exp",
+        default="whisper/exp",
         help="""The experiment dir.
         It specifies the directory where all training related
        files, e.g., checkpoints, log, etc, are saved
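
For context on the last hunk: the old default `pruned_transducer_stateless7/exp` was copied from another recipe, so a run of `./whisper/train.py` that omitted `--exp-dir` would have written checkpoints and logs into an unrelated directory. Below is a minimal, self-contained sketch of the corrected argument; the real `get_parser()` in train.py defines many more options, and everything here beyond the `--exp-dir` definition itself is illustrative scaffolding, not the recipe's actual code:

```python
import argparse


def get_parser():
    # Abridged sketch of the parser; the real train.py adds many more flags.
    parser = argparse.ArgumentParser(
        description="Fine-tune Whisper on AISHELL-1 (abridged sketch)"
    )
    # After this patch, the default matches the recipe's own directory,
    # so runs that omit --exp-dir save into whisper/exp.
    parser.add_argument(
        "--exp-dir",
        type=str,
        default="whisper/exp",
        help="""The experiment dir.
        It specifies the directory where all training related
        files, e.g., checkpoints, log, etc, are saved
        """,
    )
    return parser


if __name__ == "__main__":
    args = get_parser().parse_args()
    print(args.exp_dir)  # prints "whisper/exp" when the flag is omitted
```

As for the first two hunks: `torchrun` originally exposed the worker-count flag only as `--nproc_per_node` (underscores); hyphenated spellings were, to my knowledge, only accepted in later PyTorch releases, so the underscore form documented here is the more portable one.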