From ad3a4de14d06093a469559a9899c45e7acd0a623 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Wed, 4 Oct 2023 22:40:58 -0700
Subject: [PATCH] bugfix: trainer.gpus, trainer.strategy, trainer.accelerator
 (#7621) (#7642)

* [TTS] bugfix for Tacotron2 tutorial due to PTL 2.0

* trainer.gpus -> trainer.devices

* fixed related tutorial bugs

---------

Signed-off-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com>
Co-authored-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com>
Signed-off-by: Sasha Meister
---
 docs/source/asr/speaker_diarization/datasets.rst          | 2 +-
 examples/asr/experimental/k2/align_speech_parallel.py     | 2 +-
 examples/asr/experimental/k2/speech_to_text_bpe.py        | 2 +-
 .../asr/speech_translation/speech_to_text_transformer.py  | 2 +-
 .../multi_label_intent_slot_classification.py             | 2 +-
 tutorials/tts/FastPitch_MixerTTS_Training.ipynb           | 5 +++--
 tutorials/tts/Tacotron2_Training.ipynb                    | 4 ++--
 tutorials/tts/Vits_Training.ipynb                         | 2 +-
 8 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/docs/source/asr/speaker_diarization/datasets.rst b/docs/source/asr/speaker_diarization/datasets.rst
index ff73dad8601a..9f1a43a58f11 100644
--- a/docs/source/asr/speaker_diarization/datasets.rst
+++ b/docs/source/asr/speaker_diarization/datasets.rst
@@ -107,7 +107,7 @@ Prepare the msdd training dataset for both train and validation. After the train
 .. code-block:: bash

   python ./multiscale_diar_decoder.py --config-path='../conf/neural_diarizer' --config-name='msdd_5scl_15_05_50Povl_256x3x32x2.yaml' \
-    trainer.gpus=1 \
+    trainer.devices=1 \
     trainer.max_epochs=20 \
     model.base.diarizer.speaker_embeddings.model_path="titanet_large" \
     model.train_ds.manifest_filepath="" \
diff --git a/examples/asr/experimental/k2/align_speech_parallel.py b/examples/asr/experimental/k2/align_speech_parallel.py
index dcccee48ee27..8ddf036f3e38 100644
--- a/examples/asr/experimental/k2/align_speech_parallel.py
+++ b/examples/asr/experimental/k2/align_speech_parallel.py
@@ -46,7 +46,7 @@
 python align_speech_parallel.py \
     trainer.precision=16 \
-    trainer.gpus=2 \
+    trainer.devices=2 \
     ...

 You may control the dataloader's config by setting the predict_ds:
diff --git a/examples/asr/experimental/k2/speech_to_text_bpe.py b/examples/asr/experimental/k2/speech_to_text_bpe.py
index 5eefdfaf1fe3..ee3924c7b8ac 100644
--- a/examples/asr/experimental/k2/speech_to_text_bpe.py
+++ b/examples/asr/experimental/k2/speech_to_text_bpe.py
@@ -50,7 +50,7 @@
     model.validation_ds.manifest_filepath= \
     model.tokenizer.dir= \
     model.tokenizer.type= \
-    trainer.gpus=-1 \
+    trainer.devices=-1 \
     trainer.accelerator="ddp" \
     trainer.max_epochs=100 \
     model.optim.name="adamw" \
diff --git a/examples/asr/speech_translation/speech_to_text_transformer.py b/examples/asr/speech_translation/speech_to_text_transformer.py
index 0c0882859b88..dce19df87a72 100644
--- a/examples/asr/speech_translation/speech_to_text_transformer.py
+++ b/examples/asr/speech_translation/speech_to_text_transformer.py
@@ -24,7 +24,7 @@
     model.tokenizer.dir= \
     model.tokenizer.model_path= \
     model.tokenizer.type= \
-    trainer.gpus=-1 \
+    trainer.devices=-1 \
     trainer.accelerator="ddp" \
     trainer.max_epochs=100 \
     model.optim.name="adamw" \
diff --git a/examples/nlp/intent_slot_classification/multi_label_intent_slot_classification.py b/examples/nlp/intent_slot_classification/multi_label_intent_slot_classification.py
index bed58ecc43dc..2441885e2ed2 100644
--- a/examples/nlp/intent_slot_classification/multi_label_intent_slot_classification.py
+++ b/examples/nlp/intent_slot_classification/multi_label_intent_slot_classification.py
@@ -19,7 +19,7 @@
     model.data_dir=/home/user/multiatis \
     model.validation_ds.prefix=dev \
     model.test_ds.prefix=dev \
-    trainer.gpus=[0] \
+    trainer.devices=[0] \
     +trainer.fast_dev_run=true \
     exp_manager.exp_dir=checkpoints
diff --git a/tutorials/tts/FastPitch_MixerTTS_Training.ipynb b/tutorials/tts/FastPitch_MixerTTS_Training.ipynb
index cfcd607ee93c..747ecfa43127 100644
--- a/tutorials/tts/FastPitch_MixerTTS_Training.ipynb
+++ b/tutorials/tts/FastPitch_MixerTTS_Training.ipynb
@@ -515,7 +515,7 @@
     "    model.train_ds.dataloader_params.batch_size=24 \\\n",
     "    model.validation_ds.dataloader_params.batch_size=24 \\\n",
     "    exp_manager.exp_dir=./fastpitch_log_dir \\\n",
-    "    model.n_speakers=1 trainer.devices=1 trainer.strategy=null \\\n",
+    "    model.n_speakers=1 trainer.devices=1 trainer.strategy=\"ddp_find_unused_parameters_true\" \\\n",
     ")"
    ]
   },
@@ -565,7 +565,8 @@
     "model.train_ds.dataloader_params.num_workers=0 \\\n",
     "model.validation_ds.dataloader_params.num_workers=0 \\\n",
     "trainer.max_epochs=3 \\\n",
-    "trainer.strategy=null \\\n",
+    "trainer.accelerator=\"gpu\" \\\n",
+    "trainer.strategy=\"ddp_find_unused_parameters_true\" \\\n",
     "trainer.check_val_every_n_epoch=1"
    ]
   },
diff --git a/tutorials/tts/Tacotron2_Training.ipynb b/tutorials/tts/Tacotron2_Training.ipynb
index e2ae5082e608..79546bb79db9 100644
--- a/tutorials/tts/Tacotron2_Training.ipynb
+++ b/tutorials/tts/Tacotron2_Training.ipynb
@@ -295,9 +295,9 @@
     "    train_dataset=tests/data/asr/an4_train.json \\\n",
     "    validation_datasets=tests/data/asr/an4_val.json \\\n",
     "    trainer.max_epochs=3 \\\n",
-    "    trainer.accelerator=null \\\n",
+    "    trainer.accelerator='gpu' \\\n",
     "    trainer.check_val_every_n_epoch=1 \\\n",
-    "    +trainer.gpus=1)"
+    "    trainer.devices=1)"
    ]
   },
  {
diff --git a/tutorials/tts/Vits_Training.ipynb b/tutorials/tts/Vits_Training.ipynb
index db7161c06c61..a8a7ccc76ae2 100644
--- a/tutorials/tts/Vits_Training.ipynb
+++ b/tutorials/tts/Vits_Training.ipynb
@@ -251,7 +251,7 @@
     "  num_nodes: 1\n",
     "  devices: 2\n",
     "  accelerator: gpu\n",
-    "  strategy: ddp\n",
+    "  strategy: ddp_find_unused_parameters_true\n",
     "  precision: 32\n",
     "  max_epochs: -1\n",
     "  accumulate_grad_batches: 1\n",
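
For reference (an editorial sketch, not part of the commit): under PyTorch Lightning 2.0 the options this patch touches map onto the Trainer constructor roughly as shown below. The argument values are illustrative placeholders, not taken from the repository configs.

    # Minimal sketch, assuming PyTorch Lightning 2.0; values are placeholders.
    import pytorch_lightning as pl

    trainer = pl.Trainer(
        accelerator="gpu",                           # replaces trainer.accelerator=null in the tutorials
        devices=1,                                   # replaces the removed trainer.gpus=1
        strategy="ddp_find_unused_parameters_true",  # replaces trainer.strategy=null / strategy: ddp
        max_epochs=3,
        check_val_every_n_epoch=1,
    )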