From 9b5d80ad15dde571827c04fa45aadb5895a00a23 Mon Sep 17 00:00:00 2001
From: Zhilin Wang
Date: Mon, 16 Jan 2023 21:49:27 -0800
Subject: [PATCH] Fix transducer and question answering tutorial bugs (#5809)

---
 tutorials/asr/Intro_to_Transducers.ipynb |  4 ++--
 tutorials/nlp/Question_Answering.ipynb   | 13 +++++++++----
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/tutorials/asr/Intro_to_Transducers.ipynb b/tutorials/asr/Intro_to_Transducers.ipynb
index a82a4804ca56..8026e6ddf546 100644
--- a/tutorials/asr/Intro_to_Transducers.ipynb
+++ b/tutorials/asr/Intro_to_Transducers.ipynb
@@ -63,7 +63,7 @@
 "\n",
 "# For pip based environments,\n",
 "# Update Numba to > 0.54\n",
-"!pip install --upgrade numba==0.54.1"
+"!pip install --upgrade numba>=0.54.1"
 ],
 "execution_count": null,
 "outputs": []
@@ -773,4 +773,4 @@
 ]
 }
 ]
-}
\ No newline at end of file
+}
diff --git a/tutorials/nlp/Question_Answering.ipynb b/tutorials/nlp/Question_Answering.ipynb
index 5ce89b3baafc..e3ee40a19674 100644
--- a/tutorials/nlp/Question_Answering.ipynb
+++ b/tutorials/nlp/Question_Answering.ipynb
@@ -107,6 +107,7 @@
 "source": [
 "import os\n",
 "import wget\n",
+"import gc\n",
 "\n",
 "import pytorch_lightning as pl\n",
 "from omegaconf import OmegaConf\n",
@@ -116,7 +117,8 @@
 "from nemo.collections.nlp.models.question_answering.qa_s2s_model import S2SQAModel\n",
 "from nemo.utils.exp_manager import exp_manager\n",
 "\n",
-"pl.seed_everything(42)"
+"pl.seed_everything(42)\n",
+"gc.disable()"
 ]
 },
 {
@@ -128,8 +130,8 @@
 "outputs": [],
 "source": [
 "# set the following paths\n",
-"DATA_DIR = \"\" # directory for storing datasets\n",
-"WORK_DIR = \"\" # directory for storing trained models, logs, additionally downloaded scripts\n",
+"DATA_DIR = \"data_dir\" # directory for storing datasets\n",
+"WORK_DIR = \"work_dir\" # directory for storing trained models, logs, additionally downloaded scripts\n",
 "\n",
 "os.makedirs(DATA_DIR, exist_ok=True)\n",
 "os.makedirs(WORK_DIR, exist_ok=True)"
@@ -542,7 +544,10 @@
 "\n",
 "config.exp_manager.create_checkpoint_callback = True\n",
 "\n",
-"config.model.optim.lr = 5e-5"
+"config.model.optim.lr = 5e-5\n",
+"\n",
+"# remove vocab_file from gpt model\n",
+"config.model.tokenizer.vocab_file = None"
 ]
 },
 {