From 20e9d22935da624e5c636f31594f2e3845284e8b Mon Sep 17 00:00:00 2001
From: Nithin Holla
Date: Wed, 14 Apr 2021 13:52:06 +0200
Subject: [PATCH] Save the Wav2Vec2 processor before training starts (#10910)

Co-authored-by: nithin19
---
 examples/research_projects/wav2vec2/run_common_voice.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/examples/research_projects/wav2vec2/run_common_voice.py b/examples/research_projects/wav2vec2/run_common_voice.py
index 426de3729206a0..0f89dcf2b47f04 100644
--- a/examples/research_projects/wav2vec2/run_common_voice.py
+++ b/examples/research_projects/wav2vec2/run_common_voice.py
@@ -476,13 +476,14 @@ def compute_metrics(pred):
         checkpoint = model_args.model_name_or_path
     else:
         checkpoint = None
-    train_result = trainer.train(resume_from_checkpoint=checkpoint)
-    trainer.save_model()
 
-    # save the feature_extractor and the tokenizer
+    # Save the feature_extractor and the tokenizer
     if is_main_process(training_args.local_rank):
         processor.save_pretrained(training_args.output_dir)
 
+    train_result = trainer.train(resume_from_checkpoint=checkpoint)
+    trainer.save_model()
+
     metrics = train_result.metrics
     max_train_samples = (
         data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)