From 32476344b8bb42fdfac49d6257f8441f7dab315f Mon Sep 17 00:00:00 2001
From: Abi See
Date: Thu, 29 Oct 2020 20:38:22 +0000
Subject: [PATCH 1/4] make sure that logging_first_step evaluates

---
 src/transformers/trainer_callback.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/transformers/trainer_callback.py b/src/transformers/trainer_callback.py
index 01be518da1d0..6610f399163d 100644
--- a/src/transformers/trainer_callback.py
+++ b/src/transformers/trainer_callback.py
@@ -397,6 +397,7 @@ def on_step_end(self, args: TrainingArguments, state: TrainerState, control: Tra
         # Log
         if state.global_step == 1 and args.logging_first_step:
             control.should_log = True
+            control.should_evaluate = True
         if args.logging_steps > 0 and state.global_step % args.logging_steps == 0:
             control.should_log = True
 

From 1331a45eeef6e95ef987f8d38cc97487e989c6e5 Mon Sep 17 00:00:00 2001
From: Abi See
Date: Thu, 29 Oct 2020 20:38:41 +0000
Subject: [PATCH 2/4] fix bug with incorrect loss on logging_first_step

---
 src/transformers/trainer.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py
index 2ac88349484c..e19ebe78d2f4 100755
--- a/src/transformers/trainer.py
+++ b/src/transformers/trainer.py
@@ -729,6 +729,7 @@ def train(self, model_path: Optional[str] = None, trial: Union["optuna.Trial", D
 
         tr_loss = torch.tensor(0.0).to(self.args.device)
         self._logging_loss_scalar = 0
+        self._globalstep_last_logged = 0
         self._total_flos = self.state.total_flos
         model.zero_grad()
 
@@ -849,7 +850,7 @@ def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
         if self.control.should_log:
             logs: Dict[str, float] = {}
             tr_loss_scalar = tr_loss.item()
-            logs["loss"] = (tr_loss_scalar - self._logging_loss_scalar) / self.args.logging_steps
+            logs["loss"] = (tr_loss_scalar - self._logging_loss_scalar) / (self.state.global_step - self._globalstep_last_logged)
             # backward compatibility for pytorch schedulers
             logs["learning_rate"] = (
                 self.lr_scheduler.get_last_lr()[0]
@@ -857,6 +858,7 @@
                 else self.lr_scheduler.get_lr()[0]
             )
             self._logging_loss_scalar = tr_loss_scalar
+            self._globalstep_last_logged = self.state.global_step
 
             self.log(logs)
 
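Note: PATCH 2/4 above replaces the fixed division by args.logging_steps with an average over however many steps actually elapsed since the last log, so the log fired at global_step == 1 by logging_first_step reports the true first-step loss instead of a value roughly logging_steps times too small. A minimal standalone sketch of that bookkeeping (hypothetical class and names, not the Trainer API itself):

    class LossLogger:
        """Averages the summed training loss over each logging interval."""

        def __init__(self):
            self.logging_loss_scalar = 0.0   # summed loss at the previous log
            self.globalstep_last_logged = 0  # global_step at the previous log

        def average_since_last_log(self, tr_loss_scalar, global_step):
            # Divide by the actual interval length, not a fixed logging_steps.
            steps = global_step - self.globalstep_last_logged
            avg = (tr_loss_scalar - self.logging_loss_scalar) / steps
            self.logging_loss_scalar = tr_loss_scalar
            self.globalstep_last_logged = global_step
            return avg

    logger = LossLogger()
    # With logging_first_step, the first log fires at global_step == 1:
    logger.average_since_last_log(tr_loss_scalar=2.5, global_step=1)       # 2.5, not 2.5 / 500
    # The next log at step 500 averages over the remaining 499 steps:
    logger.average_since_last_log(tr_loss_scalar=1000.0, global_step=500)  # (1000.0 - 2.5) / 499
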
From 6c0131a15356d66785cf2523af7bf56c3704174f Mon Sep 17 00:00:00 2001
From: Abigail See
Date: Fri, 30 Oct 2020 10:12:53 -0700
Subject: [PATCH 3/4] fix style

---
 src/transformers/trainer.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py
index e19ebe78d2f4..bbe10a5ee30e 100755
--- a/src/transformers/trainer.py
+++ b/src/transformers/trainer.py
@@ -850,7 +850,9 @@ def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
         if self.control.should_log:
             logs: Dict[str, float] = {}
             tr_loss_scalar = tr_loss.item()
-            logs["loss"] = (tr_loss_scalar - self._logging_loss_scalar) / (self.state.global_step - self._globalstep_last_logged)
+            logs["loss"] = (tr_loss_scalar - self._logging_loss_scalar) / (
+                self.state.global_step - self._globalstep_last_logged
+            )
             # backward compatibility for pytorch schedulers
             logs["learning_rate"] = (
                 self.lr_scheduler.get_last_lr()[0]

From e245ccdc0a97e551b1332b273c6a189e14385df2 Mon Sep 17 00:00:00 2001
From: Abi See
Date: Fri, 30 Oct 2020 20:05:02 +0000
Subject: [PATCH 4/4] logging_first_step only logs, not evals

---
 src/transformers/trainer_callback.py | 1 -
 src/transformers/training_args.py    | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/transformers/trainer_callback.py b/src/transformers/trainer_callback.py
index 6610f399163d..01be518da1d0 100644
--- a/src/transformers/trainer_callback.py
+++ b/src/transformers/trainer_callback.py
@@ -397,7 +397,6 @@ def on_step_end(self, args: TrainingArguments, state: TrainerState, control: Tra
         # Log
         if state.global_step == 1 and args.logging_first_step:
             control.should_log = True
-            control.should_evaluate = True
         if args.logging_steps > 0 and state.global_step % args.logging_steps == 0:
             control.should_log = True
 
diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index 2fc6ffd71038..2442519084a6 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -250,7 +250,7 @@ class TrainingArguments:
     warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
 
     logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={"help": "Tensorboard log dir."})
-    logging_first_step: bool = field(default=False, metadata={"help": "Log and eval the first global_step"})
+    logging_first_step: bool = field(default=False, metadata={"help": "Log the first global_step"})
     logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
     save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
     save_total_limit: Optional[int] = field(
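
Note: after PATCH 4/4, logging_first_step=True once again only forces a log (not an evaluation) at global_step == 1, while the fix from PATCH 2/4 keeps that first logged loss correct. A minimal usage sketch (the output_dir value is a placeholder):

    from transformers import TrainingArguments

    args = TrainingArguments(
        output_dir="output",      # placeholder path
        logging_first_step=True,  # log at global_step == 1; no eval is triggered
        logging_steps=500,        # then log every 500 update steps
    )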