diff --git a/tests/callbacks/test_callbacks.py b/tests/callbacks/test_callbacks.py
index b1034ef7d7f28..374eb7a357335 100644
--- a/tests/callbacks/test_callbacks.py
+++ b/tests/callbacks/test_callbacks.py
@@ -166,6 +166,7 @@ def on_test_end(self, trainer, pl_module):
         limit_val_batches=0.1,
         limit_train_batches=0.2,
         progress_bar_refresh_rate=0,
+        default_root_dir=tmpdir,
     )
 
     assert not test_callback.setup_called
diff --git a/tests/callbacks/test_progress_bar.py b/tests/callbacks/test_progress_bar.py
index f621e70228012..59f151fd5df2e 100644
--- a/tests/callbacks/test_progress_bar.py
+++ b/tests/callbacks/test_progress_bar.py
@@ -66,6 +66,7 @@ def test_progress_bar_totals(tmpdir):
         progress_bar_refresh_rate=1,
         limit_val_batches=1.0,
         max_epochs=1,
+        default_root_dir=tmpdir,
     )
     bar = trainer.progress_bar_callback
     assert 0 == bar.total_train_batches
@@ -182,6 +183,7 @@ def on_test_batch_end(self, trainer, pl_module):
         limit_train_batches=1.0,
         num_sanity_val_steps=2,
         max_epochs=3,
+        default_root_dir=tmpdir,
     )
 
     assert trainer.progress_bar_callback.refresh_rate == refresh_rate
diff --git a/tests/loggers/test_all.py b/tests/loggers/test_all.py
index ca309f42afeee..235dcb9615625 100644
--- a/tests/loggers/test_all.py
+++ b/tests/loggers/test_all.py
@@ -54,6 +54,7 @@ def log_metrics(self, metrics, step):
         limit_train_batches=0.2,
         limit_val_batches=0.5,
         fast_dev_run=True,
+        default_root_dir=tmpdir,
     )
 
     trainer.fit(model)
diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py
index dfe9ffc6437fe..c78d27fb0f638 100644
--- a/tests/loggers/test_base.py
+++ b/tests/loggers/test_base.py
@@ -102,7 +102,7 @@ def test_multiple_loggers(tmpdir):
     assert logger2.finalized_status == "success"
 
 
-def test_multiple_loggers_pickle(tmpdir):
+def test_multiple_loggers_pickle():
     """Verify that pickling trainer with multiple loggers works."""
 
     logger1 = CustomLogger()
diff --git a/tests/loggers/test_tensorboard.py b/tests/loggers/test_tensorboard.py
index e6df2bbc1c691..bf68252f0354d 100644
--- a/tests/loggers/test_tensorboard.py
+++ b/tests/loggers/test_tensorboard.py
@@ -17,7 +17,10 @@ def test_tensorboard_hparams_reload(tmpdir):
     model = EvalModelTemplate()
 
-    trainer = Trainer(max_epochs=1, default_root_dir=tmpdir)
+    trainer = Trainer(
+        max_epochs=1,
+        default_root_dir=tmpdir,
+    )
     trainer.fit(model)
 
     folder_path = trainer.logger.log_dir
 
diff --git a/tests/models/test_amp.py b/tests/models/test_amp.py
index 1c187a8188332..b70f1c6413e62 100644
--- a/tests/models/test_amp.py
+++ b/tests/models/test_amp.py
@@ -39,8 +39,8 @@ def test_amp_multi_gpu(tmpdir, backend):
     tutils.set_random_master_port()
 
     model = EvalModelTemplate()
-
-    trainer_options = dict(
+    # tutils.run_model_test(trainer_options, model)
+    trainer = Trainer(
         default_root_dir=tmpdir,
         max_epochs=1,
         # gpus=2,
@@ -48,9 +48,6 @@ def test_amp_multi_gpu(tmpdir, backend):
         distributed_backend=backend,
         precision=16,
     )
-
-    # tutils.run_model_test(trainer_options, model)
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model)
     assert result
 
@@ -66,17 +63,15 @@ def test_multi_gpu_wandb(tmpdir, backend):
     model = EvalModelTemplate()
     logger = WandbLogger(name='utest')
 
-    trainer_options = dict(
+    # tutils.run_model_test(trainer_options, model)
+    trainer = Trainer(
         default_root_dir=tmpdir,
         max_epochs=1,
         gpus=2,
         distributed_backend=backend,
         precision=16,
         logger=logger,
     )
-
-    # tutils.run_model_test(trainer_options, model)
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model)
     assert result
     trainer.test(model)
@@ -107,6 +102,7 @@ def test_amp_gpu_ddp_slurm_managed(tmpdir):
         precision=16,
         checkpoint_callback=checkpoint,
         logger=logger,
+        default_root_dir=tmpdir,
     )
     trainer.is_slurm_managing_tasks = True
     result = trainer.fit(model)
diff --git a/tests/models/test_cpu.py b/tests/models/test_cpu.py
index 8160bf8c72b44..3eef3807b70c7 100644
--- a/tests/models/test_cpu.py
+++ b/tests/models/test_cpu.py
@@ -30,6 +30,7 @@ def test_cpu_slurm_save_load(tmpdir):
         limit_train_batches=0.2,
         limit_val_batches=0.2,
         checkpoint_callback=ModelCheckpoint(tmpdir),
+        default_root_dir=tmpdir,
     )
     result = trainer.fit(model)
     real_global_step = trainer.global_step
@@ -66,6 +67,7 @@ def test_cpu_slurm_save_load(tmpdir):
         max_epochs=1,
         logger=logger,
         checkpoint_callback=ModelCheckpoint(tmpdir),
+        default_root_dir=tmpdir,
     )
 
     model = EvalModelTemplate(**hparams)
@@ -223,6 +225,7 @@ def test_running_test_no_val(tmpdir):
         checkpoint_callback=checkpoint,
         logger=logger,
         early_stop_callback=False,
+        default_root_dir=tmpdir,
     )
 
     result = trainer.fit(model)
diff --git a/tests/models/test_gpu.py b/tests/models/test_gpu.py
index 734478f26a7a7..bddac38f74693 100644
--- a/tests/models/test_gpu.py
+++ b/tests/models/test_gpu.py
@@ -39,7 +39,9 @@ def test_multi_gpu_model(tmpdir, backend):
     """Make sure DDP works."""
     tutils.set_random_master_port()
 
-    trainer_options = dict(
+    model = EvalModelTemplate()
+    # tutils.run_model_test(trainer_options, model)
+    trainer = Trainer(
         default_root_dir=tmpdir,
         max_epochs=1,
         limit_train_batches=0.4,
@@ -47,10 +49,6 @@ def test_multi_gpu_model(tmpdir, backend):
         gpus=[0, 1],
         distributed_backend=backend,
     )
-
-    model = EvalModelTemplate()
-    # tutils.run_model_test(trainer_options, model)
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model)
     assert result
 
@@ -64,7 +62,11 @@ def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
     """Make sure DDP works with dataloaders passed to fit()"""
     tutils.set_random_master_port()
 
-    trainer_options = dict(
+    model = EvalModelTemplate()
+    fit_options = dict(train_dataloader=model.train_dataloader(),
+                       val_dataloaders=model.val_dataloader())
+
+    trainer = Trainer(
         default_root_dir=tmpdir,
         progress_bar_refresh_rate=0,
         max_epochs=1,
@@ -73,12 +75,6 @@ def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
         gpus=[0, 1],
         distributed_backend='ddp'
     )
-
-    model = EvalModelTemplate()
-    fit_options = dict(train_dataloader=model.train_dataloader(),
-                       val_dataloaders=model.val_dataloader())
-
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model, **fit_options)
     assert result == 1, "DDP doesn't work with dataloaders passed to fit()."
 
diff --git a/tests/models/test_grad_norm.py b/tests/models/test_grad_norm.py
index ff627c5088987..9f49d4d265dd2 100644
--- a/tests/models/test_grad_norm.py
+++ b/tests/models/test_grad_norm.py
@@ -89,6 +89,7 @@ def test_grad_tracking(tmpdir, norm_type, rtol=5e-3):
         logger=logger,
         track_grad_norm=norm_type,
         row_log_interval=1,  # request grad_norms every batch
+        default_root_dir=tmpdir,
     )
 
     result = trainer.fit(model)
diff --git a/tests/models/test_hooks.py b/tests/models/test_hooks.py
index 7d5a8849948d6..4519d666851bd 100644
--- a/tests/models/test_hooks.py
+++ b/tests/models/test_hooks.py
@@ -23,6 +23,7 @@ def on_before_zero_grad(self, optimizer):
         max_steps=max_steps,
         max_epochs=2,
         num_sanity_val_steps=5,
+        default_root_dir=tmpdir,
     )
     assert 0 == model.on_before_zero_grad_called
     trainer.fit(model)
diff --git a/tests/models/test_horovod.py b/tests/models/test_horovod.py
index 5f659ade57a38..73e22c754d09f 100644
--- a/tests/models/test_horovod.py
+++ b/tests/models/test_horovod.py
@@ -146,7 +146,8 @@ def validation_step(self, batch, *args, **kwargs):
 
 def test_horovod_multi_optimizer(tmpdir):
     model = TestGAN(**EvalModelTemplate.get_default_hparams())
-    trainer_options = dict(
+    # fit model
+    trainer = Trainer(
         default_root_dir=str(tmpdir),
         progress_bar_refresh_rate=0,
         max_epochs=1,
@@ -155,9 +156,6 @@ def test_horovod_multi_optimizer(tmpdir):
         deterministic=True,
         distributed_backend='horovod',
     )
-
-    # fit model
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model)
     assert result == 1, 'model failed to complete'
 
diff --git a/tests/models/test_restore.py b/tests/models/test_restore.py
index 9eb1067322127..94102084d37c9 100644
--- a/tests/models/test_restore.py
+++ b/tests/models/test_restore.py
@@ -38,6 +38,7 @@ def test_running_test_pretrained_model_distrib(tmpdir, backend):
         logger=logger,
         gpus=[0, 1],
         distributed_backend=backend,
+        default_root_dir=tmpdir,
     )
 
     # fit model
@@ -84,6 +85,7 @@ def test_running_test_pretrained_model_cpu(tmpdir):
         limit_val_batches=0.2,
         checkpoint_callback=checkpoint,
         logger=logger,
+        default_root_dir=tmpdir,
     )
 
     # fit model
@@ -225,14 +227,13 @@ def test_model_saving_loading(tmpdir):
     # logger file to get meta
     logger = tutils.get_default_logger(tmpdir)
 
-    trainer_options = dict(
+    # fit model
+    trainer = Trainer(
         max_epochs=1,
         logger=logger,
-        checkpoint_callback=ModelCheckpoint(tmpdir)
+        checkpoint_callback=ModelCheckpoint(tmpdir),
+        default_root_dir=tmpdir,
     )
-
-    # fit model
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model)
 
     # traning complete
diff --git a/tests/trainer/test_dataloaders.py b/tests/trainer/test_dataloaders.py
index b36eca8a2e429..4157e6c3c7ca7 100644
--- a/tests/trainer/test_dataloaders.py
+++ b/tests/trainer/test_dataloaders.py
@@ -232,7 +232,7 @@ def test_multiple_dataloaders_passed_to_fit(tmpdir, ckpt_path):
         default_root_dir=tmpdir,
         max_epochs=1,
         limit_val_batches=0.1,
-        limit_train_batches=0.2
+        limit_train_batches=0.2,
     )
     fit_options = dict(train_dataloader=model.dataloader(train=True),
                        val_dataloaders=[model.dataloader(train=False),
@@ -336,7 +336,7 @@ def test_mixing_of_dataloader_options(tmpdir, ckpt_path):
         default_root_dir=tmpdir,
         max_epochs=1,
         limit_val_batches=0.1,
-        limit_train_batches=0.2
+        limit_train_batches=0.2,
     )
 
     # fit model
@@ -453,13 +453,6 @@ def test_warning_with_few_workers(tmpdir, ckpt_path):
     model = EvalModelTemplate()
 
     # logger file to get meta
-    trainer_options = dict(
-        default_root_dir=tmpdir,
-        max_epochs=1,
-        limit_val_batches=0.1,
-        limit_train_batches=0.2
-    )
-
     train_dl = model.dataloader(train=True)
     train_dl.num_workers = 0
 
@@ -471,7 +464,12 @@ def test_warning_with_few_workers(tmpdir, ckpt_path):
     fit_options = dict(train_dataloader=train_dl,
                        val_dataloaders=val_dl)
 
-    trainer = Trainer(**trainer_options)
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        max_epochs=1,
+        limit_val_batches=0.1,
+        limit_train_batches=0.2,
+    )
 
     # fit model
     with pytest.warns(UserWarning, match='train'):
@@ -488,7 +486,7 @@ def test_warning_with_few_workers(tmpdir, ckpt_path):
 
 
 @pytest.mark.skipif(torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')
-def test_dataloader_reinit_for_subclass():
+def test_dataloader_reinit_for_subclass(tmpdir):
 
     class CustomDataLoader(torch.utils.data.DataLoader):
         def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
@@ -505,6 +503,7 @@ def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
         gpus=[0, 1],
         num_nodes=1,
         distributed_backend='ddp',
+        default_root_dir=tmpdir,
     )
 
     class CustomDummyObj:
@@ -577,6 +576,7 @@ def train_dataloader(self):
         limit_train_batches=0.1,
         limit_val_batches=0,
         gpus=num_gpus,
+        default_root_dir=tmpdir,
     )
 
     # we expect the reduction for the metrics also to happen on the last batch
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index 68b41d65471b0..cb7413f0950e3 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -168,11 +168,13 @@ def _optimizer_step(self, epoch, batch_idx, optimizer,
     model = EvalModelTemplate()
     schedule = {1: 2, 3: 4}
 
-    trainer = Trainer(accumulate_grad_batches=schedule,
-                      limit_train_batches=0.1,
-                      limit_val_batches=0.1,
-                      max_epochs=2,
-                      default_root_dir=tmpdir)
+    trainer = Trainer(
+        accumulate_grad_batches=schedule,
+        limit_train_batches=0.1,
+        limit_val_batches=0.1,
+        max_epochs=2,
+        default_root_dir=tmpdir,
+    )
 
     # for the test
     trainer.optimizer_step = _optimizer_step
@@ -435,7 +437,7 @@ def test_trainer_max_steps_and_epochs(tmpdir):
     trainer_options.update(
         default_root_dir=tmpdir,
         max_epochs=3,
-        max_steps=num_train_samples + 10
+        max_steps=num_train_samples + 10,
     )
 
     # fit model
@@ -449,7 +451,7 @@ def test_trainer_max_steps_and_epochs(tmpdir):
     # define less train epochs than steps
     trainer_options.update(
         max_epochs=2,
-        max_steps=trainer_options['max_epochs'] * 2 * num_train_samples
+        max_steps=trainer_options['max_epochs'] * 2 * num_train_samples,
     )
 
     # fit model
@@ -472,7 +474,7 @@ def test_trainer_min_steps_and_epochs(tmpdir):
         early_stop_callback=EarlyStopping(monitor='val_loss', min_delta=1.0),
         val_check_interval=2,
         min_epochs=1,
-        max_epochs=7
+        max_epochs=7,
     )
 
     # define less min steps than 1 epoch