Makes automatic optimization a model attribute #4602

Merged: 15 commits, Nov 14, 2020

7 changes: 7 additions & 0 deletions pytorch_lightning/core/lightning.py
@@ -164,6 +164,13 @@ def on_gpu(self):
        """
        return self.device.type == "cuda"

    @property
    def automatic_optimization(self) -> bool:
        """
        If False, you are responsible for calling .backward, .step, zero_grad.
        """
        return True

    def print(self, *args, **kwargs) -> None:
        r"""
        Prints only from process 0. Use this in any distributed mode to log only once.
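
For context, here is a minimal usage sketch of the property added above: a LightningModule that overrides `automatic_optimization` and drives the optimizer itself in `training_step`. The model, the data shapes, and the `self.optimizers()` helper are illustrative assumptions and not part of this diff; the exact manual-optimization helpers vary across Lightning versions.

```python
import torch
import pytorch_lightning as pl


class ManualOptimModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 1)

    @property
    def automatic_optimization(self) -> bool:
        # Opt out of automatic optimization via the property added in this PR.
        return False

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = torch.nn.functional.mse_loss(self.layer(x), y)
        # With automatic optimization off, the user calls .backward, .step
        # and .zero_grad themselves, as the docstring above says.
        opt = self.optimizers()  # assumed helper; exact API varies across Lightning versions
        opt.zero_grad()
        loss.backward()
        opt.step()
        return loss
```
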
3 changes: 3 additions & 0 deletions pytorch_lightning/trainer/connectors/model_connector.py
@@ -35,6 +35,9 @@ def copy_trainer_model_properties(self, model):
        else:
            ref_model = model

        automatic_optimization = ref_model.automatic_optimization and self.trainer.train_loop.automatic_optimization
        self.trainer.train_loop.automatic_optimization = automatic_optimization

        for m in [model, ref_model]:
            m.trainer = self.trainer
            m.logger = self.trainer.logger
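
The two added lines make the effective setting the logical AND of the model property and the existing train-loop flag, so either side can switch automatic optimization off. A hypothetical standalone restatement of that precedence rule (the helper name is invented for illustration):

```python
def resolve_automatic_optimization(model_flag: bool, trainer_flag: bool) -> bool:
    # Mirrors the AND in copy_trainer_model_properties: automatic optimization
    # stays on only when neither side has opted out.
    return model_flag and trainer_flag


assert resolve_automatic_optimization(True, True) is True     # default: automatic
assert resolve_automatic_optimization(False, True) is False   # model property opts out
assert resolve_automatic_optimization(True, False) is False   # legacy trainer flag opts out
```
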
14 changes: 12 additions & 2 deletions pytorch_lightning/trainer/trainer.py
@@ -137,7 +137,7 @@ def __init__(
        amp_backend: str = 'native',
        amp_level: str = 'O2',
        distributed_backend: Optional[str] = None,
        automatic_optimization: bool = True,
        automatic_optimization: Optional[bool] = None,
        move_metrics_to_cpu: bool = False,
    ):
        r"""
@@ -212,7 +212,9 @@ def __init__(
            log_every_n_steps: How often to log within steps (defaults to every 50 steps).

            automatic_optimization: If False, you are responsible for calling .backward, .step, zero_grad.
                Meant to be used with multiple optimizers by advanced users.
                If False, you are responsible for calling .backward, .step, zero_grad in the LightningModule.
                This argument has been moved to the LightningModule. It is deprecated here in v1.1 and
                will be removed in v1.3.

            prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.
                Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data
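
A short before/after sketch of the migration this docstring describes; only the relevant argument is shown, and the model class and dataloader are placeholders:

```python
from pytorch_lightning import LightningModule, Trainer

# Deprecated in v1.1, to be removed in v1.3: disabling via the Trainer flag.
trainer = Trainer(automatic_optimization=False)  # emits a deprecation warning


# Preferred: declare it on the LightningModule itself.
class MyModel(LightningModule):
    @property
    def automatic_optimization(self) -> bool:
        return False


trainer = Trainer()  # the argument stays at its new default of None
# trainer.fit(MyModel(), train_dataloader)  # train_dataloader supplied by the user
```
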
@@ -355,6 +357,14 @@ def __init__(
        )

        # init train loop related flags
        # TODO: deprecate in 1.2.0
        if automatic_optimization is None:
            automatic_optimization = True
        else:
            rank_zero_warn(
                "Disabling automatic optimization with the trainer flag is deprecated and will be removed in v1.3.0! "
                "Please use the property on the LightningModule for disabling automatic optimization."
            )
        self.train_loop.on_trainer_init(
            max_epochs,
            min_epochs,
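
Finally, a small sketch of the behaviour implied by the resolution block above: leaving the argument at its new default of None is silent, while any explicitly passed value (including True) takes the deprecation branch. The warning category caught here is an assumption, since `rank_zero_warn` wraps Python's `warnings` machinery.

```python
import warnings

from pytorch_lightning import Trainer

Trainer()  # automatic_optimization left as None -> resolved to True, no warning

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    Trainer(automatic_optimization=False)  # any explicit value takes the deprecation branch

assert any("deprecated" in str(w.message) for w in caught)
```
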