Commit 99ff309

drop

Borda committed Dec 8, 2020
1 parent aeaa6b2 commit 99ff309
Showing 3 changed files with 5 additions and 120 deletions.
@@ -392,31 +392,14 @@ def log_train_epoch_end_metrics(
  # [optimizer_idx][training_step_idx][tbptt_index]
  opt_idx_outputs = epoch_output[0]

- # TODO: deprecate 1.0
- try:
-     sample_obj = opt_idx_outputs[0][0] if isinstance(opt_idx_outputs[0], list) else opt_idx_outputs[0]
-     is_result_obj = len(epoch_output) > 0 and isinstance(sample_obj, Result)
-     is_1_0_result = is_result_obj and 'extra' in sample_obj
- except IndexError as e:
-     is_result_obj = False
-     is_1_0_result = False
-
- # ------------------
- # NEW 1.0.0 PATH
- # ------------------
- if is_1_0_result:
-     # lightning module hook
-     self.training_epoch_end(model, epoch_output, num_optimizers)
-
-     # log/aggregate metrics automatically
-     epoch_log_metrics, epoch_progress_bar_metrics = self.__auto_reduce_results_on_epoch_end(epoch_output)
+ # lightning module hook
+ self.training_epoch_end(model, epoch_output, num_optimizers)

- # TODO: deprecate 1.0
- else:
-     out = self.__run_legacy_training_epoch_end(
-         num_optimizers, epoch_output, model, is_result_obj, epoch_callback_metrics
-     )
-     epoch_log_metrics, epoch_progress_bar_metrics, epoch_callback_metrics = out
+ # log/aggregate metrics automatically
+ epoch_log_metrics, epoch_progress_bar_metrics = self.__auto_reduce_results_on_epoch_end(epoch_output)

  # it will perform reduction over epoch and return log metrics
  cached_epoch_log_metrics = self.cached_results.get_epoch_log_metrics()
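For context on the path this commit keeps: with the legacy Result branches gone, epoch-end aggregation assumes metrics were recorded via self.log inside the LightningModule. A minimal sketch of that usage (the module, data shapes, and loss are illustrative assumptions, not part of the commit):

import torch
import pytorch_lightning as pl


class LitModel(pl.LightningModule):
    """Hypothetical module, for illustration only."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 1)

    def forward(self, x):
        return self.layer(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = torch.nn.functional.mse_loss(self(x), y)
        # logged values are reduced over the epoch by the trainer,
        # which is the only path log_train_epoch_end_metrics now serves
        self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)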
17 changes: 0 additions & 17 deletions pytorch_lightning/trainer/logging.py
@@ -59,23 +59,6 @@ def process_dict_result(self, output, train=False):
  Separates loss from logging and progress bar metrics
  """
- # --------------------
- # WARN DEPRECATED KEYS
- # --------------------
- # TODO: 1.0.0 remove
- if isinstance(output, dict):
-     for k, v in output.items():
-         if k in ['log', 'progress_bar']:
-             m = inspect.cleandoc(
-                 f"""The {{{k}:dict keyword}} was deprecated in 0.9.1 and will be removed in 1.0.0
-                 Please use self.log(...) inside the lightningModule instead.
-                 # log on a step or aggregate epoch metric to the logger and/or progress bar
-                 # (inside LightningModule)
-                 self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True)
-                 """)
-             rank_zero_warn(m)
-
  # --------------------------
  # handle single scalar only
  # --------------------------
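The block removed above was the last warning for the pre-1.0 'log'/'progress_bar' return keys. A hedged before/after sketch of the two return styles (compute_loss is a hypothetical helper; both defs stand in for alternative bodies of LightningModule.training_step):

# Deprecated pre-1.0 style: the trainer mined the 'log' and
# 'progress_bar' keys out of the returned dict.
def training_step(self, batch, batch_idx):
    loss = self.compute_loss(batch)  # hypothetical helper
    return {
        'loss': loss,
        'log': {'train_loss': loss},
        'progress_bar': {'train_loss': loss},
    }


# 1.0 replacement: log explicitly, return just the loss (or a plain dict).
def training_step(self, batch, batch_idx):
    loss = self.compute_loss(batch)  # hypothetical helper
    self.log('train_loss', loss, prog_bar=True)
    return loss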
83 changes: 1 addition & 82 deletions pytorch_lightning/trainer/training_loop.py
@@ -373,24 +373,7 @@ def _process_training_step_output(self, training_step_output, split_batch):
      return None, None

  # -----------------------------------------
- # process result return (DEPRECATE in 1.0)
- # -----------------------------------------
- if isinstance(training_step_output, Result):
-     training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)
-     return training_step_output_for_epoch_end, training_step_output
-
- # -----------------------------------------
- # process hybrid (1.0)
- # -----------------------------------------
- # no need for these checks in 1.0.0
- # TODO: remove checks in 1.0.0
- is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)
- is_1_0_output = is_tensor or ("log" not in training_step_output and "progress_bar" not in training_step_output)
- if is_1_0_output:
-     return self._process_training_step_output_1_0(training_step_output, split_batch)
-
- # -----------------------------------------
- # process old dict (deprecate 1.0)
+ # process old dict
  # -----------------------------------------
  training_step_output = self.trainer.process_dict_result(training_step_output, train=True)

@@ -409,70 +392,6 @@ def _process_training_step_output(self, training_step_output, split_batch):

  return training_step_output_for_epoch_end, training_step_output

- def _process_training_step_output_1_0(self, training_step_output, split_batch):
-     result = self.trainer.get_model()._results
-
-     loss = None
-     hiddens = None
-
-     # handle dict return
-     if isinstance(training_step_output, dict):
-         loss = training_step_output.pop("loss", None)
-         hiddens = training_step_output.pop("hiddens", None)
-         result["extra"] = training_step_output
-
-     # handle scalar return
-     elif isinstance(training_step_output, torch.Tensor):
-         loss = training_step_output
-         result["extra"] = {}
-
-     # map to results under the hood
-     result.minimize = loss
-     result.hiddens = hiddens
-
-     # track batch for manual reduction with result
-     result.track_batch_size(len(split_batch))
-
-     # track metrics without grads for epoch reduction
-     training_step_output_for_epoch_end = copy(result)
-     training_step_output_for_epoch_end.detach()
-     if self.trainer.move_metrics_to_cpu:
-         training_step_output_for_epoch_end.cpu()
-
-     # what flows back into the system
-     training_step_output = result
-
-     return training_step_output_for_epoch_end, training_step_output
-
- def _process_result(self, training_step_output, split_batch):
-     training_step_output.track_batch_size(len(split_batch))
-     m = """
-         TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.
-         Use self.log and .write from the LightningModule to log metrics and write predictions.
-         training_step can now only return a scalar (for the loss) or a dictionary with anything you want.
-         Option 1:
-         return loss
-         Option 2:
-         return {'loss': loss, 'anything_else': ...}
-         Option 3:
-         return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}
-     """
-     rank_zero_warn(m)
-
-     # don't allow EvalResult in the training_step
-     if isinstance(training_step_output, EvalResult):
-         raise MisconfigurationException(
-             "training_step cannot return EvalResult, " "use a dict or TrainResult instead"
-         )
-
-     training_step_output_for_epoch_end = copy(training_step_output)
-     training_step_output_for_epoch_end.detach()
-
-     return training_step_output_for_epoch_end

  def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure, *args, **kwargs):
      model_ref = self.trainer.get_model()
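The deleted _process_result warning enumerated the return forms that stay supported. A sketch of Option 3, the dict return carrying hiddens for truncated BPTT (the RNN model is an illustrative assumption; how TBPTT is enabled varies by Lightning version):

import torch
import pytorch_lightning as pl


class LitRNN(pl.LightningModule):
    """Hypothetical TBPTT-style module, for illustration only."""

    def __init__(self):
        super().__init__()
        self.rnn = torch.nn.RNN(input_size=8, hidden_size=16, batch_first=True)
        self.head = torch.nn.Linear(16, 1)

    def training_step(self, batch, batch_idx, hiddens):
        # `hiddens` is passed in when truncated BPTT is enabled
        # (a Trainer argument in this era; a module attribute in later versions)
        x, y = batch
        out, new_hiddens = self.rnn(x, hiddens)
        loss = torch.nn.functional.mse_loss(self.head(out), y)
        # Option 3 from the removed warning: loss, hiddens, plus extras
        return {'loss': loss, 'hiddens': new_hiddens, 'anything_else': out.detach()}

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)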
