drop deprecated TrainResult (#5323)
* drop TrainResult
Borda authored Jan 4, 2021
1 parent 2264fe6 commit af833f6
Showing 10 changed files with 31 additions and 335 deletions.
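The change applied across the touched test files is the usual migration away from the deprecated `TrainResult`: log metrics with `self.log(...)` inside `training_step` and return the loss tensor directly. A minimal before/after sketch (the `LitModel` module, its layer sizes, and the loss used here are illustrative and not taken from this diff):

```python
import torch
import pytorch_lightning as pl


class LitModel(pl.LightningModule):
    """Illustrative module only -- not part of this commit."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    def forward(self, x):
        return self.layer(x)

    # Before (pattern removed by this commit):
    #     result = pl.core.step_result.TrainResult(loss)
    #     result.log('train_loss', loss, prog_bar=True)
    #     return result
    #
    # After: log through the LightningModule and return the loss tensor.
    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = torch.nn.functional.cross_entropy(self(x), y)
        self.log('train_loss', loss, prog_bar=True)
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)
```

The diffs in `tests/base/model_train_steps.py` and `tests/checkpointing/test_model_checkpoint.py` below follow exactly this shape.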
6 changes: 6 additions & 0 deletions CHANGELOG.md
@@ -26,10 +26,12 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

- `stat_scores` metric now calculates stat scores over all classes and gains new parameters, in line with the new `StatScores` metric ([#4839](https://github.com/PyTorchLightning/pytorch-lightning/pull/4839))


### Deprecated

- `stat_scores_multiple_classes` is deprecated in favor of `stat_scores` ([#4839](https://github.com/PyTorchLightning/pytorch-lightning/pull/4839))


### Removed

- Removed deprecated checkpoint argument `filepath` ([#5321](https://github.com/PyTorchLightning/pytorch-lightning/pull/5321))
@@ -38,9 +40,13 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Removed deprecated `Fbeta`, `f1_score` and `fbeta_score` metrics ([#5322](https://github.com/PyTorchLightning/pytorch-lightning/pull/5322))


- Removed deprecated `TrainResult` ([#5323](https://github.com/PyTorchLightning/pytorch-lightning/pull/5323))


### Fixed



## [1.1.0] - 2020-12-09

### Added
151 changes: 0 additions & 151 deletions pytorch_lightning/core/step_result.py
@@ -681,157 +681,6 @@ def collate_tensors(items: Union[List, Tuple]) -> Union[Tensor, List, Tuple]:
return items


class TrainResult(Result):
def __init__(
self,
minimize: Optional[Tensor] = None,
early_stop_on: Optional[Tensor] = None,
checkpoint_on: Optional[Union[Tensor, bool]] = None,
hiddens: Optional[Tensor] = None,
):
"""
Tracks internal metrics aggregations
Args:
minimize: Metric currently being minimized.
early_stop_on: Metric to early stop on.
Should be a one element tensor if combined with default
:class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping`.
If this result is returned by
:meth:`~pytorch_lightning.core.lightning.LightningModule.training_step`,
the specified value will be averaged across all steps.
checkpoint_on: Metric to checkpoint on.
Should be a one element tensor if combined with default checkpoint callback.
If this result is returned by
:meth:`~pytorch_lightning.core.lightning.LightningModule.training_step`,
the specified value will be averaged across all steps.
hiddens:
"""

super().__init__(minimize, early_stop_on, checkpoint_on, hiddens)

def log(
self,
name,
value,
prog_bar: bool = False,
logger: bool = True,
on_step: bool = True,
on_epoch: bool = False,
reduce_fx: Callable = torch.mean,
tbptt_reduce_fx: Callable = torch.mean,
tbptt_pad_token: int = 0,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Union[Any, str] = 'mean',
sync_dist_group: Optional[Any] = None,
):
"""
Log a key, value
Example::
result.log('train_loss', loss)
# defaults used
result.log(
name,
value,
on_step=True,
on_epoch=False,
logger=True,
prog_bar=False,
reduce_fx=torch.mean,
enable_graph=False
)
Args:
name: key name
value: value name
prog_bar: if True logs to the progress base
logger: if True logs to the logger
on_step: if True logs the output of validation_step or test_step
on_epoch: if True, logs the output of the training loop aggregated
reduce_fx: Torch.mean by default
tbptt_reduce_fx: function to reduce on truncated back prop
tbptt_pad_token: token to use for padding
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_op: the op to sync across
sync_dist_group: the ddp group
"""
super().log(
name=name,
value=value,
prog_bar=prog_bar,
logger=logger,
on_step=on_step,
on_epoch=on_epoch,
reduce_fx=reduce_fx,
enable_graph=enable_graph,
sync_dist=sync_dist,
sync_dist_group=sync_dist_group,
sync_dist_op=sync_dist_op,
tbptt_pad_token=tbptt_pad_token,
tbptt_reduce_fx=tbptt_reduce_fx,
)

def log_dict(
self,
dictionary: dict,
prog_bar: bool = False,
logger: bool = True,
on_step: bool = False,
on_epoch: bool = True,
reduce_fx: Callable = torch.mean,
tbptt_reduce_fx: Callable = torch.mean,
tbptt_pad_token: int = 0,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Union[Any, str] = 'mean',
sync_dist_group: Optional[Any] = None,
):
"""
Log a dictonary of values at once
Example::
values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n}
result.log_dict(values)
Args:
dictionary: key value pairs (str, tensors)
prog_bar: if True logs to the progress base
logger: if True logs to the logger
on_step: if True logs the output of validation_step or test_step
on_epoch: if True, logs the output of the training loop aggregated
reduce_fx: Torch.mean by default
tbptt_reduce_fx: function to reduce on truncated back prop
tbptt_pad_token: token to use for padding
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_op: the op to sync across
sync_dist_group: the ddp group:
"""
for k, v in dictionary.items():
self.log(
name=k,
value=v,
prog_bar=prog_bar,
logger=logger,
on_step=on_step,
on_epoch=on_epoch,
reduce_fx=reduce_fx,
enable_graph=enable_graph,
sync_dist=sync_dist,
sync_dist_group=sync_dist_group,
sync_dist_op=sync_dist_op,
tbptt_pad_token=tbptt_pad_token,
tbptt_reduce_fx=tbptt_reduce_fx,
)


class EvalResult(Result):
def __init__(
self,
6 changes: 1 addition & 5 deletions pytorch_lightning/overrides/data_parallel.py
@@ -104,11 +104,7 @@ def __gather_structured_result(self, outputs):

outputs = self.gather(outputs)

# pass minimize to constructor for TrainResult
if 'minimize' in outputs:
result = original_class(outputs['minimize'])
else:
result = original_class()
result = original_class()

result.update(outputs)
result['meta'] = meta
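With `TrainResult` removed, the data-parallel gather above no longer needs to special-case the `minimize` key: the result object can always be constructed empty and repopulated from the gathered outputs, because `Result.update(outputs)` restores every logged key, including `minimize` when present. A standalone sketch of that flow, assuming a `gather` callable standing in for the wrapper's gather method (this is not the actual `LightningDataParallel` code):

```python
def gather_structured_result(original_class, outputs, meta, gather):
    """Illustrative only: rebuild a Result-like object from per-replica outputs."""
    outputs = gather(outputs)    # merge the per-replica output dicts
    result = original_class()    # no TrainResult-specific constructor argument anymore
    result.update(outputs)       # a 'minimize' key, if logged, is restored here
    result['meta'] = meta
    return result
```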
34 changes: 15 additions & 19 deletions tests/base/model_train_steps.py
@@ -17,8 +17,6 @@

import torch

from pytorch_lightning.core.step_result import TrainResult


class TrainingStepVariations(ABC):
"""
@@ -28,27 +26,27 @@ class TrainingStepVariations(ABC):
test_step_inf_loss = float('inf')

def training_step(self, batch, batch_idx, optimizer_idx=None):
"""Lightning calls this inside the training loop"""
self.training_step_called = True

"""Lightning calls this inside the training loop"""
# forward pass
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self(x)

# calculate loss
loss_val = self.loss(y, y_hat)
log_val = loss_val
loss_train = self.loss(y, y_hat)
log_train = loss_train

# alternate between tensors and scalars for "log" and "progress_bar"
if batch_idx % 2 == 0:
log_val = log_val.item()
log_train = log_train.item()

output = OrderedDict(
{
'loss': loss_val,
'progress_bar': {'some_val': log_val * log_val},
'log': {'train_some_val': log_val * log_val},
'loss': loss_train,
'progress_bar': {'some_val': log_train * log_train},
'log': {'train_some_val': log_train * log_train},
}
)
return output
@@ -62,24 +60,22 @@ def training_step__inf_loss(self, batch, batch_idx, optimizer_idx=None):
output /= 0
return output

def training_step_result_obj_dp(self, batch, batch_idx, optimizer_idx=None):
def training_step__result_obj_dp(self, batch, batch_idx, optimizer_idx=None):

# forward pass
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self(x.to(self.device))

# calculate loss
loss_val = self.loss(y.to(y_hat.device), y_hat)
log_val = loss_val
loss_train = self.loss(y.to(y_hat.device), y_hat)
log_train = loss_train

# alternate between tensors and scalars for "log" and "progress_bar"
if batch_idx % 2 == 0:
log_val = log_val.item()

result = TrainResult(loss_val)
result.log('some_val', log_val * log_val, prog_bar=True, logger=False)
result.log('train_some_val', log_val * log_val)
log_train = log_train.item()

self.training_step_called = True
self.log('some_val', log_train * log_train, prog_bar=True, logger=False)
self.log('train_some_val', log_train * log_train)

return result
return loss_train
2 changes: 1 addition & 1 deletion tests/base/model_valid_steps.py
@@ -47,7 +47,7 @@ def validation_step(self, batch, batch_idx, *args, **kwargs):
})
return output

def validation_step_result_obj_dp(self, batch, batch_idx, *args, **kwargs):
def validation_step__result_obj_dp(self, batch, batch_idx, *args, **kwargs):
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self(x.to(self.device))
7 changes: 3 additions & 4 deletions tests/checkpointing/test_model_checkpoint.py
@@ -493,10 +493,9 @@ def training_step(self, batch, batch_idx):
if batch_idx % 2 == 0:
log_val = log_val.item()

result = pl.core.step_result.TrainResult(loss_val)
result.log('some_val', log_val * log_val, prog_bar=True, logger=False)
result.log('train_some_val', log_val * log_val)
return result
self.log('some_val', log_val * log_val, prog_bar=True, logger=False)
self.log('train_some_val', log_val * log_val)
return loss_val

def validation_step(self, batch, batch_idx):
y_hat = self(batch)
4 changes: 2 additions & 2 deletions tests/core/test_results.py
@@ -19,7 +19,7 @@
import torch.distributed as dist
import torch.multiprocessing as mp
from pytorch_lightning import Trainer
from pytorch_lightning.core.step_result import Result, TrainResult, EvalResult
from pytorch_lightning.core.step_result import Result, EvalResult
import tests.base.develop_utils as tutils

from tests.base import EvalModelTemplate
@@ -45,7 +45,7 @@ def _ddp_test_fn(rank, worldsize, result_cls: Result):
assert res["test_tensor"].item() == dist.get_world_size(), "Result-Log does not work properly with DDP and Tensors"


@pytest.mark.parametrize("result_cls", [Result, TrainResult, EvalResult])
@pytest.mark.parametrize("result_cls", [Result, EvalResult])
@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
def test_result_reduce_ddp(result_cls):
"""Make sure result logging works with DDP"""
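The hunk above shows only the changed import and parametrization; the spawn-based harness around them is collapsed by the diff view. Below is a rough, self-contained sketch of how such a DDP reduce test is commonly wired up; the process-group setup, port, helper names, and the `'sum'` reduce op are assumptions rather than code copied from the hidden part of the file:

```python
import os
import sys

import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

from pytorch_lightning.core.step_result import EvalResult, Result


def _ddp_reduce_fn(rank, worldsize, result_cls):
    # Minimal local process-group setup for the gloo backend (illustrative values).
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29501")
    dist.init_process_group("gloo", rank=rank, world_size=worldsize)

    res = result_cls()
    # Each rank logs 1.0; with sync_dist the value is reduced across ranks,
    # so the summed result equals the world size.
    res.log("test_tensor", torch.tensor([1.0]), sync_dist=True, sync_dist_op="sum")
    assert res["test_tensor"].item() == dist.get_world_size()

    dist.destroy_process_group()


@pytest.mark.parametrize("result_cls", [Result, EvalResult])
@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
def test_result_reduce_ddp_sketch(result_cls):
    worldsize = 2
    mp.spawn(_ddp_reduce_fn, args=(worldsize, result_cls), nprocs=worldsize)
```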
(Diffs for the remaining three changed files are not shown here.)
