Remove deprecated LoggerCollection #14283

Merged: 10 commits, Sep 12, 2022
4 changes: 4 additions & 0 deletions src/pytorch_lightning/CHANGELOG.md
@@ -66,6 +66,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Removed the experimental `pytorch_lightning.utilities.meta` functions in favor of built-in https://github.com/pytorch/torchdistx support ([#13868](https://github.com/Lightning-AI/lightning/pull/13868))


- Removed the deprecated `LoggerCollection`; `Trainer.logger` and `LightningModule.logger` now return the first logger when more than one is passed to the Trainer ([#14283](https://github.com/Lightning-AI/lightning/pull/14283))



### Fixed

- Fixed an assertion error when using a `ReduceOnPlateau` scheduler with the Horovod strategy ([#14215](https://github.com/Lightning-AI/lightning/pull/14215))
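
To make the `LoggerCollection` entry above concrete, here is a minimal sketch of the new behavior, assuming two arbitrary loggers and a placeholder `logs/` directory:

```python
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger

logger0 = TensorBoardLogger("logs/")
logger1 = CSVLogger("logs/")
trainer = Trainer(logger=[logger0, logger1])

# `trainer.logger` now returns the first configured logger, with no
# deprecation warning and no LoggerCollection wrapper.
assert trainer.logger is logger0
# The full list is still available via `trainer.loggers`.
assert trainer.loggers == [logger0, logger1]
```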
24 changes: 2 additions & 22 deletions src/pytorch_lightning/core/module.py
@@ -18,7 +18,6 @@
import numbers
import os
import tempfile
-import warnings
import weakref
from contextlib import contextmanager
from pathlib import Path
@@ -37,7 +36,7 @@
from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin, HyperparametersMixin
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.saving import ModelIO
-from pytorch_lightning.loggers import Logger, LoggerCollection
+from pytorch_lightning.loggers import Logger
from pytorch_lightning.trainer.connectors.logger_connector.fx_validator import _FxValidator
from pytorch_lightning.utilities import _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_10, GradClipAlgorithmType
from pytorch_lightning.utilities.apply_func import apply_to_collection, convert_to_tensors
@@ -261,26 +260,7 @@ def truncated_bptt_steps(self, truncated_bptt_steps: int) -> None:
    @property
    def logger(self) -> Optional[Logger]:
        """Reference to the logger object in the Trainer."""
-        # this should match the implementation of `trainer.logger`
-        # we don't reuse it so we can properly set the deprecation stacklevel
-        if self._trainer is None:
-            return
-        loggers = self.trainer.loggers
-        if len(loggers) == 0:
-            return None
-        if len(loggers) == 1:
-            return loggers[0]
-        else:
-            if not self._running_torchscript:
-                rank_zero_deprecation(
-                    "Using `lightning_module.logger` when multiple loggers are configured."
-                    " This behavior will change in v1.8 when `LoggerCollection` is removed, and"
-                    " `lightning_module.logger` will return the first logger available.",
-                    stacklevel=5,
-                )
-            with warnings.catch_warnings():
-                warnings.simplefilter("ignore")
-                return LoggerCollection(loggers)
+        return self._trainer.logger if self._trainer is not None else None

    @property
    def loggers(self) -> List[Logger]:
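User code that relied on `lightning_module.logger` fanning calls out to a `LoggerCollection` should iterate `lightning_module.loggers` instead. A minimal sketch, assuming a hook inside a `LightningModule` and a placeholder metric name:

```python
# Before v1.8, `self.logger` could be a LoggerCollection that forwarded
# each call to every logger. The explicit replacement is a plain loop:
for logger in self.loggers:
    logger.log_metrics({"train_loss": 0.25}, step=self.global_step)
```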
4 changes: 2 additions & 2 deletions src/pytorch_lightning/loggers/__init__.py
@@ -17,13 +17,13 @@
from pytorch_lightning.loggers.base import LightningLoggerBase
from pytorch_lightning.loggers.comet import _COMET_AVAILABLE, CometLogger # noqa: F401
from pytorch_lightning.loggers.csv_logs import CSVLogger
-from pytorch_lightning.loggers.logger import Logger, LoggerCollection
+from pytorch_lightning.loggers.logger import Logger
from pytorch_lightning.loggers.mlflow import _MLFLOW_AVAILABLE, MLFlowLogger # noqa: F401
from pytorch_lightning.loggers.neptune import NeptuneLogger # noqa: F401
from pytorch_lightning.loggers.tensorboard import TensorBoardLogger
from pytorch_lightning.loggers.wandb import WandbLogger # noqa: F401

-__all__ = ["CSVLogger", "LightningLoggerBase", "Logger", "LoggerCollection", "TensorBoardLogger"]
+__all__ = ["CSVLogger", "LightningLoggerBase", "Logger", "TensorBoardLogger"]

if _COMET_AVAILABLE:
    __all__.append("CometLogger")
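With `LoggerCollection` gone from `__all__` and the module namespace, `from pytorch_lightning.loggers import LoggerCollection` now raises an `ImportError`. A hypothetical compatibility guard for code that must support both pre- and post-1.8 versions:

```python
try:
    from pytorch_lightning.loggers import LoggerCollection  # pre-1.8 only
except ImportError:
    # Removed in 1.8: work with a plain list of loggers instead.
    LoggerCollection = None
```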
5 changes: 0 additions & 5 deletions src/pytorch_lightning/loggers/base.py
@@ -57,11 +57,6 @@ def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        super().__init__(*args, **kwargs)


-class LoggerCollection(logger.LoggerCollection):
-    def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
-        super().__init__(*args, **kwargs)
-
-
class DummyExperiment(logger.DummyExperiment):
    def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        rank_zero_deprecation(
97 changes: 1 addition & 96 deletions src/pytorch_lightning/loggers/logger.py
@@ -20,7 +20,7 @@
from argparse import Namespace
from collections import defaultdict
from functools import wraps
-from typing import Any, Callable, Dict, Generator, Iterable, List, Mapping, Optional, Sequence, Union
+from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Union
from weakref import ReferenceType

import numpy as np
@@ -212,97 +212,6 @@ def version(self) -> Optional[Union[int, str]]:
        """Return the experiment version."""


-class LoggerCollection(Logger):
-    """The :class:`LoggerCollection` class is used to iterate all logging actions over the given `logger_iterable`.
-
-    .. deprecated:: v1.6
-        `LoggerCollection` is deprecated in v1.6 and will be removed in v1.8.
-        Directly pass a list of loggers to the Trainer and access the list via the `trainer.loggers` attribute.
-
-    Args:
-        logger_iterable: An iterable collection of loggers
-    """
-
-    def __init__(self, logger_iterable: Iterable[Logger]):
-        super().__init__()
-        self._logger_iterable = logger_iterable
-        rank_zero_deprecation(
-            "`LoggerCollection` is deprecated in v1.6 and will be removed in v1.8. Directly pass a list of loggers"
-            " to the Trainer and access the list via the `trainer.loggers` attribute."
-        )
-
-    def __getitem__(self, index: int) -> Logger:
-        return list(self._logger_iterable)[index]
-
-    def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[Checkpoint]") -> None:
-        for logger in self._logger_iterable:
-            logger.after_save_checkpoint(checkpoint_callback)
-
-    def update_agg_funcs(
-        self,
-        agg_key_funcs: Optional[Mapping[str, Callable[[Sequence[float]], float]]] = None,
-        agg_default_func: Callable[[Sequence[float]], float] = np.mean,
-    ) -> None:
-        for logger in self._logger_iterable:
-            logger.update_agg_funcs(agg_key_funcs, agg_default_func)
-
-    @property
-    def experiment(self) -> List[Any]:
-        """Returns a list of experiment objects for all the loggers in the logger collection."""
-        return [logger.experiment for logger in self._logger_iterable]
-
-    def agg_and_log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
-        for logger in self._logger_iterable:
-            logger.agg_and_log_metrics(metrics=metrics, step=step)
-
-    def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
-        for logger in self._logger_iterable:
-            logger.log_metrics(metrics=metrics, step=step)
-
-    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace], *args: Any, **kwargs: Any) -> None:
-        for logger in self._logger_iterable:
-            logger.log_hyperparams(params, *args, **kwargs)
-
-    def log_graph(self, model: "pl.LightningModule", input_array: Optional[Tensor] = None) -> None:
-        for logger in self._logger_iterable:
-            logger.log_graph(model, input_array)
-
-    def log_text(self, *args: Any, **kwargs: Any) -> None:
-        for logger in self._logger_iterable:
-            logger.log_text(*args, **kwargs)
-
-    def log_image(self, *args: Any, **kwargs: Any) -> None:
-        for logger in self._logger_iterable:
-            logger.log_image(*args, **kwargs)
-
-    def save(self) -> None:
-        for logger in self._logger_iterable:
-            logger.save()
-
-    def finalize(self, status: str) -> None:
-        for logger in self._logger_iterable:
-            logger.finalize(status)
-
-    @property
-    def save_dir(self) -> Optional[str]:
-        """Returns ``None`` as checkpoints should be saved to default / chosen location when using multiple
-        loggers."""
-        # Checkpoints should be saved to default / chosen location when using multiple loggers
-        return None
-
-    @property
-    def name(self) -> str:
-        """Returns the unique experiment names for all the loggers in the logger collection joined by an
-        underscore."""
-        return "_".join(dict.fromkeys(str(logger.name) for logger in self._logger_iterable))
-
-    @property
-    def version(self) -> str:
-        """Returns the unique experiment versions for all the loggers in the logger collection joined by an
-        underscore."""
-        return "_".join(dict.fromkeys(str(logger.version) for logger in self._logger_iterable))


class DummyExperiment:
    """Dummy experiment."""

@@ -355,10 +264,6 @@ def __getitem__(self, idx: int) -> "DummyLogger":
        # enables self.logger[0].experiment.add_image(...)
        return self

-    def __iter__(self) -> Generator[None, None, None]:
-        # if DummyLogger is substituting a logger collection, pretend it is empty
-        yield from ()

    def __getattr__(self, name: str) -> Callable:
        """Allows the DummyLogger to be called with arbitrary methods, to avoid AttributeErrors."""

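Every method of the deleted class was a straightforward fan-out over its wrapped loggers, so equivalent behavior is a short loop over `trainer.loggers`. A sketch of the most common replacements (metric names and values are placeholders):

```python
# LoggerCollection.log_metrics(...) becomes an explicit loop:
for logger in trainer.loggers:
    logger.log_metrics({"val_acc": 0.91}, step=10)

# LoggerCollection.experiment returned a list of experiment objects:
experiments = [logger.experiment for logger in trainer.loggers]

# LoggerCollection.name joined the unique logger names with "_"; if that
# exact string is still needed, it can be rebuilt the same way:
name = "_".join(dict.fromkeys(str(logger.name) for logger in trainer.loggers))
```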
20 changes: 2 additions & 18 deletions src/pytorch_lightning/trainer/trainer.py
@@ -49,7 +49,7 @@
from pytorch_lightning.core.datamodule import LightningDataModule
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.loggers import Logger
-from pytorch_lightning.loggers.logger import DummyLogger, LoggerCollection
+from pytorch_lightning.loggers.logger import DummyLogger
from pytorch_lightning.loggers.tensorboard import TensorBoardLogger
from pytorch_lightning.loops import PredictionLoop, TrainingEpochLoop
from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop
@@ -2674,28 +2674,12 @@ def _active_loop(self) -> Optional[Union[FitLoop, EvaluationLoop, PredictionLoop]]:

    @property
    def logger(self) -> Optional[Logger]:
-        loggers = self.loggers
-        if len(loggers) == 0:
-            return None
-        if len(loggers) == 1:
-            return loggers[0]
-        else:
-            rank_zero_deprecation(
-                "Using `trainer.logger` when multiple loggers are configured."
-                " This behavior will change in v1.8 when `LoggerCollection` is removed, and"
-                " `trainer.logger` will return the first logger available.",
-                stacklevel=5,
-            )
-            with warnings.catch_warnings():
-                warnings.simplefilter("ignore")
-                return LoggerCollection(loggers)
+        return self.loggers[0] if len(self.loggers) > 0 else None

    @logger.setter
    def logger(self, logger: Optional[Logger]) -> None:
        if not logger:
            self.loggers = []
-        elif isinstance(logger, LoggerCollection):
-            self.loggers = list(logger)
        else:
            self.loggers = [logger]

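The simplified setter keeps only the single-logger and `None` paths. A sketch of how assignments now map onto `trainer.loggers` (the directory name is a placeholder):

```python
from pytorch_lightning.loggers import TensorBoardLogger

trainer.logger = TensorBoardLogger("logs/")  # stored as a one-element list
assert trainer.loggers == [trainer.logger]

trainer.logger = None  # clears the list
assert trainer.loggers == []
```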
8 changes: 7 additions & 1 deletion tests/tests_pytorch/core/test_lightning_module.py
@@ -79,7 +79,7 @@ def test_property_logger(tmpdir):
    assert model.logger is None

    logger = TensorBoardLogger(tmpdir)
-    trainer = Mock(loggers=[logger])
+    trainer = Trainer(logger=logger)
    model.trainer = trainer
    assert model.logger == logger

@@ -94,6 +94,12 @@ def test_property_loggers(tmpdir):
    model.trainer = trainer
    assert model.loggers == [logger]

+    logger0 = TensorBoardLogger(tmpdir)
+    logger1 = TensorBoardLogger(tmpdir)
+    trainer = Trainer(logger=[logger0, logger1])
+    model.trainer = trainer
+    assert model.loggers == [logger0, logger1]
+


def test_1_optimizer_toggle_model():
"""Test toggle_model runs when only one optimizer is used."""
26 changes: 1 addition & 25 deletions tests/tests_pytorch/deprecated_api/test_remove_1-8.py
@@ -26,7 +26,7 @@
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.demos.boring_classes import BoringDataModule, BoringModel
-from pytorch_lightning.loggers import CSVLogger, Logger, LoggerCollection
+from pytorch_lightning.loggers import CSVLogger, Logger
from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin
from pytorch_lightning.profiler import AbstractProfiler, BaseProfiler
from pytorch_lightning.profilers import AdvancedProfiler, Profiler, SimpleProfiler
@@ -675,30 +675,6 @@ def _get_python_cprofile_total_duration(profile):
    np.testing.assert_allclose(recorded_total_duration, expected_total_duration, rtol=0.2)


-def test_v1_8_0_logger_collection(tmpdir):
-    logger1 = CSVLogger(tmpdir)
-    logger2 = CSVLogger(tmpdir)
-
-    trainer1 = Trainer(logger=logger1)
-    trainer2 = Trainer(logger=[logger1, logger2])
-
-    # Should have no deprecation warning
-    trainer1.logger
-    trainer1.loggers
-    trainer2.loggers
-
-    with pytest.deprecated_call(match="logger` will return the first logger"):
-        _ = trainer2.logger
-    with pytest.deprecated_call(match="`LoggerCollection` is deprecated in v1.6"):
-        _ = LoggerCollection([logger1, logger2])
-
-    model = BoringModel()
-    trainer = Trainer(logger=[logger1, logger2])
-    model.trainer = trainer
-    with pytest.deprecated_call(match="logger` will return the first logger"):
-        _ = model.logger
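
With the deprecation path removed, the deleted test above has no direct successor in this file; a hypothetical check of the new behavior would reduce to:

```python
def test_logger_returns_first_of_many(tmpdir):
    logger1, logger2 = CSVLogger(tmpdir), CSVLogger(tmpdir)
    trainer = Trainer(logger=[logger1, logger2])
    assert trainer.logger is logger1  # no deprecation warning is emitted
    assert trainer.loggers == [logger1, logger2]
```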


def test_v1_8_0_precision_plugin_checkpoint_hooks(tmpdir):
    class PrecisionPluginSaveHook(PrecisionPlugin):
        def on_save_checkpoint(self, checkpoint):