Commit d49764f
fixed cyclic lr state dict (#1469)
* fixed cyclic lr state dict

* fixed circular import

* local import of torch_version_is_greater_or_equal

* Fix missing function after merge

---------

Co-authored-by: Eugene Khvedchenya <ekhvedchenya@gmail.com>
(cherry picked from commit b56fad8)
shaydeci authored and BloodAxe committed Oct 11, 2023
1 parent fbc41a8 commit d49764f
Showing 2 changed files with 19 additions and 2 deletions.
src/super_gradients/training/sg_trainer/sg_trainer.py (3 changes: 2 additions & 1 deletion)

@@ -77,6 +77,7 @@
     read_ckpt_state_dict,
     load_checkpoint_to_model,
     load_pretrained_weights,
+    get_scheduler_state,
 )
 from super_gradients.training.datasets.datasets_utils import DatasetStatisticsTensorboardLogger
 from super_gradients.training.utils.callbacks import (
@@ -617,7 +618,7 @@ def _save_checkpoint(
             state["processing_params"] = processing_params

         if self._torch_lr_scheduler is not None:
-            state["torch_scheduler_state_dict"] = self._torch_lr_scheduler.state_dict()
+            state["torch_scheduler_state_dict"] = get_scheduler_state(self._torch_lr_scheduler)

         # SAVES CURRENT MODEL AS ckpt_latest
         self.sg_logger.add_checkpoint(tag="ckpt_latest.pth", state_dict=state, global_step=epoch)
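
For context, here is a minimal sketch of the failure this change works around. It assumes an affected torch 1.x build (e.g. 1.13, where CyclicLR keeps a weakref.WeakMethod in its state); the toy model and hyperparameters are illustrative only.

# Minimal reproduction sketch of the issue (assumes an affected torch 1.x
# build such as 1.13, where CyclicLR stores a weakref.WeakMethod under
# "_scale_fn_ref"; fixed upstream in pytorch/pytorch#91400).
import torch
from torch.optim.lr_scheduler import CyclicLR

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = CyclicLR(optimizer, base_lr=0.01, max_lr=0.1)

state = scheduler.state_dict()
print("_scale_fn_ref" in state)  # True on affected builds

# torch.save pickles the state dict, so checkpointing the raw state fails:
# torch.save({"torch_scheduler_state_dict": state}, "ckpt_latest.pth")
# -> TypeError: cannot pickle a weak reference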
src/super_gradients/training/utils/checkpoint_utils.py (18 changes: 17 additions & 1 deletion)

@@ -1,7 +1,7 @@
 import collections
 import os
 import tempfile
-from typing import Union, Mapping
+from typing import Union, Mapping, Dict

 import pkg_resources
 import torch
@@ -1628,3 +1628,19 @@ def _maybe_load_preprocessing_params(model: Union[nn.Module, HasPredict], checkp
         "predict make sure to call set_dataset_processing_params."
     )
     return False
+
+
+def get_scheduler_state(scheduler) -> Dict[str, Tensor]:
+    """
+    Wrapper for getting a torch lr scheduler state dict, resolving some issues with CyclicLR
+    (see https://github.com/pytorch/pytorch/pull/91400)
+    :param scheduler: torch.optim.lr_scheduler._LRScheduler, the scheduler
+    :return: the scheduler's state_dict
+    """
+    from super_gradients.training.utils import torch_version_is_greater_or_equal
+    from torch.optim.lr_scheduler import CyclicLR
+
+    state = scheduler.state_dict()
+    if isinstance(scheduler, CyclicLR) and not torch_version_is_greater_or_equal(2, 0):
+        del state["_scale_fn_ref"]
+    return state
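
A short usage sketch of the new helper (the file name and toy model are illustrative; assumes a super-gradients build that contains this commit):

# Usage sketch: checkpoint a CyclicLR safely across torch versions.
import torch
from torch.optim.lr_scheduler import CyclicLR
from super_gradients.training.utils.checkpoint_utils import get_scheduler_state

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = CyclicLR(optimizer, base_lr=0.01, max_lr=0.1)

state = get_scheduler_state(scheduler)  # "_scale_fn_ref" stripped on torch < 2.0
torch.save({"torch_scheduler_state_dict": state}, "ckpt_latest.pth")

# Loading still works: a freshly constructed CyclicLR already holds its own
# scale function, so the deleted key is not needed to restore the schedule.
ckpt = torch.load("ckpt_latest.pth")
scheduler.load_state_dict(ckpt["torch_scheduler_state_dict"])

Note that only the save path changes; loading is untouched because the base scheduler's load_state_dict merges the loaded dict into the instance's existing attributes.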
