Update mypy job to 1.1.1 (#16974)
(cherry picked from commit 9583128)
carmocca authored and Borda committed Mar 31, 2023
1 parent e95ac31 commit 8f9ab85
Showing 24 changed files with 83 additions and 77 deletions.
2 changes: 1 addition & 1 deletion requirements/typing.txt
@@ -1,4 +1,4 @@
-mypy==0.982
+mypy==1.1.1
-f https://download.pytorch.org/whl/test/cpu/torch_test.html --pre
torch==2.0.0

2 changes: 1 addition & 1 deletion src/lightning_app/core/app.py
@@ -76,7 +76,7 @@ def __init__(
root: Union["LightningFlow", LightningWork],
flow_cloud_compute: Optional["CloudCompute"] = None,
log_level: str = "info",
-info: frontend.AppInfo = None,
+info: Optional[frontend.AppInfo] = None,
root_path: str = "",
) -> None:
"""The Lightning App, or App in short runs a tree of one or more components that interact to create end-to-end
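Context for the `Optional[frontend.AppInfo]` change above: mypy turns on `no_implicit_optional` by default since roughly 0.990, so a parameter that defaults to `None` needs an explicit `Optional[...]` annotation. A minimal sketch with illustrative names (not Lightning code):

from typing import Optional


def start_app(log_level: str = "info", info: Optional[dict] = None) -> None:
    # With an implicit Optional, e.g. `info: dict = None`, newer mypy reports an
    # incompatible default for the argument; the explicit form above passes.
    print(log_level, info)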
2 changes: 1 addition & 1 deletion src/lightning_app/utilities/packaging/build_config.py
@@ -196,7 +196,7 @@ def to_dict(self) -> Dict:
return {"__build_config__": asdict(self)}

@classmethod
-def from_dict(cls, d: Dict) -> Self: # type: ignore[valid-type]
+def from_dict(cls, d: Dict) -> Self:
return cls(**d["__build_config__"])


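Background for the `valid-type` ignores removed here and in several files below: mypy 1.0 and later understand `typing_extensions.Self`, so the workaround comment is no longer needed. A self-contained sketch of the pattern, with simplified fields rather than the real BuildConfig:

from dataclasses import asdict, dataclass
from typing import Any, Dict

from typing_extensions import Self


@dataclass
class BuildConfig:
    requirements: str = ""  # simplified stand-in for the real field set

    def to_dict(self) -> Dict[str, Any]:
        return {"__build_config__": asdict(self)}

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> Self:
        # `Self` preserves subclass types: SubConfig.from_dict(...) is typed as SubConfig.
        return cls(**d["__build_config__"])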
6 changes: 3 additions & 3 deletions src/lightning_fabric/plugins/collectives/collective.py
@@ -111,12 +111,12 @@ def destroy_group(cls, group: CollectibleGroup) -> None:
def _convert_to_native_op(cls, op: str) -> Any:
...

-def setup(self, **kwargs: Any) -> Self: # type: ignore[valid-type]
+def setup(self, **kwargs: Any) -> Self:
if not self.is_initialized():
self.init_group(**kwargs)
return self

-def create_group(self, **kwargs: Any) -> Self: # type: ignore[valid-type]
+def create_group(self, **kwargs: Any) -> Self:
"""Create a group.
This assumes that :meth:`~lightning_fabric.plugins.collectives.Collective.init_group` has been
@@ -127,7 +127,7 @@ def create_group(self, **kwargs: Any) -> Self: # type: ignore[valid-type]
self._group = self.new_group(**kwargs)
return self

-def teardown(self) -> Self: # type: ignore[valid-type]
+def teardown(self) -> Self:
if self._group is None:
raise RuntimeError(f"`{type(self).__name__}` does not own a group to destroy.")
self.destroy_group(self._group)
6 changes: 2 additions & 4 deletions src/lightning_fabric/plugins/collectives/torch_collective.py
@@ -106,9 +106,7 @@ def barrier(self, device_ids: Optional[List[int]] = None) -> None:
def monitored_barrier(self, timeout: Optional[datetime.timedelta] = None, wait_all_ranks: bool = False) -> None:
dist.monitored_barrier(group=self.group, timeout=timeout, wait_all_ranks=wait_all_ranks)

-def setup(
-self, main_address: Optional[str] = None, main_port: Optional[str] = None, **kwargs: Any
-) -> Self: # type: ignore[valid-type]
+def setup(self, main_address: Optional[str] = None, main_port: Optional[str] = None, **kwargs: Any) -> Self:
if self.is_initialized():
return self
# maybe set addr
@@ -134,7 +132,7 @@ def setup(
os.environ.pop("MASTER_PORT", None)
return self

-def teardown(self) -> Self: # type: ignore[valid-type]
+def teardown(self) -> Self:
non_group_member = self.group == dist.GroupMember.NON_GROUP_MEMBER
super().teardown() # will destroy its own group
# try to destroy the default group. this should only be done by a group member to avoid race conditions,
14 changes: 7 additions & 7 deletions src/lightning_fabric/utilities/device_dtype_mixin.py
@@ -46,14 +46,14 @@ def device(self) -> torch.device:

return device

-def to(self, *args: Any, **kwargs: Any) -> Self: # type: ignore[valid-type]
+def to(self, *args: Any, **kwargs: Any) -> Self:
"""See :meth:`torch.nn.Module.to`."""
# this converts `str` device to `torch.device`
device, dtype = torch._C._nn._parse_to(*args, **kwargs)[:2]
self.__update_properties(device=device, dtype=dtype)
return super().to(*args, **kwargs)

-def cuda(self, device: Optional[Union[torch.device, int]] = None) -> Self: # type: ignore[valid-type]
+def cuda(self, device: Optional[Union[torch.device, int]] = None) -> Self:
"""Moves all model parameters and buffers to the GPU. This also makes associated parameters and buffers
different objects. So it should be called before constructing optimizer if the module will live on GPU
while being optimized.
@@ -72,27 +72,27 @@ def cuda(self, device: Optional[Union[torch.device, int]] = None) -> Self: # ty
self.__update_properties(device=device)
return super().cuda(device=device)

-def cpu(self) -> Self: # type: ignore[valid-type]
+def cpu(self) -> Self:
"""See :meth:`torch.nn.Module.cpu`."""
self.__update_properties(device=torch.device("cpu"))
return super().cpu()

-def type(self, dst_type: Union[str, torch.dtype]) -> Self: # type: ignore[valid-type]
+def type(self, dst_type: Union[str, torch.dtype]) -> Self:
"""See :meth:`torch.nn.Module.type`."""
self.__update_properties(dtype=dst_type)
return super().type(dst_type=dst_type)

-def float(self) -> Self: # type: ignore[valid-type]
+def float(self) -> Self:
"""See :meth:`torch.nn.Module.float`."""
self.__update_properties(dtype=torch.float)
return super().float()

-def double(self) -> Self: # type: ignore[valid-type]
+def double(self) -> Self:
"""See :meth:`torch.nn.Module.double`."""
self.__update_properties(dtype=torch.double)
return super().double()

-def half(self) -> Self: # type: ignore[valid-type]
+def half(self) -> Self:
"""See :meth:`torch.nn.Module.half`."""
self.__update_properties(dtype=torch.half)
return super().half()
4 changes: 2 additions & 2 deletions src/lightning_fabric/utilities/types.py
@@ -30,7 +30,7 @@
if torch.distributed.is_available():
from torch.distributed import ProcessGroup, ReduceOp

-RedOpType: TypeAlias = ReduceOp.RedOpType if _TORCH_GREATER_EQUAL_1_13 else object # type: ignore[misc]
+RedOpType: TypeAlias = ReduceOp.RedOpType if _TORCH_GREATER_EQUAL_1_13 else object # type: ignore[valid-type]
else:
ProcessGroup = Any # type: ignore[assignment,misc]
ReduceOp = object # type: ignore[assignment,misc] # we are using isinstance check once
@@ -75,7 +75,7 @@ def step(self, epoch: Optional[int] = None) -> None:


_TORCH_LRSCHEDULER: TypeAlias = (
-torch.optim.lr_scheduler.LRScheduler # type: ignore[misc]
+torch.optim.lr_scheduler.LRScheduler # type: ignore[valid-type]
if _TORCH_GREATER_EQUAL_2_0
else torch.optim.lr_scheduler._LRScheduler
)
2 changes: 1 addition & 1 deletion src/lightning_fabric/wrappers.py
@@ -178,7 +178,7 @@ def __iter__(self) -> Union[Iterator[Any], Generator[Any, None, None]]:
iterator = iter(self._dataloader)
if self._device is None:
yield from iterator
-return
+return None

for item in iterator:
yield move_data_to_device(item, self._device)
1 change: 1 addition & 0 deletions src/pytorch_lightning/CHANGELOG.md
@@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

- Removed registration of `ShardedTensor` state dict hooks in `LightningModule.__init__` with `torch>=2.1` ([#16892](https://github.com/Lightning-AI/lightning/pull/16892))

+- Removed the `lightning.pytorch.core.saving.ModelIO` class interface ([#16974](https://github.com/Lightning-AI/lightning/pull/16974))


### Fixed
4 changes: 2 additions & 2 deletions src/pytorch_lightning/_graveyard/legacy_import_unpickler.py
@@ -34,13 +34,13 @@ def compare_version(package: str, op: Callable, version: str, use_base_version:
# https://github.com/Lightning-AI/metrics/blob/v0.7.3/torchmetrics/metric.py#L96
try:
if hasattr(torchmetrics.utilities.imports, "_compare_version"):
-torchmetrics.utilities.imports._compare_version = compare_version # type: ignore
+torchmetrics.utilities.imports._compare_version = compare_version # type: ignore[assignment]
except AttributeError:
pass

try:
if hasattr(torchmetrics.metric, "_compare_version"):
-torchmetrics.metric._compare_version = compare_version # type: ignore
+torchmetrics.metric._compare_version = compare_version
except AttributeError:
pass
pickle.Unpickler = RedirectingUnpickler # type: ignore
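A note on the two ignores touched above: narrowing a bare `# type: ignore` to `# type: ignore[assignment]`, or dropping it when the error is gone, keeps the suppression from hiding unrelated errors on the same line. A tiny illustration, not from this repository:

import math

# Only the intentional int/float mismatch is silenced; any other error reported
# on this line would still surface in a mypy run.
ratio: int = math.pi  # type: ignore[assignment]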
10 changes: 5 additions & 5 deletions src/pytorch_lightning/callbacks/quantization.py
@@ -239,7 +239,7 @@ def _prepare_model(self, model: "pl.LightningModule") -> None:
# manually specify where tensors will be converted from quantized
# to floating point in the quantized model
self.__module_forward = model.forward
-model.forward = wrap_qat_forward_context( # type: ignore [assignment]
+model.forward = wrap_qat_forward_context( # type: ignore[method-assign]
quant_cb=self, model=model, func=model.forward, trigger_condition=self._collect_quantization
)

@@ -254,7 +254,7 @@ def _prepare_model(self, model: "pl.LightningModule") -> None:
)

elif isinstance(self._qconfig, QConfig):
-model.qconfig = self._qconfig # type: ignore [assignment]
+model.qconfig = self._qconfig # type: ignore[assignment]

if self._check_feasible_fuse(model):
fuse_modules(model, self._modules_to_fuse, inplace=True)
@@ -274,7 +274,7 @@ def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -

def on_fit_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if not self._convert_on_fit_end:
-pl_module.forward = self.__module_forward # type: ignore [assignment]
+pl_module.forward = self.__module_forward # type: ignore[method-assign]
return
pl_module.eval()
# Convert the observed model to a quantized model. This does several things:
@@ -284,12 +284,12 @@ def on_fit_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") ->
torch.quantization.convert(pl_module, inplace=True)
# check we shall preserve wrapper
if self._input_compatible:
-pl_module.forward = wrap_quantize_forward_context( # type: ignore [assignment]
+pl_module.forward = wrap_quantize_forward_context( # type: ignore[method-assign]
model=pl_module,
func=self.__module_forward,
)
else:
-pl_module.forward = self.__module_forward # type: ignore [assignment]
+pl_module.forward = self.__module_forward # type: ignore[method-assign]

def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if "train" in self._observer_disabled_stages:
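The `method-assign` ignores introduced above (and in the datamodule and core module changes below) reflect mypy splitting assignments that overwrite a method out of the general `assignment` error code. The wrapping pattern looks roughly like this hypothetical sketch:

from typing import Callable


class Net:
    def forward(self, x: int) -> int:
        return x


def with_logging(fn: Callable[[int], int]) -> Callable[[int], int]:
    def wrapped(x: int) -> int:
        print("calling forward")
        return fn(x)

    return wrapped


net = Net()
# Monkeypatching a bound method is reported as [method-assign] by mypy 1.x
# (older releases grouped it under [assignment]).
net.forward = with_logging(net.forward)  # type: ignore[method-assign]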
15 changes: 8 additions & 7 deletions src/pytorch_lightning/core/datamodule.py
@@ -14,7 +14,7 @@
"""LightningDataModule for loading DataLoaders with ease."""
import inspect
from argparse import ArgumentParser, Namespace
-from typing import Any, Dict, IO, List, Mapping, Optional, Sequence, Tuple, Union
+from typing import Any, cast, Dict, IO, List, Mapping, Optional, Sequence, Tuple, Union

from torch.utils.data import DataLoader, Dataset, IterableDataset
from typing_extensions import Self
@@ -188,13 +188,13 @@ def predict_dataloader() -> EVAL_DATALOADERS:

datamodule = cls(**datamodule_kwargs, **special_kwargs)
if train_dataset is not None:
-datamodule.train_dataloader = train_dataloader # type: ignore[assignment]
+datamodule.train_dataloader = train_dataloader # type: ignore[method-assign]
if val_dataset is not None:
-datamodule.val_dataloader = val_dataloader # type: ignore[assignment]
+datamodule.val_dataloader = val_dataloader # type: ignore[method-assign]
if test_dataset is not None:
-datamodule.test_dataloader = test_dataloader # type: ignore[assignment]
+datamodule.test_dataloader = test_dataloader # type: ignore[method-assign]
if predict_dataset is not None:
-datamodule.predict_dataloader = predict_dataloader # type: ignore[assignment]
+datamodule.predict_dataloader = predict_dataloader # type: ignore[method-assign]
return datamodule

def state_dict(self) -> Dict[str, Any]:
@@ -219,7 +219,7 @@ def load_from_checkpoint(
checkpoint_path: Union[_PATH, IO],
hparams_file: Optional[_PATH] = None,
**kwargs: Any,
-) -> Self: # type: ignore[valid-type]
+) -> Self:
r"""
Primary way of loading a datamodule from a checkpoint. When Lightning saves a checkpoint
it stores the arguments passed to ``__init__`` in the checkpoint under ``"datamodule_hyper_parameters"``.
@@ -273,11 +273,12 @@ def load_from_checkpoint(
)
"""
-return _load_from_checkpoint(
+loaded = _load_from_checkpoint(
cls,
checkpoint_path,
map_location=None,
hparams_file=hparams_file,
strict=None,
**kwargs,
)
+return cast(Self, loaded)
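The `cast` introduced above is purely a static-typing device: it does nothing at runtime and only tells mypy that the object produced by the shared checkpoint loader has the type of the class being loaded. A standalone sketch of the pattern; `_load_blob` is a hypothetical stand-in for `_load_from_checkpoint`:

from typing import Any, Dict, cast

from typing_extensions import Self


def _load_blob(cls: type, blob: Dict[str, Any]) -> object:
    # Deliberately broad return type, like the shared loader in Lightning.
    return cls(**blob)


class DataModule:
    def __init__(self, batch_size: int = 32) -> None:
        self.batch_size = batch_size

    @classmethod
    def load(cls, blob: Dict[str, Any]) -> Self:
        loaded = _load_blob(cls, blob)
        # cast() narrows `object` to `Self` for the type checker only.
        return cast(Self, loaded)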
24 changes: 12 additions & 12 deletions src/pytorch_lightning/core/module.py
@@ -512,7 +512,7 @@ def log(
add_dataloader_idx=add_dataloader_idx,
batch_size=batch_size,
sync_dist=sync_dist and _distributed_available(),
-sync_dist_fn=self.trainer.strategy.reduce or _sync_ddp,
+sync_dist_fn=self.trainer.strategy.reduce or _sync_ddp, # type: ignore[truthy-function]
sync_dist_group=sync_dist_group,
metric_attribute=metric_attribute,
rank_zero_only=rank_zero_only,
@@ -754,7 +754,7 @@ def training_step(self, batch, batch_idx, hiddens):
"""
rank_zero_warn("`training_step` must be implemented to be used with the Lightning Trainer")

-def training_step_end(self, step_output: STEP_OUTPUT) -> STEP_OUTPUT:
+def training_step_end(self, step_output: STEP_OUTPUT) -> STEP_OUTPUT: # type: ignore[empty-body]
"""Use this when training with dp because :meth:`training_step` will operate on only part of the batch.
However, this is still optional and only needed for things like softmax or NCE loss.
@@ -2082,11 +2082,11 @@ def from_compiled(cls, model: "torch._dynamo.OptimizedModule") -> "pl.LightningM
"original_predict_step": orig_module.predict_step,
}

-orig_module.forward = model.dynamo_ctx(orig_module.forward) # type: ignore[assignment]
-orig_module.training_step = model.dynamo_ctx(orig_module.training_step) # type: ignore[assignment]
-orig_module.validation_step = model.dynamo_ctx(orig_module.validation_step) # type: ignore[assignment]
-orig_module.test_step = model.dynamo_ctx(orig_module.test_step) # type: ignore[assignment]
-orig_module.predict_step = model.dynamo_ctx(orig_module.predict_step) # type: ignore[assignment]
+orig_module.forward = model.dynamo_ctx(orig_module.forward) # type: ignore[method-assign]
+orig_module.training_step = model.dynamo_ctx(orig_module.training_step) # type: ignore[method-assign]
+orig_module.validation_step = model.dynamo_ctx(orig_module.validation_step) # type: ignore[method-assign]
+orig_module.test_step = model.dynamo_ctx(orig_module.test_step) # type: ignore[method-assign]
+orig_module.predict_step = model.dynamo_ctx(orig_module.predict_step) # type: ignore[method-assign]
return orig_module

@classmethod
@@ -2116,11 +2116,11 @@ def to_uncompiled(cls, model: Union["pl.LightningModule", "torch._dynamo.Optimiz
else:
raise ValueError("`model` must either be an instance of OptimizedModule or LightningModule")

-model.forward = model._compiler_ctx["original_forward"] # type: ignore[assignment,index]
-model.training_step = model._compiler_ctx["original_training_step"] # type: ignore[assignment,index]
-model.validation_step = model._compiler_ctx["original_validation_step"] # type: ignore[assignment,index]
-model.test_step = model._compiler_ctx["original_test_step"] # type: ignore[assignment,index]
-model.predict_step = model._compiler_ctx["original_predict_step"] # type: ignore[assignment,index]
+model.forward = model._compiler_ctx["original_forward"] # type: ignore[method-assign,index]
+model.training_step = model._compiler_ctx["original_training_step"] # type: ignore[method-assign,index]
+model.validation_step = model._compiler_ctx["original_validation_step"] # type: ignore[method-assign,index]
+model.test_step = model._compiler_ctx["original_test_step"] # type: ignore[method-assign,index]
+model.predict_step = model._compiler_ctx["original_predict_step"] # type: ignore[method-assign,index]
model._compiler_ctx = None

return model # type: ignore[return-value]
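About the `truthy-function` ignore added in `log()` above: recent mypy versions warn when a function or bound method is used directly in a boolean context, because such an object is always truthy. An isolated example of the pattern being silenced, not from Lightning:

def custom_reduce(x: float) -> float:
    return x / 2


def default_reduce(x: float) -> float:
    return x


# `custom_reduce` is a plain function and therefore always truthy, so mypy
# reports it "could always be true in boolean context" without the ignore.
reduce_fn = custom_reduce or default_reduce  # type: ignore[truthy-function]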
4 changes: 2 additions & 2 deletions src/pytorch_lightning/core/saving.py
@@ -64,7 +64,7 @@ def load_from_checkpoint(
hparams_file: Optional[_PATH] = None,
strict: bool = True,
**kwargs: Any,
-) -> Self: # type: ignore[valid-type]
+) -> Self:
r"""
Primary way of loading a model from a checkpoint. When Lightning saves a checkpoint
it stores the arguments passed to ``__init__`` in the checkpoint under ``"hyper_parameters"``.
@@ -136,7 +136,7 @@ def load_from_checkpoint(
pretrained_model.freeze()
y_hat = pretrained_model(x)
"""
-return _load_from_checkpoint(
+return _load_from_checkpoint( # type: ignore[return-value]
cls,
checkpoint_path,
map_location,
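For the `return-value` ignore above: `_load_from_checkpoint` is typed to return a broader type than the method's declared `Self`, which mypy reports as an incompatible return value. The datamodule change earlier in this commit resolves the same mismatch with `cast` instead; a rough sketch of the ignore-based variant, with a hypothetical helper:

def _restore(cls: type) -> object:
    # Hypothetical loader typed more broadly than what callers actually get back.
    return cls()


class Model:
    @classmethod
    def load(cls) -> "Model":
        return _restore(cls)  # type: ignore[return-value]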
4 changes: 2 additions & 2 deletions src/pytorch_lightning/loops/loop.py
@@ -153,7 +153,7 @@ def replace(self, **loops: Union["Loop", Type["Loop"]]) -> None:
# connect to self
self.connect(**new_loops)

-def on_skip(self) -> T:
+def on_skip(self) -> T: # type: ignore[empty-body]
"""The function to run when :meth:`run` should be skipped, determined by the condition in :attr:`skip`.
Returns:
@@ -248,7 +248,7 @@ def advance(self, iterator):
def on_advance_end(self) -> None:
"""Hook to be called each time after :attr:`advance` is called."""

-def on_run_end(self) -> T:
+def on_run_end(self) -> T: # type: ignore[empty-body]
"""Hook to be called at the end of the run.
Its return argument is returned from :attr:`run`.
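The `empty-body` ignores added to the loop hooks above, and again in `servable_module.py` below, exist because newer mypy flags a function whose body is only a docstring or `...` while its signature promises a non-None return value; these hooks are intentional stubs meant to be overridden. A compact illustration with made-up names:

from typing import Any, Dict


class Servable:
    def configure_payload(self) -> Dict[str, Any]:  # type: ignore[empty-body]
        """Meant to be overridden; the base implementation intentionally has no body."""


class MyServable(Servable):
    def configure_payload(self) -> Dict[str, Any]:
        return {"body": {"x": 1}}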
8 changes: 4 additions & 4 deletions src/pytorch_lightning/serve/servable_module.py
@@ -56,10 +56,10 @@ def configure_response(self):
assert serve_cb.resp.json() == {"output": [0, 1]}
"""

-def configure_payload(self) -> Dict[str, Any]:
+def configure_payload(self) -> Dict[str, Any]: # type: ignore[empty-body]
"""Returns a request payload as a dictionary."""

-def configure_serialization(self) -> Tuple[Dict[str, Callable], Dict[str, Callable]]:
+def configure_serialization(self) -> Tuple[Dict[str, Callable], Dict[str, Callable]]: # type: ignore[empty-body]
"""Returns a tuple of dictionaries.
The first dictionary contains the name of the ``serve_step`` input variables name as its keys
@@ -69,7 +69,7 @@ def configure_serialization(self) -> Tuple[Dict[str, Callable], Dict[str, Callab
and the associated serialization function (e.g function to convert a tensors into payload).
"""

-def serve_step(self, *args: Tensor, **kwargs: Tensor) -> Dict[str, Tensor]:
+def serve_step(self, *args: Tensor, **kwargs: Tensor) -> Dict[str, Tensor]: # type: ignore[empty-body]
r"""
Returns the predictions of your model as a dictionary.
@@ -86,5 +86,5 @@ def serve_step(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
- ``dict`` - A dictionary with their associated tensors.
"""

-def configure_response(self) -> Dict[str, Any]:
+def configure_response(self) -> Dict[str, Any]: # type: ignore[empty-body]
"""Returns a response to validate the server response."""
4 changes: 3 additions & 1 deletion src/pytorch_lightning/strategies/hivemind.py
@@ -294,7 +294,9 @@ def teardown(self) -> None:

if self._optimizer_zero_grad_original is not None and self.lightning_module is not None:
# re-enable `optimizer_zero_grad`
-self.lightning_module.optimizer_zero_grad = self._optimizer_zero_grad_original # type: ignore[assignment]
+self.lightning_module.optimizer_zero_grad = ( # type: ignore[method-assign]
+self._optimizer_zero_grad_original
+)

if self._opt:
self._opt.shutdown()
2 changes: 1 addition & 1 deletion src/pytorch_lightning/strategies/horovod.py
@@ -124,7 +124,7 @@ def _unpack_lightning_optimizer(opt: Optimizer) -> Optimizer:
for config in lr_scheduler_configs:
scheduler = config.scheduler
if hasattr(scheduler, "base_lrs"):
-scheduler.base_lrs = [lr * self.world_size for lr in scheduler.base_lrs] # type: ignore[union-attr]
+scheduler.base_lrs = [lr * self.world_size for lr in scheduler.base_lrs]

assert self.lightning_module is not None
# Horovod: broadcast parameters & optimizer state to ensure consistent initialization
