docformatter: config with black #18064

Merged: 16 commits, Aug 9, 2023
Changes from all commits
6 changes: 6 additions & 0 deletions .actions/assistant.py
@@ -86,6 +86,7 @@ def adjust(self, unfreeze: str) -> str:
'arrow>=1.2.0'
>>> _RequirementWithComment("arrow").adjust("major")
'arrow'

"""
out = str(self)
if self.strict:
@@ -115,6 +116,7 @@ def _parse_requirements(strs: Union[str, Iterable[str]]) -> Iterator[_Requiremen
>>> txt = '\\n'.join(txt)
>>> [r.adjust('none') for r in _parse_requirements(txt)]
['this', 'example', 'foo # strict', 'thing']

"""
lines = yield_lines(strs)
pip_argument = None
@@ -149,6 +151,7 @@ def load_requirements(path_dir: str, file_name: str = "base.txt", unfreeze: str
>>> path_req = os.path.join(_PROJECT_ROOT, "requirements")
>>> load_requirements(path_req, "docs.txt", unfreeze="major") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
['sphinx<...]

"""
assert unfreeze in {"none", "major", "all"}
path = Path(path_dir) / file_name
Expand All @@ -165,6 +168,7 @@ def load_readme_description(path_dir: str, homepage: str, version: str) -> str:

>>> load_readme_description(_PROJECT_ROOT, "", "") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
'...PyTorch Lightning is just organized PyTorch...'

"""
path_readme = os.path.join(path_dir, "README.md")
with open(path_readme, encoding="utf-8") as fo:
@@ -244,6 +248,7 @@ def _load_aggregate_requirements(req_dir: str = "requirements", freeze_requireme
"""Load all base requirements from all particular packages and prune duplicates.

>>> _load_aggregate_requirements(os.path.join(_PROJECT_ROOT, "requirements"))

"""
requires = [
load_requirements(d, unfreeze="none" if freeze_requirements else "major")
@@ -300,6 +305,7 @@ def _replace_imports(lines: List[str], mapping: List[Tuple[str, str]], lightning
'http://pytorch_lightning.ai', \
'from lightning_fabric import __version__', \
'@lightning.ai']

"""
out = lines[:]
for source_import, target_import in mapping:
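The edit repeated throughout this diff is docformatter's doing: presumably driven by the new configuration, it inserts a blank line between a docstring's last line and its closing quotes. A minimal sketch of the effect on a hypothetical function (not a file from this PR):

```python
def scale(value: float, factor: float = 2.0) -> float:
    """Scale a value by a constant factor.

    Args:
        value: The number to scale.
        factor: The multiplier applied to ``value``.

    """  # docformatter keeps a blank line before these closing quotes
    return value * factor
```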
3 changes: 2 additions & 1 deletion .pre-commit-config.yaml
@@ -60,7 +60,8 @@ repos:
rev: v1.7.3
hooks:
- id: docformatter
- args: [--in-place, --wrap-summaries=115, --wrap-descriptions=120]
+ additional_dependencies: [tomli]
+ args: ["--in-place"]

- repo: https://github.com/asottile/yesqa
rev: v1.5.0
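Dropping the wrap arguments from the hook implies docformatter now reads its settings from `pyproject.toml`; the `tomli` dependency lets it parse that file on Pythons without built-in TOML support. A sketch of what such a section could look like (the option names are real docformatter settings, but the values below are assumptions, not taken from this diff):

```toml
# Hypothetical [tool.docformatter] section in pyproject.toml
[tool.docformatter]
recursive = true        # format docstrings in all subpackages
wrap-summaries = 120    # assumed to match the repo's line length
wrap-descriptions = 120
blank = true            # blank line before closing quotes (assumed key)
```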
2 changes: 2 additions & 0 deletions docs/source-app/examples/file_server/app.py
@@ -23,6 +23,7 @@ def __init__(
drive: The drive used to share data inside your application.
base_dir: The local directory where the data will be stored.
chunk_size: The number of bytes to download/upload at once.

"""
super().__init__(
cloud_build_config=L.BuildConfig(["flask", "flask-cors"]),
@@ -238,4 +239,5 @@ def test_file_server_in_cloud():

# 2. By calling logs = get_logs_fn(),
# you get all the logs currently on the admin page.

"""
1 change: 1 addition & 0 deletions docs/source-app/examples/github_repo_runner/app.py
@@ -36,6 +36,7 @@ def __init__(
script_args: The arguments to be provided to the script.
requirements: The Python requirements to run the script.
cloud_compute: The object to select the cloud instance.

"""
super().__init__(
script_path=script_path,
@@ -10,6 +10,7 @@ def __init__(self, num_users: int = 100):

Arguments:
num_users: Number of users emulated by Locust

"""
# Note: Using the default port 8089 of Locust.
super().__init__(
2 changes: 2 additions & 0 deletions docs/source-app/examples/model_server_app/model_server.py
@@ -18,6 +18,7 @@ class MLServer(LightningWork):
Example: "mlserver_sklearn.SKLearnModel".
Learn more here: $ML_SERVER_URL/tree/master/runtimes
workers: Number of server workers.

"""

def __init__(
@@ -51,6 +52,7 @@ def run(self, model_path: Path):

Arguments:
model_path: The path to the trained model.

"""
# 1: Use the host and port at runtime so it works in the cloud.
# $ML_SERVER_URL/blob/master/mlserver/settings.py#L50
1 change: 1 addition & 0 deletions examples/app/hpo/utils.py
@@ -15,6 +15,7 @@ def download_data(url: str, path: str = "data/", verbose: bool = False) -> None:

Usage:
download_file('http://web4host.net/5MB.zip')

"""
if url == "NEED_TO_BE_CREATED":
raise NotImplementedError
1 change: 1 addition & 0 deletions examples/app/layout/app.py
@@ -5,6 +5,7 @@
lightning run app examples/layout/demo.py

This starts one server for each flow that returns a UI. Access the UI at the link printed in the terminal.

"""

import os
13 changes: 11 additions & 2 deletions examples/fabric/build_your_own_trainer/trainer.py
@@ -137,6 +137,7 @@ def fit(
If not specified, no validation will run.
ckpt_path: Path to previous checkpoints to resume training from.
If specified, will always look for the latest checkpoint within the given directory.

"""
self.fabric.launch()

@@ -207,6 +208,7 @@ def train_loop(
If greater than the number of batches in the ``train_loader``, this has no effect.
scheduler_cfg: The learning rate scheduler configuration.
Have a look at :meth:`lightning.pytorch.LightningModule.configure_optimizers` for supported values.

"""
self.fabric.call("on_train_epoch_start")
iterable = self.progbar_wrapper(
@@ -268,6 +270,7 @@ def val_loop(
val_loader: The dataloader yielding the validation batches.
limit_batches: Limits the batches during this validation epoch.
If greater than the number of batches in the ``val_loader``, this has no effect.

"""
# no validation if val_loader wasn't passed
if val_loader is None:
@@ -311,13 +314,14 @@ def val_loop(
torch.set_grad_enabled(True)

def training_step(self, model: L.LightningModule, batch: Any, batch_idx: int) -> torch.Tensor:
"""A single training step, running forward and backward. The optimizer step is called separately, as this
is given as a closure to the optimizer step.
"""A single training step, running forward and backward. The optimizer step is called separately, as this is
given as a closure to the optimizer step.

Args:
model: the lightning module to train
batch: the batch to run the forward on
batch_idx: index of the current batch w.r.t the current epoch

"""
outputs: Union[torch.Tensor, Mapping[str, Any]] = model.training_step(batch, batch_idx=batch_idx)

@@ -347,6 +351,7 @@ def step_scheduler(
Have a look at :meth:`lightning.pytorch.LightningModule.configure_optimizers` for supported values.
level: whether we are trying to step on epoch- or step-level
current_value: Holds the current_epoch if ``level==epoch``, else holds the ``global_step``

"""

# no scheduler
@@ -395,6 +400,7 @@ def progbar_wrapper(self, iterable: Iterable, total: int, **kwargs: Any):
Args:
iterable: the iterable to wrap with tqdm
total: the total length of the iterable, necessary in case the number of batches was limited.

"""
if self.fabric.is_global_zero:
return tqdm(iterable, total=total, **kwargs)
@@ -406,6 +412,7 @@ def load(self, state: Optional[Mapping], path: str) -> None:
Args:
state: a mapping containing model, optimizer and lr scheduler
path: the path to load the checkpoint from

"""
if state is None:
state = {}
@@ -458,6 +465,7 @@ def _parse_optimizers_schedulers(
Args:
configure_optim_output: The output of ``configure_optimizers``.
For supported values, please refer to :meth:`lightning.pytorch.LightningModule.configure_optimizers`.

"""
_lr_sched_defaults = {"interval": "epoch", "frequency": 1, "monitor": "val_loss"}

@@ -511,6 +519,7 @@ def _format_iterable(
prog_bar: a progressbar (on global rank zero) or an iterable (every other rank).
candidates: the values to add as postfix strings to the progressbar.
prefix: the prefix to add to each of these values.

"""
if isinstance(prog_bar, tqdm) and candidates is not None:
postfix_str = ""
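The hunks above only touch docstrings, so the trainer's API is unchanged. For orientation, a rough sketch of how a trainer like this might be driven (the class name `MyCustomTrainer`, its constructor arguments, and the toy model are assumptions for illustration, not taken from this PR):

```python
import lightning as L
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset

from trainer import MyCustomTrainer  # assumed name of the class defined above


class TinyClassifier(L.LightningModule):
    """Smallest module satisfying the hooks the trainer calls."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(8, 2)

    def training_step(self, batch, batch_idx):
        x, y = batch
        return F.cross_entropy(self.layer(x), y)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)


data = TensorDataset(torch.randn(64, 8), torch.randint(0, 2, (64,)))
trainer = MyCustomTrainer(max_epochs=2)  # constructor args are an assumption
trainer.fit(TinyClassifier(), train_loader=DataLoader(data, batch_size=16))
```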
1 change: 1 addition & 0 deletions examples/fabric/image_classifier/train_fabric.py
@@ -25,6 +25,7 @@
Accelerate your training loop by setting the ``--accelerator``, ``--strategy``, ``--devices`` options directly from
the command line. See ``lightning run model --help`` or learn more from the documentation:
https://lightning.ai/docs/fabric.

"""

import argparse
1 change: 1 addition & 0 deletions examples/pytorch/basics/autoencoder.py
@@ -14,6 +14,7 @@
"""MNIST autoencoder example.

To run: python autoencoder.py --trainer.max_epochs=50

"""
from os import path
from typing import Optional, Tuple
1 change: 1 addition & 0 deletions examples/pytorch/basics/backbone_image_classifier.py
@@ -14,6 +14,7 @@
"""MNIST backbone image classifier example.

To run: python backbone_image_classifier.py --trainer.max_epochs=50

"""
from os import path
from typing import Optional
1 change: 1 addition & 0 deletions examples/pytorch/basics/profiler_example.py
@@ -20,6 +20,7 @@
* With PyTorch Tensorboard Profiler (Instructions are here: https://github.com/pytorch/kineto/tree/master/tb_plugin)
1. pip install tensorboard torch-tb-profiler
2. tensorboard --logdir={FOLDER}

"""

from os import path
@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computer vision example on Transfer Learning. This computer vision example illustrates how one could fine-tune a
- pre-trained network (by default, a ResNet50 is used) using pytorch-lightning. For the sake of this example, the
- 'cats and dogs dataset' (~60MB, see `DATA_URL` below) and the proposed network (denoted by `TransferLearningModel`,
- see below) is trained for 15 epochs.
+ pre-trained network (by default, a ResNet50 is used) using pytorch-lightning. For the sake of this example, the 'cats
+ and dogs dataset' (~60MB, see `DATA_URL` below) and the proposed network (denoted by `TransferLearningModel`, see
+ below) is trained for 15 epochs.

The training consists of three stages.

@@ -37,6 +37,7 @@

To run:
python computer_vision_fine_tuning.py fit

"""

import logging
@@ -97,6 +98,7 @@ def __init__(self, dl_path: Union[str, Path] = "data", num_workers: int = 0, bat
dl_path: root directory where to download the data
num_workers: number of CPU workers
batch_size: number of samples in a batch

"""
super().__init__()

@@ -174,6 +176,7 @@ def __init__(
milestones: List of two epoch milestones
lr: Initial learning rate
lr_scheduler_gamma: Factor by which the learning rate is reduced at each milestone

"""
super().__init__()
self.backbone = backbone
@@ -209,6 +212,7 @@ def forward(self, x):
"""Forward pass.

Returns logits.

"""
# 1. Feature extraction:
x = self.feature_extractor(x)
@@ -16,6 +16,7 @@
After a few epochs, launch TensorBoard to see the images being generated at every batch:

tensorboard --logdir default

"""
from argparse import ArgumentParser, Namespace

1 change: 1 addition & 0 deletions examples/pytorch/domain_templates/imagenet.py
@@ -28,6 +28,7 @@

python imagenet.py --help
python imagenet.py fit --help

"""
import os
from typing import Optional