From d9bfb50f7e5085ec1da3992987b44e1b5c07c34f Mon Sep 17 00:00:00 2001 From: vfdev-5 Date: Wed, 8 Jul 2020 10:40:32 +0200 Subject: [PATCH 1/2] Minor docs improvement --- torchvision/transforms/functional.py | 12 +++++++++--- torchvision/transforms/transforms.py | 10 ++++++++-- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index 4b38c7bb92e..d06b0d1463a 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -311,7 +311,7 @@ def normalize(tensor, mean, std, inplace=False): return tensor -def resize(img: Tensor, size: List[int], interpolation: int = 2) -> Tensor: +def resize(img: Tensor, size: List[int], interpolation: int = Image.BILINEAR) -> Tensor: r"""Resize the input image to the given size. The image can be a PIL Image or a torch Tensor, in which case it is expected to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions @@ -325,7 +325,10 @@ def resize(img: Tensor, size: List[int], interpolation: int = 2) -> Tensor: :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`. In torchscript mode padding as single int is not supported, use a tuple or list of length 1: ``[size, ]``. - interpolation (int, optional): Desired interpolation. Default is bilinear. + interpolation (int, optional): Desired interpolation enum defined by + `PIL resampling filters <https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters>`_ . + Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR`` + and ``PIL.Image.BICUBIC`` are supported. Returns: PIL Image or Tensor: Resized image. @@ -455,7 +458,10 @@ def resized_crop( height (int): Height of the crop box. width (int): Width of the crop box. size (sequence or int): Desired output size. Same semantics as ``resize``. - interpolation (int, optional): Desired interpolation. Default is ``PIL.Image.BILINEAR``. 
+ interpolation (int, optional): Desired interpolation enum defined by + `PIL resampling filters <https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters>`_ . + Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR`` + and ``PIL.Image.BICUBIC`` are supported. Returns: PIL Image or Tensor: Cropped image. """ diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index a403c54261f..a1ac24de5e3 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -222,7 +222,10 @@ class Resize(torch.nn.Module): (size * height / width, size). In torchscript mode padding as single int is not supported, use a tuple or list of length 1: ``[size, ]``. - interpolation (int, optional): Desired interpolation. Default is ``PIL.Image.BILINEAR`` + interpolation (int, optional): Desired interpolation enum defined by + `PIL resampling filters <https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters>`_ . + Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR`` + and ``PIL.Image.BICUBIC`` are supported. """ def __init__(self, size, interpolation=Image.BILINEAR): @@ -703,7 +706,10 @@ class RandomResizedCrop(torch.nn.Module): made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]). scale (tuple of float): range of size of the origin size cropped ratio (tuple of float): range of aspect ratio of the origin aspect ratio cropped. - interpolation (int): Desired interpolation. Default: ``PIL.Image.BILINEAR`` + interpolation (int): Desired interpolation enum defined by + `PIL resampling filters <https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters>`_ . + Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR`` + and ``PIL.Image.BICUBIC`` are supported. """ def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. 
/ 3.), interpolation=Image.BILINEAR): From 7bbf0c1bb03cb58bc3c03d02b3b6c5322e2eb0b1 Mon Sep 17 00:00:00 2001 From: vfdev-5 Date: Wed, 8 Jul 2020 10:52:59 +0200 Subject: [PATCH 2/2] Replaced link by already defined `filters`_ --- torchvision/transforms/functional.py | 6 ++---- torchvision/transforms/transforms.py | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index d06b0d1463a..801df42a187 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -325,8 +325,7 @@ def resize(img: Tensor, size: List[int], interpolation: int = Image.BILINEAR) -> :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`. In torchscript mode padding as single int is not supported, use a tuple or list of length 1: ``[size, ]``. - interpolation (int, optional): Desired interpolation enum defined by - `PIL resampling filters <https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters>`_ . + interpolation (int, optional): Desired interpolation enum defined by `filters`_. Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR`` and ``PIL.Image.BICUBIC`` are supported. @@ -458,8 +457,7 @@ def resized_crop( height (int): Height of the crop box. width (int): Width of the crop box. size (sequence or int): Desired output size. Same semantics as ``resize``. - interpolation (int, optional): Desired interpolation enum defined by - `PIL resampling filters <https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters>`_ . + interpolation (int, optional): Desired interpolation enum defined by `filters`_. Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR`` and ``PIL.Image.BICUBIC`` are supported. 
Returns: diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index a1ac24de5e3..f7d421d2b83 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -222,8 +222,7 @@ class Resize(torch.nn.Module): (size * height / width, size). In torchscript mode padding as single int is not supported, use a tuple or list of length 1: ``[size, ]``. - interpolation (int, optional): Desired interpolation enum defined by - `PIL resampling filters <https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters>`_ . + interpolation (int, optional): Desired interpolation enum defined by `filters`_. Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR`` and ``PIL.Image.BICUBIC`` are supported. """ @@ -706,8 +705,7 @@ class RandomResizedCrop(torch.nn.Module): made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]). scale (tuple of float): range of size of the origin size cropped ratio (tuple of float): range of aspect ratio of the origin aspect ratio cropped. - interpolation (int): Desired interpolation enum defined by - `PIL resampling filters <https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters>`_ . + interpolation (int): Desired interpolation enum defined by `filters`_. Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR`` and ``PIL.Image.BICUBIC`` are supported. """