diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py
index 4b38c7bb92e..801df42a187 100644
--- a/torchvision/transforms/functional.py
+++ b/torchvision/transforms/functional.py
@@ -311,7 +311,7 @@ def normalize(tensor, mean, std, inplace=False):
     return tensor
 
 
-def resize(img: Tensor, size: List[int], interpolation: int = 2) -> Tensor:
+def resize(img: Tensor, size: List[int], interpolation: int = Image.BILINEAR) -> Tensor:
     r"""Resize the input image to the given size.
     The image can be a PIL Image or a torch Tensor, in which case it is expected
     to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
@@ -325,7 +325,9 @@ def resize(img: Tensor, size: List[int], interpolation: int = 2) -> Tensor:
             :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.
             In torchscript mode padding as single int is not supported, use a tuple or
             list of length 1: ``[size, ]``.
-        interpolation (int, optional): Desired interpolation. Default is bilinear.
+        interpolation (int, optional): Desired interpolation enum defined by `filters`_.
+            Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR``
+            and ``PIL.Image.BICUBIC`` are supported.
 
     Returns:
         PIL Image or Tensor: Resized image.
@@ -455,7 +457,9 @@ def resized_crop(
         height (int): Height of the crop box.
         width (int): Width of the crop box.
         size (sequence or int): Desired output size. Same semantics as ``resize``.
-        interpolation (int, optional): Desired interpolation. Default is ``PIL.Image.BILINEAR``.
+        interpolation (int, optional): Desired interpolation enum defined by `filters`_.
+            Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR``
+            and ``PIL.Image.BICUBIC`` are supported.
     Returns:
         PIL Image or Tensor: Cropped image.
     """
diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py
index a403c54261f..f7d421d2b83 100644
--- a/torchvision/transforms/transforms.py
+++ b/torchvision/transforms/transforms.py
@@ -222,7 +222,9 @@ class Resize(torch.nn.Module):
             (size * height / width, size).
             In torchscript mode padding as single int is not supported, use a tuple or
             list of length 1: ``[size, ]``.
-        interpolation (int, optional): Desired interpolation. Default is ``PIL.Image.BILINEAR``
+        interpolation (int, optional): Desired interpolation enum defined by `filters`_.
+            Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR``
+            and ``PIL.Image.BICUBIC`` are supported.
     """
 
     def __init__(self, size, interpolation=Image.BILINEAR):
@@ -703,7 +705,9 @@ class RandomResizedCrop(torch.nn.Module):
             made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
         scale (tuple of float): range of size of the origin size cropped
         ratio (tuple of float): range of aspect ratio of the origin aspect ratio cropped.
-        interpolation (int): Desired interpolation. Default: ``PIL.Image.BILINEAR``
+        interpolation (int): Desired interpolation enum defined by `filters`_.
+            Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR``
+            and ``PIL.Image.BICUBIC`` are supported.
     """
 
     def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
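
Usage note (not part of the patch): a minimal sketch of the documented behavior, assuming a torchvision build where the functional resize accepts tensor inputs; the tensor shape and output sizes below are illustrative only.

import torch
from PIL import Image
from torchvision.transforms import functional as F

img = torch.rand(3, 64, 48)                        # stand-in CHW image tensor (illustrative)
out_default = F.resize(img, [32, 32])              # uses the new default, Image.BILINEAR
out_nearest = F.resize(img, [32, 32], interpolation=Image.NEAREST)
out_bicubic = F.resize(img, [32, 32], interpolation=Image.BICUBIC)
print(out_default.shape, out_nearest.shape, out_bicubic.shape)   # each torch.Size([3, 32, 32])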