diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py
index d0290f93249..38fc417204c 100644
--- a/torchvision/transforms/transforms.py
+++ b/torchvision/transforms/transforms.py
@@ -199,21 +199,21 @@ def forward(self, image):
 
 
 class ToPILImage:
-    """Convert a tensor or an ndarray to PIL Image - this does not scale values.
+    """Convert a tensor or an ndarray to PIL Image.
 
     This transform does not support torchscript.
 
     Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
-    H x W x C to a PIL Image while preserving the value range.
+    H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
 
     Args:
         mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
             If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
 
+            - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
             - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
             - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
-            - If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
-              ``short``).
+            - If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``, ``short``).
 
     .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
     """