separate transforms v2 legacy test utils (#7842)
pmeier authored Aug 18, 2023
1 parent 99ebb75 commit 87d54c4
Showing 12 changed files with 754 additions and 594 deletions.
473 changes: 4 additions & 469 deletions test/common_utils.py

Large diffs are not rendered by default.
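Note: the 469 lines deleted from test/common_utils.py are the legacy transforms-v2 test helpers (image, bounding-box, mask, and video makers) moving into the new test/transforms_v2_legacy_utils.py. The sketch below reconstructs, purely for illustration, the call contract those helpers expose, as inferred from the call sites later in this diff; it is a hypothetical stand-in, not the actual torchvision implementation.

import torch
from torchvision import datapoints

def make_bounding_boxes(format="XYXY", canvas_size=(32, 32), batch_dims=(), dtype=None):
    # Hypothetical sketch: random boxes that stay inside the canvas,
    # returned as datapoints.BoundingBoxes of shape (*batch_dims, 4).
    height, width = canvas_size
    x1 = torch.randint(0, width // 2, (*batch_dims, 1))
    y1 = torch.randint(0, height // 2, (*batch_dims, 1))
    x2 = x1 + torch.randint(1, width // 2, (*batch_dims, 1))
    y2 = y1 + torch.randint(1, height // 2, (*batch_dims, 1))
    boxes = torch.cat([x1, y1, x2, y2], dim=-1).to(dtype or torch.float32)
    return datapoints.BoundingBoxes(boxes, format=format, canvas_size=canvas_size)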

4 changes: 2 additions & 2 deletions test/prototype_common_utils.py
@@ -4,12 +4,12 @@

import pytest
import torch

-from common_utils import combinations_grid, DEFAULT_EXTRA_DIMS, from_loader, from_loaders, TensorLoader
from torch.nn.functional import one_hot

from torchvision.prototype import datapoints

+from transforms_v2_legacy_utils import combinations_grid, DEFAULT_EXTRA_DIMS, from_loader, from_loaders, TensorLoader


@dataclasses.dataclass
class LabelLoader(TensorLoader):
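As an aside, combinations_grid, whose import moves above, is assumed to expand keyword lists into a list of parametrization dicts, roughly as sketched here (assumed semantics, not the verbatim source):

import itertools

def combinations_grid(**kwargs):
    # Cartesian product over the keyword values, one dict per combination.
    return [dict(zip(kwargs, values)) for values in itertools.product(*kwargs.values())]

combinations_grid(dtype=["uint8", "float32"], device=["cpu", "cuda"])
# [{'dtype': 'uint8', 'device': 'cpu'}, {'dtype': 'uint8', 'device': 'cuda'},
#  {'dtype': 'float32', 'device': 'cpu'}, {'dtype': 'float32', 'device': 'cuda'}]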
26 changes: 13 additions & 13 deletions test/test_datapoints.py
@@ -2,7 +2,7 @@

import pytest
import torch
-from common_utils import assert_equal, make_bounding_box, make_image, make_segmentation_mask, make_video
+from common_utils import assert_equal, make_bounding_boxes, make_image, make_segmentation_mask, make_video
from PIL import Image

from torchvision import datapoints
@@ -68,7 +68,7 @@ def test_new_requires_grad(data, input_requires_grad, expected_requires_grad):
assert datapoint.requires_grad is expected_requires_grad


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
def test_isinstance(make_input):
assert isinstance(make_input(), torch.Tensor)

@@ -80,7 +80,7 @@ def test_wrapping_no_copy():
assert image.data_ptr() == tensor.data_ptr()


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
def test_to_wrapping(make_input):
dp = make_input()

@@ -90,7 +90,7 @@ def test_to_wrapping(make_input):
assert dp_to.dtype is torch.float64


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
@pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
def test_to_datapoint_reference(make_input, return_type):
tensor = torch.rand((3, 16, 16), dtype=torch.float64)
@@ -104,7 +104,7 @@ def test_to_datapoint_reference(make_input, return_type):
assert type(tensor) is torch.Tensor


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
@pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
def test_clone_wrapping(make_input, return_type):
dp = make_input()
@@ -116,7 +116,7 @@ def test_clone_wrapping(make_input, return_type):
assert dp_clone.data_ptr() != dp.data_ptr()


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
@pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
def test_requires_grad__wrapping(make_input, return_type):
dp = make_input(dtype=torch.float)
@@ -131,7 +131,7 @@ def test_requires_grad__wrapping(make_input, return_type):
assert dp_requires_grad.requires_grad


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
@pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
def test_detach_wrapping(make_input, return_type):
dp = make_input(dtype=torch.float).requires_grad_(True)
@@ -170,7 +170,7 @@ def test_force_subclass_with_metadata(return_type):
datapoints.set_return_type("tensor")


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
@pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
def test_other_op_no_wrapping(make_input, return_type):
dp = make_input()
@@ -182,7 +182,7 @@ def test_other_op_no_wrapping(make_input, return_type):
assert type(output) is (type(dp) if return_type == "datapoint" else torch.Tensor)


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
@pytest.mark.parametrize(
"op",
[
@@ -199,7 +199,7 @@ def test_no_tensor_output_op_no_wrapping(make_input, op):
assert type(output) is not type(dp)


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
@pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
def test_inplace_op_no_wrapping(make_input, return_type):
dp = make_input()
@@ -212,7 +212,7 @@ def test_inplace_op_no_wrapping(make_input, return_type):
assert type(dp) is original_type


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
def test_wrap(make_input):
dp = make_input()

@@ -225,7 +225,7 @@ def test_wrap(make_input):
assert dp_new.data_ptr() == output.data_ptr()


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
@pytest.mark.parametrize("requires_grad", [False, True])
def test_deepcopy(make_input, requires_grad):
dp = make_input(dtype=torch.float)
@@ -242,7 +242,7 @@ def test_deepcopy(make_input, requires_grad):
assert dp_deepcopied.requires_grad is requires_grad


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
@pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
@pytest.mark.parametrize(
"op",
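Every hunk in this file is the same mechanical rename: the single-sample maker make_bounding_box is now called make_bounding_boxes. The parametrization pattern these tests rely on looks like this minimal sketch (the zero-argument calls imply every factory has usable defaults):

import pytest
import torch
from common_utils import make_bounding_boxes, make_image, make_segmentation_mask, make_video

@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
def test_is_tensor(make_input):
    # Datapoints subclass torch.Tensor, so any factory output passes this check.
    assert isinstance(make_input(), torch.Tensor)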
32 changes: 16 additions & 16 deletions test/test_prototype_transforms.py
@@ -4,21 +4,21 @@
import pytest
import torch

-from common_utils import (
-    assert_equal,
-    DEFAULT_EXTRA_DIMS,
-    make_bounding_box,
-    make_detection_mask,
-    make_image,
-    make_video,
-)
+from common_utils import assert_equal

from prototype_common_utils import make_label

from torchvision.datapoints import BoundingBoxes, BoundingBoxFormat, Image, Mask, Video
from torchvision.prototype import datapoints, transforms
from torchvision.transforms.v2.functional import clamp_bounding_boxes, InterpolationMode, pil_to_tensor, to_pil_image
from torchvision.transforms.v2.utils import check_type, is_pure_tensor
+from transforms_v2_legacy_utils import (
+    DEFAULT_EXTRA_DIMS,
+    make_bounding_boxes,
+    make_detection_mask,
+    make_image,
+    make_video,
+)

BATCH_EXTRA_DIMS = [extra_dims for extra_dims in DEFAULT_EXTRA_DIMS if extra_dims]

@@ -167,7 +167,7 @@ def test__get_params(self, mocker):

flat_inputs = [
make_image(size=canvas_size, color_space="RGB"),
-make_bounding_box(format=BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=batch_shape),
+make_bounding_boxes(format=BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=batch_shape),
]
params = transform._get_params(flat_inputs)

@@ -202,7 +202,7 @@ def test__transform_culling(self, mocker):
),
)

-bounding_boxes = make_bounding_box(
+bounding_boxes = make_bounding_boxes(
format=BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=(batch_size,)
)
masks = make_detection_mask(size=canvas_size, batch_dims=(batch_size,))
@@ -240,7 +240,7 @@ def test__transform_bounding_boxes_clamping(self, mocker):
),
)

-bounding_boxes = make_bounding_box(
+bounding_boxes = make_bounding_boxes(
format=BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=(batch_size,)
)
mock = mocker.patch(
@@ -283,7 +283,7 @@ class TestPermuteDimensions:
def test_call(self, dims, inverse_dims):
sample = dict(
image=make_image(),
-bounding_boxes=make_bounding_box(format=BoundingBoxFormat.XYXY),
+bounding_boxes=make_bounding_boxes(format=BoundingBoxFormat.XYXY),
video=make_video(),
str="str",
int=0,
@@ -327,7 +327,7 @@ class TestTransposeDimensions:
def test_call(self, dims):
sample = dict(
image=make_image(),
-bounding_boxes=make_bounding_box(format=BoundingBoxFormat.XYXY),
+bounding_boxes=make_bounding_boxes(format=BoundingBoxFormat.XYXY),
video=make_video(),
str="str",
int=0,
@@ -389,7 +389,7 @@ def make_datapoints():

pil_image = to_pil_image(make_image(size=size, color_space="RGB"))
target = {
"boxes": make_bounding_box(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"labels": make_label(extra_dims=(num_objects,), categories=80),
"masks": make_detection_mask(size=size, num_objects=num_objects, dtype=torch.long),
}
@@ -398,7 +398,7 @@ def make_datapoints():

tensor_image = torch.Tensor(make_image(size=size, color_space="RGB"))
target = {
"boxes": make_bounding_box(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"labels": make_label(extra_dims=(num_objects,), categories=80),
"masks": make_detection_mask(size=size, num_objects=num_objects, dtype=torch.long),
}
@@ -407,7 +407,7 @@ def make_datapoints():

datapoint_image = make_image(size=size, color_space="RGB")
target = {
"boxes": make_bounding_box(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"labels": make_label(extra_dims=(num_objects,), categories=80),
"masks": make_detection_mask(size=size, num_objects=num_objects, dtype=torch.long),
}
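The three make_datapoints branches above wrap the same detection-style target around a PIL image, a plain tensor, and a datapoint image. Condensed into one sample, the contract is (helper calls copied from the diff; the concrete size and object count are assumptions):

import torch
from prototype_common_utils import make_label
from transforms_v2_legacy_utils import make_bounding_boxes, make_detection_mask, make_image

size, num_objects = (32, 32), 4
image = make_image(size=size, color_space="RGB")
target = {
    "boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
    "labels": make_label(extra_dims=(num_objects,), categories=80),
    "masks": make_detection_mask(size=size, num_objects=num_objects, dtype=torch.long),
}
sample = (image, target)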
30 changes: 14 additions & 16 deletions test/test_transforms_v2.py
@@ -11,25 +11,23 @@
import torch
import torchvision.transforms.v2 as transforms

-from common_utils import (
-    assert_equal,
-    assert_run_python_script,
-    cpu_and_cuda,
-    make_bounding_box,
+from common_utils import assert_equal, assert_run_python_script, cpu_and_cuda
+from torch.utils._pytree import tree_flatten, tree_unflatten
+from torchvision import datapoints
+from torchvision.ops.boxes import box_iou
+from torchvision.transforms.functional import to_pil_image
+from torchvision.transforms.v2 import functional as F
+from torchvision.transforms.v2.utils import check_type, is_pure_tensor, query_chw
+from transforms_v2_legacy_utils import (
+    make_bounding_boxes,
    make_detection_mask,
    make_image,
    make_images,
    make_multiple_bounding_boxes,
    make_segmentation_mask,
    make_video,
    make_videos,
)
-from torch.utils._pytree import tree_flatten, tree_unflatten
-from torchvision import datapoints
-from torchvision.ops.boxes import box_iou
-from torchvision.transforms.functional import to_pil_image
-from torchvision.transforms.v2 import functional as F
-from torchvision.transforms.v2.utils import check_type, is_pure_tensor, query_chw


def make_vanilla_tensor_images(*args, **kwargs):
@@ -45,7 +43,7 @@ def make_pil_images(*args, **kwargs):


def make_vanilla_tensor_bounding_boxes(*args, **kwargs):
-for bounding_boxes in make_bounding_boxes(*args, **kwargs):
+for bounding_boxes in make_multiple_bounding_boxes(*args, **kwargs):
yield bounding_boxes.data


@@ -180,13 +178,13 @@ def test_common(self, transform, adapter, container_type, image_or_video, device
image_datapoint=make_image(size=canvas_size),
video_datapoint=make_video(size=canvas_size),
image_pil=next(make_pil_images(sizes=[canvas_size], color_spaces=["RGB"])),
-bounding_boxes_xyxy=make_bounding_box(
+bounding_boxes_xyxy=make_bounding_boxes(
format=datapoints.BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=(3,)
),
-bounding_boxes_xywh=make_bounding_box(
+bounding_boxes_xywh=make_bounding_boxes(
format=datapoints.BoundingBoxFormat.XYWH, canvas_size=canvas_size, batch_dims=(4,)
),
-bounding_boxes_cxcywh=make_bounding_box(
+bounding_boxes_cxcywh=make_bounding_boxes(
format=datapoints.BoundingBoxFormat.CXCYWH, canvas_size=canvas_size, batch_dims=(5,)
),
bounding_boxes_degenerate_xyxy=datapoints.BoundingBoxes(
@@ -813,7 +811,7 @@ def test__transform(self, mocker):

size = (32, 24)
image = make_image(size)
-bboxes = make_bounding_box(format="XYXY", canvas_size=size, batch_dims=(6,))
+bboxes = make_bounding_boxes(format="XYXY", canvas_size=size, batch_dims=(6,))
masks = make_detection_mask(size, num_objects=6)

sample = [image, bboxes, masks]
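One naming subtlety in this file: the old plural generator make_bounding_boxes becomes make_multiple_bounding_boxes, which frees the plural name for the single-sample maker. The .data attribute then unwraps each yielded datapoint back to a plain tensor, which is what the "vanilla tensor" fixtures mean; a usage sketch (zero-argument defaults assumed):

from transforms_v2_legacy_utils import make_multiple_bounding_boxes

for boxes in make_multiple_bounding_boxes():
    plain = boxes.data  # plain-tensor view of the BoundingBoxes datapoint
    print(plain.shape, plain.dtype)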
26 changes: 12 additions & 14 deletions test/test_transforms_v2_consistency.py
@@ -12,17 +12,7 @@

import torch
import torchvision.transforms.v2 as v2_transforms
-from common_utils import (
-    ArgsKwargs,
-    assert_close,
-    assert_equal,
-    make_bounding_box,
-    make_detection_mask,
-    make_image,
-    make_images,
-    make_segmentation_mask,
-    set_rng_seed,
-)
+from common_utils import assert_close, assert_equal, set_rng_seed
from torch import nn
from torchvision import datapoints, transforms as legacy_transforms
from torchvision._utils import sequence_to_str
@@ -32,6 +22,14 @@
from torchvision.transforms.v2._utils import _get_fill
from torchvision.transforms.v2.functional import to_pil_image
from torchvision.transforms.v2.utils import query_size
+from transforms_v2_legacy_utils import (
+    ArgsKwargs,
+    make_bounding_boxes,
+    make_detection_mask,
+    make_image,
+    make_images,
+    make_segmentation_mask,
+)

DEFAULT_MAKE_IMAGES_KWARGS = dict(color_spaces=["RGB"], extra_dims=[(4,)])

@@ -1090,7 +1088,7 @@ def make_label(extra_dims, categories):

pil_image = to_pil_image(make_image(size=size, color_space="RGB"))
target = {
"boxes": make_bounding_box(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"labels": make_label(extra_dims=(num_objects,), categories=80),
}
if with_mask:
@@ -1100,7 +1098,7 @@ def make_label(extra_dims, categories):

tensor_image = torch.Tensor(make_image(size=size, color_space="RGB", dtype=torch.float32))
target = {
"boxes": make_bounding_box(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"labels": make_label(extra_dims=(num_objects,), categories=80),
}
if with_mask:
@@ -1110,7 +1108,7 @@ def make_label(extra_dims, categories):

datapoint_image = make_image(size=size, color_space="RGB", dtype=torch.float32)
target = {
"boxes": make_bounding_box(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"labels": make_label(extra_dims=(num_objects,), categories=80),
}
if with_mask:
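ArgsKwargs, relocated above with the other legacy helpers, is assumed to be a small container pairing positional and keyword arguments so one spec can drive both the v1 and v2 transform in a consistency check. A sketch of the assumed shape:

class ArgsKwargs:
    # Hypothetical stand-in, not the torchvision source: bundle the
    # arguments of a call so a test can replay them later.
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    def __iter__(self):
        yield self.args
        yield self.kwargs

args, kwargs = ArgsKwargs(26, padding=2)
# args == (26,), kwargs == {'padding': 2}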
(The remaining six changed files were not loaded in this view.)
