
Commit e034624

Merge branch 'main' into add_objc_clang_format

2 parents 61d8b32 + b9a1984

8 files changed: +53 additions, -65 deletions


packaging/wheel/relocate.py

Lines changed: 1 addition & 11 deletions
@@ -2,7 +2,6 @@

 import glob
 import hashlib
-import io

 # Standard library imports
 import os
@@ -65,21 +64,12 @@
 PYTHON_VERSION = sys.version_info


-def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
-    """Yield pieces of data from a file-like object until EOF."""
-    while True:
-        chunk = file.read(size)
-        if not chunk:
-            break
-        yield chunk
-
-
 def rehash(path, blocksize=1 << 20):
     """Return (hash, length) for path using hashlib.sha256()"""
     h = hashlib.sha256()
     length = 0
     with open(path, "rb") as f:
-        for block in read_chunks(f, size=blocksize):
+        while block := f.read(blocksize):
             length += len(block)
             h.update(block)
     digest = "sha256=" + urlsafe_b64encode(h.digest()).decode("latin1").rstrip("=")

test/test_functional_tensor.py

Lines changed: 0 additions & 34 deletions
@@ -609,21 +609,6 @@ def test_resize_antialias(device, dt, size, interpolation):
     assert_equal(resized_tensor, resize_result)


-@needs_cuda
-@pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC])
-def test_assert_resize_antialias(interpolation):
-
-    # Checks implementation on very large scales
-    # and catch TORCH_CHECK inside PyTorch implementation
-    torch.manual_seed(12)
-    tensor, _ = _create_data(1000, 1000, device="cuda")
-
-    # Error message is not yet updated in pytorch nightly
-    # with pytest.raises(RuntimeError, match=r"Provided interpolation parameters can not be handled"):
-    with pytest.raises(RuntimeError, match=r"Too much shared memory required"):
-        F.resize(tensor, size=(5, 5), interpolation=interpolation, antialias=True)
-
-
 def test_resize_antialias_default_warning():

     img = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8)
@@ -641,25 +626,6 @@ def test_resize_antialias_default_warning():
     F.resized_crop(img, 0, 0, 10, 10, size=(20, 20), interpolation=NEAREST)


-@pytest.mark.parametrize("device", cpu_and_gpu())
-@pytest.mark.parametrize("dt", [torch.float32, torch.float64, torch.float16])
-@pytest.mark.parametrize("size", [[10, 7], [10, 42], [42, 7]])
-@pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC])
-def test_interpolate_antialias_backward(device, dt, size, interpolation):
-
-    if dt == torch.float16 and device == "cpu":
-        # skip float16 on CPU case
-        return
-
-    torch.manual_seed(12)
-    x = (torch.rand(1, 32, 29, 3, dtype=torch.double, device=device).permute(0, 3, 1, 2).requires_grad_(True),)
-    resize = partial(F.resize, size=size, interpolation=interpolation, antialias=True)
-    assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)
-
-    x = (torch.rand(1, 3, 32, 29, dtype=torch.double, device=device, requires_grad=True),)
-    assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)
-
-
 def check_functional_vs_PIL_vs_scripted(
     fn, fn_pil, fn_t, config, device, dtype, channels=3, tol=2.0 + 1e-10, agg_method="max"
 ):

test/test_transforms_v2_consistency.py

Lines changed: 18 additions & 4 deletions
@@ -87,10 +87,8 @@ def __init__(
         ArgsKwargs([32]),
         ArgsKwargs((32, 29)),
         ArgsKwargs((31, 28), interpolation=v2_transforms.InterpolationMode.NEAREST),
-        ArgsKwargs((33, 26), interpolation=v2_transforms.InterpolationMode.BICUBIC),
         ArgsKwargs((30, 27), interpolation=PIL.Image.NEAREST),
         ArgsKwargs((35, 29), interpolation=PIL.Image.BILINEAR),
-        ArgsKwargs((34, 25), interpolation=PIL.Image.BICUBIC),
         NotScriptableArgsKwargs(31, max_size=32),
         ArgsKwargs([31], max_size=32),
         NotScriptableArgsKwargs(30, max_size=100),
@@ -101,6 +99,15 @@ def __init__(
         # atol=1 due to Resize v2 is using native uint8 interpolate path for bilinear and nearest modes
         closeness_kwargs=dict(rtol=0, atol=1),
     ),
+    ConsistencyConfig(
+        v2_transforms.Resize,
+        legacy_transforms.Resize,
+        [
+            ArgsKwargs((33, 26), interpolation=v2_transforms.InterpolationMode.BICUBIC, antialias=True),
+            ArgsKwargs((34, 25), interpolation=PIL.Image.BICUBIC, antialias=True),
+        ],
+        closeness_kwargs=dict(rtol=0, atol=21),
+    ),
     ConsistencyConfig(
         v2_transforms.CenterCrop,
         legacy_transforms.CenterCrop,
@@ -309,15 +316,22 @@ def __init__(
         ArgsKwargs(17, scale=(0.3, 0.7)),
         ArgsKwargs(25, ratio=(0.5, 1.5)),
         ArgsKwargs((31, 28), interpolation=v2_transforms.InterpolationMode.NEAREST),
-        ArgsKwargs((33, 26), interpolation=v2_transforms.InterpolationMode.BICUBIC),
         ArgsKwargs((31, 28), interpolation=PIL.Image.NEAREST),
-        ArgsKwargs((33, 26), interpolation=PIL.Image.BICUBIC),
         ArgsKwargs((29, 32), antialias=False),
         ArgsKwargs((28, 31), antialias=True),
         ],
         # atol=1 due to Resize v2 is using native uint8 interpolate path for bilinear and nearest modes
         closeness_kwargs=dict(rtol=0, atol=1),
     ),
+    ConsistencyConfig(
+        v2_transforms.RandomResizedCrop,
+        legacy_transforms.RandomResizedCrop,
+        [
+            ArgsKwargs((33, 26), interpolation=v2_transforms.InterpolationMode.BICUBIC, antialias=True),
+            ArgsKwargs((33, 26), interpolation=PIL.Image.BICUBIC, antialias=True),
+        ],
+        closeness_kwargs=dict(rtol=0, atol=21),
+    ),
     ConsistencyConfig(
         v2_transforms.RandomErasing,
         legacy_transforms.RandomErasing,
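The two new `ConsistencyConfig` entries reinstate BICUBIC coverage with `antialias=True` and a wider tolerance (`atol=21`), since the v2 uint8 bicubic path can deviate more from the legacy output than bilinear or nearest. A hedged sketch of the comparison such a config encodes, using the public `Resize` transforms; the input image is illustrative, and 21 is the budget the config allows, not a guaranteed maximum for every input:

import torch
from torchvision import transforms as legacy_transforms
from torchvision.transforms import v2 as v2_transforms

img = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8)
kwargs = dict(interpolation=v2_transforms.InterpolationMode.BICUBIC, antialias=True)

out_v2 = v2_transforms.Resize((33, 26), **kwargs)(img)
out_v1 = legacy_transforms.Resize((33, 26), **kwargs)(img)

# The consistency config compares the two outputs with rtol=0, atol=21.
max_abs_diff = (out_v2.int() - out_v1.int()).abs().max().item()
print(max_abs_diff)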

test/transforms_v2_kernel_infos.py

Lines changed: 23 additions & 5 deletions
@@ -257,17 +257,20 @@ def sample_inputs_resize_image_tensor():

     for image_loader, interpolation in itertools.product(
         make_image_loaders(sizes=["random"], color_spaces=["RGB"]),
-        [
-            F.InterpolationMode.NEAREST,
-            F.InterpolationMode.BILINEAR,
-            F.InterpolationMode.BICUBIC,
-        ],
+        [F.InterpolationMode.NEAREST, F.InterpolationMode.BILINEAR],
     ):
         yield ArgsKwargs(image_loader, size=[min(image_loader.spatial_size) + 1], interpolation=interpolation)

     yield ArgsKwargs(make_image_loader(size=(11, 17)), size=20, max_size=25)


+def sample_inputs_resize_image_tensor_bicubic():
+    for image_loader, interpolation in itertools.product(
+        make_image_loaders(sizes=["random"], color_spaces=["RGB"]), [F.InterpolationMode.BICUBIC]
+    ):
+        yield ArgsKwargs(image_loader, size=[min(image_loader.spatial_size) + 1], interpolation=interpolation)
+
+
 @pil_reference_wrapper
 def reference_resize_image_tensor(*args, **kwargs):
     if not kwargs.pop("antialias", False) and kwargs.get("interpolation", F.InterpolationMode.BILINEAR) in {
@@ -364,6 +367,21 @@ def reference_inputs_resize_bounding_box():
             xfail_jit_python_scalar_arg("size"),
         ],
     ),
+    KernelInfo(
+        F.resize_image_tensor,
+        sample_inputs_fn=sample_inputs_resize_image_tensor_bicubic,
+        reference_fn=reference_resize_image_tensor,
+        reference_inputs_fn=reference_inputs_resize_image_tensor,
+        float32_vs_uint8=True,
+        closeness_kwargs={
+            **pil_reference_pixel_difference(10, mae=True),
+            **cuda_vs_cpu_pixel_difference(atol=30),
+            **float32_vs_uint8_pixel_difference(1, mae=True),
+        },
+        test_marks=[
+            xfail_jit_python_scalar_arg("size"),
+        ],
+    ),
     KernelInfo(
         F.resize_bounding_box,
         sample_inputs_fn=sample_inputs_resize_bounding_box,
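The new `KernelInfo` checks the uint8 bicubic kernel against a PIL reference with a mean-absolute-error budget (`pil_reference_pixel_difference(10, mae=True)`) rather than a strict per-pixel bound, because bicubic overshoot makes individual pixels deviate. The helpers in the diff are internal test utilities; a standalone sketch of the metric itself, using only the stable functional API and PIL, might look like this:

import PIL.Image
import torch
import torchvision.transforms.functional as F_v1
from torchvision.transforms import InterpolationMode

img = torch.randint(0, 256, size=(3, 32, 32), dtype=torch.uint8)

# Tensor kernel: uint8 input, bicubic with antialias.
out_tensor = F_v1.resize(img, [20, 20], interpolation=InterpolationMode.BICUBIC, antialias=True)

# PIL reference: PIL's BICUBIC resize is always antialiased.
ref = F_v1.pil_to_tensor(F_v1.to_pil_image(img).resize((20, 20), resample=PIL.Image.BICUBIC))

# Mean absolute error over all pixels; the KernelInfo budgets up to 10 here.
mae = (out_tensor.float() - ref.float()).abs().mean().item()
print(mae)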

torchvision/datasets/utils.py

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ def calculate_md5(fpath: str, chunk_size: int = 1024 * 1024) -> str:
     else:
         md5 = hashlib.md5()
     with open(fpath, "rb") as f:
-        for chunk in iter(lambda: f.read(chunk_size), b""):
+        while chunk := f.read(chunk_size):
             md5.update(chunk)
     return md5.hexdigest()

torchvision/prototype/datasets/_builtin/README.md

Lines changed: 1 addition & 1 deletion
@@ -91,7 +91,7 @@ import hashlib
 def sha256sum(path, chunk_size=1024 * 1024):
     checksum = hashlib.sha256()
     with open(path, "rb") as f:
-        for chunk in iter(lambda: f.read(chunk_size), b""):
+        while chunk := f.read(chunk_size):
             checksum.update(chunk)
     print(checksum.hexdigest())
 ```

torchvision/prototype/datasets/utils/_resource.py

Lines changed: 1 addition & 1 deletion
@@ -136,7 +136,7 @@ def download(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool
     def _check_sha256(self, path: pathlib.Path, *, chunk_size: int = 1024 * 1024) -> None:
         hash = hashlib.sha256()
         with open(path, "rb") as file:
-            for chunk in iter(lambda: file.read(chunk_size), b""):
+            while chunk := file.read(chunk_size):
                 hash.update(chunk)
         sha256 = hash.hexdigest()
         if sha256 != self.sha256:

torchvision/transforms/v2/functional/_geometry.py

Lines changed: 8 additions & 8 deletions
@@ -190,14 +190,13 @@ def resize_image_tensor(
         if interpolation == InterpolationMode.NEAREST or interpolation == InterpolationMode.NEAREST_EXACT:
             # uint8 dtype can be included for cpu and cuda input if nearest mode
             acceptable_dtypes.append(torch.uint8)
-        elif (
-            interpolation == InterpolationMode.BILINEAR
-            and image.device.type == "cpu"
-            and "AVX2" in torch.backends.cpu.get_cpu_capability()
-        ):
-            # uint8 dtype support for bilinear mode is limited to cpu and
-            # according to our benchmarks non-AVX CPUs should prefer u8->f32->interpolate->u8 path
-            acceptable_dtypes.append(torch.uint8)
+        elif image.device.type == "cpu":
+            # uint8 dtype support for bilinear and bicubic is limited to cpu and
+            # according to our benchmarks, non-AVX CPUs should still prefer u8->f32->interpolate->u8 path for bilinear
+            if (interpolation == InterpolationMode.BILINEAR and "AVX2" in torch.backends.cpu.get_cpu_capability()) or (
+                interpolation == InterpolationMode.BICUBIC
+            ):
+                acceptable_dtypes.append(torch.uint8)

         strides = image.stride()
         if image.is_contiguous(memory_format=torch.channels_last) and image.shape[0] == 1 and numel != strides[0]:
@@ -227,6 +226,7 @@ def resize_image_tensor(

         if need_cast:
             if interpolation == InterpolationMode.BICUBIC and dtype == torch.uint8:
+                # This path is hit on non-AVX archs, or on GPU.
                 image = image.clamp_(min=0, max=255)
             if dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
                 image = image.round_()
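With this change, a uint8 image resized with BICUBIC on CPU can stay in uint8 through the native interpolate path, while inputs that still take the u8->f32->interpolate->u8 route (non-AVX CPUs, GPU) are clamped back into [0, 255] before the cast, since bicubic interpolation can overshoot. A small usage sketch of the public v2 functional API this affects; which internal path is taken is an implementation detail:

import torch
from torchvision.transforms import InterpolationMode
from torchvision.transforms.v2 import functional as F

img_u8 = torch.randint(0, 256, size=(1, 3, 64, 64), dtype=torch.uint8)

# uint8 input, bicubic + antialias: the result is uint8 and stays within [0, 255]
# whether the native uint8 kernel or the float round-trip path is used internally.
out = F.resize(img_u8, size=[32, 32], interpolation=InterpolationMode.BICUBIC, antialias=True)
print(out.dtype, out.min().item(), out.max().item())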

0 commit comments
