
Commit 34595cf

Even more test data cached (#40636)
fix

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
1 parent f22ec7f commit 34595cf

49 files changed: +331 additions, -242 deletions

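The change repeated throughout this commit swaps network fetches for cached local files: test images that were previously downloaded with `requests` and decoded with `PIL.Image.open` are now loaded via `load_image` on a path resolved by the `url_to_local_path` test helper, which is expected to return a locally cached copy of the file when one is available. A minimal before/after sketch of the pattern (illustrative only; the absolute import path of the test helper is assumed, the tests themselves use a relative import):

# Before: fetch the image over the network on every test run
import requests
from io import BytesIO
from PIL import Image

image = Image.open(BytesIO(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg").content))

# After: resolve the URL to a cached local file when possible, then load it
from transformers.image_utils import load_image
from tests.test_processing_common import url_to_local_path  # absolute import path assumed

image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))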

tests/models/aria/test_processing_aria.py

Lines changed: 18 additions & 20 deletions
@@ -15,23 +15,17 @@
 import shutil
 import tempfile
 import unittest
-from io import BytesIO

 import numpy as np
-import requests

 from transformers import AriaProcessor
+from transformers.image_utils import load_image
 from transformers.models.auto.processing_auto import AutoProcessor
 from transformers.testing_utils import require_torch, require_vision
-from transformers.utils import is_vision_available

 from ...test_processing_common import ProcessorTesterMixin, url_to_local_path


-if is_vision_available():
-    from PIL import Image
-
-
 @require_torch
 @require_vision
 class AriaProcessorTest(ProcessorTesterMixin, unittest.TestCase):
@@ -42,21 +36,17 @@ def setUpClass(cls):
         cls.tmpdirname = tempfile.mkdtemp()
         processor = AriaProcessor.from_pretrained("m-ric/Aria_hf_2", size_conversion={490: 2, 980: 2})
         processor.save_pretrained(cls.tmpdirname)
-        cls.image1 = Image.open(
-            BytesIO(
-                requests.get(
-                    "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
-                ).content
+        cls.image1 = load_image(
+            url_to_local_path(
+                "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
             )
         )
-        cls.image2 = Image.open(
-            BytesIO(requests.get("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg").content)
+        cls.image2 = load_image(
+            url_to_local_path("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
         )
-        cls.image3 = Image.open(
-            BytesIO(
-                requests.get(
-                    "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg"
-                ).content
+        cls.image3 = load_image(
+            url_to_local_path(
+                "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg"
             )
         )
         cls.bos_token = "<|im_start|>"
@@ -93,6 +83,9 @@ def get_processor(self, **kwargs):

     @classmethod
     def tearDownClass(cls):
+        cls.image1.close()
+        cls.image2.close()
+        cls.image3.close()
         shutil.rmtree(cls.tmpdirname, ignore_errors=True)

     # Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens
@@ -273,7 +266,12 @@ def test_image_chat_template_accepts_processing_kwargs(self):

         # Now test the ability to return dict
         messages[0][0]["content"].append(
-            {"type": "image", "url": url_to_local_path("https://www.ilankelman.org/stopsigns/australia.jpg")}
+            {
+                "type": "image",
+                "url": url_to_local_path(
+                    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
+                ),
+            }
         )
         out_dict = processor.apply_chat_template(
             messages,
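
For context, `url_to_local_path` lives in `tests/test_processing_common.py` and its implementation is not part of this diff. A rough, purely hypothetical sketch of the idea behind it: hand back a pre-downloaded copy from a local cache directory when one exists, and fall back to the original URL otherwise, so the tests still run (just slower) without the cache. The environment variable name below is made up for illustration.

import os
from urllib.parse import urlparse


def url_to_local_path(url: str) -> str:
    # Hypothetical sketch; the real helper in tests/test_processing_common.py may differ.
    cache_dir = os.environ.get("TEST_IMAGE_CACHE_DIR", "")  # variable name is illustrative only
    candidate = os.path.join(cache_dir, os.path.basename(urlparse(url).path))
    # Prefer a cached local copy; otherwise keep using the remote URL.
    return candidate if cache_dir and os.path.isfile(candidate) else url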

tests/models/aya_vision/test_modeling_aya_vision.py

Lines changed: 4 additions & 1 deletion
@@ -379,7 +379,10 @@ def test_small_model_integration_batched_generate(self):
             {
                 "role": "user",
                 "content": [
-                    {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
+                    {
+                        "type": "image",
+                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
+                    },
                     {"type": "text", "text": "Describe this image"},
                 ],
             },

tests/models/bridgetower/test_image_processing_bridgetower.py

Lines changed: 3 additions & 7 deletions
@@ -16,17 +16,15 @@
 import unittest
 from typing import Optional, Union

-import requests
-
+from transformers.image_utils import load_image
 from transformers.testing_utils import require_torch, require_vision
 from transformers.utils import is_torchvision_available, is_vision_available

 from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
+from ...test_processing_common import url_to_local_path


 if is_vision_available():
-    from PIL import Image
-
     from transformers import BridgeTowerImageProcessor

 if is_torchvision_available():
@@ -130,9 +128,7 @@ def test_slow_fast_equivalence(self):
         if self.image_processing_class is None or self.fast_image_processing_class is None:
             self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

-        dummy_image = Image.open(
-            requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
-        )
+        dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
         image_processor_slow = self.image_processing_class(**self.image_processor_dict)
         image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

tests/models/cohere2_vision/test_modeling_cohere2_vision.py

Lines changed: 4 additions & 1 deletion
@@ -330,7 +330,10 @@ def test_model_integration_batched_generate(self):
             {
                 "role": "user",
                 "content": [
-                    {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
+                    {
+                        "type": "image",
+                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
+                    },
                     {"type": "text", "text": "Describe this image"},
                 ],
             },

tests/models/deepseek_vl/test_modeling_deepseek_vl.py

Lines changed: 4 additions & 1 deletion
@@ -333,7 +333,10 @@ def test_model_text_generation_with_multi_image(self):
                     {"type": "text", "text": "What's the difference between"},
                     {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
                     {"type": "text", "text": " and "},
-                    {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
+                    {
+                        "type": "image",
+                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
+                    },
                 ],
             }
         ]

tests/models/deepseek_vl_hybrid/test_image_processing_deepseek_vl_hybrid.py

Lines changed: 3 additions & 4 deletions
@@ -16,12 +16,13 @@
 import unittest

 import numpy as np
-import requests

+from transformers.image_utils import load_image
 from transformers.testing_utils import require_torch, require_vision
 from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

 from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
+from ...test_processing_common import url_to_local_path


 if is_torch_available():
@@ -226,9 +227,7 @@ def test_slow_fast_equivalence(self):
         if self.image_processing_class is None or self.fast_image_processing_class is None:
             self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

-        dummy_image = Image.open(
-            requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
-        )
+        dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
         image_processor_slow = self.image_processing_class(**self.image_processor_dict)
         image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py

Lines changed: 4 additions & 1 deletion
@@ -382,7 +382,10 @@ def test_model_text_generation_with_multi_image(self):
                     {"type": "text", "text": "What's the difference between"},
                     {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
                     {"type": "text", "text": " and "},
-                    {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
+                    {
+                        "type": "image",
+                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
+                    },
                 ],
             }
         ]

tests/models/eomt/test_image_processing_eomt.py

Lines changed: 5 additions & 4 deletions
@@ -16,13 +16,14 @@
 import unittest

 import numpy as np
-import requests
 from datasets import load_dataset

+from transformers.image_utils import load_image
 from transformers.testing_utils import require_torch, require_vision
 from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

 from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
+from ...test_processing_common import url_to_local_path


 if is_torch_available():
@@ -261,7 +262,7 @@ def test_post_process_semantic_segmentation(self):
         processor = self.image_processing_class(**self.image_processor_dict)
         # Set longest_edge to None to test for semantic segmentatiom.
         processor.size = {"shortest_edge": 18, "longest_edge": None}
-        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
+        image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))

         inputs = processor(images=image, do_split_image=True, return_tensors="pt")
         patch_offsets = inputs["patch_offsets"]
@@ -276,7 +277,7 @@ def test_post_process_panoptic_segmentation(self):

     def test_post_process_panoptic_segmentation(self):
         processor = self.image_processing_class(**self.image_processor_dict)
-        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
+        image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))

         original_sizes = [image.size[::-1], image.size[::-1]]

@@ -293,7 +294,7 @@ def test_post_process_instance_segmentation(self):

     def test_post_process_instance_segmentation(self):
         processor = self.image_processing_class(**self.image_processor_dict)
-        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
+        image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))

         original_sizes = [image.size[::-1], image.size[::-1]]

tests/models/fuyu/test_processing_fuyu.py

Lines changed: 6 additions & 11 deletions
@@ -1,25 +1,18 @@
-import io
 import tempfile
 import unittest
 from shutil import rmtree

-import requests
-
 from transformers import (
     AutoProcessor,
     AutoTokenizer,
     FuyuImageProcessor,
     FuyuProcessor,
     is_torch_available,
-    is_vision_available,
 )
+from transformers.image_utils import load_image
 from transformers.testing_utils import require_torch, require_vision

-from ...test_processing_common import ProcessorTesterMixin
-
-
-if is_vision_available():
-    from PIL import Image
+from ...test_processing_common import ProcessorTesterMixin, url_to_local_path


 if is_torch_available():
@@ -44,8 +37,10 @@ def setUpClass(cls):
         processor.save_pretrained(cls.tmpdirname)

         cls.text_prompt = "Generate a coco-style caption.\\n"
-        bus_image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
-        cls.bus_image_pil = Image.open(io.BytesIO(requests.get(bus_image_url).content))
+        bus_image_url = url_to_local_path(
+            "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
+        )
+        cls.bus_image_pil = load_image(bus_image_url)

     @classmethod
     def tearDownClass(cls):

tests/models/gemma3/test_modeling_gemma3.py

Lines changed: 12 additions & 3 deletions
@@ -427,7 +427,10 @@ def test_model_4b_batch(self):
                         "type": "image",
                         "url": "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
                     },
-                    {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
+                    {
+                        "type": "image",
+                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
+                    },
                     {"type": "text", "text": "Are these images identical?"},
                 ],
             },
@@ -545,7 +548,10 @@ def test_model_4b_batch_crops(self):
                         "type": "image",
                         "url": "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
                     },
-                    {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
+                    {
+                        "type": "image",
+                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
+                    },
                     {"type": "text", "text": "Are these images identical?"},
                 ],
             },
@@ -605,7 +611,10 @@ def test_model_4b_multiimage(self):
             {
                 "role": "user",
                 "content": [
-                    {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
+                    {
+                        "type": "image",
+                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
+                    },
                     {"type": "text", "text": "What do you see here?"},
                 ],
             },
