[tests] make more tests device-agnostic (#33580)
* enable

* fix

* add xpu skip

* add marker

* skip for xpu

* add more

* enable on accelerator

* add more cases

* add more tests

* add more
faaany authored Sep 20, 2024
1 parent 31650a5 commit 8bd1f2f
Showing 7 changed files with 24 additions and 17 deletions.
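
The pattern repeated across these diffs: CUDA-specific decorators and calls from `transformers.testing_utils` are swapped for device-agnostic equivalents, so the tests run on any supported accelerator (CUDA, XPU, ...) rather than only on NVIDIA GPUs. A minimal sketch of a test written this way (the test class and the tiny model below are placeholders for illustration, not code from the diff):

import gc
import unittest

import torch
from transformers.testing_utils import (
    backend_empty_cache,        # device-agnostic replacement for torch.cuda.empty_cache()
    require_torch_accelerator,  # replaces require_torch_gpu: any accelerator qualifies
    torch_device,               # resolved device string ("cuda", "xpu", ...) instead of a hard-coded "cuda"
)


@require_torch_accelerator
class DeviceAgnosticExampleTest(unittest.TestCase):
    # Hypothetical test class illustrating the pattern applied in this commit.

    def tearDown(self):
        gc.collect()
        backend_empty_cache(torch_device)  # was: torch.cuda.empty_cache()

    def test_forward_on_accelerator(self):
        model = torch.nn.Linear(4, 4).to(torch_device)   # was: .to("cuda")
        inputs = torch.ones(2, 4, device=torch_device)
        with torch.no_grad():
            outputs = model(inputs)
        self.assertEqual(outputs.shape, (2, 4))
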
8 changes: 4 additions & 4 deletions tests/models/grounding_dino/test_modeling_grounding_dino.py
@@ -30,7 +30,7 @@
 from transformers.testing_utils import (
     require_timm,
     require_torch,
-    require_torch_gpu,
+    require_torch_accelerator,
     require_vision,
     slow,
     torch_device,
@@ -676,7 +676,7 @@ def test_inference_object_detection_head(self):
         self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes, atol=1e-2))
         self.assertListEqual(results["labels"], expected_labels)

-    @require_torch_gpu
+    @require_torch_accelerator
     def test_inference_object_detection_head_equivalence_cpu_gpu(self):
         processor = self.default_processor
         image = prepare_img()
@@ -690,8 +690,8 @@ def test_inference_object_detection_head_equivalence_cpu_gpu(self):
             cpu_outputs = model(**encoding)

         # 2. run model on GPU
-        model.to("cuda")
-        encoding = encoding.to("cuda")
+        model.to(torch_device)
+        encoding = encoding.to(torch_device)
         with torch.no_grad():
             gpu_outputs = model(**encoding)
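
A condensed sketch of the CPU-vs-accelerator equivalence check after this change; the linear model and the helper name below are stand-ins, not the Grounding DINO objects used in the actual test:

import torch
from transformers.testing_utils import torch_device


def check_cpu_accelerator_equivalence(model: torch.nn.Module, inputs: torch.Tensor) -> None:
    # Hypothetical helper mirroring the structure of
    # test_inference_object_detection_head_equivalence_cpu_gpu after the edit above.
    # 1. run the model on CPU
    with torch.no_grad():
        cpu_out = model(inputs)

    # 2. run the model on whichever accelerator torch_device resolved to (CUDA, XPU, ...)
    model.to(torch_device)            # was: model.to("cuda")
    inputs = inputs.to(torch_device)  # was: encoding.to("cuda")
    with torch.no_grad():
        accel_out = model(inputs)

    # the two runs should agree up to numerical noise
    torch.testing.assert_close(cpu_out, accel_out.cpu(), atol=1e-3, rtol=1e-3)


check_cpu_accelerator_equivalence(torch.nn.Linear(8, 8), torch.randn(2, 8))
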
6 changes: 4 additions & 2 deletions tests/models/llama/test_modeling_llama.py
@@ -24,10 +24,12 @@

 from transformers import AutoTokenizer, LlamaConfig, StaticCache, is_torch_available, set_seed
 from transformers.testing_utils import (
+    backend_empty_cache,
     require_bitsandbytes,
     require_flash_attn,
     require_read_token,
     require_torch,
+    require_torch_accelerator,
     require_torch_gpu,
     require_torch_sdpa,
     slow,
@@ -899,11 +901,11 @@ def test_compile_static_cache(self):


 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class Mask4DTestHard(unittest.TestCase):
     def tearDown(self):
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def setUp(self):
         model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
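
backend_empty_cache(torch_device) picks the cache-clearing call that matches the active backend instead of assuming CUDA. A rough stand-in to show the idea (this is not the actual transformers implementation):

import torch


def empty_cache_for(device: str) -> None:
    # Simplified stand-in for transformers.testing_utils.backend_empty_cache:
    # dispatch on the device type rather than hard-coding torch.cuda.empty_cache().
    if device.startswith("cuda") and torch.cuda.is_available():
        torch.cuda.empty_cache()
    elif device.startswith("xpu") and hasattr(torch, "xpu"):
        torch.xpu.empty_cache()
    # on "cpu" (or an unrecognized backend) there is nothing to clear


empty_cache_for("cpu")  # no-op example
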
5 changes: 3 additions & 2 deletions tests/models/mistral/test_modeling_mistral.py
@@ -29,6 +29,7 @@
     require_flash_attn,
     require_read_token,
     require_torch,
+    require_torch_accelerator,
     require_torch_gpu,
     require_torch_sdpa,
     slow,
@@ -719,14 +720,14 @@ def test_compile_static_cache(self):


 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class Mask4DTestHard(unittest.TestCase):
     model_name = "mistralai/Mistral-7B-v0.1"
     _model = None

     def tearDown(self):
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     @property
     def model(self):
4 changes: 2 additions & 2 deletions tests/models/recurrent_gemma/test_modeling_recurrent_gemma.py
@@ -21,7 +21,7 @@
     require_bitsandbytes,
     require_read_token,
     require_torch,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -418,7 +418,7 @@ def test_inputs_embeds_matches_input_ids_with_generate(self):
         pass


-@require_torch_gpu
+@require_torch_accelerator
 @slow
 class RecurrentGemmaIntegrationTest(unittest.TestCase):
     input_text = ["Hello I am doing", "Hi today"]
7 changes: 4 additions & 3 deletions tests/models/univnet/test_modeling_univnet.py
@@ -21,9 +21,10 @@

 from transformers import UnivNetConfig, UnivNetFeatureExtractor
 from transformers.testing_utils import (
+    backend_empty_cache,
     is_torch_available,
     require_torch,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -207,13 +208,13 @@ def test_unbatched_inputs_outputs(self):
         self.assertTrue(outputs.shape[0] == 1, msg="Unbatched input should create batched output with bsz = 1")


-@require_torch_gpu
+@require_torch_accelerator
 @slow
 class UnivNetModelIntegrationTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def _load_datasamples(self, num_samples, sampling_rate=24000):
         ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
9 changes: 6 additions & 3 deletions tests/models/whisper/test_modeling_whisper.py
@@ -34,10 +34,12 @@
     is_flaky,
     is_pt_flax_cross_test,
     require_flash_attn,
+    require_non_xpu,
     require_torch,
+    require_torch_accelerator,
     require_torch_fp16,
     require_torch_gpu,
-    require_torch_multi_gpu,
+    require_torch_multi_accelerator,
     require_torchaudio,
     slow,
     torch_device,
@@ -2612,6 +2614,7 @@ def test_generate_with_prompt_ids_and_no_non_prompt_forced_decoder_ids(self):

         self.assertTrue(prompt in text)

+    @require_non_xpu
     @slow
     @require_torch_gpu
     def test_speculative_decoding_distil(self):
@@ -3239,7 +3242,7 @@ def test_whisper_longform_no_speech_detection(self):
         for i in range(num_samples):
             assert decoded_all[i] == EXPECTED_TEXT[i]

-    @require_torch_gpu
+    @require_torch_accelerator
     @slow
     def test_whisper_empty_longform(self):
         processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
@@ -3278,7 +3281,7 @@ def test_whisper_empty_longform(self):
         torch.manual_seed(0)
         model.generate(**inputs, **gen_kwargs)

-    @require_torch_multi_gpu
+    @require_torch_multi_accelerator
     @slow
     def test_whisper_empty_longform_multi_gpu(self):
         processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
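
The Whisper file also covers the two cases that stay device-specific: a test that genuinely needs CUDA keeps @require_torch_gpu and additionally gets @require_non_xpu so it is skipped on XPU, while multi-device tests move from @require_torch_multi_gpu to @require_torch_multi_accelerator. A sketch of how the markers combine; the test class and bodies are placeholders, not code from the diff:

import unittest

from transformers.testing_utils import (
    require_non_xpu,                  # explicitly skip on XPU
    require_torch_accelerator,
    require_torch_gpu,                # kept where CUDA itself is required
    require_torch_multi_accelerator,  # replaces require_torch_multi_gpu
    slow,
)


class MarkerExampleTest(unittest.TestCase):
    # Placeholder test class, not taken from the diff.

    @require_non_xpu
    @slow
    @require_torch_gpu
    def test_cuda_only_path(self):
        ...  # relies on a CUDA-only path, so it is skipped on CPU and on XPU

    @require_torch_accelerator
    @slow
    def test_any_accelerator(self):
        ...

    @require_torch_multi_accelerator
    @slow
    def test_needs_two_devices(self):
        ...
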
2 changes: 1 addition & 1 deletion tests/test_modeling_common.py
@@ -4751,7 +4751,7 @@ def test_static_cache_matches_dynamic(self):

     # For now, Let's focus only on GPU for `torch.compile`
     @slow
-    @require_torch_gpu
+    @require_torch_accelerator
     @require_read_token
     def test_torch_compile(self):
         if version.parse(torch.__version__) < version.parse("2.3"):
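
With @require_torch_accelerator on test_torch_compile, the torch.compile path is exercised on whatever accelerator is available rather than on CUDA only. A minimal sketch under that assumption (the toy module is a placeholder; the real test also gates on torch >= 2.3, as the version.parse check above shows):

import torch
from transformers.testing_utils import torch_device

# compile a forward pass on the resolved device instead of a hard-coded "cuda"
model = torch.nn.Linear(16, 16).to(torch_device)
compiled = torch.compile(model)
out = compiled(torch.randn(4, 16, device=torch_device))
print(out.shape)
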
