tests/v1/tpu/test_mha_attn.py: 0 additions, 7 deletions
@@ -12,17 +12,10 @@
 import torch_xla.core
 import torch_xla.core.xla_model
 
-from vllm import envs
 from vllm.attention.layer import MultiHeadAttention
 from vllm.attention.selector import _cached_get_attn_backend
 from vllm.platforms import current_platform
 
-if not envs.VLLM_USE_V1:
-    pytest.skip(
-        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
-        allow_module_level=True,
-    )
-
 
 @pytest.fixture(autouse=True)
 def clear_cache():
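The guard deleted above (and in the two files below) uses pytest's module-level skip. For context, `pytest.skip()` called at import time is only valid with `allow_module_level=True`; without that flag pytest raises a usage error during collection. A minimal self-contained sketch of the pattern, substituting `os.environ` for `vllm.envs` (an assumption, so the snippet runs outside the vLLM tree):

```python
import os

import pytest

# Module-level guard: when the condition holds, the entire module is
# skipped at collection time, before any test in it is gathered.
# allow_module_level=True is required because pytest.skip() is being
# called outside of a test function.
# NOTE: reading os.environ directly (with "1" as the default) is an
# assumption; the deleted code read the flag via vllm.envs instead.
if os.environ.get("VLLM_USE_V1", "1") != "1":
    pytest.skip(
        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
        allow_module_level=True,
    )
```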
tests/v1/tpu/test_multimodal.py: 0 additions, 7 deletions
@@ -4,19 +4,12 @@
 import openai
 import pytest
 
-from vllm import envs
 from vllm.multimodal.utils import encode_image_base64, fetch_image
 from vllm.platforms import current_platform
 
 from ...entrypoints.openai.test_vision import TEST_IMAGE_URLS
 from ...utils import RemoteOpenAIServer
 
-if not envs.VLLM_USE_V1:
-    pytest.skip(
-        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
-        allow_module_level=True,
-    )
-
 
 @pytest.fixture(scope="session")
 def base64_encoded_image() -> dict[str, str]:
tests/v1/tpu/test_sampler.py: 1 addition, 7 deletions
@@ -4,16 +4,10 @@
 
 import pytest
 
-from vllm import LLM, envs
+from vllm import LLM
 from vllm.platforms import current_platform
 from vllm.sampling_params import SamplingParams
 
-if not envs.VLLM_USE_V1:
-    pytest.skip(
-        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
-        allow_module_level=True,
-    )
-
 
 @pytest.mark.parametrize("model_name", ["Qwen/Qwen2.5-1.5B-Instruct"])
 @pytest.mark.skipif(not current_platform.is_tpu(),
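The TPU gating that remains after this change is per-test, via the `skipif` marker visible (truncated) above. Unlike the module-level guard, `skipif` skips only the decorated test and leaves the rest of the module runnable. A sketch of that pattern; the `reason` string and test body are hypothetical, since the original decorator is cut off in this view:

```python
import pytest

from vllm.platforms import current_platform


# Per-test guard: only this test is skipped when no TPU is available;
# other tests in the module still run. The reason string is a
# placeholder, as the real argument is truncated in the diff above.
@pytest.mark.skipif(not current_platform.is_tpu(),
                    reason="requires a TPU")
def test_runs_only_on_tpu():
    ...
```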