2 changes: 0 additions & 2 deletions docs/usage/v1_guide.md
@@ -6,8 +6,6 @@

V1 is now enabled by default for all supported use cases, and we will gradually enable it for every use case we plan to support. Please share any feedback on [GitHub](https://github.com/vllm-project/vllm) or in the [vLLM Slack](https://inviter.co/vllm-slack).

To disable V1, please set the environment variable `VLLM_USE_V1=0`, and send us a GitHub issue sharing the reason!

## Why vLLM V1?

vLLM V0 successfully supported a wide range of models and hardware, but as new features were developed independently, the system grew increasingly complex. This complexity made it harder to integrate new capabilities and introduced technical debt, revealing the need for a more streamlined and unified design.
20 changes: 0 additions & 20 deletions tests/conftest.py
@@ -154,26 +154,6 @@ def prompts(self, prompts: AudioAssetPrompts) -> list[str]:
"""Singleton instance of {class}`AudioTestAssets`."""


@pytest.fixture(scope="function", autouse=True)
def cleanup_VLLM_USE_V1(monkeypatch):
"""
    The V1 oracle sets "VLLM_USE_V1" during loading. This means
    that each invocation of a test changes the env variable.

    If we touch "VLLM_USE_V1" with monkeypatch, then any changes
    made to it by vLLM during the test will be cleaned up.

This fixture is used by every test.
"""

# If VLLM_USE_V1 is not set, set then delete. This will
# cause monkeypatch to clean up VLLM_USE_V1 upon exit
# if VLLM modifies the value of envs.VLLM_USE_V1.
if "VLLM_USE_V1" not in os.environ:
monkeypatch.setenv("VLLM_USE_V1", "")
monkeypatch.delenv("VLLM_USE_V1")


@pytest.fixture(autouse=True)
def init_test_http_connection():
# pytest_asyncio may use a different event loop per test
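The removed fixture relied on a pytest `monkeypatch` idiom that is easy to miss: once a variable has been touched through `monkeypatch`, pytest records its original state and restores it at teardown, even if the code under test later rewrites it directly. A minimal sketch of that pattern, using a hypothetical `SOME_FLAG` variable rather than `VLLM_USE_V1`:

```python
import os

import pytest


@pytest.fixture(autouse=True)
def _restore_some_flag(monkeypatch: pytest.MonkeyPatch):
    # Touch the variable through monkeypatch so pytest records that it was
    # originally unset. Set-then-delete leaves the environment unchanged now,
    # but guarantees the variable is removed again at teardown even if the
    # code under test assigns it during the test.
    if "SOME_FLAG" not in os.environ:
        monkeypatch.setenv("SOME_FLAG", "")
        monkeypatch.delenv("SOME_FLAG")
```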
7 changes: 2 additions & 5 deletions tests/v1/engine/test_async_llm.py
@@ -424,15 +424,12 @@ async def test_customize_loggers(monkeypatch):


@pytest.mark.asyncio
async def test_customize_aggregated_loggers(monkeypatch):
async def test_customize_aggregated_loggers():
"""Test that we can customize the aggregated loggers.
    If a customized logger is provided at initialization, it should
be added to the default loggers.
"""

with monkeypatch.context() as m, ExitStack() as after:
m.setenv("VLLM_USE_V1", "1")

with ExitStack() as after:
with set_default_torch_num_threads(1):
engine = AsyncLLM.from_engine_args(
TEXT_ENGINE_ARGS,
3 changes: 0 additions & 3 deletions tests/v1/entrypoints/llm/test_struct_output_generate.py
@@ -868,11 +868,8 @@ def test_structured_output_batched_with_non_structured_outputs_requests(

@pytest.mark.parametrize("guided_decoding_backend", ["xgrammar"])
def test_structured_output_with_structural_tag(
monkeypatch: pytest.MonkeyPatch,
guided_decoding_backend: str,
):
monkeypatch.setenv("VLLM_USE_V1", "1")

llm = LLM(
model="Qwen/Qwen2.5-1.5B-Instruct",
guided_decoding_backend=guided_decoding_backend,
121 changes: 59 additions & 62 deletions tests/v1/sample/test_logprobs.py
@@ -530,7 +530,6 @@ def test_logprobs_mode(logprobs_mode: LogprobsMode):
def test_spec_decode_logprobs(
logprobs_mode: LogprobsMode,
model_setup: tuple[str, str, str],
monkeypatch: pytest.MonkeyPatch,
):
"""Spec decode logprobs should match those of the base model.

@@ -541,64 +540,62 @@
"""
from vllm import LLM

with monkeypatch.context() as m:
m.setenv("VLLM_USE_V1", "1")
prompt = "Hello world"
sampling_params = SamplingParams(
temperature=0, logprobs=3, max_tokens=10, ignore_eos=False
)
method, model_name, spec_model_name = model_setup
max_model_len = 256

# Run base LLM.
ref_llm = LLM(
model=model_name,
max_logprobs=5,
max_model_len=max_model_len,
seed=42,
logprobs_mode=logprobs_mode,
gpu_memory_utilization=0.4,
)
ref_results = ref_llm.generate([prompt], sampling_params)
# Collect logprobs outputs from reference LLM.
ref_logprobs = []
for output in ref_results[0].outputs:
for logprobs in output.logprobs:
for token_id in logprobs:
ref_logprobs.append(logprobs[token_id])
del ref_llm
torch.cuda.empty_cache()
cleanup_dist_env_and_memory()

# Run spec decode LLM.
spec_llm = LLM(
model_name,
speculative_config={
"method": method,
"model": spec_model_name,
"num_speculative_tokens": 3,
"max_model_len": max_model_len,
},
max_logprobs=5,
max_model_len=max_model_len,
seed=42,
logprobs_mode=logprobs_mode,
gpu_memory_utilization=0.4,
)
spec_results = spec_llm.generate([prompt], sampling_params)
# Collect logprobs outputs from spec decode LLM.
spec_logprobs = []
for output in spec_results[0].outputs:
for logprobs in output.logprobs:
for token_id in logprobs:
spec_logprobs.append(logprobs[token_id])
del spec_llm
torch.cuda.empty_cache()
cleanup_dist_env_and_memory()

# Per-token logprobs are expected to be the same.
assert len(ref_logprobs) == len(spec_logprobs)
for ref_logprob, spec_logprob in zip(ref_logprobs, spec_logprobs):
assert math.isclose(ref_logprob.logprob, spec_logprob.logprob, abs_tol=1e-3)
assert ref_logprob.rank == spec_logprob.rank
assert ref_logprob.decoded_token == spec_logprob.decoded_token
prompt = "Hello world"
sampling_params = SamplingParams(
temperature=0, logprobs=3, max_tokens=10, ignore_eos=False
)
method, model_name, spec_model_name = model_setup
max_model_len = 256

# Run base LLM.
ref_llm = LLM(
model=model_name,
max_logprobs=5,
max_model_len=max_model_len,
seed=42,
logprobs_mode=logprobs_mode,
gpu_memory_utilization=0.4,
)
ref_results = ref_llm.generate([prompt], sampling_params)
# Collect logprobs outputs from reference LLM.
ref_logprobs = []
for output in ref_results[0].outputs:
for logprobs in output.logprobs:
for token_id in logprobs:
ref_logprobs.append(logprobs[token_id])
del ref_llm
torch.cuda.empty_cache()
cleanup_dist_env_and_memory()

# Run spec decode LLM.
spec_llm = LLM(
model_name,
speculative_config={
"method": method,
"model": spec_model_name,
"num_speculative_tokens": 3,
"max_model_len": max_model_len,
},
max_logprobs=5,
max_model_len=max_model_len,
seed=42,
logprobs_mode=logprobs_mode,
gpu_memory_utilization=0.4,
)
spec_results = spec_llm.generate([prompt], sampling_params)
# Collect logprobs outputs from spec decode LLM.
spec_logprobs = []
for output in spec_results[0].outputs:
for logprobs in output.logprobs:
for token_id in logprobs:
spec_logprobs.append(logprobs[token_id])
del spec_llm
torch.cuda.empty_cache()
cleanup_dist_env_and_memory()

# Per-token logprobs are expected to be the same.
assert len(ref_logprobs) == len(spec_logprobs)
for ref_logprob, spec_logprob in zip(ref_logprobs, spec_logprobs):
assert math.isclose(ref_logprob.logprob, spec_logprob.logprob, abs_tol=1e-3)
assert ref_logprob.rank == spec_logprob.rank
assert ref_logprob.decoded_token == spec_logprob.decoded_token
18 changes: 6 additions & 12 deletions vllm/attention/layers/chunked_local_attention.py
@@ -5,7 +5,6 @@

import torch

from vllm import envs
from vllm.attention.backends.abstract import AttentionBackend, AttentionMetadata
from vllm.attention.selector import get_attn_backend
from vllm.config import CacheConfig
@@ -78,17 +77,12 @@ def __init__(
kv_cache_dtype = "auto"
block_size = 16

if envs.VLLM_USE_V1:
underlying_attn_backend = get_attn_backend(
head_size, dtype, kv_cache_dtype, block_size
)

attn_backend = create_chunked_local_attention_backend(
underlying_attn_backend, attention_chunk_size, block_size
)
else:
# in v0 the local attention is handled inside the backends
attn_backend = None
underlying_attn_backend = get_attn_backend(
head_size, dtype, kv_cache_dtype, block_size
)
attn_backend = create_chunked_local_attention_backend(
underlying_attn_backend, attention_chunk_size, block_size
)

super().__init__(
num_heads=num_heads,
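The same shape of change repeats in the cross-attention and encoder-only layers below: instead of branching on `VLLM_USE_V1` (and falling back to `attn_backend = None` for V0), the layer now always resolves the underlying backend and wraps it. A rough, hypothetical sketch of that wrap-always pattern (the stand-in names are illustrative, not vLLM's real classes):

```python
class UnderlyingBackend:
    """Stand-in for whatever get_attn_backend() resolves to."""


def create_wrapped_backend(underlying: type) -> type:
    """Stand-in for create_*_attention_backend(): derive a backend that
    reuses the underlying kernels but customizes metadata handling."""
    return type(f"ChunkedLocal{underlying.__name__}", (underlying,), {})


# Before this PR: the backend was only wrapped under V1, and was None on V0.
# After: V1 is the only engine, so the wrap happens unconditionally.
attn_backend = create_wrapped_backend(UnderlyingBackend)
```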
14 changes: 4 additions & 10 deletions vllm/attention/layers/cross_attention.py
@@ -6,7 +6,6 @@
import numpy as np
import torch

from vllm import envs
from vllm.attention.backends.abstract import (
AttentionBackend,
AttentionMetadata,
@@ -150,15 +149,10 @@ def __init__(
kv_cache_dtype = "auto"
block_size = 16

if envs.VLLM_USE_V1:
underlying_attn_backend = get_attn_backend(
head_size, dtype, kv_cache_dtype, block_size
)

attn_backend = create_cross_attention_backend(underlying_attn_backend)
else:
# in v0 cross attention is handled inside the backends
attn_backend = None
underlying_attn_backend = get_attn_backend(
head_size, dtype, kv_cache_dtype, block_size
)
attn_backend = create_cross_attention_backend(underlying_attn_backend)

if attn_type is not None:
assert attn_type == AttentionType.ENCODER_DECODER, (
15 changes: 4 additions & 11 deletions vllm/attention/layers/encoder_only_attention.py
@@ -5,7 +5,6 @@

import torch

from vllm import envs
from vllm.attention.backends.abstract import (
AttentionBackend,
AttentionMetadata,
@@ -74,17 +73,11 @@ def __init__(
kv_cache_dtype = "auto"
block_size = 16

if envs.VLLM_USE_V1:
underlying_attn_backend = get_attn_backend(
head_size, dtype, kv_cache_dtype, block_size
)
underlying_attn_backend = get_attn_backend(
head_size, dtype, kv_cache_dtype, block_size
)

attn_backend = create_encoder_only_attention_backend(
underlying_attn_backend
)
else:
# in v0 encoder only attention is handled inside the backends
attn_backend = None
attn_backend = create_encoder_only_attention_backend(underlying_attn_backend)

if attn_type is not None:
assert attn_type == AttentionType.ENCODER_ONLY, (
8 changes: 1 addition & 7 deletions vllm/attention/selector.py
@@ -134,16 +134,11 @@ def get_attn_backend(
use_sparse: bool = False,
) -> type[AttentionBackend]:
"""Selects which attention backend to use and lazily imports it."""
# Accessing envs.* behind an @lru_cache decorator can cause the wrong
# value to be returned from the cache if the value changes between calls.
# To avoid this, we read envs.VLLM_USE_V1 here and pass it explicitly to the
# private function.
return _cached_get_attn_backend(
head_size=head_size,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
block_size=block_size,
use_v1=envs.VLLM_USE_V1,
use_mla=use_mla,
has_sink=has_sink,
use_sparse=use_sparse,
@@ -156,7 +151,6 @@ def _cached_get_attn_backend(
dtype: torch.dtype,
kv_cache_dtype: str | None,
block_size: int,
use_v1: bool = False,
use_mla: bool = False,
has_sink: bool = False,
use_sparse: bool = False,
@@ -199,7 +193,7 @@ def _cached_get_attn_backend(
dtype,
kv_cache_dtype,
block_size,
use_v1,
True,
use_mla,
has_sink,
use_sparse,
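The comment deleted from `get_attn_backend` documented why the `use_v1` argument existed in the first place: reading an environment-derived value inside an `@lru_cache`-decorated function freezes whatever value the first call saw. With V1 hardcoded to `True` the argument is no longer needed, but the underlying hazard is general. A minimal, self-contained sketch (using a hypothetical `SOME_FLAG`, not a vLLM variable):

```python
import os
from functools import lru_cache


@lru_cache
def flag_read_inside() -> bool:
    # The environment is read inside the cached function, so the first
    # result is memoized and later changes to the variable are ignored.
    return os.environ.get("SOME_FLAG", "0") == "1"


@lru_cache
def flag_passed_in(flag: bool) -> bool:
    # The flag is part of the cache key, so a changed value creates a new
    # cache entry instead of returning a stale hit.
    return flag


os.environ["SOME_FLAG"] = "0"
assert flag_read_inside() is False
os.environ["SOME_FLAG"] = "1"
assert flag_read_inside() is False  # stale: cached from the first call
assert flag_passed_in(os.environ["SOME_FLAG"] == "1") is True
```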
7 changes: 0 additions & 7 deletions vllm/distributed/kv_transfer/kv_connector/factory.py
@@ -5,7 +5,6 @@
from collections.abc import Callable
from typing import TYPE_CHECKING, Optional, cast

import vllm.envs as envs
from vllm.distributed.kv_transfer.kv_connector.base import (
KVConnectorBase,
KVConnectorBaseType,
@@ -47,12 +46,6 @@ def create_connector(
role: KVConnectorRole,
kv_cache_config: Optional["KVCacheConfig"] = None,
) -> KVConnectorBase:
if not envs.VLLM_USE_V1:
raise ValueError(
"Attempting to initialize a V1 Connector, "
f"but found {envs.VLLM_USE_V1=}"
)

kv_transfer_config = config.kv_transfer_config
if kv_transfer_config is None:
raise ValueError("kv_transfer_config must be set to create a connector")
14 changes: 5 additions & 9 deletions vllm/distributed/kv_transfer/kv_transfer_state.py
@@ -2,7 +2,6 @@
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import TYPE_CHECKING, Optional

from vllm import envs
from vllm.distributed.kv_transfer.kv_connector.base import KVConnectorBaseType
from vllm.distributed.kv_transfer.kv_connector.factory import KVConnectorFactory
from vllm.distributed.kv_transfer.kv_connector.v1 import (
@@ -65,14 +64,11 @@ def ensure_kv_transfer_initialized(
vllm_config.kv_transfer_config.is_kv_transfer_instance
and _KV_CONNECTOR_AGENT is None
):
if envs.VLLM_USE_V1:
_KV_CONNECTOR_AGENT = KVConnectorFactory.create_connector(
config=vllm_config,
role=KVConnectorRole.WORKER,
kv_cache_config=kv_cache_config,
)
else:
raise ValueError("V0 is no longer supported")
_KV_CONNECTOR_AGENT = KVConnectorFactory.create_connector(
config=vllm_config,
role=KVConnectorRole.WORKER,
kv_cache_config=kv_cache_config,
)


def ensure_kv_transfer_shutdown() -> None: