
Commit aed1687

Move ModelConfig from config/__init__.py to config/model.py (#25252)

Authored by Harry Mellor
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Parent: cf278ff
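For context on the mechanics: a move like this usually keeps the old import path alive by re-exporting the relocated names from the package __init__.py, so only code that touches private helpers (as the test diffs below do) needs updating. The shim below is a minimal sketch of that pattern, not the actual contents of vllm/config/__init__.py, which this excerpt does not show.

# vllm/config/__init__.py (hypothetical shim, for illustration only)
# Re-exporting keeps `from vllm.config import ModelConfig` resolving
# even though the class definition now lives in config/model.py.
from vllm.config.model import ModelConfig

__all__ = ["ModelConfig"]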

File tree: 13 files changed (+2160, -2149 lines). The four test files below are excerpted.


tests/conftest.py

Lines changed: 2 additions & 1 deletion

@@ -39,7 +39,8 @@
 from vllm.assets.audio import AudioAsset
 from vllm.assets.image import ImageAsset
 from vllm.assets.video import VideoAsset
-from vllm.config import ConvertOption, RunnerOption, _get_and_verify_dtype
+from vllm.config.model import (ConvertOption, RunnerOption,
+                               _get_and_verify_dtype)
 from vllm.connections import global_http_connection
 from vllm.distributed import (cleanup_dist_env_and_memory,
                               init_distributed_environment,

tests/distributed/test_pipeline_parallel.py

Lines changed: 1 addition & 1 deletion

@@ -14,7 +14,7 @@
 
 import pytest
 
-from vllm.config import _FLOAT16_NOT_SUPPORTED_MODELS, RunnerOption
+from vllm.config.model import _FLOAT16_NOT_SUPPORTED_MODELS, RunnerOption
 from vllm.logger import init_logger
 from vllm.transformers_utils.config import get_config
 
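Both updated imports above reach for underscore-prefixed names (_get_and_verify_dtype, _FLOAT16_NOT_SUPPORTED_MODELS). Package-level re-exports conventionally expose only the public API, which is presumably why these tests must now name the defining module vllm.config.model directly. A short sketch of that convention, using hypothetical names:

# mypkg/model.py (hypothetical)
_FLOAT16_UNSUPPORTED = {"some-arch"}  # private constant, not re-exported


def get_runner() -> str:  # public helper
    return "generate"

# mypkg/__init__.py would re-export only the public name:
#     from mypkg.model import get_runner
#     __all__ = ["get_runner"]
# so a test needing the private constant imports from the module itself:
#     from mypkg.model import _FLOAT16_UNSUPPORTED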

tests/models/test_initialization.py

Lines changed: 2 additions & 3 deletions

@@ -7,7 +7,6 @@
 import pytest
 
 from vllm import LLM
-from vllm.config import ModelImpl
 from vllm.engine.llm_engine import LLMEngine as V0LLMEngine
 from vllm.utils import GiB_bytes
 from vllm.v1.core.kv_cache_utils import get_kv_cache_configs

@@ -111,8 +110,8 @@ def _initialize_kv_caches_v1(self, vllm_config):
         # these tests seem to produce leftover memory
         gpu_memory_utilization=0.80,
         load_format="dummy",
-        model_impl=ModelImpl.TRANSFORMERS
-        if model_arch in _TRANSFORMERS_BACKEND_MODELS else ModelImpl.VLLM,
+        model_impl="transformers"
+        if model_arch in _TRANSFORMERS_BACKEND_MODELS else "vllm",
         hf_overrides=hf_overrides_fn,
         max_num_seqs=model_info.max_num_seqs)
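Dropping the ModelImpl import in favor of the bare strings "transformers" and "vllm" suggests the option is now typed as a set of string literals rather than an enum (the same shift that get_args reflects for LogprobsMode below). A minimal sketch of the pattern, with the alias and table names assumed rather than read from this diff:

from typing import Literal

# Hypothetical stand-in for the option type in vllm.config.model.
ModelImpl = Literal["auto", "vllm", "transformers"]

_TRANSFORMERS_BACKEND_MODELS = {"DummyForCausalLM"}  # hypothetical table


def pick_impl(model_arch: str) -> ModelImpl:
    # Plain strings replace enum members; a type checker still rejects
    # any value outside the Literal's allowed options.
    return ("transformers"
            if model_arch in _TRANSFORMERS_BACKEND_MODELS else "vllm")


print(pick_impl("DummyForCausalLM"))  # -> transformers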

tests/v1/sample/test_logprobs.py

Lines changed: 4 additions & 5 deletions

@@ -3,6 +3,7 @@
 
 import itertools
 from collections.abc import Generator
+from typing import get_args
 
 import pytest
 import torch

@@ -464,7 +465,7 @@ def test_all_logprobs(example_prompts, monkeypatch: pytest.MonkeyPatch):
     assert len(prompt_logprob) == vocab_size
 
 
-@pytest.mark.parametrize("logprobs_mode", list(LogprobsMode))
+@pytest.mark.parametrize("logprobs_mode", get_args(LogprobsMode))
 def test_logprobs_mode(logprobs_mode: LogprobsMode,
                        monkeypatch: pytest.MonkeyPatch):
     """Test with LLM engine with different logprobs_mode.

@@ -493,14 +494,12 @@ def test_logprobs_mode(logprobs_mode: LogprobsMode,
     for logprobs in output.logprobs:
         for token_id in logprobs:
             logprob = logprobs[token_id]
-            if logprobs_mode in (LogprobsMode.RAW_LOGPROBS,
-                                 LogprobsMode.PROCESSED_LOGPROBS):
+            if logprobs_mode in ("raw_logprobs", "processed_logprobs"):
                 assert logprob.logprob <= 0
             if logprob.logprob > 0:
                 positive_values = positive_values + 1
             total_token_with_logprobs = total_token_with_logprobs + 1
     assert total_token_with_logprobs >= len(results[0].outputs)
-    if logprobs_mode in (LogprobsMode.RAW_LOGITS,
-                         LogprobsMode.PROCESSED_LOGITS):
+    if logprobs_mode in ("raw_logits", "processed_logits"):
         assert positive_values > 0
     del llm
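The pattern here generalizes: list(SomeEnum) enumerates enum members, but once LogprobsMode is (presumably) a typing.Literal alias, typing.get_args is what yields its options as a tuple of strings for pytest parametrization, and the membership checks compare plain strings. A runnable sketch under that assumption, with the four option names taken from the updated assertions:

from typing import Literal, get_args

# Assumed shape of the alias; the option strings match the test above.
LogprobsMode = Literal["raw_logprobs", "raw_logits",
                       "processed_logprobs", "processed_logits"]

# get_args() returns the Literal's choices as a tuple, which slots
# directly into @pytest.mark.parametrize("logprobs_mode", get_args(...)).
options = get_args(LogprobsMode)
assert options == ("raw_logprobs", "raw_logits",
                   "processed_logprobs", "processed_logits")

for mode in options:
    # Membership checks against string tuples replace enum comparisons.
    expects_logits = mode in ("raw_logits", "processed_logits")
    print(mode, "logits may be positive" if expects_logits else "logprobs <= 0")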
