
Commit 033cc87

DarkLight1337 authored and rtourgeman committed
[Chore] Rename utils submodules (vllm-project#26920)
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
1 parent a444ac4 commit 033cc87

24 files changed: +28 −24 lines changed
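For downstream code, the rename is a pure import-path change; the helpers themselves are untouched. A minimal before/after sketch of the two renamed submodules seen in this diff:

# Old import paths (removed by this commit):
#   from vllm.utils.async_utils import merge_async_iterators
#   from vllm.utils.func import identity, supports_kw, deprecate_kwargs

# New import paths:
from vllm.utils.asyncio import merge_async_iterators
from vllm.utils.functools import identity, supports_kw, deprecate_kwargs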

tests/lora/test_add_lora.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 from vllm.inputs import TextPrompt
 from vllm.lora.request import LoRARequest
 from vllm.sampling_params import SamplingParams
-from vllm.utils.async_utils import merge_async_iterators
+from vllm.utils.asyncio import merge_async_iterators
 
 MODEL_PATH = "zai-org/chatglm3-6b"
 LORA_RANK = 64
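As context for this and the other vllm.utils.asyncio hunks below, merge_async_iterators fans several async iterators into a single stream. A minimal usage sketch, assuming it yields (iterator_index, item) pairs as at the existing call sites; the toy generator is not part of vllm:

import asyncio

from vllm.utils.asyncio import merge_async_iterators


async def numbers(start: int):
    # Toy async generator, for illustration only.
    for i in range(start, start + 3):
        yield i


async def main():
    # Each yielded pair is assumed to be (index of source iterator, item).
    async for idx, item in merge_async_iterators(numbers(0), numbers(10)):
        print(idx, item)


asyncio.run(main())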

tests/models/multimodal/generation/test_common.py

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@
 )
 
 from vllm.platforms import current_platform
-from vllm.utils.func import identity
+from vllm.utils.functools import identity
 
 from ....conftest import (
     IMAGE_ASSETS,
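identity is assumed to simply return its argument unchanged; under the new module name it is used as:

from vllm.utils.functools import identity

assert identity(42) == 42  # passes the value through unchanged (assumed behavior)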

tests/utils_/test_async_utils.py

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 
 import pytest
 
-from vllm.utils.async_utils import merge_async_iterators
+from vllm.utils.asyncio import merge_async_iterators
 
 
 async def _mock_async_iterator(idx: int):

tests/utils_/test_func_utils.py

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 
 import pytest
 
-from vllm.utils.func import deprecate_kwargs, supports_kw
+from vllm.utils.functools import deprecate_kwargs, supports_kw
 
 from ..utils import error_on_warning
 
vllm/benchmarks/throughput.py

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@
 from vllm.lora.request import LoRARequest
 from vllm.outputs import RequestOutput
 from vllm.sampling_params import BeamSearchParams
-from vllm.utils.async_utils import merge_async_iterators
+from vllm.utils.asyncio import merge_async_iterators
 
 
 def run_vllm(

vllm/entrypoints/chat_utils.py

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@
 from vllm.transformers_utils.processor import cached_get_processor
 from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
 from vllm.utils import random_uuid
-from vllm.utils.func import supports_kw
+from vllm.utils.functools import supports_kw
 
 logger = init_logger(__name__)
 
vllm/entrypoints/openai/serving_completion.py

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@
 from vllm.sampling_params import BeamSearchParams, SamplingParams
 from vllm.transformers_utils.tokenizer import AnyTokenizer
 from vllm.utils import as_list
-from vllm.utils.async_utils import merge_async_iterators
+from vllm.utils.asyncio import merge_async_iterators
 
 logger = init_logger(__name__)
 
vllm/entrypoints/openai/serving_embedding.py

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@
 )
 from vllm.pooling_params import PoolingParams
 from vllm.utils import chunk_list
-from vllm.utils.async_utils import merge_async_iterators
+from vllm.utils.asyncio import merge_async_iterators
 
 logger = init_logger(__name__)
 
vllm/entrypoints/openai/serving_engine.py

Lines changed: 1 addition & 1 deletion
@@ -91,7 +91,7 @@
 )
 from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
 from vllm.utils import is_list_of, random_uuid
-from vllm.utils.async_utils import (
+from vllm.utils.asyncio import (
     AsyncMicrobatchTokenizer,
     collect_from_async_generator,
     make_async,
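This hunk imports three asyncio helpers rather than one. A minimal sketch of make_async and collect_from_async_generator, with behavior assumed from their names and typical use in the serving code; AsyncMicrobatchTokenizer is left out because it needs a tokenizer instance:

import asyncio

from vllm.utils.asyncio import collect_from_async_generator, make_async


def blocking_tokenize(text: str) -> list[str]:
    # Stand-in for CPU-bound work that should not block the event loop.
    return text.split()


async def stream_chunks():
    for chunk in ("hello", "world"):
        yield chunk


async def main():
    # make_async is assumed to off-load the blocking call to an executor
    # and return an awaitable wrapper.
    tokens = await make_async(blocking_tokenize)("hello world")

    # collect_from_async_generator is assumed to drain the generator into a list.
    chunks = await collect_from_async_generator(stream_chunks())
    print(tokens, chunks)


asyncio.run(main())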

vllm/entrypoints/openai/serving_pooling.py

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@
 from vllm.logger import init_logger
 from vllm.outputs import PoolingOutput, PoolingRequestOutput
 from vllm.tasks import SupportedTask
-from vllm.utils.async_utils import merge_async_iterators
+from vllm.utils.asyncio import merge_async_iterators
 
 logger = init_logger(__name__)
 