
Commit 7a6c8c3

iAmir97 authored
[Chore] Separate out vllm.utils.network_utils (#27164)
Signed-off-by: iAmir97 <Amir.balwel@embeddedllm.com>
Co-authored-by: iAmir97 <Amir.balwel@embeddedllm.com>
1 parent 221bf72 commit 7a6c8c3

File tree

37 files changed: +516 additions, -487 deletions

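The change is mechanical: network-related helpers such as get_ip and get_open_port move from the top-level vllm.utils package into a dedicated vllm.utils.network_utils submodule, and every caller updates its import accordingly. A minimal before/after sketch of the pattern applied in each file below (only the helper names this diff actually touches are shown; whether vllm.utils keeps re-exporting them is not visible here):

# Old import path, removed throughout this commit:
# from vllm.utils import get_ip, get_open_port

# New import path, added throughout this commit:
from vllm.utils.network_utils import get_ip, get_open_port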

examples/offline_inference/data_parallel.py

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@
 from time import sleep
 
 from vllm import LLM, SamplingParams
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
 
 
 def parse_args():

examples/offline_inference/rlhf.py

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@
 from transformers import AutoModelForCausalLM
 
 from vllm import LLM, SamplingParams
-from vllm.utils import get_ip, get_open_port
+from vllm.utils.network_utils import get_ip, get_open_port
 
 
 class MyLLM(LLM):
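The helpers themselves are unchanged by this commit; only their import path moves. A short usage sketch, assuming get_ip returns a reachable address for the current host and get_open_port returns a free TCP port, as their use in these examples suggests (the variable names and the tcp:// rendezvous address below are illustrative, not code from rlhf.py):

from vllm.utils.network_utils import get_ip, get_open_port

# Resolve this host's IP and pick an unused port, then form the kind of
# rendezvous address a distributed example would hand to its workers.
master_address = get_ip()
master_port = get_open_port()
init_method = f"tcp://{master_address}:{master_port}"
print(init_method)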

tests/distributed/test_multi_node_assignment.py

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@
 from vllm import initialize_ray_cluster
 from vllm.config import ParallelConfig
 from vllm.executor.ray_utils import _wait_until_pg_removed
-from vllm.utils import get_ip
+from vllm.utils.network_utils import get_ip
 
 VLLM_MULTI_NODE = os.getenv("VLLM_MULTI_NODE", "0") == "1"

tests/distributed/test_node_count.py

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@
 
 from vllm.distributed.parallel_state import _node_count
 from vllm.distributed.utils import StatelessProcessGroup
-from vllm.utils import get_ip, get_open_port
+from vllm.utils.network_utils import get_ip, get_open_port
 
 if __name__ == "__main__":
     dist.init_process_group(backend="gloo")

tests/distributed/test_same_node.py

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@
 
 from vllm.distributed.parallel_state import in_the_same_node_as
 from vllm.distributed.utils import StatelessProcessGroup
-from vllm.utils import get_ip, get_open_port
+from vllm.utils.network_utils import get_ip, get_open_port
 
 if __name__ == "__main__":
     dist.init_process_group(backend="gloo")

tests/distributed/test_shm_broadcast.py

Lines changed: 2 additions & 1 deletion
@@ -10,7 +10,8 @@
 
 from vllm.distributed.device_communicators.shm_broadcast import MessageQueue
 from vllm.distributed.utils import StatelessProcessGroup
-from vllm.utils import get_open_port, update_environment_variables
+from vllm.utils import update_environment_variables
+from vllm.utils.network_utils import get_open_port
 
 
 def get_arrays(n: int, seed: int = 0) -> list[np.ndarray]:

tests/distributed/test_utils.py

Lines changed: 2 additions & 4 deletions
@@ -10,10 +10,8 @@
 import vllm.envs as envs
 from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator
 from vllm.distributed.utils import StatelessProcessGroup
-from vllm.utils import (
-    get_open_port,
-    update_environment_variables,
-)
+from vllm.utils import update_environment_variables
+from vllm.utils.network_utils import get_open_port
 from vllm.utils.torch_utils import cuda_device_count_stateless
 
 from ..utils import multi_gpu_test
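Only the network helpers move; general-purpose utilities such as update_environment_variables stay in vllm.utils, so files that previously used one combined import now need two. A hedged sketch of how the two might be used together in test setup, assuming update_environment_variables accepts a dict mapping variable names to values (an assumption based on its usage pattern, not shown in this diff; the MASTER_ADDR/MASTER_PORT names are illustrative):

from vllm.utils import update_environment_variables
from vllm.utils.network_utils import get_ip, get_open_port

# Hypothetical test setup: export a rendezvous address via environment
# variables before spawning worker processes.
update_environment_variables(
    {
        "MASTER_ADDR": get_ip(),
        "MASTER_PORT": str(get_open_port()),
    }
)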

tests/entrypoints/openai/test_shutdown.py

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 import openai
 import pytest
 
-from ...utils import get_open_port
+from vllm.utils.network_utils import get_open_port
 
 MODEL_NAME = "hmellor/tiny-random-LlamaForCausalLM"

tests/kernels/moe/modular_kernel_tools/parallel_utils.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 
 from vllm.config import VllmConfig, set_current_vllm_config
 from vllm.distributed import init_distributed_environment, initialize_model_parallel
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
 
 ## Parallel Processes Utils

tests/kernels/moe/parallel_utils.py

Lines changed: 2 additions & 1 deletion
@@ -15,7 +15,8 @@
 from torch.multiprocessing import spawn  # pyright: ignore[reportPrivateImportUsage]
 from typing_extensions import ParamSpec
 
-from vllm.utils import get_open_port, has_deep_ep
+from vllm.utils import has_deep_ep
+from vllm.utils.network_utils import get_open_port
 
 if has_deep_ep():
     from vllm.model_executor.layers.fused_moe.deepep_ht_prepare_finalize import (
