[misc] remove engine_use_ray #8126

Merged — 7 commits, merged Sep 12, 2024
Changes from 4 commits
18 changes: 4 additions & 14 deletions tests/async_engine/test_api_server.py
@@ -1,4 +1,3 @@
-import os
 import subprocess
 import sys
 import time
@@ -26,8 +25,7 @@ def _query_server_long(prompt: str) -> dict:


 @pytest.fixture
-def api_server(tokenizer_pool_size: int, engine_use_ray: bool,
-               worker_use_ray: bool):
+def api_server(tokenizer_pool_size: int, worker_use_ray: bool):
     script_path = Path(__file__).parent.joinpath(
         "api_server_async_engine.py").absolute()
     commands = [
@@ -37,25 +35,17 @@ def api_server(tokenizer_pool_size: int, engine_use_ray: bool,
         str(tokenizer_pool_size)
     ]

-    # Copy the environment variables and append `VLLM_ALLOW_ENGINE_USE_RAY=1`
-    # to prevent `--engine-use-ray` raises an exception due to it deprecation
-    env_vars = os.environ.copy()
-    env_vars["VLLM_ALLOW_ENGINE_USE_RAY"] = "1"
-
-    if engine_use_ray:
-        commands.append("--engine-use-ray")
     if worker_use_ray:
         commands.append("--worker-use-ray")
-    uvicorn_process = subprocess.Popen(commands, env=env_vars)
+    uvicorn_process = subprocess.Popen(commands)
     yield
     uvicorn_process.terminate()


 @pytest.mark.parametrize("tokenizer_pool_size", [0, 2])
 @pytest.mark.parametrize("worker_use_ray", [False, True])
-@pytest.mark.parametrize("engine_use_ray", [False, True])
-def test_api_server(api_server, tokenizer_pool_size: int, worker_use_ray: bool,
-                    engine_use_ray: bool):
+def test_api_server(api_server, tokenizer_pool_size: int,
+                    worker_use_ray: bool):
     """
     Run the API server and test it.
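With the flag gone, the fixture launches the API server with a bare `subprocess.Popen(commands)` and no environment override. A minimal standalone sketch of that launch pattern, not part of the diff — the helper script name and flag values mirror the test but are illustrative here:

```python
import subprocess
import sys
from pathlib import Path

# Illustrative stand-ins for the pytest parameters.
tokenizer_pool_size = 2
worker_use_ray = True

script_path = Path(__file__).parent.joinpath(
    "api_server_async_engine.py").absolute()
commands = [
    sys.executable, "-u", str(script_path),
    "--tokenizer-pool-size", str(tokenizer_pool_size),
]
if worker_use_ray:
    commands.append("--worker-use-ray")

# No env= override anymore: VLLM_ALLOW_ENGINE_USE_RAY is obsolete.
server = subprocess.Popen(commands)
try:
    pass  # query the running server here
finally:
    server.terminate()
```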
10 changes: 2 additions & 8 deletions tests/async_engine/test_async_llm_engine.py
@@ -1,5 +1,4 @@
 import asyncio
-import os
 from asyncio import CancelledError
 from dataclasses import dataclass
 from typing import Optional
@@ -79,7 +78,7 @@ def _init_engine(self, *args, **kwargs):

 @pytest.mark.asyncio
 async def test_new_requests_event():
-    engine = MockAsyncLLMEngine(worker_use_ray=False, engine_use_ray=False)
+    engine = MockAsyncLLMEngine(worker_use_ray=False)
     engine.start_background_loop()
     await asyncio.sleep(0.01)
     assert engine.engine.step_calls == 0
@@ -112,16 +111,11 @@ async def test_new_requests_event():
     assert engine.engine.add_request_calls == 3
     assert engine.engine.step_calls == old_step_calls + 1

-    # Allow deprecated engine_use_ray to not raise exception
-    os.environ["VLLM_ALLOW_ENGINE_USE_RAY"] = "1"
-
-    engine = MockAsyncLLMEngine(worker_use_ray=True, engine_use_ray=True)
+    engine = MockAsyncLLMEngine(worker_use_ray=True)
     assert engine.get_model_config() is not None
     assert engine.get_tokenizer() is not None
     assert engine.get_decoding_config() is not None
-
-    os.environ.pop("VLLM_ALLOW_ENGINE_USE_RAY")


 def start_engine():
     wait_for_gpu_memory_to_clear(
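Since the engine can no longer be placed in a separate Ray actor, `AsyncLLMEngine` always runs in the calling process, and only `worker_use_ray` survives as a Ray knob at this level. A minimal sketch of post-PR engine construction and use — the model name and prompt are illustrative, not from the diff:

```python
import asyncio

from vllm import AsyncEngineArgs, AsyncLLMEngine, SamplingParams

# `engine_use_ray` no longer exists as a field or CLI flag; the engine
# lives in this process and only workers may be placed on Ray.
engine_args = AsyncEngineArgs(
    model="facebook/opt-125m",  # illustrative model
    worker_use_ray=False,
)
engine = AsyncLLMEngine.from_engine_args(engine_args)

async def main() -> None:
    params = SamplingParams(max_tokens=16)
    # generate() is an async generator; the last yielded item is final.
    async for output in engine.generate("Hello,", params, request_id="req-0"):
        final = output
    print(final.outputs[0].text)

asyncio.run(main())
```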
@@ -19,16 +19,11 @@ def server():
         "--max-model-len",
         "2048",
         "--enforce-eager",
-        "--engine-use-ray",
         "--chat-template",
         str(chatml_jinja_path),
     ]

-    # Allow `--engine-use-ray`, otherwise the launch of the server throw
-    # an error due to try to use a deprecated feature
-    env_dict = {"VLLM_ALLOW_ENGINE_USE_RAY": "1"}
-    with RemoteOpenAIServer(MODEL_NAME, args,
-                            env_dict=env_dict) as remote_server:
+    with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
         yield remote_server


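Accordingly, the OpenAI-server fixture no longer needs an `env_dict` escape hatch. A sketch of the simplified pattern, assuming vLLM's `RemoteOpenAIServer` test utility and its `get_client()` helper; the model and args here are illustrative:

```python
# Sketch only: RemoteOpenAIServer lives in vLLM's test utilities.
from tests.utils import RemoteOpenAIServer

MODEL_NAME = "facebook/opt-125m"  # illustrative
args = ["--max-model-len", "2048", "--enforce-eager"]

# No env_dict={"VLLM_ALLOW_ENGINE_USE_RAY": "1"} now that the flag is gone.
with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
    client = remote_server.get_client()  # assumed OpenAI-client helper
    models = client.models.list()
    print(models.data[0].id)
```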
9 changes: 0 additions & 9 deletions tests/spec_decode/e2e/conftest.py
@@ -1,5 +1,4 @@
 import asyncio
-import os
 from itertools import cycle
 from typing import Dict, List, Optional, Sequence, Tuple, Union

@@ -58,10 +57,6 @@ def __init__(
         if "disable_log_stats" not in kwargs:
             kwargs["disable_log_stats"] = True

-        # Needed to engine_use_ray works as a deprecated feature,
-        # otherwise the following constructor will raise an exception
-        os.environ["VLLM_ALLOW_ENGINE_USE_RAY"] = "1"
-
         engine_args = AsyncEngineArgs(
             model=model,
             tokenizer=tokenizer,
@@ -78,10 +73,6 @@ def __init__(
             swap_space=swap_space,
             enforce_eager=enforce_eager,
             max_seq_len_to_capture=max_seq_len_to_capture,
-            # For now use ray for the distributed back-end, since
-            # we rely on the use of engine_use_ray=True to avoid
-            # reinitializing CUDA in the same process (driver worker)
-            engine_use_ray=True,
             distributed_executor_backend="ray",
             disable_custom_all_reduce=disable_custom_all_reduce,
             **kwargs,
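The spec-decode harness still runs distributed execution on Ray, but selection now happens solely through `distributed_executor_backend`; `engine_use_ray=True` is no longer needed to avoid re-initializing CUDA in the driver process. A sketch of the trimmed constructor call, with illustrative parameter values:

```python
from vllm import AsyncEngineArgs

# Ray is used only at the worker level; the engine stays in-process.
engine_args = AsyncEngineArgs(
    model="facebook/opt-125m",           # illustrative
    tensor_parallel_size=2,              # illustrative
    distributed_executor_backend="ray",
    disable_log_stats=True,
)
```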
11 changes: 0 additions & 11 deletions vllm/engine/arg_utils.py
@@ -1001,24 +1001,13 @@ def create_engine_config(self) -> EngineConfig:
 @dataclass
 class AsyncEngineArgs(EngineArgs):
     """Arguments for asynchronous vLLM engine."""
-    engine_use_ray: bool = False
     disable_log_requests: bool = False

     @staticmethod
     def add_cli_args(parser: FlexibleArgumentParser,
                      async_args_only: bool = False) -> FlexibleArgumentParser:
         if not async_args_only:
             parser = EngineArgs.add_cli_args(parser)
-        parser.add_argument('--engine-use-ray',
-                            action='store_true',
-                            help='Use Ray to start the LLM engine in a '
-                            'separate process as the server process.'
-                            '(DEPRECATED. This argument is deprecated '
-                            'and will be removed in a future update. '
-                            'Set `VLLM_ALLOW_ENGINE_USE_RAY=1` to force '
-                            'use it. See '
-                            'https://github.com/vllm-project/vllm/issues/7045.'
-                            ')')
         parser.add_argument('--disable-log-requests',
                             action='store_true',
                             help='Disable logging requests.')
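After the removal, the async CLI surface adds only `--disable-log-requests` on top of the base `EngineArgs` flags. A sketch of parsing and materializing the arguments; the model value is illustrative:

```python
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.utils import FlexibleArgumentParser

parser = FlexibleArgumentParser(description="vLLM async engine demo")
parser = AsyncEngineArgs.add_cli_args(parser)

# Passing --engine-use-ray would now fail with "unrecognized arguments".
args = parser.parse_args(
    ["--model", "facebook/opt-125m", "--disable-log-requests"])
engine_args = AsyncEngineArgs.from_cli_args(args)
print(engine_args.disable_log_requests)  # True
```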