
Commit 31a6f39

Fix(llm): Clean up enqueued requests on batch chat failure (Fixes #26081)
Signed-off-by: vensenmu <vensenmu@gmail.com>
1 parent: 237cf6d
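
What this fixes, as a minimal sketch (this assumes only vLLM's public LLM.chat API; the model name and length limits are illustrative and mirror the regression test added in this commit): before this change, a batch chat call that failed validation part-way through left its already-enqueued requests in the engine queue, where they could pollute subsequent calls.

# Sketch only: illustrative model and limits, mirroring the new regression test.
from vllm import LLM, SamplingParams

llm = LLM(model="meta-llama/Llama-3.2-1B-Instruct", max_model_len=128, enforce_eager=True)
params = SamplingParams(temperature=0, max_tokens=10)

batch = [
    [{"role": "user", "content": "Hello"}],                   # fits within max_model_len
    [{"role": "user", "content": "very long text " * 200}],   # exceeds it -> ValueError
]
try:
    llm.chat(batch, sampling_params=params)
except ValueError:
    pass

# With this commit, the requests enqueued before the failure are aborted,
# so the engine queue is empty again:
assert llm.llm_engine.get_num_unfinished_requests() == 0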

2 files changed: 75 additions, 14 deletions

tests/entrypoints/llm/test_chat.py

Lines changed: 53 additions & 0 deletions
@@ -6,6 +6,7 @@
 
 from vllm import LLM
 from vllm.distributed import cleanup_dist_env_and_memory
+from vllm.sampling_params import SamplingParams
 
 from ..openai.test_vision import TEST_IMAGE_ASSETS
 
@@ -23,6 +24,29 @@ def text_llm():
     cleanup_dist_env_and_memory()
 
 
+@pytest.fixture(scope="function")
+def llm_for_failure_test():
+    """
+    Fixture for testing issue #26081.
+    Uses a small max_model_len to easily trigger length errors.
+    """
+    # pytest caches the fixture, so we use weakref.proxy to
+    # enable garbage collection
+    llm = LLM(
+        model="meta-llama/Llama-3.2-1B-Instruct",
+        enforce_eager=True,
+        seed=0,
+        max_model_len=128,
+        disable_log_stats=True,
+    )
+
+    yield weakref.proxy(llm)
+
+    del llm
+
+    cleanup_dist_env_and_memory()
+
+
 def test_chat(text_llm):
     prompt1 = "Explain the concept of entropy."
     messages = [
@@ -157,3 +181,32 @@ def test_chat_extra_kwargs(thinking_llm, enable_thinking):
     else:
         # The chat template includes dummy thinking process
        assert think_id in prompt_token_ids
+
+
+def test_chat_batch_failure_cleanup(llm_for_failure_test):
+    """
+    Tests that if a batch call to llm.chat() fails mid-way
+    (e.g., due to one invalid prompt), the requests that
+    were already enqueued are properly aborted and do not
+    pollute the queue for subsequent calls.
+    (Fixes Issue #26081)
+    """
+    llm = llm_for_failure_test
+    valid_msg = [{"role": "user", "content": "Hello"}]
+    long_text = "This is a very long text to test the error " * 50
+    invalid_msg = [{"role": "user", "content": long_text}]
+    batch_1 = [
+        valid_msg,
+        valid_msg,
+        invalid_msg,
+    ]
+    batch_2 = [
+        valid_msg,
+        valid_msg,
+    ]
+    sampling_params = SamplingParams(temperature=0, max_tokens=10)
+    with pytest.raises(ValueError, match="longer than the maximum model length"):
+        llm.chat(batch_1, sampling_params=sampling_params)
+    outputs_2 = llm.chat(batch_2, sampling_params=sampling_params)
+    assert len(outputs_2) == len(batch_2)
+    assert llm.llm_engine.get_num_unfinished_requests() == 0
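
To run just the new regression test locally (a standard pytest node-id invocation; assumes an environment that can load the 1B model used by the fixture):

pytest tests/entrypoints/llm/test_chat.py::test_chat_batch_failure_cleanup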

vllm/entrypoints/llm.py

Lines changed: 22 additions & 14 deletions
@@ -1560,20 +1560,27 @@ def _validate_and_add_requests(
             tqdm_func = use_tqdm if callable(use_tqdm) else tqdm
             it = tqdm_func(it, desc="Adding requests")
 
-        for i, prompt in enumerate(it):
-            if isinstance(prompt, dict):
-                self._validate_mm_data_and_uuids(
-                    prompt.get("multi_modal_data"), prompt.get("multi_modal_uuids")
-                )
+        added_request_ids: list[str] = []
 
-            self._add_request(
-                prompt,
-                params[i] if isinstance(params, Sequence) else params,
-                lora_request=lora_request[i]
-                if isinstance(lora_request, Sequence)
-                else lora_request,
-                priority=priority[i] if priority else 0,
-            )
+        try:
+            for i, prompt in enumerate(it):
+                if isinstance(prompt, dict):
+                    self._validate_mm_data_and_uuids(
+                        prompt.get("multi_modal_data"), prompt.get("multi_modal_uuids")
+                    )
+                request_id = self._add_request(
+                    prompt,
+                    params[i] if isinstance(params, Sequence) else params,
+                    lora_request=lora_request[i]
+                    if isinstance(lora_request, Sequence)
+                    else lora_request,
+                    priority=priority[i] if priority else 0,
+                )
+                added_request_ids.append(request_id)
+        except Exception as e:
+            if added_request_ids:
+                self.llm_engine.abort_request(added_request_ids)
+            raise e
 
     def _validate_mm_data_and_uuids(
         self,
@@ -1656,7 +1663,7 @@ def _add_request(
         params: SamplingParams | PoolingParams,
         lora_request: LoRARequest | None = None,
         priority: int = 0,
-    ) -> None:
+    ) -> str:
         prompt_text, _, _ = get_prompt_components(prompt)
         request_id = str(next(self.request_counter))
 
@@ -1677,6 +1684,7 @@ def _add_request(
             priority=priority,
             prompt_text=prompt_text,
         )
+        return request_id
 
     def _run_engine(
         self, *, use_tqdm: bool | Callable[..., tqdm] = True
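
The change above is a track-then-abort pattern: every successfully enqueued request id is recorded, and if a later prompt in the batch fails, the recorded ids are aborted before the exception is re-raised, so nothing from the failed call lingers in the engine queue. A standalone sketch of the same pattern, where engine.enqueue and engine.abort are hypothetical stand-ins for the real add_request/abort_request calls:

# Sketch of the cleanup pattern; enqueue/abort are hypothetical stand-ins.
def add_all_or_abort(engine, items) -> list[str]:
    added_ids: list[str] = []
    try:
        for item in items:
            added_ids.append(engine.enqueue(item))  # may raise on a bad item
    except Exception:
        if added_ids:
            engine.abort(added_ids)  # roll back everything enqueued so far
        raise
    return added_ids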
