Skip to content

Commit ace4bae

Browse files
author
liuww
committed
Run pre-commit to check the updated code
Signed-off-by: liuww <liuww@superred.com.cn>
1 parent 3c3582d commit ace4bae

File tree

3 files changed

+34
-15
lines changed

3 files changed

+34
-15
lines changed

requirements-test.txt

Lines changed: 19 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,10 @@ anyio==4.6.2.post1
2323
# via httpx
2424
argcomplete==3.5.1
2525
# via datamodel-code-generator
26+
async-timeout==4.0.3
27+
# via
28+
# aiohttp
29+
# redis
2630
attrs==24.2.0
2731
# via
2832
# aiohttp
@@ -116,6 +120,10 @@ encodec==0.1.1
116120
# via vocos
117121
evaluate==0.4.3
118122
# via lm-eval
123+
exceptiongroup==1.2.2
124+
# via
125+
# anyio
126+
# pytest
119127
fastparquet==2024.11.0
120128
# via genai-perf
121129
fastrlock==0.8.2
@@ -544,9 +552,7 @@ sentence-transformers==3.2.1
544552
sentencepiece==0.2.0
545553
# via mistral-common
546554
setuptools==75.8.0
547-
# via
548-
# pytablewriter
549-
# torch
555+
# via pytablewriter
550556
six==1.16.0
551557
# via
552558
# python-dateutil
@@ -591,6 +597,12 @@ timm==1.0.11
591597
# via -r requirements-test.in
592598
tokenizers==0.21.0
593599
# via transformers
600+
toml==0.10.2
601+
# via datamodel-code-generator
602+
tomli==2.2.1
603+
# via
604+
# black
605+
# pytest
594606
torch==2.5.1
595607
# via
596608
# -r requirements-test.in
@@ -651,13 +663,17 @@ typepy==1.3.2
651663
# tabledata
652664
typing-extensions==4.12.2
653665
# via
666+
# anyio
654667
# bitsandbytes
668+
# black
655669
# huggingface-hub
656670
# librosa
657671
# mistral-common
672+
# multidict
658673
# pqdm
659674
# pydantic
660675
# pydantic-core
676+
# rich
661677
# torch
662678
tzdata==2024.2
663679
# via pandas

tests/entrypoints/openai/test_chat.py

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -655,9 +655,11 @@ async def test_named_tool_use(client: openai.AsyncOpenAI,
655655
"role": "system",
656656
"content": "you are a helpful assistant"
657657
}, {
658-
"role": "user",
659-
"content":f"Give an example JSON for an employee profile that "
660-
f"fits this schema: {sample_json_schema}"
658+
"role":
659+
"user",
660+
"content":
661+
f"Give an example JSON for an employee profile that "
662+
f"fits this schema: {sample_json_schema}"
661663
}]
662664

663665
# non-streaming
@@ -689,8 +691,10 @@ async def test_named_tool_use(client: openai.AsyncOpenAI,
689691

690692
messages.append({"role": "assistant", "content": json_string})
691693
messages.append({
692-
"role": "user",
693-
"content": "Give me another one with a different name and age"
694+
"role":
695+
"user",
696+
"content":
697+
"Give me another one with a different name and age"
694698
})
695699

696700
# streaming

vllm/entrypoints/openai/serving_chat.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -20,16 +20,14 @@
2020
ChatCompletionRequest, ChatCompletionResponse,
2121
ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,
2222
ChatCompletionStreamResponse, ChatMessage, DeltaFunctionCall, DeltaMessage,
23-
DeltaToolCall, ErrorResponse, FunctionCall, PromptTokenUsageInfo,
24-
RequestResponseMetadata, ToolCall, UsageInfo)
23+
DeltaToolCall, ErrorResponse, PromptTokenUsageInfo,
24+
RequestResponseMetadata, UsageInfo)
2525
from vllm.entrypoints.openai.reasoning_parsers import (ReasoningParser,
2626
ReasoningParserManager)
2727
from vllm.entrypoints.openai.serving_engine import (OpenAIServing,
2828
clamp_prompt_logprobs)
2929
from vllm.entrypoints.openai.serving_models import OpenAIServingModels
3030
from vllm.entrypoints.openai.tool_parsers import ToolParser, ToolParserManager
31-
from vllm.entrypoints.openai.tool_parsers.mistral_tool_parser import (
32-
MistralToolCall)
3331
from vllm.logger import init_logger
3432
from vllm.outputs import CompletionOutput, RequestOutput
3533
from vllm.sampling_params import BeamSearchParams, SamplingParams
@@ -337,7 +335,8 @@ async def chat_completion_stream_generator(
337335
# Prepare the tool parser if it's needed
338336
try:
339337
use_auto_tool = tool_choice_auto and self.tool_parser
340-
use_choice_tool = request.tool_choice and type(request.tool_choice) is ChatCompletionNamedToolChoiceParam
338+
use_choice_tool = request.tool_choice and type(
339+
request.tool_choice) is ChatCompletionNamedToolChoiceParam
341340
if use_auto_tool or use_choice_tool:
342341
tool_parsers: list[Optional[ToolParser]] = [
343342
self.tool_parser(tokenizer)
@@ -758,13 +757,13 @@ async def chat_completion_full_generator(
758757
logger.exception("Error in tool parser creation.")
759758
return self.create_error_response(str(e))
760759

761-
tool_call_info = tool_parser.extract_tool_calls(output.text, request=request)
760+
tool_call_info = tool_parser.extract_tool_calls(
761+
output.text, request=request)
762762

763763
message = ChatMessage(role=role,
764764
content=tool_call_info.content,
765765
tool_calls=tool_call_info.tool_calls)
766766

767-
768767
# if the request doesn't use tool choice
769768
# OR specifies to not use a tool
770769
elif not request.tool_choice or request.tool_choice == "none":

0 commit comments

Comments
 (0)