Skip to content

Commit c9b7549

Browse files
chaunceyjiang and ilmarkov
authored and committed
[CI] Reorganize entrypoints tests (vllm-project#27403)
Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com>
1 parent 96df85c commit c9b7549

File tree

2 files changed

+141
-139
lines changed

2 files changed

+141
-139
lines changed

tests/entrypoints/openai/test_chat.py

Lines changed: 0 additions & 139 deletions
Original file line numberDiff line numberDiff line change
@@ -599,145 +599,6 @@ async def test_structured_outputs_choice_chat_logprobs(
599599
assert item.logprob >= -9999.0, f"Failed (top_logprobs={top_logprobs})"
600600

601601

602-
@pytest.mark.asyncio
603-
async def test_named_tool_use(
604-
client: openai.AsyncOpenAI,
605-
sample_json_schema,
606-
):
607-
messages = [
608-
{"role": "system", "content": "you are a helpful assistant"},
609-
{
610-
"role": "user",
611-
"content": (
612-
"Give an example JSON for an employee profile using the specified tool."
613-
),
614-
},
615-
]
616-
tools = [
617-
{
618-
"type": "function",
619-
"function": {
620-
"name": "dummy_function_name",
621-
"description": "This is a dummy function",
622-
"parameters": sample_json_schema,
623-
},
624-
}
625-
]
626-
tool_choice = {"type": "function", "function": {"name": "dummy_function_name"}}
627-
628-
# non-streaming
629-
630-
chat_completion = await client.chat.completions.create(
631-
model=MODEL_NAME,
632-
messages=messages,
633-
max_completion_tokens=1000,
634-
tools=tools,
635-
tool_choice=tool_choice,
636-
)
637-
message = chat_completion.choices[0].message
638-
assert len(message.content) == 0
639-
json_string = message.tool_calls[0].function.arguments
640-
json1 = json.loads(json_string)
641-
jsonschema.validate(instance=json1, schema=sample_json_schema)
642-
643-
messages.append({"role": "assistant", "content": json_string})
644-
messages.append(
645-
{"role": "user", "content": "Give me another one with a different name and age"}
646-
)
647-
648-
# streaming
649-
650-
stream = await client.chat.completions.create(
651-
model=MODEL_NAME,
652-
messages=messages,
653-
max_completion_tokens=1000,
654-
tools=tools,
655-
tool_choice=tool_choice,
656-
stream=True,
657-
)
658-
659-
output = []
660-
finish_reason_count = 0
661-
async for chunk in stream:
662-
delta = chunk.choices[0].delta
663-
if delta.role:
664-
assert delta.role == "assistant"
665-
assert delta.content is None or len(delta.content) == 0
666-
if delta.tool_calls:
667-
output.append(delta.tool_calls[0].function.arguments)
668-
if chunk.choices[0].finish_reason is not None:
669-
finish_reason_count += 1
670-
# finish reason should only return in last block
671-
assert finish_reason_count == 1
672-
json2 = json.loads("".join(output))
673-
jsonschema.validate(instance=json2, schema=sample_json_schema)
674-
assert json1["name"] != json2["name"]
675-
assert json1["age"] != json2["age"]
676-
677-
678-
@pytest.mark.asyncio
679-
async def test_inconsistent_tool_choice_and_tools(
680-
client: openai.AsyncOpenAI, sample_json_schema
681-
):
682-
messages = [
683-
{"role": "system", "content": "you are a helpful assistant"},
684-
{
685-
"role": "user",
686-
"content": f"Give an example JSON for an employee profile that "
687-
f"fits this schema: {sample_json_schema}",
688-
},
689-
]
690-
691-
with pytest.raises(openai.BadRequestError):
692-
await client.chat.completions.create(
693-
model=MODEL_NAME,
694-
messages=messages,
695-
max_completion_tokens=1000,
696-
tool_choice={
697-
"type": "function",
698-
"function": {"name": "dummy_function_name"},
699-
},
700-
)
701-
702-
with pytest.raises(openai.BadRequestError):
703-
await client.chat.completions.create(
704-
model=MODEL_NAME,
705-
messages=messages,
706-
max_completion_tokens=1000,
707-
tools=[
708-
{
709-
"type": "function",
710-
"function": {
711-
"name": "dummy_function_name",
712-
"description": "This is a dummy function",
713-
"parameters": sample_json_schema,
714-
},
715-
}
716-
],
717-
tool_choice={
718-
"type": "function",
719-
"function": {"name": "nondefined_function_name"},
720-
},
721-
)
722-
with pytest.raises(openai.BadRequestError):
723-
await client.chat.completions.create(
724-
model=MODEL_NAME,
725-
messages=messages,
726-
max_completion_tokens=1000,
727-
tools=[
728-
{
729-
"type": "function",
730-
"function": {
731-
"name": "dummy_function_name",
732-
"description": "This is a dummy function",
733-
"parameters": sample_json_schema,
734-
},
735-
}
736-
],
737-
tool_choice={},
738-
)
739-
740-
741602
@pytest.mark.asyncio
742603
async def test_response_format_json_object(client: openai.AsyncOpenAI):
743604
for _ in range(2):

tests/entrypoints/openai/test_completion_with_function_calling.py

Lines changed: 141 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,9 @@
22
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
33

44
import datetime
5+
import json
56

7+
import jsonschema
68
import openai # use the official client for correctness check
79
import pytest
810
import pytest_asyncio
@@ -341,3 +343,142 @@ async def test_no_args_tool_call(
341343
else:
342344
# No tool called — just print model's direct reply
343345
assert message.content is not None
346+
347+
348+
@pytest.mark.asyncio
349+
async def test_named_tool_use(
350+
client: openai.AsyncOpenAI,
351+
sample_json_schema,
352+
):
353+
messages = [
354+
{"role": "system", "content": "you are a helpful assistant"},
355+
{
356+
"role": "user",
357+
"content": (
358+
"Give an example JSON for an employee profile using the specified tool."
359+
),
360+
},
361+
]
362+
tools = [
363+
{
364+
"type": "function",
365+
"function": {
366+
"name": "dummy_function_name",
367+
"description": "This is a dummy function",
368+
"parameters": sample_json_schema,
369+
},
370+
}
371+
]
372+
tool_choice = {"type": "function", "function": {"name": "dummy_function_name"}}
373+
374+
# non-streaming
375+
376+
chat_completion = await client.chat.completions.create(
377+
model=MODEL_NAME,
378+
messages=messages,
379+
max_completion_tokens=1000,
380+
tools=tools,
381+
tool_choice=tool_choice,
382+
)
383+
message = chat_completion.choices[0].message
384+
assert len(message.content) == 0
385+
json_string = message.tool_calls[0].function.arguments
386+
json1 = json.loads(json_string)
387+
jsonschema.validate(instance=json1, schema=sample_json_schema)
388+
389+
messages.append({"role": "assistant", "content": json_string})
390+
messages.append(
391+
{"role": "user", "content": "Give me another one with a different name and age"}
392+
)
393+
394+
# streaming
395+
396+
stream = await client.chat.completions.create(
397+
model=MODEL_NAME,
398+
messages=messages,
399+
max_completion_tokens=1000,
400+
tools=tools,
401+
tool_choice=tool_choice,
402+
stream=True,
403+
)
404+
405+
output = []
406+
finish_reason_count = 0
407+
async for chunk in stream:
408+
delta = chunk.choices[0].delta
409+
if delta.role:
410+
assert delta.role == "assistant"
411+
assert delta.content is None or len(delta.content) == 0
412+
if delta.tool_calls:
413+
output.append(delta.tool_calls[0].function.arguments)
414+
if chunk.choices[0].finish_reason is not None:
415+
finish_reason_count += 1
416+
# finish reason should only return in last block
417+
assert finish_reason_count == 1
418+
json2 = json.loads("".join(output))
419+
jsonschema.validate(instance=json2, schema=sample_json_schema)
420+
assert json1["name"] != json2["name"]
421+
assert json1["age"] != json2["age"]
422+
423+
424+
@pytest.mark.asyncio
425+
async def test_inconsistent_tool_choice_and_tools(
426+
client: openai.AsyncOpenAI, sample_json_schema
427+
):
428+
messages = [
429+
{"role": "system", "content": "you are a helpful assistant"},
430+
{
431+
"role": "user",
432+
"content": f"Give an example JSON for an employee profile that "
433+
f"fits this schema: {sample_json_schema}",
434+
},
435+
]
436+
437+
with pytest.raises(openai.BadRequestError):
438+
await client.chat.completions.create(
439+
model=MODEL_NAME,
440+
messages=messages,
441+
max_completion_tokens=1000,
442+
tool_choice={
443+
"type": "function",
444+
"function": {"name": "dummy_function_name"},
445+
},
446+
)
447+
448+
with pytest.raises(openai.BadRequestError):
449+
await client.chat.completions.create(
450+
model=MODEL_NAME,
451+
messages=messages,
452+
max_completion_tokens=1000,
453+
tools=[
454+
{
455+
"type": "function",
456+
"function": {
457+
"name": "dummy_function_name",
458+
"description": "This is a dummy function",
459+
"parameters": sample_json_schema,
460+
},
461+
}
462+
],
463+
tool_choice={
464+
"type": "function",
465+
"function": {"name": "nondefined_function_name"},
466+
},
467+
)
468+
with pytest.raises(openai.BadRequestError):
469+
await client.chat.completions.create(
470+
model=MODEL_NAME,
471+
messages=messages,
472+
max_completion_tokens=1000,
473+
tools=[
474+
{
475+
"type": "function",
476+
"function": {
477+
"name": "dummy_function_name",
478+
"description": "This is a dummy function",
479+
"parameters": sample_json_schema,
480+
},
481+
}
482+
],
483+
tool_choice={},
484+
)

0 commit comments

Comments (0)