-
Notifications
You must be signed in to change notification settings - Fork 1.4k
Closed
Labels
bug: Something isn't working
Description
Initial Checks
- I confirm that I'm using the latest version of Pydantic AI
- I confirm that I searched for my issue in https://github.com/pydantic/pydantic-ai/issues before opening this issue
Description
Related: #2360
@DouweM I am afraid #2365 is not enough to fix the issue.
The cause:
If the model issues parallel tool calls to both a tool A (whose return value is a ToolReturn) and a deferred tool B, then
when the agent exits, the last parts of the message history are:
-2: ToolReturnPart (A.return_value)
-1: UserPromptPart (A.content)
This ordering breaks most LLMs' expected message order, as the tool return for the deferred tool call has not yet been added.
Example Code
# Modified from https://ai.pydantic.dev/toolsets/#deferred-toolset
# `get_user_name` return value is modified
# prompt is modified to force a parallel tool call
import pprint
from pydantic import BaseModel
from pydantic_ai import Agent
from pydantic_ai.messages import (
ModelMessage,
ModelRequest,
RetryPromptPart,
ToolReturn,
ToolReturnPart,
UserPromptPart,
)
from pydantic_ai.output import DeferredToolCalls
from pydantic_ai.tools import ToolDefinition
from pydantic_ai.toolsets import DeferredToolset
from pydantic_ai.toolsets.function import FunctionToolset
# Backend-side tools the agent can execute directly.
toolset = FunctionToolset()


@toolset.tool
def get_default_language():
    """Return the default language code used when no preference is known."""
    fallback_language = "en-US"
    return fallback_language
@toolset.tool
def get_user_name():
    """Look up the current user's name.

    Returns a ToolReturn so the model receives both a structured return
    value and separate user-visible content.
    """
    user_name = "David"
    return ToolReturn(
        content="The user's name is David",
        return_value=user_name,
        metadata=user_name,
    )
# Structured final output the agent must produce.
# NOTE: comments (not a docstring) on purpose — a class docstring would be
# emitted as the schema description and change what the model sees.
class PersonalizedGreeting(BaseModel):
    greeting: str  # the greeting text itself
    language_code: str  # language code, e.g. "en-US" / "es-MX"
# Agent with structured output; the deferred (frontend) toolset is layered on per run.
agent = Agent("anthropic:claude-sonnet-4-0", toolsets=[toolset], output_type=PersonalizedGreeting)
def run_agent(
    messages: list[ModelMessage] | None = None,
    frontend_tools: list[ToolDefinition] | None = None,
) -> tuple[PersonalizedGreeting | DeferredToolCalls, list[ModelMessage]]:
    """Run one agent turn, letting the model defer to frontend tools.

    Args:
        messages: Prior message history; defaults to an empty history.
        frontend_tools: Definitions of tools executed on the frontend;
            defaults to no frontend tools.

    Returns:
        The run output (either the final structured output or the deferred
        tool calls) and the new messages produced by this run.
    """
    # Fix: the original used mutable default arguments — `messages=[]`
    # (shared across calls) and, worse, `frontend_tools={}`, a dict default
    # on a list-typed parameter. Use None sentinels instead.
    if messages is None:
        messages = []
    if frontend_tools is None:
        frontend_tools = []
    deferred_toolset = DeferredToolset(frontend_tools)
    result = agent.run_sync(
        toolsets=[deferred_toolset],
        output_type=[agent.output_type, DeferredToolCalls],
        message_history=messages,
    )
    return result.output, result.new_messages()
# Definitions of tools that run on the frontend (deferred execution):
# the agent only sees their schema, not an implementation.
frontend_tool_definitions = [
    ToolDefinition(
        name="get_preferred_language",
        description="Get the user's preferred language from their browser",
        parameters_json_schema={
            "type": "object",
            "properties": {"default_language": {"type": "string"}},
        },
    )
]
def get_preferred_language(default_language: str) -> str:
    """Simulated frontend tool: return the user's preferred language.

    The ``default_language`` argument is accepted to match the tool's
    schema but is not consulted here.
    """
    preferred = "es-MX"
    return preferred
# Map frontend tool names to their local (simulated) implementations.
frontend_tool_functions = {"get_preferred_language": get_preferred_language}

# Seed history: the prompt deliberately forces a parallel call of a
# deferred (frontend) tool and a backend tool, which triggers the bug.
messages: list[ModelMessage] = [
    ModelRequest(
        parts=[UserPromptPart(content="Call get_preferred_language and get_user_name in parallel")]
    )
]
# Driver loop: keep running the agent until it produces a final structured
# output, executing any deferred (frontend) tool calls in between runs.
final_output = None
while True:
    output, new_messages = run_agent(messages, frontend_tool_definitions)
    # Accumulate the full conversation so the next run sees prior turns.
    messages += new_messages
    if not isinstance(output, DeferredToolCalls):
        # Final structured output reached — stop iterating.
        final_output = output
        break
    print(output.tool_calls)
    for tool_call in output.tool_calls:
        # Execute the deferred call locally and feed the result back as a
        # ToolReturnPart; unknown tools get a RetryPromptPart instead.
        if function := frontend_tool_functions.get(tool_call.tool_name):
            part = ToolReturnPart(
                tool_name=tool_call.tool_name,
                content=function(**tool_call.args_as_dict()),
                tool_call_id=tool_call.tool_call_id,
            )
        else:
            part = RetryPromptPart(
                tool_name=tool_call.tool_name,
                content=f"Unknown tool {tool_call.tool_name!r}",
                tool_call_id=tool_call.tool_call_id,
            )
        messages.append(ModelRequest(parts=[part]))
print("Current messages:")
pprint.pprint(messages)
print(repr(final_output))

Python, Pydantic AI & LLM client version
latest
Metadata
Metadata
Assignees
Labels
bug: Something isn't working