Skip to content

Commit 05493d5

Browse files
committed
chore: deprecate ToolResponseMessage in agent.resume API
Summary: (empty)

Test Plan: (empty)
1 parent 59dddaf commit 05493d5

File tree

5 files changed

+17
-41
lines changed

5 files changed

+17
-41
lines changed

docs/_static/llama-stack-spec.html

Lines changed: 5 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -9490,21 +9490,11 @@
94909490
"type": "object",
94919491
"properties": {
94929492
"tool_responses": {
9493-
"oneOf": [
9494-
{
9495-
"type": "array",
9496-
"items": {
9497-
"$ref": "#/components/schemas/ToolResponse"
9498-
}
9499-
},
9500-
{
9501-
"type": "array",
9502-
"items": {
9503-
"$ref": "#/components/schemas/ToolResponseMessage"
9504-
}
9505-
}
9506-
],
9507-
"description": "The tool call responses to resume the turn with. NOTE: ToolResponseMessage will be deprecated. Use ToolResponse."
9493+
"type": "array",
9494+
"items": {
9495+
"$ref": "#/components/schemas/ToolResponse"
9496+
},
9497+
"description": "The tool call responses to resume the turn with."
95089498
},
95099499
"stream": {
95109500
"type": "boolean",

docs/_static/llama-stack-spec.yaml

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -6405,16 +6405,11 @@ components:
64056405
type: object
64066406
properties:
64076407
tool_responses:
6408-
oneOf:
6409-
- type: array
6410-
items:
6411-
$ref: '#/components/schemas/ToolResponse'
6412-
- type: array
6413-
items:
6414-
$ref: '#/components/schemas/ToolResponseMessage'
6408+
type: array
6409+
items:
6410+
$ref: '#/components/schemas/ToolResponse'
64156411
description: >-
6416-
The tool call responses to resume the turn with. NOTE: ToolResponseMessage
6417-
will be deprecated. Use ToolResponse.
6412+
The tool call responses to resume the turn with.
64186413
stream:
64196414
type: boolean
64206415
description: Whether to stream the response.

llama_stack/apis/agents/agents.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -370,7 +370,7 @@ class AgentTurnResumeRequest(BaseModel):
370370
agent_id: str
371371
session_id: str
372372
turn_id: str
373-
tool_responses: Union[List[ToolResponse], List[ToolResponseMessage]]
373+
tool_responses: List[ToolResponse]
374374
stream: Optional[bool] = False
375375

376376

@@ -449,7 +449,7 @@ async def resume_agent_turn(
449449
agent_id: str,
450450
session_id: str,
451451
turn_id: str,
452-
tool_responses: Union[List[ToolResponse], List[ToolResponseMessage]],
452+
tool_responses: List[ToolResponse],
453453
stream: Optional[bool] = False,
454454
) -> Union[Turn, AsyncIterator[AgentTurnResponseStreamChunk]]:
455455
"""Resume an agent turn with executed tool call responses.
@@ -460,7 +460,6 @@ async def resume_agent_turn(
460460
:param session_id: The ID of the session to resume.
461461
:param turn_id: The ID of the turn to resume.
462462
:param tool_responses: The tool call responses to resume the turn with.
463-
NOTE: ToolResponseMessage will be deprecated. Use ToolResponse.
464463
:param stream: Whether to stream the response.
465464
:returns: A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk objects.
466465
"""

llama_stack/providers/inline/agents/meta_reference/agent_instance.py

Lines changed: 5 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -218,18 +218,10 @@ async def _run_turn(
218218
steps = []
219219
messages = await self.get_messages_from_turns(turns)
220220
if is_resume:
221-
if isinstance(request.tool_responses[0], ToolResponseMessage):
222-
tool_response_messages = request.tool_responses
223-
tool_responses = [
224-
ToolResponse(call_id=x.call_id, tool_name=x.tool_name, content=x.content)
225-
for x in request.tool_responses
226-
]
227-
else:
228-
tool_response_messages = [
229-
ToolResponseMessage(call_id=x.call_id, tool_name=x.tool_name, content=x.content)
230-
for x in request.tool_responses
231-
]
232-
tool_responses = request.tool_responses
221+
tool_response_messages = [
222+
ToolResponseMessage(call_id=x.call_id, tool_name=x.tool_name, content=x.content)
223+
for x in request.tool_responses
224+
]
233225
messages.extend(tool_response_messages)
234226
last_turn = turns[-1]
235227
last_turn_messages = self.turn_to_messages(last_turn)
@@ -252,7 +244,7 @@ async def _run_turn(
252244
step_id=(in_progress_tool_call_step.step_id if in_progress_tool_call_step else str(uuid.uuid4())),
253245
turn_id=request.turn_id,
254246
tool_calls=(in_progress_tool_call_step.tool_calls if in_progress_tool_call_step else []),
255-
tool_responses=tool_responses,
247+
tool_responses=request.tool_responses,
256248
completed_at=now,
257249
started_at=(in_progress_tool_call_step.started_at if in_progress_tool_call_step else now),
258250
)

llama_stack/providers/inline/agents/meta_reference/agents.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -172,7 +172,7 @@ async def resume_agent_turn(
172172
agent_id: str,
173173
session_id: str,
174174
turn_id: str,
175-
tool_responses: Union[List[ToolResponse], List[ToolResponseMessage]],
175+
tool_responses: List[ToolResponse],
176176
stream: Optional[bool] = False,
177177
) -> AsyncGenerator:
178178
request = AgentTurnResumeRequest(

0 commit comments

Comments
 (0)