Skip to content

Commit 8febb3d

Browse files
committed
chore: deprecate ToolResponseMessage in agent.resume API
Summary: Test Plan:
1 parent d33b8ea commit 8febb3d

File tree

3 files changed

+8
-17
lines changed

3 files changed

+8
-17
lines changed

llama_stack/apis/agents/agents.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -353,7 +353,7 @@ class AgentTurnResumeRequest(BaseModel):
353353
agent_id: str
354354
session_id: str
355355
turn_id: str
356-
tool_responses: Union[List[ToolResponse], List[ToolResponseMessage]]
356+
tool_responses: List[ToolResponse]
357357
stream: Optional[bool] = False
358358

359359

@@ -432,7 +432,7 @@ async def resume_agent_turn(
432432
agent_id: str,
433433
session_id: str,
434434
turn_id: str,
435-
tool_responses: Union[List[ToolResponse], List[ToolResponseMessage]],
435+
tool_responses: List[ToolResponse],
436436
stream: Optional[bool] = False,
437437
) -> Union[Turn, AsyncIterator[AgentTurnResponseStreamChunk]]:
438438
"""Resume an agent turn with executed tool call responses.
@@ -443,7 +443,6 @@ async def resume_agent_turn(
443443
:param session_id: The ID of the session to resume.
444444
:param turn_id: The ID of the turn to resume.
445445
:param tool_responses: The tool call responses to resume the turn with.
446-
NOTE: ToolResponseMessage will be deprecated. Use ToolResponse.
447446
:param stream: Whether to stream the response.
448447
:returns: A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk objects.
449448
"""

llama_stack/providers/inline/agents/meta_reference/agent_instance.py

Lines changed: 5 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -218,18 +218,10 @@ async def _run_turn(
218218
steps = []
219219
messages = await self.get_messages_from_turns(turns)
220220
if is_resume:
221-
if isinstance(request.tool_responses[0], ToolResponseMessage):
222-
tool_response_messages = request.tool_responses
223-
tool_responses = [
224-
ToolResponse(call_id=x.call_id, tool_name=x.tool_name, content=x.content)
225-
for x in request.tool_responses
226-
]
227-
else:
228-
tool_response_messages = [
229-
ToolResponseMessage(call_id=x.call_id, tool_name=x.tool_name, content=x.content)
230-
for x in request.tool_responses
231-
]
232-
tool_responses = request.tool_responses
221+
tool_response_messages = [
222+
ToolResponseMessage(call_id=x.call_id, tool_name=x.tool_name, content=x.content)
223+
for x in request.tool_responses
224+
]
233225
messages.extend(tool_response_messages)
234226
last_turn = turns[-1]
235227
last_turn_messages = self.turn_to_messages(last_turn)
@@ -252,7 +244,7 @@ async def _run_turn(
252244
step_id=(in_progress_tool_call_step.step_id if in_progress_tool_call_step else str(uuid.uuid4())),
253245
turn_id=request.turn_id,
254246
tool_calls=(in_progress_tool_call_step.tool_calls if in_progress_tool_call_step else []),
255-
tool_responses=tool_responses,
247+
tool_responses=request.tool_responses,
256248
completed_at=now,
257249
started_at=(in_progress_tool_call_step.started_at if in_progress_tool_call_step else now),
258250
)

llama_stack/providers/inline/agents/meta_reference/agents.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ async def resume_agent_turn(
169169
agent_id: str,
170170
session_id: str,
171171
turn_id: str,
172-
tool_responses: Union[List[ToolResponse], List[ToolResponseMessage]],
172+
tool_responses: List[ToolResponse],
173173
stream: Optional[bool] = False,
174174
) -> AsyncGenerator:
175175
request = AgentTurnResumeRequest(

0 commit comments

Comments (0)