
Commit bcd17fa (1 parent: a81601a)

Fix #1564 Add conversations API support

15 files changed: +92 −17 lines
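
This commit threads a new conversation_id option from the public Runner entry points down to the OpenAI Responses API, so multi-turn state can live in a server-side stored conversation instead of being replayed from local input each turn. A minimal usage sketch of the new surface (the agent setup and the "conv_..." ID format are illustrative assumptions, not part of the diff):

import asyncio

from agents import Agent, Runner

agent = Agent(name="Assistant", instructions="Reply very concisely.")

async def main() -> None:
    # Assumed: a conversation created ahead of time (e.g. via the OpenAI
    # Conversations API); the "conv_..." value is a placeholder.
    conv_id = "conv_abc123"

    await Runner.run(agent, "What city is the Golden Gate Bridge in?",
                     conversation_id=conv_id)
    # The second turn can rely on the server-side conversation state
    # rather than re-sending the first turn's input and output.
    result = await Runner.run(agent, "What state is it in?",
                              conversation_id=conv_id)
    print(result.final_output)

asyncio.run(main())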

src/agents/extensions/models/litellm_model.py

Lines changed: 4 additions & 2 deletions
@@ -82,7 +82,8 @@ async def get_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: Any | None = None,
     ) -> ModelResponse:
         with generation_span(
@@ -171,7 +172,8 @@ async def stream_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: Any | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         with generation_span(

src/agents/models/interface.py

Lines changed: 4 additions & 0 deletions
@@ -48,6 +48,7 @@ async def get_response(
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt: ResponsePromptParam | None,
     ) -> ModelResponse:
         """Get a response from the model.
@@ -62,6 +63,7 @@ async def get_response(
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            conversation_id: The ID of the stored conversation, if any.
             prompt: The prompt config to use for the model.
 
         Returns:
@@ -81,6 +83,7 @@ def stream_response(
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt: ResponsePromptParam | None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """Stream a response from the model.
@@ -95,6 +98,7 @@ def stream_response(
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            conversation_id: The ID of the stored conversation, if any.
             prompt: The prompt config to use for the model.
 
         Returns:
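
Because conversation_id is added to the abstract interface as a required keyword with no default, any third-party Model implementation must now accept it, even if it ignores the value, as the Chat Completions and LiteLLM backends in this commit do. A minimal conforming stub; the class name and bodies are assumptions:

from collections.abc import AsyncIterator
from typing import Any

from agents.models.interface import Model


class NoopModel(Model):  # hypothetical custom backend
    async def get_response(self, system_instructions, input, model_settings,
                           tools, output_schema, handoffs, tracing, *,
                           previous_response_id: str | None,
                           conversation_id: str | None,  # new in this commit
                           prompt: Any | None) -> Any:
        # Backends without server-side conversation storage can simply
        # ignore conversation_id, mirroring the "# unused" markers elsewhere.
        raise NotImplementedError

    def stream_response(self, system_instructions, input, model_settings,
                        tools, output_schema, handoffs, tracing, *,
                        previous_response_id: str | None,
                        conversation_id: str | None,
                        prompt: Any | None) -> AsyncIterator[Any]:
        raise NotImplementedError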

src/agents/models/openai_chatcompletions.py

Lines changed: 4 additions & 2 deletions
@@ -55,7 +55,8 @@ async def get_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with generation_span(
@@ -142,7 +143,8 @@ async def stream_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """

src/agents/models/openai_responses.py

Lines changed: 14 additions & 5 deletions
@@ -75,7 +75,8 @@ async def get_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with response_span(disabled=tracing.is_disabled()) as span_response:
@@ -87,7 +88,8 @@ async def get_response(
                 tools,
                 output_schema,
                 handoffs,
-                previous_response_id,
+                previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 stream=False,
                 prompt=prompt,
             )
@@ -150,7 +152,8 @@ async def stream_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[ResponseStreamEvent]:
         """
@@ -165,7 +168,8 @@ async def stream_response(
                 tools,
                 output_schema,
                 handoffs,
-                previous_response_id,
+                previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 stream=True,
                 prompt=prompt,
             )
@@ -203,6 +207,7 @@ async def _fetch_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         previous_response_id: str | None,
+        conversation_id: str | None,
         stream: Literal[True],
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncStream[ResponseStreamEvent]: ...
@@ -217,6 +222,7 @@ async def _fetch_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         previous_response_id: str | None,
+        conversation_id: str | None,
         stream: Literal[False],
         prompt: ResponsePromptParam | None = None,
     ) -> Response: ...
@@ -229,7 +235,8 @@ async def _fetch_response(
         tools: list[Tool],
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         stream: Literal[True] | Literal[False] = False,
         prompt: ResponsePromptParam | None = None,
     ) -> Response | AsyncStream[ResponseStreamEvent]:
@@ -265,6 +272,7 @@ async def _fetch_response(
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
                 f"Previous response id: {previous_response_id}\n"
+                f"Conversation id: {conversation_id}\n"
             )
 
         extra_args = dict(model_settings.extra_args or {})
@@ -278,6 +286,7 @@ async def _fetch_response(
 
         return await self._client.responses.create(
             previous_response_id=self._non_null_or_not_given(previous_response_id),
+            conversation=self._non_null_or_not_given(conversation_id),
             instructions=self._non_null_or_not_given(system_instructions),
             model=self.model,
             input=list_input,
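
The only place the new value reaches the wire is here: conversation_id is forwarded as the conversation parameter of client.responses.create. A sketch of the equivalent raw call the model class now makes (the model name and conversation ID are placeholders):

from openai import AsyncOpenAI

client = AsyncOpenAI()

async def raw_equivalent() -> None:
    response = await client.responses.create(
        model="gpt-4.1",             # placeholder model name
        conversation="conv_abc123",  # what conversation_id is forwarded as
        input="What state is it in?",
    )
    print(response.output_text)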

src/agents/run.py

Lines changed: 27 additions & 0 deletions
@@ -208,6 +208,9 @@ class RunOptions(TypedDict, Generic[TContext]):
     previous_response_id: NotRequired[str | None]
     """The ID of the previous response, if any."""
 
+    conversation_id: NotRequired[str | None]
+    """The ID of the stored conversation, if any."""
+
     session: NotRequired[Session | None]
     """The session for the run."""
 
@@ -224,6 +227,7 @@ async def run(
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResult:
         """Run a workflow starting at the given agent. The agent will run in a loop until a final
@@ -248,6 +252,7 @@ async def run(
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The ID of the stored conversation, if any.
         Returns:
             A run result containing all the inputs, guardrail results and the output of the last
             agent. Agents may perform handoffs, so we don't know the specific type of the output.
@@ -261,6 +266,7 @@ async def run(
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )
 
@@ -275,6 +281,7 @@ def run_sync(
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResult:
         """Run a workflow synchronously, starting at the given agent. Note that this just wraps the
@@ -302,6 +309,7 @@ def run_sync(
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The ID of the stored conversation, if any.
         Returns:
             A run result containing all the inputs, guardrail results and the output of the last
             agent. Agents may perform handoffs, so we don't know the specific type of the output.
@@ -315,6 +323,7 @@ def run_sync(
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )
 
@@ -328,6 +337,7 @@ def run_streamed(
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResultStreaming:
         """Run a workflow starting at the given agent in streaming mode. The returned result object
@@ -353,6 +363,7 @@ def run_streamed(
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The ID of the stored conversation, if any.
         Returns:
             A result object that contains data about the run, as well as a method to stream events.
         """
@@ -365,6 +376,7 @@ def run_streamed(
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )
 
@@ -386,6 +398,7 @@ async def run(
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
        previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
         if hooks is None:
             hooks = RunHooks[Any]()
@@ -478,6 +491,7 @@ async def run(
                             should_run_agent_start_hooks=should_run_agent_start_hooks,
                             tool_use_tracker=tool_use_tracker,
                             previous_response_id=previous_response_id,
+                            conversation_id=conversation_id,
                         ),
                     )
                 else:
@@ -492,6 +506,7 @@ async def run(
                         should_run_agent_start_hooks=should_run_agent_start_hooks,
                         tool_use_tracker=tool_use_tracker,
                         previous_response_id=previous_response_id,
+                        conversation_id=conversation_id,
                     )
                 should_run_agent_start_hooks = False
 
@@ -558,6 +573,7 @@ def run_sync(
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
 
         return asyncio.get_event_loop().run_until_complete(
@@ -570,6 +586,7 @@ def run_sync(
                 hooks=hooks,
                 run_config=run_config,
                 previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
             )
         )
 
@@ -584,6 +601,7 @@ def run_streamed(
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
 
         if hooks is None:
@@ -638,6 +656,7 @@ def run_streamed(
                 context_wrapper=context_wrapper,
                 run_config=run_config,
                 previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 session=session,
             )
         )
@@ -738,6 +757,7 @@ async def _start_streaming(
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
         previous_response_id: str | None,
+        conversation_id: str | None,
         session: Session | None,
     ):
         if streamed_result.trace:
@@ -821,6 +841,7 @@ async def _start_streaming(
                         tool_use_tracker,
                         all_tools,
                         previous_response_id,
+                        conversation_id,
                     )
                     should_run_agent_start_hooks = False
 
@@ -923,6 +944,7 @@ async def _run_single_turn_streamed(
         tool_use_tracker: AgentToolUseTracker,
         all_tools: list[Tool],
         previous_response_id: str | None,
+        conversation_id: str | None,
     ) -> SingleStepResult:
         emitted_tool_call_ids: set[str] = set()
 
@@ -983,6 +1005,7 @@ async def _run_single_turn_streamed(
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             prompt=prompt_config,
         ):
             if isinstance(event, ResponseCompletedEvent):
@@ -1091,6 +1114,7 @@ async def _run_single_turn(
         should_run_agent_start_hooks: bool,
         tool_use_tracker: AgentToolUseTracker,
         previous_response_id: str | None,
+        conversation_id: str | None,
     ) -> SingleStepResult:
         # Ensure we run the hooks before anything else
         if should_run_agent_start_hooks:
@@ -1124,6 +1148,7 @@ async def _run_single_turn(
             run_config,
             tool_use_tracker,
             previous_response_id,
+            conversation_id,
             prompt_config,
         )
 
@@ -1318,6 +1343,7 @@ async def _get_new_response(
         run_config: RunConfig,
         tool_use_tracker: AgentToolUseTracker,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt_config: ResponsePromptParam | None,
     ) -> ModelResponse:
         # Allow user to modify model input right before the call, if configured
@@ -1352,6 +1378,7 @@ async def _get_new_response(
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             prompt=prompt_config,
         )
         # If the agent has hooks, we need to call them after the LLM call
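
Streaming runs accept the same option, both as a Runner.run_streamed keyword and as a RunOptions key. A short sketch under the same assumptions as the first example above:

from agents import Agent, Runner

agent = Agent(name="Assistant", instructions="Reply very concisely.")

async def stream_demo() -> None:
    result = Runner.run_streamed(agent, "And the population?",
                                 conversation_id="conv_abc123")
    async for _event in result.stream_events():
        pass  # inspect streamed events here as needed
    print(result.final_output)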

tests/fake_model.py

Lines changed: 6 additions & 2 deletions
@@ -61,6 +61,7 @@ async def get_response(
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt: Any | None,
     ) -> ModelResponse:
         self.last_turn_args = {
@@ -70,6 +71,7 @@ async def get_response(
             "tools": tools,
             "output_schema": output_schema,
             "previous_response_id": previous_response_id,
+            "conversation_id": conversation_id,
         }
 
         with generation_span(disabled=not self.tracing_enabled) as span:
@@ -103,8 +105,9 @@ async def stream_response(
         handoffs: list[Handoff],
         tracing: ModelTracing,
         *,
-        previous_response_id: str | None,
-        prompt: Any | None,
+        previous_response_id: str | None = None,
+        conversation_id: str | None = None,
+        prompt: Any | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         self.last_turn_args = {
             "system_instructions": system_instructions,
@@ -113,6 +116,7 @@ async def stream_response(
             "tools": tools,
             "output_schema": output_schema,
             "previous_response_id": previous_response_id,
+            "conversation_id": conversation_id,
         }
         with generation_span(disabled=not self.tracing_enabled) as span:
             output = self.get_next_output()
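
Since FakeModel now records the keyword in last_turn_args, a test can assert that the option survives the whole run loop. A hedged sketch; get_text_message is assumed to be this suite's existing helper for fabricating a text output item:

import pytest

from agents import Agent, Runner
from tests.fake_model import FakeModel
from tests.test_responses import get_text_message  # assumed existing helper


@pytest.mark.asyncio
async def test_conversation_id_is_forwarded():
    model = FakeModel()
    model.set_next_output([get_text_message("done")])
    agent = Agent(name="test", model=model)

    await Runner.run(agent, "hi", conversation_id="conv_test")

    # FakeModel.get_response captured the keyword in last_turn_args.
    assert model.last_turn_args["conversation_id"] == "conv_test"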

tests/models/test_kwargs_functionality.py

Lines changed: 1 addition & 0 deletions
@@ -47,6 +47,7 @@ async def fake_acompletion(model, messages=None, **kwargs):
         handoffs=[],
         tracing=ModelTracing.DISABLED,
         previous_response_id=None,
+        conversation_id=None,
     )
 
     # Verify that all kwargs were passed through
