68 changes: 68 additions & 0 deletions src/agents/models/openai_responses.py
@@ -2,6 +2,7 @@

import json
from collections.abc import AsyncIterator
import secrets
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Literal, cast, overload

@@ -241,6 +242,73 @@ async def _fetch_response(
    ) -> Response | AsyncStream[ResponseStreamEvent]:
        list_input = ItemHelpers.input_to_new_input_list(input)

        # --- Defensive normalization for reasoning items
        # Server requires: every reasoning item must be immediately followed by an assistant
        # message. We preserve all reasoning items (needed for references) and, when the next
        # item is NOT a message, we synthesize a minimal placeholder assistant message.
        # This prevents 400 errors like:
        # "Item '<id>' of type 'reasoning' was provided without its required following item."
        # and also preserves required reasoning when a subsequent function_call references it.
        def _ensure_reasoning_followed(seq: list[dict[str, Any]]) -> list[dict[str, Any]]:
            """Ensure each reasoning item is immediately followed by an allowed follower.

            Allowed followers (no placeholder inserted):
            - message (assistant response text)
            - function_call (the model decided to call a tool directly)
            - code_interpreter_call (direct code interpreter invocation)

            We only synthesize a placeholder assistant message when the next item is
            missing or is NOT one of the allowed follower types. This preserves the
            original adjacency requirements enforced by the Responses API.
            """
            existing_ids = {d.get("id") for d in seq if isinstance(d, dict)}
            allowed_followers = {"message", "function_call", "code_interpreter_call"}
            out: list[dict[str, Any]] = []
            for idx, item in enumerate(seq):
                out.append(item)
                if not isinstance(item, dict) or item.get("type") != "reasoning":
                    continue
                nxt = seq[idx + 1] if idx + 1 < len(seq) else None
                if isinstance(nxt, dict) and nxt.get("type") in allowed_followers:
                    continue  # already satisfied by allowed follower
                # Insert placeholder assistant message (safe follower)
                placeholder_id = None
                for _ in range(5):
                    cand = f"msg_{secrets.token_hex(24)}"
                    if cand not in existing_ids:
                        placeholder_id = cand
                        existing_ids.add(cand)
                        break
                if not placeholder_id:
                    placeholder_id = "msg_placeholder"
Comment on lines +276 to +283

Copilot AI Aug 27, 2025

The magic number 5 for retry attempts should be defined as a named constant (e.g., MAX_ID_GENERATION_ATTEMPTS = 5) to improve code readability and maintainability.
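
A minimal sketch of the refactor this comment suggests, with the retry loop pulled into a small helper for illustration; the names MAX_ID_GENERATION_ATTEMPTS and _new_placeholder_id are hypothetical and not part of the PR:

import secrets

# Hypothetical module-level constant, per the review suggestion, replacing the bare 5.
MAX_ID_GENERATION_ATTEMPTS = 5


def _new_placeholder_id(existing_ids: set[str | None]) -> str:
    # Same retry logic as in _ensure_reasoning_followed, with the magic number named.
    for _ in range(MAX_ID_GENERATION_ATTEMPTS):
        cand = f"msg_{secrets.token_hex(24)}"
        if cand not in existing_ids:
            existing_ids.add(cand)
            return cand
    return "msg_placeholder"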

                out.append(
                    {
                        "id": placeholder_id,
                        "type": "message",
                        "role": "assistant",
                        "status": "completed",
                        "content": [
                            {
                                "type": "output_text",
                                "text": "(placeholder – reasoning context)",
                                "annotations": [],
                            }
                        ],
                    }
                )
            return out

        try:
            # list_input is List[TResponseInputItem]; we only mutate dict entries.
            list_input = [dict(x) if isinstance(x, dict) else x for x in list_input]  # shallow copy
            # Only fix if there exists at least one reasoning item.
            if any(isinstance(x, dict) and x.get("type") == "reasoning" for x in list_input):
                dict_seq = [x for x in list_input if isinstance(x, dict)]
                fixed = _ensure_reasoning_followed(dict_seq)
                list_input = fixed  # type: ignore[assignment]
Comment on lines +306 to +308

Copilot AI Aug 27, 2025

This logic filters out non-dict items from list_input but then replaces the entire list with only the dict items, potentially losing non-dict entries that should be preserved in the original sequence.

Suggested change
-                dict_seq = [x for x in list_input if isinstance(x, dict)]
-                fixed = _ensure_reasoning_followed(dict_seq)
-                list_input = fixed  # type: ignore[assignment]
+                list_input = _ensure_reasoning_followed(list_input)  # type: ignore[assignment]
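
The suggested change is safe because _ensure_reasoning_followed already guards every access with isinstance checks, so non-dict entries pass through in their original positions. A minimal sketch with hypothetical sample items, treating the nested helper as if it were callable directly:

# Hypothetical mixed input: a reasoning dict, an opaque non-dict entry, and a user message.
sample = [
    {"id": "rs_1", "type": "reasoning", "summary": []},
    "opaque-non-dict-item",
    {"type": "message", "role": "user", "content": "hi"},
]
fixed = _ensure_reasoning_followed(sample)
# The reasoning item's follower is not an allowed type, so a placeholder assistant message
# is inserted right after it; the non-dict entry and the user message keep their order.
assert fixed[1]["type"] == "message" and fixed[1]["role"] == "assistant"
assert fixed[2] == "opaque-non-dict-item"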

        except Exception as _norm_exc:  # fail-open
            logger.debug(f"Reasoning normalization skipped due to error: {_norm_exc}")

        parallel_tool_calls = (
            True
            if model_settings.parallel_tool_calls and tools and len(tools) > 0
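
A short illustration of the "allowed follower" case the new helper preserves, with hypothetical item fields and the nested helper treated as directly callable: when a reasoning item is already followed by a function_call, no placeholder is inserted, so the reasoning/function_call adjacency the code comment mentions stays intact.

# Hypothetical items: reasoning immediately followed by a function_call (an allowed follower).
items = [
    {"id": "rs_1", "type": "reasoning", "summary": []},
    {"id": "fc_1", "type": "function_call", "name": "get_weather", "arguments": "{}", "call_id": "call_1"},
]
# No placeholder is synthesized; the output equals the input.
assert _ensure_reasoning_followed(items) == items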