@@ -858,6 +858,7 @@ def _parse_chunk_from_openai(
        metadata: dict[str, Any] = {}
        contents: list[Contents] = []
        conversation_id: str | None = None
        response_id: str | None = None
        model = self.model_id
        # TODO(peterychang): Add support for other content types
        match event.type:
@@ -940,7 +941,14 @@ def _parse_chunk_from_openai(
case "response.reasoning_summary_text.done":
contents.append(TextReasoningContent(text=event.text, raw_representation=event))
metadata.update(self._get_metadata_from_response(event))
case "response.created":
response_id = event.response.id
conversation_id = self._get_conversation_id(event.response, chat_options.store)
case "response.in_progress":
response_id = event.response.id
conversation_id = self._get_conversation_id(event.response, chat_options.store)
case "response.completed":
response_id = event.response.id
conversation_id = self._get_conversation_id(event.response, chat_options.store)
model = event.response.model
if event.response.usage:
@@ -1106,6 +1114,7 @@ def _get_ann_value(key: str) -> Any:
        return ChatResponseUpdate(
            contents=contents,
            conversation_id=conversation_id,
            response_id=response_id,
            role=Role.ASSISTANT,
            model_id=model,
            additional_properties=metadata,
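
With `response_id` now populated on the `response.created`, `response.in_progress`, and `response.completed` lifecycle events, every streamed `ChatResponseUpdate` can be correlated with the server-side response. A minimal consumer sketch, assuming the client exposes an async streaming method named `get_streaming_response` (the method name is an assumption; only `get_response` and `_parse_chunk_from_openai` are exercised in the tests below):

    async def collect_response_id(client, messages) -> str | None:
        # `response.created` is emitted first, so the id is typically
        # available on the very first update; later events repeat it.
        response_id: str | None = None
        async for update in client.get_streaming_response(messages=messages, store=True):
            response_id = update.response_id or response_id
        return response_id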
@@ -374,11 +374,37 @@ async def test_response_format_parse_path() -> None:
        response = await client.get_response(
            messages=[ChatMessage(role="user", text="Test message")], response_format=OutputStruct, store=True
        )

    assert response.response_id == "parsed_response_123"
    assert response.conversation_id == "parsed_response_123"
    assert response.model_id == "test-model"


async def test_response_format_parse_path_with_conversation_id() -> None:
    """Test the get_response response_format parsing path with a conversation ID set on the response."""
    client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")

    # Mock a successful parse response that carries an explicit conversation.
    mock_parsed_response = MagicMock()
    mock_parsed_response.id = "parsed_response_123"
    mock_parsed_response.text = "Parsed response"
    mock_parsed_response.model = "test-model"
    mock_parsed_response.created_at = 1000000000
    mock_parsed_response.metadata = {}
    mock_parsed_response.output_parsed = None
    mock_parsed_response.usage = None
    mock_parsed_response.finish_reason = None
    mock_parsed_response.conversation = MagicMock()
    mock_parsed_response.conversation.id = "conversation_456"

    with patch.object(client.client.responses, "parse", return_value=mock_parsed_response):
        response = await client.get_response(
            messages=[ChatMessage(role="user", text="Test message")], response_format=OutputStruct, store=True
        )

    assert response.response_id == "parsed_response_123"
    assert response.conversation_id == "conversation_456"
    assert response.model_id == "test-model"
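
Taken together, these two tests pin down the fallback behavior of `_get_conversation_id`: with `store=True`, an explicit conversation id on the response wins, and otherwise the response's own id is reused (the first test asserts `conversation_id == "parsed_response_123"`, the response id itself). A plausible sketch of the helper, inferred purely from the assertions and NOT the actual implementation, which sits outside this diff:

    def _get_conversation_id(response, store: bool | None) -> str | None:
        # Inferred sketch; the `store=False -> None` branch is an assumption.
        if not store:
            return None
        conversation = getattr(response, "conversation", None)
        if conversation is not None and getattr(conversation, "id", None):
            return conversation.id  # an explicit conversation wins
        return response.id  # fall back to the response id when storing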


async def test_bad_request_error_non_content_filter() -> None:
"""Test get_response BadRequestError without content_filter."""
client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")
@@ -994,6 +1020,44 @@ def test_streaming_response_basic_structure() -> None:
    assert response.raw_representation is mock_event


def test_streaming_response_created_type() -> None:
    """Test streaming response parsing for the response.created event type."""
    client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")
    chat_options = ChatOptions()
    function_call_ids: dict[int, tuple[str, str]] = {}

    mock_event = MagicMock()
    mock_event.type = "response.created"
    mock_event.response = MagicMock()
    mock_event.response.id = "resp_1234"
    mock_event.response.conversation = MagicMock()
    mock_event.response.conversation.id = "conv_5678"

    response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids)

    assert response.response_id == "resp_1234"
    assert response.conversation_id == "conv_5678"


def test_streaming_response_in_progress_type() -> None:
    """Test streaming response parsing for the response.in_progress event type."""
    client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")
    chat_options = ChatOptions()
    function_call_ids: dict[int, tuple[str, str]] = {}

    mock_event = MagicMock()
    mock_event.type = "response.in_progress"
    mock_event.response = MagicMock()
    mock_event.response.id = "resp_1234"
    mock_event.response.conversation = MagicMock()
    mock_event.response.conversation.id = "conv_5678"

    response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids)

    assert response.response_id == "resp_1234"
    assert response.conversation_id == "conv_5678"
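
The two lifecycle tests above are structurally identical. If the suite uses pytest (an assumption; no pytest import is visible in this diff), a parametrized variant could cover both events in one function, sketched here as a suggestion rather than part of the change:

    import pytest

    @pytest.mark.parametrize("event_type", ["response.created", "response.in_progress"])
    def test_streaming_response_lifecycle_types(event_type: str) -> None:
        """Both lifecycle events should surface response_id and conversation_id."""
        client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")
        chat_options = ChatOptions()
        function_call_ids: dict[int, tuple[str, str]] = {}

        mock_event = MagicMock()
        mock_event.type = event_type
        mock_event.response.id = "resp_1234"  # MagicMock auto-creates `response`
        mock_event.response.conversation.id = "conv_5678"

        response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids)

        assert response.response_id == "resp_1234"
        assert response.conversation_id == "conv_5678"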


def test_streaming_annotation_added_with_file_path() -> None:
"""Test streaming annotation added event with file_path type extracts HostedFileContent."""
client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")