@@ -7,7 +7,8 @@

 from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
 from openai.types import ChatModel
-from openai.types.chat import ChatCompletion, ChatCompletionChunk
+from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
+from openai.types.chat.chat_completion import Choice
 from openai.types.responses import Response
 from openai.types.responses.response_prompt_param import ResponsePromptParam
 from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
@@ -74,8 +75,11 @@ async def get_response(
             prompt=prompt,
         )

-        first_choice = response.choices[0]
-        message = first_choice.message
+        message: ChatCompletionMessage | None = None
+        first_choice: Choice | None = None
+        if response.choices and len(response.choices) > 0:
+            first_choice = response.choices[0]
+            message = first_choice.message

         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Received model response")
@@ -86,10 +90,8 @@ async def get_response(
                     json.dumps(message.model_dump(), indent=2),
                 )
             else:
-                logger.debug(
-                    "LLM resp had no message. finish_reason: %s",
-                    first_choice.finish_reason,
-                )
+                finish_reason = first_choice.finish_reason if first_choice else "-"
+                logger.debug(f"LLM resp had no message. finish_reason: {finish_reason}")

         usage = (
             Usage(
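
For context, a minimal standalone sketch of the failure mode this change appears to guard against (not part of the patch; the helper name `extract_first_message` and the fixture values are illustrative only). Some OpenAI-compatible endpoints can evidently return a completion whose `choices` list is empty, in which case the old unconditional `response.choices[0]` raised `IndexError`; the patched logic instead leaves `message` as `None` and falls through to the debug log:

```python
from openai.types.chat import ChatCompletion, ChatCompletionMessage
from openai.types.chat.chat_completion import Choice


def extract_first_message(response: ChatCompletion) -> ChatCompletionMessage | None:
    # Mirrors the patched logic: take the first choice's message when one
    # exists, otherwise fall through with None instead of raising IndexError.
    message: ChatCompletionMessage | None = None
    first_choice: Choice | None = None
    if response.choices and len(response.choices) > 0:
        first_choice = response.choices[0]
        message = first_choice.message
    return message


# A completion with an empty choices list: here the pre-patch
# `response.choices[0]` would have raised IndexError.
empty = ChatCompletion(
    id="chatcmpl-empty",  # illustrative fixture values
    choices=[],
    created=0,
    model="gpt-4o",
    object="chat.completion",
)
assert extract_first_message(empty) is None
```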