
Commit a4d59a9

Fix openai#1559: Handle empty choices array in LiteLLM model
Add defensive checks before accessing response.choices[0] to prevent an IndexError when Gemini or other providers return an empty choices array. This follows the same pattern as PR openai#935, which fixed the identical issue in openai_chatcompletions.py.

Changes:
- Add null checks for response.choices before array access
- Return empty output when the choices array is empty
- Preserve usage information even when the choices array is empty
- Add appropriate type annotations for the litellm types
1 parent: 8c4d4d0
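
The guard introduced by this commit can be sketched in isolation. This is a minimal sketch rather than code from the repository: the helper name extract_message is hypothetical, while the litellm.types.utils types are the ones referenced in the diff below.

# Minimal sketch of the defensive pattern applied in this commit.
# `extract_message` is a hypothetical helper for illustration only.
from __future__ import annotations

import litellm.types.utils


def extract_message(
    response: litellm.types.utils.ModelResponse,
) -> litellm.types.utils.Message | None:
    """Return the first choice's message, or None when the provider sent no choices."""
    if response.choices and len(response.choices) > 0:
        first_choice = response.choices[0]
        assert isinstance(first_choice, litellm.types.utils.Choices)
        return first_choice.message
    return None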

File tree: 1 file changed (+24 -13 lines)


src/agents/extensions/models/litellm_model.py

Lines changed: 24 additions & 13 deletions
@@ -110,18 +110,27 @@ async def get_response(
                 prompt=prompt,
             )
 
-            assert isinstance(response.choices[0], litellm.types.utils.Choices)
+            message: litellm.types.utils.Message | None = None
+            first_choice: litellm.types.utils.Choices | litellm.types.utils.StreamingChoices | None = (
+                None
+            )
+            if response.choices and len(response.choices) > 0:
+                first_choice = response.choices[0]
+                assert isinstance(first_choice, litellm.types.utils.Choices)
+                message = first_choice.message
 
             if _debug.DONT_LOG_MODEL_DATA:
                 logger.debug("Received model response")
             else:
-                logger.debug(
-                    f"""LLM resp:\n{
-                        json.dumps(
-                            response.choices[0].message.model_dump(), indent=2, ensure_ascii=False
-                        )
-                    }\n"""
-                )
+                if message is not None:
+                    logger.debug(
+                        f"""LLM resp:\n{
+                            json.dumps(message.model_dump(), indent=2, ensure_ascii=False)
+                        }\n"""
+                    )
+                else:
+                    finish_reason = first_choice.finish_reason if first_choice else "-"
+                    logger.debug(f"LLM resp had no message. finish_reason: {finish_reason}")
 
             if hasattr(response, "usage"):
                 response_usage = response.usage
@@ -151,16 +160,18 @@ async def get_response(
                 usage = Usage()
                 logger.warning("No usage information returned from Litellm")
 
-            if tracing.include_data():
-                span_generation.span_data.output = [response.choices[0].message.model_dump()]
+            if tracing.include_data() and message is not None:
+                span_generation.span_data.output = [message.model_dump()]
             span_generation.span_data.usage = {
                 "input_tokens": usage.input_tokens,
                 "output_tokens": usage.output_tokens,
             }
 
-            items = Converter.message_to_output_items(
-                LitellmConverter.convert_message_to_openai(response.choices[0].message)
-            )
+            items = []
+            if message is not None:
+                items = Converter.message_to_output_items(
+                    LitellmConverter.convert_message_to_openai(message)
+                )
 
             return ModelResponse(
                 output=items,