anthropic: emit informative error message if there are only system messages in a prompt #30822

Merged · 9 commits · May 16, 2025
91 changes: 56 additions & 35 deletions libs/partners/anthropic/langchain_anthropic/chat_models.py
@@ -282,7 +282,6 @@ def _format_messages(
     """
     system: Union[str, list[dict], None] = None
     formatted_messages: list[dict] = []
-
     merged_messages = _merge_messages(messages)
     for i, message in enumerate(merged_messages):
         if message.type == "system":
@@ -410,6 +409,16 @@ def _format_messages(
     return system, formatted_messages


+def _handle_anthropic_bad_request(e: anthropic.BadRequestError) -> None:
+    """Handle Anthropic BadRequestError."""
+    if "messages: at least one message is required" in e.message:
+        message = "Received only system message(s). "
+        warnings.warn(message)
+        raise e
+    else:
+        raise
+
+
 class ChatAnthropic(BaseChatModel):
     """Anthropic chat models.
@@ -1111,23 +1120,26 @@ def _stream(
             stream_usage = self.stream_usage
         kwargs["stream"] = True
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        stream = self._client.messages.create(**payload)
-        coerce_content_to_string = (
-            not _tools_in_params(payload)
-            and not _documents_in_params(payload)
-            and not _thinking_in_params(payload)
-        )
-        for event in stream:
-            msg = _make_message_chunk_from_anthropic_event(
-                event,
-                stream_usage=stream_usage,
-                coerce_content_to_string=coerce_content_to_string,
-            )
-            if msg is not None:
-                chunk = ChatGenerationChunk(message=msg)
-                if run_manager and isinstance(msg.content, str):
-                    run_manager.on_llm_new_token(msg.content, chunk=chunk)
-                yield chunk
+        try:
+            stream = self._client.messages.create(**payload)
+            coerce_content_to_string = (
+                not _tools_in_params(payload)
+                and not _documents_in_params(payload)
+                and not _thinking_in_params(payload)
+            )
+            for event in stream:
+                msg = _make_message_chunk_from_anthropic_event(
+                    event,
+                    stream_usage=stream_usage,
+                    coerce_content_to_string=coerce_content_to_string,
+                )
+                if msg is not None:
+                    chunk = ChatGenerationChunk(message=msg)
+                    if run_manager and isinstance(msg.content, str):
+                        run_manager.on_llm_new_token(msg.content, chunk=chunk)
+                    yield chunk
+        except anthropic.BadRequestError as e:
+            _handle_anthropic_bad_request(e)

     async def _astream(
         self,
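
A note on the shape of this change: because `_stream` is a generator, the `try` has to enclose both the `messages.create` call and the event loop, since with `stream=True` the error can surface either when the request is issued or lazily while events are consumed. A generic sketch of the pattern, with illustrative names and `ValueError` standing in for `anthropic.BadRequestError`:

```python
from collections.abc import Iterable, Iterator
from typing import Callable

def stream_with_mapped_errors(
    make_stream: Callable[[], Iterable[str]],
) -> Iterator[str]:
    """Yield events, routing request errors through a single handler."""
    try:
        stream = make_stream()  # may raise as soon as the request is sent...
        for event in stream:    # ...or lazily while events are pulled
            yield event
    except ValueError as e:     # stand-in for anthropic.BadRequestError
        raise ValueError(f"request rejected: {e}") from e
```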
@@ -1142,23 +1154,26 @@ async def _astream(
             stream_usage = self.stream_usage
         kwargs["stream"] = True
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        stream = await self._async_client.messages.create(**payload)
-        coerce_content_to_string = (
-            not _tools_in_params(payload)
-            and not _documents_in_params(payload)
-            and not _thinking_in_params(payload)
-        )
-        async for event in stream:
-            msg = _make_message_chunk_from_anthropic_event(
-                event,
-                stream_usage=stream_usage,
-                coerce_content_to_string=coerce_content_to_string,
-            )
-            if msg is not None:
-                chunk = ChatGenerationChunk(message=msg)
-                if run_manager and isinstance(msg.content, str):
-                    await run_manager.on_llm_new_token(msg.content, chunk=chunk)
-                yield chunk
+        try:
+            stream = await self._async_client.messages.create(**payload)
+            coerce_content_to_string = (
+                not _tools_in_params(payload)
+                and not _documents_in_params(payload)
+                and not _thinking_in_params(payload)
+            )
+            async for event in stream:
+                msg = _make_message_chunk_from_anthropic_event(
+                    event,
+                    stream_usage=stream_usage,
+                    coerce_content_to_string=coerce_content_to_string,
+                )
+                if msg is not None:
+                    chunk = ChatGenerationChunk(message=msg)
+                    if run_manager and isinstance(msg.content, str):
+                        await run_manager.on_llm_new_token(msg.content, chunk=chunk)
+                    yield chunk
+        except anthropic.BadRequestError as e:
+            _handle_anthropic_bad_request(e)

     def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
         data_dict = data.model_dump()
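
`_astream` gets the same treatment. The async twin of the sketch above, again with assumed names: the error can surface on the awaited create call or during async iteration, so both sit inside the `try`.

```python
from collections.abc import AsyncIterator
from typing import Awaitable, Callable

async def astream_with_mapped_errors(
    make_stream: Callable[[], Awaitable[AsyncIterator[str]]],
) -> AsyncIterator[str]:
    """Async variant: wrap both the awaited request and the iteration."""
    try:
        stream = await make_stream()
        async for event in stream:
            yield event
    except ValueError as e:  # stand-in for anthropic.BadRequestError
        raise ValueError(f"request rejected: {e}") from e
```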
@@ -1218,7 +1233,10 @@ def _generate(
             )
             return generate_from_stream(stream_iter)
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        data = self._client.messages.create(**payload)
+        try:
+            data = self._client.messages.create(**payload)
+        except anthropic.BadRequestError as e:
+            _handle_anthropic_bad_request(e)
         return self._format_output(data, **kwargs)

     async def _agenerate(
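
One observation on `_generate`: the final `return self._format_output(data, **kwargs)` is only reachable when `create` succeeded, because `_handle_anthropic_bad_request` always re-raises. Since the helper is annotated `-> None`, a type checker cannot see that and may flag `data` as possibly unbound; annotating with `NoReturn` would make the control flow explicit. A hedged sketch of that alternative (not what this PR does):

```python
import warnings
from typing import NoReturn

import anthropic

def _handle_anthropic_bad_request(e: anthropic.BadRequestError) -> NoReturn:
    """Always re-raises; warns first when only system messages were sent."""
    if "messages: at least one message is required" in e.message:
        warnings.warn("Received only system message(s). ")
    raise e
```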
@@ -1234,7 +1252,10 @@ async def _agenerate(
             )
             return await agenerate_from_stream(stream_iter)
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        data = await self._async_client.messages.create(**payload)
+        try:
+            data = await self._async_client.messages.create(**payload)
+        except anthropic.BadRequestError as e:
+            _handle_anthropic_bad_request(e)
         return self._format_output(data, **kwargs)

     def _get_llm_for_structured_output_when_thinking_is_enabled(
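
Finally, a sketch of how the new behavior might be exercised in a test. This hits the live API, so it is an integration-style test rather than a unit test; the model name and match pattern are assumptions:

```python
import anthropic
import pytest
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import SystemMessage

def test_system_only_prompt_warns_before_raising() -> None:
    llm = ChatAnthropic(model="claude-3-5-sonnet-latest")  # assumed model
    # The warning is emitted first, then the original error propagates.
    with pytest.warns(UserWarning, match="only system message"), pytest.raises(
        anthropic.BadRequestError
    ):
        llm.invoke([SystemMessage(content="You are terse.")])
```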