langchain: catch if there are only system messages in a prompt for anthropic #30822
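Summary: when a prompt contains only system messages, `_format_messages` moves them into the top-level `system` parameter and leaves the `messages` list empty, so the Anthropic API rejects the request with an opaque `messages: at least one message is required` error. This PR wraps the client calls in `_stream`, `_astream`, `_generate`, and `_agenerate` and re-raises that case as a clearer `ValueError` (with a warning).

A minimal reproduction of the behavior this PR targets (the model name is illustrative, and a live Anthropic API key is required, since the error comes back from the service):

```python
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import SystemMessage

llm = ChatAnthropic(model="claude-3-5-sonnet-latest")

# Before this change: anthropic.BadRequestError
#   ("messages: at least one message is required")
# After this change: ValueError("Received only system message(s). ")
llm.invoke([SystemMessage("You are a helpful assistant.")])
```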

Open · wants to merge 6 commits into master
91 changes: 56 additions & 35 deletions libs/partners/anthropic/langchain_anthropic/chat_models.py
@@ -281,7 +281,6 @@ def _format_messages(
     """
     system: Union[str, list[dict], None] = None
     formatted_messages: list[dict] = []
-
     merged_messages = _merge_messages(messages)
     for i, message in enumerate(merged_messages):
         if message.type == "system":
@@ -409,6 +408,16 @@ def _format_messages(
     return system, formatted_messages


+def _handle_anthropic_bad_request(e: anthropic.BadRequestError) -> None:
+    """Handle Anthropic BadRequestError."""
+    if ("messages: at least one message is required") in e.message:
+        message = "Received only system message(s). "
+        warnings.warn(message)
+        raise ValueError(message)
+    else:
+        raise
+
+
 class ChatAnthropic(BaseChatModel):
     """Anthropic chat models.

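All four entry points reuse this helper the same way, as the hunks below show: the client call is wrapped in a `try`, and any `BadRequestError` is routed through `_handle_anthropic_bad_request`, which raises the friendlier `ValueError` for system-only prompts and re-raises everything else. A sketch of the shared pattern (`client` and `payload` stand in for the surrounding method state):

```python
try:
    data = client.messages.create(**payload)
except anthropic.BadRequestError as e:
    # Raises ValueError for system-only prompts; re-raises anything else.
    _handle_anthropic_bad_request(e)
```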
@@ -1093,23 +1102,26 @@ def _stream(
             stream_usage = self.stream_usage
         kwargs["stream"] = True
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        stream = self._client.messages.create(**payload)
-        coerce_content_to_string = (
-            not _tools_in_params(payload)
-            and not _documents_in_params(payload)
-            and not _thinking_in_params(payload)
-        )
-        for event in stream:
-            msg = _make_message_chunk_from_anthropic_event(
-                event,
-                stream_usage=stream_usage,
-                coerce_content_to_string=coerce_content_to_string,
-            )
-            if msg is not None:
-                chunk = ChatGenerationChunk(message=msg)
-                if run_manager and isinstance(msg.content, str):
-                    run_manager.on_llm_new_token(msg.content, chunk=chunk)
-                yield chunk
+        try:
+            stream = self._client.messages.create(**payload)
+            coerce_content_to_string = (
+                not _tools_in_params(payload)
+                and not _documents_in_params(payload)
+                and not _thinking_in_params(payload)
+            )
+            for event in stream:
+                msg = _make_message_chunk_from_anthropic_event(
+                    event,
+                    stream_usage=stream_usage,
+                    coerce_content_to_string=coerce_content_to_string,
+                )
+                if msg is not None:
+                    chunk = ChatGenerationChunk(message=msg)
+                    if run_manager and isinstance(msg.content, str):
+                        run_manager.on_llm_new_token(msg.content, chunk=chunk)
+                    yield chunk
+        except anthropic.BadRequestError as e:
+            _handle_anthropic_bad_request(e)

     async def _astream(
         self,
@@ -1124,23 +1136,26 @@ async def _astream(
             stream_usage = self.stream_usage
         kwargs["stream"] = True
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        stream = await self._async_client.messages.create(**payload)
-        coerce_content_to_string = (
-            not _tools_in_params(payload)
-            and not _documents_in_params(payload)
-            and not _thinking_in_params(payload)
-        )
-        async for event in stream:
-            msg = _make_message_chunk_from_anthropic_event(
-                event,
-                stream_usage=stream_usage,
-                coerce_content_to_string=coerce_content_to_string,
-            )
-            if msg is not None:
-                chunk = ChatGenerationChunk(message=msg)
-                if run_manager and isinstance(msg.content, str):
-                    await run_manager.on_llm_new_token(msg.content, chunk=chunk)
-                yield chunk
+        try:
+            stream = await self._async_client.messages.create(**payload)
+            coerce_content_to_string = (
+                not _tools_in_params(payload)
+                and not _documents_in_params(payload)
+                and not _thinking_in_params(payload)
+            )
+            async for event in stream:
+                msg = _make_message_chunk_from_anthropic_event(
+                    event,
+                    stream_usage=stream_usage,
+                    coerce_content_to_string=coerce_content_to_string,
+                )
+                if msg is not None:
+                    chunk = ChatGenerationChunk(message=msg)
+                    if run_manager and isinstance(msg.content, str):
+                        await run_manager.on_llm_new_token(msg.content, chunk=chunk)
+                    yield chunk
+        except anthropic.BadRequestError as e:
+            _handle_anthropic_bad_request(e)

     def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
         data_dict = data.model_dump()
@@ -1200,7 +1215,10 @@ def _generate(
             )
             return generate_from_stream(stream_iter)
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        data = self._client.messages.create(**payload)
+        try:
+            data = self._client.messages.create(**payload)
+        except anthropic.BadRequestError as e:
+            _handle_anthropic_bad_request(e)
         return self._format_output(data, **kwargs)

     async def _agenerate(
@@ -1216,7 +1234,10 @@ async def _agenerate(
             )
             return await agenerate_from_stream(stream_iter)
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        data = await self._async_client.messages.create(**payload)
+        try:
+            data = await self._async_client.messages.create(**payload)
+        except anthropic.BadRequestError as e:
+            _handle_anthropic_bad_request(e)
         return self._format_output(data, **kwargs)

     def _get_llm_for_structured_output_when_thinking_is_enabled(
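A hypothetical integration-style test of the new behavior (the test name and model are illustrative, and it needs a live API key, since the underlying `BadRequestError` is returned by the service):

```python
import pytest
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import SystemMessage


def test_system_only_prompt_raises_value_error() -> None:
    llm = ChatAnthropic(model="claude-3-5-haiku-latest")
    # A prompt with only system messages should now surface a clear ValueError
    # instead of an opaque anthropic.BadRequestError.
    with pytest.raises(ValueError, match="Received only system message"):
        llm.invoke([SystemMessage("You are terse.")])
```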