From 8f9a58dd596ab70286e428edc3430bf99f8fc3a8 Mon Sep 17 00:00:00 2001 From: Ankit raj <113342181+ankit-v2-3@users.noreply.github.com> Date: Fri, 25 Oct 2024 19:27:27 +0530 Subject: [PATCH 1/5] feat: add claude --- backend/requirements.txt | 1 + backend/spielberg/agents/prompt_clip.py | 2 +- backend/spielberg/constants.py | 2 + backend/spielberg/llm/anthropic.py | 195 ++++++++++++++++++++++++ 4 files changed, 199 insertions(+), 1 deletion(-) create mode 100644 backend/spielberg/llm/anthropic.py diff --git a/backend/requirements.txt b/backend/requirements.txt index 2c64610..b13a96f 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -1,4 +1,5 @@ -e . +anthropic==0.37.1 Flask==3.0.3 Flask-SocketIO==5.3.6 Flask-Cors==4.0.1 diff --git a/backend/spielberg/agents/prompt_clip.py b/backend/spielberg/agents/prompt_clip.py index 4a6083c..154d932 100644 --- a/backend/spielberg/agents/prompt_clip.py +++ b/backend/spielberg/agents/prompt_clip.py @@ -100,7 +100,7 @@ def _text_prompter(self, transcript_text, prompt): self.llm.chat_completions, [ ContextMessage( - content=prompt, role=RoleTypes.system + content=prompt, role=RoleTypes.user ).to_llm_msg() ], response_format={"type": "json_object"}, diff --git a/backend/spielberg/constants.py b/backend/spielberg/constants.py index c9b50ad..a3fb57b 100644 --- a/backend/spielberg/constants.py +++ b/backend/spielberg/constants.py @@ -18,9 +18,11 @@ class LLMType(str, Enum): """Enum for LLM types""" OPENAI = "openai" + ANTHROPIC = "anthropic" class EnvPrefix(str, Enum): """Enum for environment prefixes""" OPENAI_ = "OPENAI_" + ANTHROPIC_ = "ANTHROPIC_" diff --git a/backend/spielberg/llm/anthropic.py b/backend/spielberg/llm/anthropic.py new file mode 100644 index 0000000..f3829df --- /dev/null +++ b/backend/spielberg/llm/anthropic.py @@ -0,0 +1,195 @@ +from enum import Enum + +from pydantic import Field, field_validator, FieldValidationInfo +from pydantic_settings import SettingsConfigDict + +from 
spielberg.core.session import RoleTypes from spielberg.llm.base import BaseLLM, BaseLLMConfig, LLMResponse, LLMResponseStatus from spielberg.constants import ( LLMType, EnvPrefix, ) class AnthropicChatModel(str, Enum): """Enum for Anthropic Chat models""" CLAUDE_3_HAIKU = "claude-3-haiku-20240307" CLAUDE_3_OPUS = "claude-3-opus-20240229" CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20240620" class AnthropicTextModel(str, Enum): """Enum for Anthropic Text models""" CLAUDE_3_HAIKU = "claude-3-haiku-20240307" CLAUDE_3_OPUS = "claude-3-opus-20240229" CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20240620" class AnthropicAIConfig(BaseLLMConfig): model_config = SettingsConfigDict( env_prefix=EnvPrefix.ANTHROPIC_, extra="ignore", ) llm_type: str = LLMType.ANTHROPIC api_key: str = "" api_base: str = "" chat_model: str = Field(default=AnthropicChatModel.CLAUDE_3_5_SONNET) text_model: str = Field(default=AnthropicTextModel.CLAUDE_3_5_SONNET) enable_langfuse: bool = False @field_validator("api_key") @classmethod def validate_non_empty(cls, v, info: FieldValidationInfo): if not v: raise ValueError( f"{info.field_name} must not be empty. please set {EnvPrefix.ANTHROPIC_.value}{info.field_name.upper()} environment variable." 
+ ) + return v + + +class AnthropicAI(BaseLLM): + def __init__(self, config: AnthropicAIConfig = None): + """ + :param config: AnthropicAI Config + """ + if config is None: + config = AnthropicAIConfig() + super().__init__(config=config) + try: + import anthropic + except ImportError: + raise ImportError("Please install Anthropic python library.") + + self.client = anthropic.Anthropic(api_key=self.api_key) + + def _format_messages(self, messages: list): + system = "" + formatted_messages = [] + if messages[0]["role"] == RoleTypes.system: + system = messages[0]["content"] + messages = messages[1:] + + for message in messages: + if message["role"] == RoleTypes.assistant and message.get("tool_calls"): + tool = message["tool_calls"][0]["tool"] + formatted_messages.append( + { + "role": message["role"], + "content": [ + { + "type": "text", + "text": message["content"], + }, + { + "id": message["tool_calls"][0]["id"], + "type": message["tool_calls"][0]["type"], + "name": tool["name"], + "input": tool["arguments"], + }, + ], + } + ) + + elif message["role"] == RoleTypes.tool: + formatted_messages.append( + { + "role": RoleTypes.user, + "content": [ + { + "type": "tool_result", + "tool_use_id": message["tool_call_id"], + "content": message["content"], + } + ], + } + ) + else: + formatted_messages.append(message) + + return system, formatted_messages + + def _format_tools(self, tools: list): + """Format the tools to the format that Anthropic expects. + + **Example**:: + + [ + { + "name": "get_weather", + "description": "Get the current weather in a given location", + "input_schema": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", } }, "required": ["location"], }, } ] """ formatted_tools = [] for tool in tools: formatted_tools.append( { "name": tool["name"], "description": tool["description"], "input_schema": tool["parameters"], } ) return formatted_tools def chat_completions( self, messages: list, tools: list = [], stop=None, response_format=None ): """Get completions for chat. docs: https://docs.anthropic.com/en/docs/build-with-claude/tool-use """ system, messages = self._format_messages(messages) params = { "model": self.chat_model, "messages": messages, "system": system, "max_tokens": self.max_tokens, } if tools: params["tools"] = self._format_tools(tools) try: response = self.client.messages.create(**params) except Exception as e: return LLMResponse(content=f"Error: {e}") return LLMResponse( content=response.content[0].text, tool_calls=[ { "id": response.content[1].id, "tool": { "name": response.content[1].name, "arguments": response.content[1].input, }, "type": response.content[1].type, } ] if next( (block for block in response.content if block.type == "tool_use"), None ) is not None else [], finish_reason=response.stop_reason, send_tokens=response.usage.input_tokens, recv_tokens=response.usage.output_tokens, total_tokens=(response.usage.input_tokens + response.usage.output_tokens), status=LLMResponseStatus.SUCCESS, ) def text_completions(self): raise NotImplementedError("Not implemented yet") From fff4cfc0f88047009f34682f1b3899cfc8715365 Mon Sep 17 00:00:00 2001 From: Ankit raj <113342181+ankit-v2-3@users.noreply.github.com> Date: Fri, 25 Oct 2024 19:35:56 +0530 Subject: [PATCH 2/5] fix: remove langfuse --- backend/spielberg/llm/openai.py | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/backend/spielberg/llm/openai.py b/backend/spielberg/llm/openai.py index 355f18a..597b463 100644 --- 
a/backend/spielberg/llm/openai.py +++ b/backend/spielberg/llm/openai.py @@ -46,7 +46,6 @@ class OpenaiConfig(BaseLLMConfig): chat_model: str = Field(default=OpenAIChatModel.GPT4o) text_model: str = Field(default=OpenAITextModel.GPT4_TURBO) max_tokens: int = 4096 - enable_langfuse: bool = False @field_validator("api_key") @classmethod @@ -66,18 +65,10 @@ def __init__(self, config: OpenaiConfig = None): if config is None: config = OpenaiConfig() super().__init__(config=config) - - if self.enable_langfuse: - try: - from langfuse.openai import openai - except ImportError: - raise ImportError("Please install Langfuse and OpenAI python library.") - self.init_langfuse() - else: - try: - import openai - except ImportError: - raise ImportError("Please install OpenAI python library.") + try: + import openai + except ImportError: + raise ImportError("Please install OpenAI python library.") self.client = openai.OpenAI(api_key=self.api_key, base_url=self.api_base) From ce2aa2532b2a08205b7783d374afc00808727ad0 Mon Sep 17 00:00:00 2001 From: Ankit raj <113342181+ankit-v2-3@users.noreply.github.com> Date: Mon, 28 Oct 2024 19:32:54 +0530 Subject: [PATCH 3/5] build: remove text_model --- backend/spielberg/llm/anthropic.py | 17 ++++------------- backend/spielberg/llm/base.py | 8 -------- backend/spielberg/llm/openai.py | 15 --------------- 3 files changed, 4 insertions(+), 36 deletions(-) diff --git a/backend/spielberg/llm/anthropic.py b/backend/spielberg/llm/anthropic.py index f3829df..830f8b1 100644 --- a/backend/spielberg/llm/anthropic.py +++ b/backend/spielberg/llm/anthropic.py @@ -17,17 +17,12 @@ class AnthropicChatModel(str, Enum): CLAUDE_3_HAIKU = "claude-3-haiku-20240307" CLAUDE_3_OPUS = "claude-3-opus-20240229" CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20240620" - - -class AnthropicTextModel(str, Enum): - """Enum for Anthropic Text models""" - - CLAUDE_3_HAIKU = "claude-3-haiku-20240307" - CLAUDE_3_OPUS = "claude-3-opus-20240229" - CLAUDE_3_5_SONNET = 
"claude-3-5-sonnet-20240620" + CLAUDE_3_5_SONNET_LATEST = "claude-3-5-sonnet-20241022" class AnthropicAIConfig(BaseLLMConfig): + """AnthropicAI Config""" + model_config = SettingsConfigDict( env_prefix=EnvPrefix.ANTHROPIC_, extra="ignore", @@ -37,7 +32,6 @@ class AnthropicAIConfig(BaseLLMConfig): api_key: str = "" api_base: str = "" chat_model: str = Field(default=AnthropicChatModel.CLAUDE_3_5_SONNET) - text_model: str = Field(default=AnthropicTextModel.CLAUDE_3_5_SONNET) enable_langfuse: bool = False @field_validator("api_key") @@ -149,7 +143,7 @@ def chat_completions( ): """Get completions for chat. - docs: https://docs.anthropic.com/en/docs/build-with-claude/tool-use + tools docs: https://docs.anthropic.com/en/docs/build-with-claude/tool-use """ system, messages = self._format_messages(messages) params = { @@ -190,6 +184,3 @@ def chat_completions( total_tokens=(response.usage.input_tokens + response.usage.output_tokens), status=LLMResponseStatus.SUCCESS, ) - - def text_completions(self): - raise NotImplementedError("Not implemented yet") diff --git a/backend/spielberg/llm/base.py b/backend/spielberg/llm/base.py index c1f458d..193b939 100644 --- a/backend/spielberg/llm/base.py +++ b/backend/spielberg/llm/base.py @@ -29,7 +29,6 @@ class BaseLLMConfig(BaseSettings): :param str api_key: API key for the LLM. :param str api_base: Base URL for the LLM API. :param str chat_model: Model name for chat completions. - :param str text_model: Model name for text completions. :param str temperature: Sampling temperature for completions. :param float top_p: Top p sampling for completions. :param int max_tokens: Maximum tokens to generate. 
@@ -40,7 +39,6 @@ class BaseLLMConfig(BaseSettings): api_key: str = "" api_base: str = "" chat_model: str = "" - text_model: str = "" temperature: float = 0.9 top_p: float = 1 max_tokens: int = 4096 @@ -60,7 +58,6 @@ def __init__(self, config: BaseLLMConfig): self.api_key = config.api_key self.api_base = config.api_base self.chat_model = config.chat_model - self.text_model = config.text_model self.temperature = config.temperature self.top_p = config.top_p self.max_tokens = config.max_tokens @@ -71,8 +68,3 @@ def __init__(self, config: BaseLLMConfig): def chat_completions(self, messages: List[Dict], tools: List[Dict]) -> LLMResponse: """Abstract method for chat completions""" pass - - @abstractmethod - def text_completions(self, prompt: str) -> LLMResponse: - """Abstract method for text completions""" - pass diff --git a/backend/spielberg/llm/openai.py b/backend/spielberg/llm/openai.py index 597b463..b0d7205 100644 --- a/backend/spielberg/llm/openai.py +++ b/backend/spielberg/llm/openai.py @@ -22,16 +22,6 @@ class OpenAIChatModel(str, Enum): GPT4o_MINI = "gpt-4o-mini" -class OpenAITextModel(str, Enum): - """Enum for OpenAI Text models""" - - GPT4 = "gpt-4" - GPT4_32K = "gpt-4-32k" - GPT4_TURBO = "gpt-4-turbo" - GPT4o = "gpt-4o-2024-08-06" - GPT4o_MINI = "gpt-4o-mini" - - class OpenaiConfig(BaseLLMConfig): """OpenAI Config""" @@ -44,7 +34,6 @@ class OpenaiConfig(BaseLLMConfig): api_key: str = "" api_base: str = "https://api.openai.com/v1" chat_model: str = Field(default=OpenAIChatModel.GPT4o) - text_model: str = Field(default=OpenAITextModel.GPT4_TURBO) max_tokens: int = 4096 @field_validator("api_key") @@ -197,7 +186,3 @@ def chat_completions( total_tokens=response.usage.total_tokens, status=LLMResponseStatus.SUCCESS, ) - - def text_completions(self): - """Get completions for text.""" - raise NotImplementedError("Not implemented yet") From 717dd280ffadf2183898bce86434cbeedfe2f873 Mon Sep 17 00:00:00 2001 From: Ankit raj 
<113342181+ankit-v2-3@users.noreply.github.com> Date: Mon, 28 Oct 2024 19:33:13 +0530 Subject: [PATCH 4/5] docs: add anthropic --- docs/llm/anthropic.md | 15 +++++++++++++++ mkdocs.yml | 1 + 2 files changed, 16 insertions(+) create mode 100644 docs/llm/anthropic.md diff --git a/docs/llm/anthropic.md b/docs/llm/anthropic.md new file mode 100644 index 0000000..1e24036 --- /dev/null +++ b/docs/llm/anthropic.md @@ -0,0 +1,15 @@ +## AnthropicAI + +AnthropicAI extends the base LLM and implements the Anthropic API. + +### AnthropicAI Config + +AnthropicAI Config is the configuration object for AnthropicAI. It is used to configure AnthropicAI and is passed to AnthropicAI when it is created. + +::: spielberg.llm.anthropic.AnthropicAIConfig + +### AnthropicAI Interface + +AnthropicAI is the LLM used by the agents and tools. It is used to generate responses to messages. + +::: spielberg.llm.anthropic.AnthropicAI diff --git a/mkdocs.yml b/mkdocs.yml index 198a55e..9c3e2eb 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -73,6 +73,7 @@ nav: - 'Interface': 'llm/interface.md' - Integrations: - 'OpenAI': 'llm/openai.md' + - 'AnthropicAI': 'llm/anthropic.md' - 'Database': - 'Interface': 'database/interface.md' - Integrations: From 0ac8c31ea075430e0789db4f338f6cc8607736c6 Mon Sep 17 00:00:00 2001 From: Ankit raj <113342181+ankit-v2-3@users.noreply.github.com> Date: Mon, 28 Oct 2024 19:34:56 +0530 Subject: [PATCH 5/5] fix: remove langfuse --- backend/spielberg/llm/anthropic.py | 1 - 1 file changed, 1 deletion(-) diff --git a/backend/spielberg/llm/anthropic.py b/backend/spielberg/llm/anthropic.py index 830f8b1..3428953 100644 --- a/backend/spielberg/llm/anthropic.py +++ b/backend/spielberg/llm/anthropic.py @@ -32,7 +32,6 @@ class AnthropicAIConfig(BaseLLMConfig): api_key: str = "" api_base: str = "" chat_model: str = Field(default=AnthropicChatModel.CLAUDE_3_5_SONNET) - enable_langfuse: bool = False @field_validator("api_key") @classmethod