Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: patch pydantic args and identities #2457

Merged
merged 8 commits into from
Feb 26, 2025
15 changes: 10 additions & 5 deletions letta/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -245,10 +245,13 @@ def execute_tool_and_persist_state(
action_name=action_name, args=function_args, api_key=composio_api_key, entity_id=entity_id
)
else:
# Parse the source code to extract function annotations
annotations = get_function_annotations_from_source(target_letta_tool.source_code, function_name)
# Coerce the function arguments to the correct types based on the annotations
function_args = coerce_dict_args_by_annotations(function_args, annotations)
try:
# Parse the source code to extract function annotations
annotations = get_function_annotations_from_source(target_letta_tool.source_code, function_name)
# Coerce the function arguments to the correct types based on the annotations
function_args = coerce_dict_args_by_annotations(function_args, annotations)
except ValueError as e:
self.logger.debug(f"Error coercing function arguments: {e}")

# execute tool in a sandbox
# TODO: allow agent_state to specify which sandbox to execute tools in
Expand All @@ -257,7 +260,9 @@ def execute_tool_and_persist_state(
agent_state_copy.tools = []
agent_state_copy.tool_rules = []

sandbox_run_result = ToolExecutionSandbox(function_name, function_args, self.user).run(agent_state=agent_state_copy)
sandbox_run_result = ToolExecutionSandbox(function_name, function_args, self.user, tool_object=target_letta_tool).run(
agent_state=agent_state_copy
)
function_response, updated_agent_state = sandbox_run_result.func_return, sandbox_run_result.agent_state
assert orig_memory_str == self.agent_state.memory.compile(), "Memory should not be modified in a sandbox tool"
if updated_agent_state is not None:
Expand Down
7 changes: 7 additions & 0 deletions letta/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,13 +52,20 @@
BASE_MEMORY_TOOLS = ["core_memory_append", "core_memory_replace"]
# Multi agent tools
MULTI_AGENT_TOOLS = ["send_message_to_agent_and_wait_for_reply", "send_message_to_agents_matching_all_tags", "send_message_to_agent_async"]
# Set of all built-in Letta tools
LETTA_TOOL_SET = set(BASE_TOOLS + BASE_MEMORY_TOOLS + MULTI_AGENT_TOOLS)

# The name of the tool used to send message to the user
# May not be relevant in cases where the agent has multiple ways to message the user (send_imessage, send_discord_message, ...)
# or in cases where the agent has no concept of messaging a user (e.g. a workflow agent)
DEFAULT_MESSAGE_TOOL = "send_message"
DEFAULT_MESSAGE_TOOL_KWARG = "message"

PRE_EXECUTION_MESSAGE_ARG = "pre_exec_msg"

REQUEST_HEARTBEAT_PARAM = "request_heartbeat"


# Structured output models
STRUCTURED_OUTPUT_MODELS = {"gpt-4o", "gpt-4o-mini"}

Expand Down
5 changes: 3 additions & 2 deletions letta/helpers/composio_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,11 @@
from letta.settings import tool_settings


def get_composio_api_key(actor: User, logger: Logger) -> Optional[str]:
def get_composio_api_key(actor: User, logger: Optional[Logger] = None) -> Optional[str]:
api_keys = SandboxConfigManager().list_sandbox_env_vars_by_key(key="COMPOSIO_API_KEY", actor=actor)
if not api_keys:
logger.warning(f"No API keys found for Composio. Defaulting to the environment variable...")
if logger:
logger.warning(f"No API keys found for Composio. Defaulting to the environment variable...")
if tool_settings.composio_api_key:
return tool_settings.composio_api_key
else:
Expand Down
171 changes: 171 additions & 0 deletions letta/helpers/tool_execution_helper.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,171 @@
from collections import OrderedDict
from typing import Any, Dict, Optional

from letta.constants import COMPOSIO_ENTITY_ENV_VAR_KEY, PRE_EXECUTION_MESSAGE_ARG
from letta.functions.ast_parsers import coerce_dict_args_by_annotations, get_function_annotations_from_source
from letta.functions.helpers import execute_composio_action, generate_composio_action_from_func_name
from letta.helpers.composio_helpers import get_composio_api_key
from letta.orm.enums import ToolType
from letta.schemas.agent import AgentState
from letta.schemas.sandbox_config import SandboxRunResult
from letta.schemas.tool import Tool
from letta.schemas.user import User
from letta.services.tool_execution_sandbox import ToolExecutionSandbox
from letta.utils import get_friendly_error_msg


def enable_strict_mode(tool_schema: Dict[str, Any]) -> Dict[str, Any]:
    """Enables strict mode for a tool schema by setting 'strict' to True and
    disallowing additional properties in the parameters.

    Args:
        tool_schema (Dict[str, Any]): The original tool schema.

    Returns:
        Dict[str, Any]: A new tool schema with strict mode enabled. The input
            schema is left unmodified.
    """
    schema = tool_schema.copy()

    # Enable strict mode
    schema["strict"] = True

    # Ensure parameters is a valid dictionary
    parameters = schema.get("parameters", {})

    if isinstance(parameters, dict) and parameters.get("type") == "object":
        # Build a new parameters dict instead of writing into the nested one:
        # tool_schema.copy() is shallow, so setting "additionalProperties" on
        # `parameters` in place would leak the change into the caller's schema.
        schema["parameters"] = {**parameters, "additionalProperties": False}

    return schema


def add_pre_execution_message(tool_schema: Dict[str, Any]) -> Dict[str, Any]:
    """Return a copy of the tool schema with a pre-execution message parameter
    prepended, prompting a natural, human-like message before the tool runs.

    Args:
        tool_schema (Dict[str, Any]): The original tool schema.

    Returns:
        Dict[str, Any]: A new tool schema with the pre-execution message field
            added as the first property and first required field.
    """
    updated_schema = tool_schema.copy()
    params = updated_schema.get("parameters", {})

    # Only modify standard object-typed parameter schemas; pass anything else through.
    if not (isinstance(params, dict) and params.get("type") == "object"):
        return updated_schema

    existing_props = params.get("properties", {})
    required_fields = params.get("required", [])

    # Field definition for the pre-execution message, with a refined description.
    message_field = {
        "type": "string",
        "description": (
            "A concise message to be uttered before executing this tool. "
            "This should sound natural, as if a person is casually announcing their next action."
            "You MUST also include punctuation at the end of this message."
        ),
    }

    # Rebuild properties so the pre-execution message comes first, followed by
    # all existing properties in their original order.
    ordered_props = OrderedDict([(PRE_EXECUTION_MESSAGE_ARG, message_field)])
    ordered_props.update(existing_props)

    # Make the pre-execution message the first required field (if not already listed).
    if PRE_EXECUTION_MESSAGE_ARG in required_fields:
        updated_required = required_fields
    else:
        updated_required = [PRE_EXECUTION_MESSAGE_ARG] + required_fields

    updated_schema["parameters"] = {
        **params,
        "properties": dict(ordered_props),
        "required": updated_required,
    }
    return updated_schema


def remove_request_heartbeat(tool_schema: Dict[str, Any]) -> Dict[str, Any]:
    """Removes the `request_heartbeat` parameter from a tool schema if it exists.

    Args:
        tool_schema (Dict[str, Any]): The original tool schema.

    Returns:
        Dict[str, Any]: A new tool schema without `request_heartbeat`. The
            input schema is left unmodified.
    """
    schema = tool_schema.copy()
    parameters = schema.get("parameters", {})

    if isinstance(parameters, dict):
        # Copy the nested properties dict before modifying it: tool_schema.copy()
        # is shallow, so popping from `properties` directly would also delete the
        # key from the caller's schema.
        properties = dict(parameters.get("properties", {}))
        required = parameters.get("required", [])

        # Remove the `request_heartbeat` property if it exists
        properties.pop("request_heartbeat", None)

        # Remove `request_heartbeat` from required fields if present
        if "request_heartbeat" in required:
            required = [r for r in required if r != "request_heartbeat"]

        # Update parameters with modified properties and required list
        schema["parameters"] = {**parameters, "properties": properties, "required": required}

    return schema


# TODO: Deprecate the `execute_external_tool` function on the agent body
def execute_external_tool(
    agent_state: AgentState,
    function_name: str,
    function_args: dict,
    target_letta_tool: Tool,
    actor: User,
    allow_agent_state_modifications: bool = False,
) -> tuple[Any, Optional[SandboxRunResult]]:
    """Execute a non-builtin tool: either a Composio action or a custom sandboxed tool.

    Args:
        agent_state: State of the calling agent; scanned for the Composio entity
            env var, and (optionally) deep-copied for the sandbox run.
        function_name: Name of the function to execute.
        function_args: Raw argument dict; coerced via source annotations for custom tools.
        target_letta_tool: The tool record to execute (type, name, source code).
        actor: User on whose behalf the tool runs (API key lookup, sandbox identity).
        allow_agent_state_modifications: When True, a stripped deep copy of
            agent_state is passed into the sandbox; otherwise the sandbox gets None.

    Returns:
        Tuple of (function_response, sandbox_run_result). sandbox_run_result is
        None for Composio tools and on error. On any exception, function_response
        is a friendly error message string instead of a real result.
        NOTE(review): if tool_type is neither EXTERNAL_COMPOSIO nor CUSTOM, the
        function falls through and implicitly returns None (not a tuple) — confirm
        callers never pass other tool types.
    """
    # TODO: need to have an AgentState object that actually has full access to the block data
    # this is because the sandbox tools need to be able to access block.value to edit this data
    try:
        if target_letta_tool.tool_type == ToolType.EXTERNAL_COMPOSIO:
            action_name = generate_composio_action_from_func_name(target_letta_tool.name)
            # Get entity ID from the agent_state (no break: the last matching env var wins)
            entity_id = None
            for env_var in agent_state.tool_exec_environment_variables:
                if env_var.key == COMPOSIO_ENTITY_ENV_VAR_KEY:
                    entity_id = env_var.value
            # Get composio_api_key
            composio_api_key = get_composio_api_key(actor=actor)
            function_response = execute_composio_action(
                action_name=action_name, args=function_args, api_key=composio_api_key, entity_id=entity_id
            )
            return function_response, None
        elif target_letta_tool.tool_type == ToolType.CUSTOM:
            # Parse the source code to extract function annotations
            annotations = get_function_annotations_from_source(target_letta_tool.source_code, function_name)
            # Coerce the function arguments to the correct types based on the annotations
            function_args = coerce_dict_args_by_annotations(function_args, annotations)

            # execute tool in a sandbox
            # TODO: allow agent_state to specify which sandbox to execute tools in
            # TODO: This is only temporary, can remove after we publish a pip package with this object
            if allow_agent_state_modifications:
                # Strip tools/tool rules from the copy so the sandboxed tool cannot recurse into tool execution
                agent_state_copy = agent_state.__deepcopy__()
                agent_state_copy.tools = []
                agent_state_copy.tool_rules = []
            else:
                agent_state_copy = None

            sandbox_run_result = ToolExecutionSandbox(function_name, function_args, actor).run(agent_state=agent_state_copy)
            function_response, updated_agent_state = sandbox_run_result.func_return, sandbox_run_result.agent_state
            # TODO: Bring this back
            # if allow_agent_state_modifications and updated_agent_state is not None:
            #     self.update_memory_if_changed(updated_agent_state.memory)
            return function_response, sandbox_run_result
    except Exception as e:
        # Need to catch the error here, or else truncation won't happen
        # TODO: modify to function execution error
        function_response = get_friendly_error_msg(function_name=function_name, exception_name=type(e).__name__, exception_message=str(e))
        return function_response, None
38 changes: 37 additions & 1 deletion letta/llm_api/anthropic.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,14 +47,39 @@
# https://docs.anthropic.com/claude/docs/models-overview
# Sadly hardcoded
MODEL_LIST = [
## Opus
{
"name": "claude-3-opus-20240229",
"context_window": 200000,
},
## Sonnet
# 3.0
{
"name": "claude-3-sonnet-20240229",
"context_window": 200000,
},
# 3.5
{
"name": "claude-3-5-sonnet-20240620",
"context_window": 200000,
},
# 3.5 new
{
"name": "claude-3-5-sonnet-20241022",
"context_window": 200000,
},
# 3.7
{
"name": "claude-3-7-sonnet-20250219",
"context_window": 200000,
},
## Haiku
# 3.0
{
"name": "claude-3-haiku-20240307",
"context_window": 200000,
},
# 3.5
{
"name": "claude-3-5-haiku-20241022",
"context_window": 200000,
def anthropic_get_model_list(url: str, api_key: Union[str, None]) -> dict:
    """Return the list of Anthropic models.

    See https://docs.anthropic.com/claude/docs/models-overview

    Queries the Anthropic models API when an API key is configured (an
    org-level override key takes precedence over the settings key); otherwise
    falls back to the hardcoded MODEL_LIST.
    NOTE(review): the `url` and `api_key` parameters are currently unused here
    and the return annotation says `dict` while a list is returned — confirm
    against callers before tightening.
    """
    anthropic_override_key = ProviderManager().get_anthropic_override_key()
    if anthropic_override_key:
        anthropic_client = anthropic.Anthropic(api_key=anthropic_override_key)
    elif model_settings.anthropic_api_key:
        anthropic_client = anthropic.Anthropic()
    else:
        # No API key available: previously `anthropic_client` was left unbound
        # here and the call below raised NameError. Fall back to the hardcoded
        # model list instead.
        return MODEL_LIST

    models = anthropic_client.models.list()
    models_json = models.model_dump()
    assert "data" in models_json, f"Anthropic model query response missing 'data' field: {models_json}"
    return models_json["data"]


def convert_tools_to_anthropic_format(tools: List[Tool]) -> List[dict]:
Expand Down
4 changes: 2 additions & 2 deletions letta/orm/source.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,6 @@ class Source(SqlalchemyBase, OrganizationMixin):
secondary="sources_agents",
back_populates="sources",
lazy="selectin",
cascade="all, delete", # Ensures rows in sources_agents are deleted when the source is deleted
passive_deletes=True, # Allows the database to handle deletion of orphaned rows
cascade="save-update", # Only propagate save and update operations
passive_deletes=True, # Let the database handle deletions
)
2 changes: 1 addition & 1 deletion letta/schemas/openai/chat_completion_request.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ class ChatCompletionRequest(BaseModel):
"""https://platform.openai.com/docs/api-reference/chat/create"""

model: str
messages: List[ChatMessage]
messages: List[Union[ChatMessage, Dict]]
frequency_penalty: Optional[float] = 0
logit_bias: Optional[Dict[str, int]] = None
logprobs: Optional[bool] = False
Expand Down
47 changes: 43 additions & 4 deletions letta/schemas/providers.py
Original file line number Diff line number Diff line change
Expand Up @@ -410,28 +410,67 @@ class AnthropicProvider(Provider):
base_url: str = "https://api.anthropic.com/v1"

def list_llm_models(self) -> List[LLMConfig]:
from letta.llm_api.anthropic import anthropic_get_model_list
from letta.llm_api.anthropic import MODEL_LIST, anthropic_get_model_list

models = anthropic_get_model_list(self.base_url, api_key=self.api_key)

"""
Example response:
{
"data": [
{
"type": "model",
"id": "claude-3-5-sonnet-20241022",
"display_name": "Claude 3.5 Sonnet (New)",
"created_at": "2024-10-22T00:00:00Z"
}
],
"has_more": true,
"first_id": "<string>",
"last_id": "<string>"
}
"""

configs = []
for model in models:

if model["type"] != "model":
continue

if "id" not in model:
continue

# Don't support 2.0 and 2.1
if model["id"].startswith("claude-2"):
continue

# Anthropic doesn't return the context window in their API
if "context_window" not in model:
# Remap list to name: context_window
model_library = {m["name"]: m["context_window"] for m in MODEL_LIST}
# Attempt to look it up in a hardcoded list
if model["id"] in model_library:
model["context_window"] = model_library[model["id"]]
else:
# On fallback, we can set 200k (generally safe), but we should warn the user
warnings.warn(f"Couldn't find context window size for model {model['id']}, defaulting to 200,000")
model["context_window"] = 200000

# We set this to false by default, because Anthropic can
# natively support <thinking> tags inside of content fields
# However, putting COT inside of tool calls can make it more
# reliable for tool calling (no chance of a non-tool call step)
# Since tool_choice_type 'any' doesn't work with in-content COT
# NOTE For Haiku, it can be flaky if we don't enable this by default
inner_thoughts_in_kwargs = True if "haiku" in model["name"] else False
inner_thoughts_in_kwargs = True if "haiku" in model["id"] else False

configs.append(
LLMConfig(
model=model["name"],
model=model["id"],
model_endpoint_type="anthropic",
model_endpoint=self.base_url,
context_window=model["context_window"],
handle=self.get_handle(model["name"]),
handle=self.get_handle(model["id"]),
put_inner_thoughts_in_kwargs=inner_thoughts_in_kwargs,
)
)
Expand Down
Loading