diff --git a/letta/agent.py b/letta/agent.py
index 7280b316db..ebd73aa9ea 100644
--- a/letta/agent.py
+++ b/letta/agent.py
@@ -305,8 +305,6 @@ def __init__(
         printd(f"Agent.__init__ :: creating, state={agent_state.message_ids}")
         assert self.agent_state.id is not None and self.agent_state.user_id is not None
 
-        print("INITIALIZE MESSAGE SEQUENCE", self.agent_state.memory.get_blocks())
-
         # Generate a sequence of initial messages to put in the buffer
         init_messages = initialize_message_sequence(
             model=self.model,
diff --git a/letta/client/client.py b/letta/client/client.py
index 2cd30d4e6d..0bf773c04a 100644
--- a/letta/client/client.py
+++ b/letta/client/client.py
@@ -2125,7 +2125,6 @@ def create_agent(
         for block in memory.get_blocks():
             self.server.block_manager.create_or_update_block(block, actor=user)
             self.server.link_block_to_agent_memory(user_id=self.user_id, agent_id=agent_state.id, block_id=block.id)
-            print("LINKING BLOCK", block.label, block.value)
 
         # TODO: get full agent state
         return self.server.get_agent(agent_state.id)
diff --git a/letta/llm_api/llm_api_tools.py b/letta/llm_api/llm_api_tools.py
index d776cd4864..9a6374b511 100644
--- a/letta/llm_api/llm_api_tools.py
+++ b/letta/llm_api/llm_api_tools.py
@@ -124,12 +124,6 @@ def create(
     """Return response to chat completion with backoff"""
     from letta.utils import printd
 
-    print("SENDING MESSAGE")
-    for message in messages:
-        from pprint import pprint
-
-        pprint(message.text)
-
    # Count the tokens first, if there's an overflow exit early by throwing an error up the stack
    # NOTE: we want to include a specific substring in the error message to trigger summarization
    messages_oai_format = [m.to_openai_dict() for m in messages]