Skip to content

Commit

Permalink
AutoGen misc fixes (#603)
Browse files Browse the repository at this point in the history
* don't add anything except for assistant messages to the global autogen message history

* properly format autogen messages when using local llms (allow naming to get passed through to the prompt formatter)

* add extra handling of autogen's name field in step()

* comments
  • Loading branch information
cpacker authored and sarahwooders committed Dec 26, 2023
1 parent 0196c77 commit 83d59cf
Show file tree
Hide file tree
Showing 5 changed files with 64 additions and 10 deletions.
11 changes: 11 additions & 0 deletions memgpt/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -512,6 +512,17 @@ def step(self, user_message, first_message=False, first_message_retry_limit=FIRS
if user_message is not None:
self.interface.user_message(user_message)
packed_user_message = {"role": "user", "content": user_message}
# Special handling for AutoGen messages with 'name' field
try:
user_message_json = json.loads(user_message)
# Treat 'name' as a special field
# If it exists in the input message, elevate it to the 'message' level
if "name" in user_message_json:
packed_user_message["name"] = user_message_json["name"]
user_message_json.pop("name", None)
packed_user_message["content"] = json.dumps(user_message_json)
except Exception as e:
print(f"{CLI_WARNING_PREFIX}handling of 'name' field failed with: {e}")
input_message_sequence = self.messages + [packed_user_message]
else:
input_message_sequence = self.messages
Expand Down
10 changes: 7 additions & 3 deletions memgpt/autogen/interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,8 @@ def internal_monologue(self, msg):
if not self.show_inner_thoughts:
return
message = f"\x1B[3m{Fore.LIGHTBLACK_EX}💭 {msg}{Style.RESET_ALL}" if self.fancy else f"[inner thoughts] {msg}"
self.message_list.append(message)
print(message)
# self.message_list.append(message)

def assistant_message(self, msg):
if self.debug:
Expand All @@ -83,13 +84,15 @@ def memory_message(self, msg):
if self.debug:
print(f"memory :: {msg}")
message = f"{Fore.LIGHTMAGENTA_EX}{Style.BRIGHT}🧠 {Fore.LIGHTMAGENTA_EX}{msg}{Style.RESET_ALL}" if self.fancy else f"[memory] {msg}"
self.message_list.append(message)
# self.message_list.append(message)
print(message)

def system_message(self, msg):
    """Display a system-level message to the console.

    System messages are display-only: they are intentionally NOT appended to
    ``self.message_list`` (the shared AutoGen message history), matching the
    sibling handlers (``internal_monologue``, ``memory_message``,
    ``function_message``) where only assistant replies are recorded.

    Args:
        msg: The system message text to display.
    """
    if self.debug:
        print(f"system :: {msg}")
    # ANSI-colored output when fancy mode is on; plain tagged text otherwise.
    message = f"{Fore.MAGENTA}{Style.BRIGHT}🖥️ [system] {Fore.MAGENTA}{msg}{Style.RESET_ALL}" if self.fancy else f"[system] {msg}"
    # NOTE: previously this also did self.message_list.append(message); removed
    # so that only assistant messages enter the global AutoGen message history.
    print(message)

def user_message(self, msg, raw=False):
if self.debug:
Expand Down Expand Up @@ -200,4 +203,5 @@ def function_message(self, msg):
message = f"{Fore.RED}{Style.BRIGHT}⚡ [function] {Fore.RED}{msg}{Style.RESET_ALL}" if self.fancy else f"[function] {msg}"

if message:
self.message_list.append(message)
# self.message_list.append(message)
print(message)
30 changes: 27 additions & 3 deletions memgpt/autogen/memgpt_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,10 +50,12 @@ def create_memgpt_autogen_agent_from_config(
user_desc = "Work by yourself, the user won't reply. Elaborate as much as possible."

# If using azure or openai, save the credentials to the config
if llm_config["model_endpoint_type"] in ["azure", "openai"]:
config = MemGPTConfig.load()
# input(f"llm_config! {llm_config}")
# input(f"config! {config}")
if llm_config["model_endpoint_type"] in ["azure", "openai"] or llm_config["model_endpoint_type"] != config.model_endpoint_type:
# we load here to make sure we don't override existing values
# all we want to do is add extra credentials
config = MemGPTConfig.load()

if llm_config["model_endpoint_type"] == "azure":
config.azure_key = llm_config["azure_key"]
Expand All @@ -67,8 +69,15 @@ def create_memgpt_autogen_agent_from_config(
config.openai_key = llm_config["openai_key"]
llm_config.pop("openai_key")

# else:
# config.model_endpoint_type = llm_config["model_endpoint_type"]

config.save()

# if llm_config["model_endpoint"] != config.model_endpoint:
# config.model_endpoint = llm_config["model_endpoint"]
# config.save()

# Create an AgentConfig option from the inputs
llm_config.pop("name", None)
llm_config.pop("persona", None)
Expand Down Expand Up @@ -257,6 +266,20 @@ def find_new_messages(self, entire_message_list):
"""Extract the subset of messages that's actually new"""
return entire_message_list[self.messages_processed_up_to_idx :]

@staticmethod
def _format_autogen_message(autogen_message):
    """Package an AutoGen-style message for MemGPT consumption.

    AutoGen messages look like ``{'content': "...", 'name': '...', 'role': 'user'}``.
    When the expected fields are present, the sender's ``name`` is forwarded so it
    can be surfaced to the prompt formatter; otherwise the raw message is packaged
    as-is with a warning.

    Args:
        autogen_message: The incoming AutoGen message (expected to be a dict).

    Returns:
        A packaged MemGPT user message (JSON string from ``system.package_user_message``).
    """
    if not isinstance(autogen_message, dict):
        # Fixed: original condition had a dead `or ()` operand (always falsy).
        print(f"Warning: AutoGen message was not a dict -- {autogen_message}")
        user_message = system.package_user_message(autogen_message)
    elif "content" not in autogen_message or "name" not in autogen_message:
        # Fixed: original tested `"name" not in autogen_message` twice; one check suffices.
        print(f"Warning: AutoGen message was missing fields -- {autogen_message}")
        user_message = system.package_user_message(autogen_message)
    else:
        user_message = system.package_user_message(user_message=autogen_message["content"], name=autogen_message["name"])

    return user_message

def _generate_reply_for_user_message(
self,
messages: Optional[List[Dict]] = None,
Expand All @@ -280,7 +303,8 @@ def _generate_reply_for_user_message(
return True, self._default_auto_reply

# Package the user message
user_message = system.package_user_message(user_message)
# user_message = system.package_user_message(user_message)
user_message = self._format_autogen_message(user_message)

# Send a single message into MemGPT
while True:
Expand Down
18 changes: 15 additions & 3 deletions memgpt/local_llm/llm_chat_completion_wrappers/airoboros.py
Original file line number Diff line number Diff line change
Expand Up @@ -332,15 +332,27 @@ def create_function_call(function_call, inner_thoughts=None):
assert message["role"] in ["user", "assistant", "function"], message

if message["role"] == "user":
# Support for AutoGen naming of agents
if "name" in message:
user_prefix = message["name"].strip()
user_prefix = f"USER ({user_prefix})"
else:
user_prefix = "USER"
if self.simplify_json_content:
try:
content_json = json.loads(message["content"])
content_simple = content_json["message"]
prompt += f"\nUSER: {content_simple}"
prompt += f"\n{user_prefix}: {content_simple}"
except:
prompt += f"\nUSER: {message['content']}"
prompt += f"\n{user_prefix}: {message['content']}"
elif message["role"] == "assistant":
prompt += f"\nASSISTANT:"
# Support for AutoGen naming of agents
if "name" in message:
assistant_prefix = message["name"].strip()
assistant_prefix = f"ASSISTANT ({assistant_prefix})"
else:
assistant_prefix = "ASSISTANT"
prompt += f"\n{assistant_prefix}:"
# need to add the function call if there was one
inner_thoughts = message["content"]
if "function_call" in message and message["function_call"]:
Expand Down
5 changes: 4 additions & 1 deletion memgpt/system.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ def get_login_event(last_login="Never (first login)", include_location=False, lo
return json.dumps(packaged_message)


def package_user_message(user_message, time=None, include_location=False, location_name="San Francisco, CA, USA"):
def package_user_message(user_message, time=None, include_location=False, location_name="San Francisco, CA, USA", name=None):
# Package the message with time and location
formatted_time = time if time else get_local_time()
packaged_message = {
Expand All @@ -91,6 +91,9 @@ def package_user_message(user_message, time=None, include_location=False, locati
if include_location:
packaged_message["location"] = location_name

if name:
packaged_message["name"] = name

return json.dumps(packaged_message)


Expand Down

0 comments on commit 83d59cf

Please sign in to comment.