simplify the initiation of chat (#1131)
* simplify the initiation of chat

* version update

* include openai

* completion
sonichi authored Jul 18, 2023
1 parent 7665f73 commit 16f0fcd
Showing 16 changed files with 146 additions and 121 deletions.
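At a glance, this commit replaces the old two-step pattern `assistant.receive(user.generate_init_prompt(task), user)` with a single `user.initiate_chat(assistant, task)` call, and makes several helper methods public (`send`, `execute_code`, `execute_one_wolfram_query`, `execute_one_python_code`). A minimal, hedged sketch of the new entry point; the credentials are placeholders and the import paths follow the file layout in this diff (adjust to your installed version):

```python
# Hedged sketch of the simplified chat initiation introduced in this commit.
from flaml.autogen.agent.assistant_agent import AssistantAgent
from flaml.autogen.agent.user_proxy_agent import UserProxyAgent

# placeholder credentials; replace with your own OpenAI / Azure OpenAI config
config_list = [{"model": "gpt-4", "api_key": "<your OpenAI API key>"}]

assistant = AssistantAgent("assistant", config_list=config_list)
user = UserProxyAgent("user", human_input_mode="NEVER", work_dir="coding", use_docker=False)

# new in this commit: one call generates the initial prompt and sends it
user.initiate_chat(assistant, "What date is today? Compare the year-to-date gain for META and TESLA.")

# roughly equivalent old pattern, removed from the notebooks below:
# assistant.receive(user.generate_init_prompt("..."), user)
```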
4 changes: 2 additions & 2 deletions flaml/autogen/agent/agent.py
@@ -63,13 +63,13 @@ def _append_oai_message(self, message: Union[Dict, str], role, conversation_id):
oai_message["role"] = "function" if message.get("role") == "function" else role
self._oai_conversations[conversation_id].append(oai_message)

def _send(self, message: Union[Dict, str], recipient):
def send(self, message: Union[Dict, str], recipient):
"""Send a message to another agent."""
# When the agent composes and sends the message, the role of the message is "assistant". (If 'role' exists and is 'function', it will remain unchanged.)
self._append_oai_message(message, "assistant", recipient.name)
recipient.receive(message, self)

def _receive(self, message: Union[Dict, str], sender):
def _receive(self, message: Union[Dict, str], sender: "Agent"):
"""Receive a message from another agent.
Args:
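With `_send` renamed to `send`, pushing an extra turn into an ongoing conversation no longer requires a private method. A hedged sketch (placeholder credentials; agent names are illustrative, mirroring the auto-feedback notebook below):

```python
# Hedged sketch: a follow-up turn sent through the now-public send().
from flaml.autogen.agent.assistant_agent import AssistantAgent
from flaml.autogen.agent.user_proxy_agent import UserProxyAgent

config_list = [{"model": "gpt-4", "api_key": "<your OpenAI API key>"}]  # placeholder
assistant = AssistantAgent("assistant", config_list=config_list)
user = UserProxyAgent("user", human_input_mode="NEVER", work_dir="coding", use_docker=False)

user.initiate_chat(assistant, "Write a Python script that prints the current date.")
# follow-up turn in the same conversation; previously this needed the private _send()
user.send("Now also print the date in ISO 8601 format.", assistant)
```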
2 changes: 1 addition & 1 deletion flaml/autogen/agent/assistant_agent.py
@@ -42,7 +42,7 @@ def receive(self, message: Union[Dict, str], sender):

super().receive(message, sender)
responses = oai.ChatCompletion.create(messages=self._oai_conversations[sender.name], **self._config)
self._send(oai.ChatCompletion.extract_text_or_function_call(responses)[0], sender)
self.send(oai.ChatCompletion.extract_text_or_function_call(responses)[0], sender)

def reset(self):
self._sender_dict.clear()
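For reference, a hedged sketch of the completion call that `AssistantAgent.receive` makes above; the config handling is simplified here, the `oai` import path mirrors the `flaml.autogen` package layout, and the credentials are placeholders:

```python
# Hedged sketch of the underlying chat-completion call used by AssistantAgent.
from flaml.autogen import oai

config_list = [{"model": "gpt-4", "api_key": "<your OpenAI API key>"}]  # placeholder
messages = [{"role": "user", "content": "Say hello in exactly three words."}]

# same call shape as in receive(): the accumulated messages plus the agent's config
response = oai.ChatCompletion.create(messages=messages, config_list=config_list)
# returns the reply text, or the function_call payload when the model makes one
print(oai.ChatCompletion.extract_text_or_function_call(response)[0])
```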
12 changes: 6 additions & 6 deletions flaml/autogen/agent/math_user_proxy_agent.py
@@ -222,7 +222,7 @@ def _reset(self):
self._previous_code = ""
self.last_reply = None

def _execute_one_python_code(self, pycode):
def execute_one_python_code(self, pycode):
"""Execute python code blocks.
Previous python code will be saved and executed together with the new code.
@@ -278,7 +278,7 @@ def _execute_one_python_code(self, pycode):
self._previous_code = tmp
return output, is_success

def _execute_one_wolfram_query(self, query: str):
def execute_one_wolfram_query(self, query: str):
"""
Run one wolfram query and return the output.
return:
@@ -302,7 +302,7 @@ def auto_reply(self, message, sender, default_reply=""):
# no code block is found, lang should be `UNKNOWN`
if default_reply == "":
default_reply = "Continue. Please keep solving the problem until you need to query. (If you get to the answer, put it in \\boxed{}.)"
self._send(default_reply, sender)
self.send(default_reply, sender)
else:
is_success, all_success = True, True
reply = ""
@@ -311,9 +311,9 @@ def auto_reply(self, message, sender, default_reply=""):
if not lang:
lang = infer_lang(code)
if lang == "python":
output, is_success = self._execute_one_python_code(code)
output, is_success = self.execute_one_python_code(code)
elif lang == "wolfram":
output, is_success = self._execute_one_wolfram_query(code)
output, is_success = self.execute_one_wolfram_query(code)
else:
output = "Error: Unknown language."
is_success = False
@@ -338,7 +338,7 @@ def auto_reply(self, message, sender, default_reply=""):
self._accum_invalid_q_per_step = 0
reply = "Please revisit the problem statement and your reasoning. If you think this step is correct, solve it yourself and continue the next step. Otherwise, correct this step."

self._send(reply, sender)
self.send(reply, sender)


# Imported from langchain. Langchain is licensed under MIT License:
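Because the underscore prefixes are dropped, these helpers can now be called directly on a `MathUserProxyAgent` instance outside of a chat. A hedged sketch; the return shape follows the `output, is_success` usage above, and the snippet assumes `sympy` is available (as the MathChat setup does):

```python
# Hedged sketch: calling the now-public code-execution helper directly.
from flaml.autogen.agent.math_user_proxy_agent import MathUserProxyAgent

proxy = MathUserProxyAgent()  # default construction, as in the function-call notebook below

code = "from sympy import Symbol, solve\nx = Symbol('x')\nprint(solve(x**2 - 5*x + 6))"
output, is_success = proxy.execute_one_python_code(code)
print(is_success, output)

# execute_one_wolfram_query(query) is analogous but requires WOLFRAM_ALPHA_APPID to be set
```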
41 changes: 34 additions & 7 deletions flaml/autogen/agent/user_proxy_agent.py
@@ -77,7 +77,7 @@ def use_docker(self) -> Union[bool, str]:
or str value of the docker image name to use."""
return self._use_docker

def _execute_code(self, code_blocks):
def execute_code(self, code_blocks):
"""Execute the code and return the result."""
logs_all = ""
for code_block in code_blocks:
@@ -185,19 +185,19 @@ def _execute_function(self, func_call):
def auto_reply(self, message: dict, sender, default_reply=""):
"""Generate an auto reply."""
if "function_call" in message:
is_exec_success, func_return = self._execute_function(message["function_call"])
self._send(func_return, sender)
_, func_return = self._execute_function(message["function_call"])
self.send(func_return, sender)
return

code_blocks = extract_code(message["content"])
if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
# no code block is found, lang should be `UNKNOWN`
self._send(default_reply, sender)
self.send(default_reply, sender)
else:
# try to execute the code
exitcode, logs = self._execute_code(code_blocks)
exitcode, logs = self.execute_code(code_blocks)
exitcode2str = "execution succeeded" if exitcode == 0 else "execution failed"
self._send(f"exitcode: {exitcode} ({exitcode2str})\nCode output: {logs}", sender)
self.send(f"exitcode: {exitcode} ({exitcode2str})\nCode output: {logs}", sender)

def receive(self, message: Union[Dict, str], sender):
"""Receive a message from the sender agent.
@@ -230,9 +230,36 @@ def receive(self, message: Union[Dict, str], sender):
if reply:
# reset the consecutive_auto_reply_counter
self._consecutive_auto_reply_counter[sender.name] = 0
self._send(reply, sender)
self.send(reply, sender)
return

self._consecutive_auto_reply_counter[sender.name] += 1
print("\n>>>>>>>> NO HUMAN INPUT RECEIVED. USING AUTO REPLY FOR THE USER...", flush=True)
self.auto_reply(message, sender, default_reply=reply)

def generate_init_prompt(self, *args, **kwargs) -> Union[str, Dict]:
"""Generate the initial prompt for the agent.
Override this function to customize the initial prompt based on the user's request.
"""
return args[0]

def initiate_chat(self, recipient, *args, **kwargs):
"""Initiate a chat with the receiver agent.
`generate_init_prompt` is called to generate the initial prompt for the agent.
Args:
recipient: the recipient agent.
*args: any additional arguments.
**kwargs: any additional keyword arguments.
"""
self.send(self.generate_init_prompt(*args, **kwargs), recipient)

def register_function(self, function_map: Dict[str, Callable]):
"""Register functions to the agent.
Args:
function_map: a dictionary mapping function names to functions.
"""
self._function_map.update(function_map)
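The docstring above names the intended extension point: subclass `UserProxyAgent` and override `generate_init_prompt` so that `initiate_chat` wraps the raw task before the first `send`. A hedged sketch; the subclass name and prompt wording are illustrative, and the credentials are placeholders:

```python
# Hedged sketch of customizing the initial prompt via generate_init_prompt.
from flaml.autogen.agent.assistant_agent import AssistantAgent
from flaml.autogen.agent.user_proxy_agent import UserProxyAgent

class TaskUserProxyAgent(UserProxyAgent):
    def generate_init_prompt(self, task: str) -> str:
        # wrap the user's task with extra instructions before it is sent
        return f"Solve the task below step by step.\n\nTask: {task}"

config_list = [{"model": "gpt-4", "api_key": "<your OpenAI API key>"}]  # placeholder
assistant = AssistantAgent("assistant", config_list=config_list)
user = TaskUserProxyAgent("user", human_input_mode="NEVER", work_dir="coding", use_docker=False)

# initiate_chat forwards its extra arguments to generate_init_prompt
user.initiate_chat(assistant, "Count the number of prime numbers below 1000.")
```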
43 changes: 15 additions & 28 deletions notebook/autogen_agent_MathChat.ipynb
@@ -31,7 +31,7 @@
"metadata": {},
"outputs": [],
"source": [
"# %pip install flaml[mathchat]==2.0.0rc2"
"# %pip install flaml[mathchat]~=2.0.0rc4"
]
},
{
@@ -122,14 +122,16 @@
" system_message=\"You are a helpful assistant.\",\n",
" request_timeout=600, \n",
" seed=42, \n",
" config_list=config_list)\n",
" config_list=config_list,\n",
")\n",
"\n",
"# 2. create the MathUserProxyAgent instance named \"mathproxyagent\"\n",
"# By default, the human_input_mode is \"NEVER\", which means the agent will not ask for human input.\n",
"mathproxyagent = MathUserProxyAgent(\n",
" name=\"MathChatAgent\", \n",
" human_input_mode=\"NEVER\",\n",
" use_docker=False)"
" use_docker=False,\n",
")"
]
},
{
@@ -283,11 +285,8 @@
"# given a math problem, we use the mathproxyagent to generate a prompt to be sent to the assistant as the initial message.\n",
"# the assistant receives the message and generates a response. The response will be sent back to the mathproxyagent for processing.\n",
"# The conversation continues until the termination condition is met, in MathChat, the termination condition is the detect of \"\\boxed{}\" in the response.\n",
"math_problem = \"Find all $x$ that satisfy the inequality $(2x+10)(x+3)<(3x+9)(x+8)$. Express your answer in interval notation.\"\n",
"assistant.receive(\n",
" message=mathproxyagent.generate_init_prompt(math_problem),\n",
" sender=mathproxyagent,\n",
")"
"math_problem = \"Find all $x$ that satisfy the inequality $(2x+10)(x+3)<(3x+9)(x+8)$. Express your answer in interval notation.\"\n",
"mathproxyagent.initiate_chat(assistant, math_problem)"
]
},
{
@@ -429,11 +428,8 @@
"source": [
"assistant.reset()\n",
"\n",
"math_problem = \"For what negative value of $k$ is there exactly one solution to the system of equations \\\\begin{align*}\\ny &= 2x^2 + kx + 6 \\\\\\\\\\ny &= -x + 4?\\n\\\\end{align*}\"\n",
"assistant.receive(\n",
" mathproxyagent.generate_init_prompt(math_problem),\n",
" mathproxyagent,\n",
")"
"math_problem = \"For what negative value of $k$ is there exactly one solution to the system of equations \\\\begin{align*}\\ny &= 2x^2 + kx + 6 \\\\\\\\\\ny &= -x + 4?\\n\\\\end{align*}\"\n",
"mathproxyagent.initiate_chat(assistant, math_problem)"
]
},
{
@@ -561,11 +557,8 @@
"source": [
"assistant.reset()\n",
"\n",
"math_problem = \"Find all positive integer values of $c$ such that the equation $x^2-7x+c=0$ only has roots that are real and rational. Express them in decreasing order, separated by commas.\"\n",
"assistant.receive(\n",
" mathproxyagent.generate_init_prompt(math_problem),\n",
" mathproxyagent,\n",
")"
"math_problem = \"Find all positive integer values of $c$ such that the equation $x^2-7x+c=0$ only has roots that are real and rational. Express them in decreasing order, separated by commas.\"\n",
"mathproxyagent.initiate_chat(assistant, math_problem)"
]
},
{
@@ -760,11 +753,8 @@
"assistant.reset() # clear LLM assistant's message history\n",
"\n",
"# we set the prompt_type to \"python\", which is a simplied version of the default prompt.\n",
"math_problem = \"Problem: If $725x + 727y = 1500$ and $729x+ 731y = 1508$, what is the value of $x - y$ ?\"\n",
"assistant.receive(\n",
" mathproxyagent.generate_init_prompt(math_problem, prompt_type=\"python\"),\n",
" mathproxyagent,\n",
")"
"math_problem = \"Problem: If $725x + 727y = 1500$ and $729x+ 731y = 1508$, what is the value of $x - y$ ?\"\n",
"mathproxyagent.initiate_chat(assistant, math_problem, prompt_type=\"python\")"
]
},
{
@@ -904,11 +894,8 @@
" os.environ[\"WOLFRAM_ALPHA_APPID\"] = open(\"wolfram.txt\").read().strip()\n",
"\n",
"# we set the prompt_type to \"two_tools\", which allows the assistant to select wolfram alpha when necessary.\n",
"math_problem = \"Find all numbers $a$ for which the graph of $y=x^2+a$ and the graph of $y=ax$ intersect. Express your answer in interval notation.\"\n",
"assistant.receive(\n",
" mathproxyagent.generate_init_prompt(math_problem, prompt_type=\"two_tools\"),\n",
" mathproxyagent,\n",
")"
"math_problem = \"Find all numbers $a$ for which the graph of $y=x^2+a$ and the graph of $y=ax$ intersect. Express your answer in interval notation.\"\n",
"mathproxyagent.initiate_chat(assistant, math_problem, prompt_type=\"two_tools\")"
]
}
],
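Worth noting from the cells above: keyword arguments passed to `initiate_chat` are forwarded to `generate_init_prompt`, which is how `prompt_type` reaches `MathUserProxyAgent`. A hedged, self-contained sketch; credentials are placeholders and the `prompt_type` values come from this notebook:

```python
# Hedged sketch: prompt_type flows through initiate_chat's **kwargs.
from flaml.autogen.agent.assistant_agent import AssistantAgent
from flaml.autogen.agent.math_user_proxy_agent import MathUserProxyAgent

config_list = [{"model": "gpt-4", "api_key": "<your OpenAI API key>"}]  # placeholder
assistant = AssistantAgent(
    name="assistant",
    system_message="You are a helpful assistant.",
    config_list=config_list,
)
mathproxyagent = MathUserProxyAgent(name="MathChatAgent", human_input_mode="NEVER", use_docker=False)

problem = "Simplify $\\frac{3}{\\sqrt{12}}$ and rationalize the denominator."
# prompt_type ends up in MathUserProxyAgent.generate_init_prompt via **kwargs
mathproxyagent.initiate_chat(assistant, problem, prompt_type="python")
```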
12 changes: 6 additions & 6 deletions notebook/autogen_agent_auto_feedback_from_code_execution.ipynb
@@ -44,7 +44,7 @@
},
"outputs": [],
"source": [
"# %pip install flaml[autogen]==2.0.0rc3"
"# %pip install flaml[autogen]~=2.0.0rc4"
]
},
{
@@ -209,9 +209,9 @@
" use_docker=False, # set to True if you are using docker\n",
")\n",
"# the assistant receives a message from the user, which contains the task description\n",
"assistant.receive(\n",
"user.initiate_chat(\n",
" assistant,\n",
" \"\"\"What date is today? Compare the year-to-date gain for META and TESLA.\"\"\",\n",
" user,\n",
")"
]
},
@@ -314,9 +314,9 @@
],
"source": [
"# followup of the previous question\n",
"assistant.receive(\n",
" \"\"\"Plot a chart of their stock price change YTD and save to stock_price_ytd.png.\"\"\",\n",
" user\n",
"user.send(\n",
" recipient=assistant,\n",
" message=\"\"\"Plot a chart of their stock price change YTD and save to stock_price_ytd.png.\"\"\",\n",
")"
]
},
45 changes: 23 additions & 22 deletions notebook/autogen_agent_function_call.ipynb
@@ -36,7 +36,7 @@
"metadata": {},
"outputs": [],
"source": [
"# %pip install flaml[mathchat]==2.0.0rc3"
"# %pip install flaml[mathchat]~=2.0.0rc4"
]
},
{
@@ -79,11 +79,11 @@
"source": [
"## Making Function Calls\n",
"\n",
"In this example, we demonstrate function call execution with `AssistantAgent` and `UserProxyAgent`. With the default system prompt of `AssistantAgent`, we allow the LLM assistant to perform tasks with code, and the `UserProxyAgent` would extract code blocks from the LLM response and execute them. With the new \"function_call\" feature, we define a new function using the pre-defined `_execute_code` from `UserProxyAgent` and specify the description of the function in the OpenAI config. \n",
"In this example, we demonstrate function call execution with `AssistantAgent` and `UserProxyAgent`. With the default system prompt of `AssistantAgent`, we allow the LLM assistant to perform tasks with code, and the `UserProxyAgent` would extract code blocks from the LLM response and execute them. With the new \"function_call\" feature, we define a new function using the pre-defined `execute_code` from `UserProxyAgent` and specify the description of the function in the OpenAI config. \n",
"\n",
"Then, the model has two paths to execute code:\n",
"1. Put the code blocks in the response. `UserProxyAgent` will extract and execute the code through `_execute_code` method in the class.\n",
"2. As we put a function description to OpenAI config and passed a function `execute_code_function` to `UserProxyAgent`, the model can also make function calls (will be put in `function_call` field of the API reply). `UserProxyAgent` will execute the function call through a `_execute_function` method."
"1. Put the code blocks in the response. `UserProxyAgent` will extract and execute the code through `execute_code` method in the class.\n",
"2. As we put a function description to OpenAI config and register a function `exec_code` in `UserProxyAgent`, the model can also make function calls (will be put in `function_call` field of the API reply). `UserProxyAgent` will execute the function call through the registered `exec_code` method."
]
},
{
@@ -234,24 +234,26 @@
"}\n",
"chatbot = AssistantAgent(\"chatbot\", config_list=config_list, **oai_config)\n",
"\n",
"# use pre-defined execute_code function from a UserProxyAgent instance\n",
"# for simplicity, we don't pass in `exec_func` directly to UserProxyAgent because it requires a list of tuple as parameter\n",
"# instead, we define a wrapper function to call `exec_func`\n",
"exec_func = UserProxyAgent(name=\"execute_code\", work_dir=\"coding\", use_docker=False)._execute_code\n",
"\n",
"def execute_code(code_type, code):\n",
" return exec_func([(code_type, code)])\n",
"\n",
"# create a UserProxyAgent instance named \"user\"\n",
"user = UserProxyAgent(\n",
" \"user\",\n",
" human_input_mode=\"NEVER\",\n",
" function_map={\"execute_code\": execute_code},\n",
" work_dir=\"coding\",\n",
")\n",
"\n",
"# define an `execute_code` function according to the function desription\n",
"def exec_code(code_type, code):\n",
" # here we reuse the method in the user proxy agent\n",
" # in general, this is not necessary\n",
" return user.execute_code([(code_type, code)])\n",
"\n",
"# register the `execute_code` function\n",
"user.register_function(function_map={\"execute_code\": exec_code})\n",
"\n",
"# start the conversation\n",
"chatbot.receive(\n",
"user.initiate_chat(\n",
" chatbot,\n",
" \"Draw a rocket and save to a file named 'rocket.svg'\",\n",
" user,\n",
")\n"
]
},
@@ -289,7 +291,7 @@
"source": [
"## Another example with Wolfram Alpha API\n",
"\n",
"We give another example of query Wolfram Alpha API to solve math problem. We use the predefined function from `MathUserProxyAgent()`, we directly pass the class method, `MathUserProxyAgent()._execute_one_wolfram_query`, as the function to be called."
"We give another example of query Wolfram Alpha API to solve math problem. We use the predefined function `MathUserProxyAgent().execute_one_wolfram_query` as the function to be called."
]
},
{
Expand Down Expand Up @@ -389,19 +391,18 @@
"}\n",
"chatbot = AssistantAgent(\"chatbot\", sys_prompt, config_list=config_list, **oai_config)\n",
"\n",
"\n",
"# the key in `function_map` should match the function name passed to OpenAI\n",
"# we pass a class instance directly\n",
"# the key in `function_map` should match the function name in \"functions\" above\n",
"# we register a class instance method directly\n",
"user = UserProxyAgent(\n",
" \"user\",\n",
" human_input_mode=\"NEVER\",\n",
" function_map={\"query_wolfram\": MathUserProxyAgent()._execute_one_wolfram_query},\n",
" function_map={\"query_wolfram\": MathUserProxyAgent().execute_one_wolfram_query},\n",
")\n",
"\n",
"# start the conversation\n",
"chatbot.receive(\n",
"user.initiate_chat(\n",
" chatbot,\n",
" \"Problem: Find all $x$ that satisfy the inequality $(2x+10)(x+3)<(3x+9)(x+8)$. Express your answer in interval notation.\",\n",
" user,\n",
")\n"
]
}
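Beyond reusing `execute_code` or the Wolfram helper, `register_function` accepts any callable whose key matches a name declared in the OpenAI `functions` config. A hedged sketch with a made-up `word_count` function; the function name and schema are illustrative, not part of the library, and the credentials are placeholders:

```python
# Hedged sketch of registering a custom callable for function_call execution.
from flaml.autogen.agent.assistant_agent import AssistantAgent
from flaml.autogen.agent.user_proxy_agent import UserProxyAgent

config_list = [{"model": "gpt-4-0613", "api_key": "<your OpenAI API key>"}]  # placeholder

oai_config = {
    "functions": [
        {
            "name": "word_count",
            "description": "Count the number of words in a piece of text.",
            "parameters": {
                "type": "object",
                "properties": {"text": {"type": "string", "description": "The text to count."}},
                "required": ["text"],
            },
        }
    ],
}

def word_count(text):
    # the return value is sent back to the assistant as the function result
    return str(len(text.split()))

chatbot = AssistantAgent("chatbot", config_list=config_list, **oai_config)
user = UserProxyAgent("user", human_input_mode="NEVER")

# the key must match the "name" advertised in oai_config["functions"]
user.register_function(function_map={"word_count": word_count})

user.initiate_chat(chatbot, "Use the word_count function to count the words in 'the quick brown fox'.")
```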
2 changes: 1 addition & 1 deletion notebook/autogen_agent_human_feedback.ipynb
@@ -44,7 +44,7 @@
},
"outputs": [],
"source": [
"# %pip install flaml[autogen]==2.0.0rc3"
"# %pip install flaml[autogen]~=2.0.0rc4"
]
},
{