diff --git a/flaml/autogen/agent/chat_agent.py b/flaml/autogen/agent/chat_agent.py
deleted file mode 100644
index ec386f4c0c..0000000000
--- a/flaml/autogen/agent/chat_agent.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from .agent import Agent
-from flaml.autogen.code_utils import DEFAULT_MODEL
-from flaml import oai
-
-
-class ChatAgent(Agent):
-    DEFAULT_SYSTEM_MESSAGE = """You are a chat agent.
-    """
-
-    DEFAULT_CONFIG = {
-        "model": DEFAULT_MODEL,
-    }
-
-    def __init__(self, name, system_message=DEFAULT_SYSTEM_MESSAGE, **config):
-        super().__init__(name, system_message)
-        self._config = self.DEFAULT_CONFIG.copy()
-        self._config.update(config)
-        self._sender_dict = {}
-
-    def receive(self, message, sender):
-        super().receive(message, sender)
-        responses = oai.ChatCompletion.create(messages=self._conversations[sender.name], **self._config)
-        # cost = oai.ChatCompletion.cost(responses)
-        response = oai.ChatCompletion.extract_text(responses)[0]
-        self._send(response, sender)
diff --git a/test/autogen/test_user_proxy_agent.py b/test/autogen/test_user_proxy_agent.py
deleted file mode 100644
index 6a4c78a9c1..0000000000
--- a/test/autogen/test_user_proxy_agent.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from flaml import oai
-
-KEY_LOC = "test/autogen"
-
-
-def test_user_proxy_agent():
-    try:
-        import openai
-    except ImportError:
-        return
-    from flaml.autogen.agent.chat_agent import ChatAgent
-    from flaml.autogen.agent.user_proxy_agent import UserProxyAgent
-
-    conversations = {}
-    oai.ChatCompletion.start_logging(conversations)
-    agent = ChatAgent("chat_agent", config_list=oai.config_list_gpt4_gpt35(key_file_path=KEY_LOC))
-    user = UserProxyAgent("human_user", human_input_mode="NEVER", max_consecutive_auto_reply=2)
-    agent.receive(
-        """Write python code to solve the equation x^3=125. You must write code in the following format. You must always print the result.
-        Wait for me to return the result.
-        ```python
-        # your code
-        print(your_result)
-        ```
-        """,
-        user,
-    )
-    print(conversations)
-
-
-if __name__ == "__main__":
-    test_user_proxy_agent()
diff --git a/website/docs/Use-Cases/Auto-Generation.md b/website/docs/Use-Cases/Auto-Generation.md
index 3aad9242e9..0ce8023eb7 100644
--- a/website/docs/Use-Cases/Auto-Generation.md
+++ b/website/docs/Use-Cases/Auto-Generation.md
@@ -382,8 +382,59 @@ The compact history is more efficient and the individual API call history contai
 
 [`flaml.autogen.agents`](../reference/autogen/agent/agent) contains an experimental implementation of interactive agents which can adapt to human or simulated feedback. This subpackage is under active development.
 
-*Interested in trying it yourself? Please check the following notebook example:*
-* [Use agents in FLAML to perform tasks with code](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agent_auto_feedback_from_code_execution.ipynb)
+We have designed different classes of agents that communicate with one another by exchanging messages to collaboratively finish a task. An agent can communicate with other agents and perform actions; the agent classes differ in the actions they perform in the `receive` method.
+
+### `AssistantAgent`
+
+`AssistantAgent` is an agent class designed to act as an assistant by responding to user requests. When it receives a message (typically a description of a task that needs to be solved), it can write Python code (in a Python code block) for the user to execute. Under the hood, the Python code is written by an LLM (e.g., GPT-4).
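+
+For instance, here is a minimal sketch of constructing an assistant with a custom system message. The `system_message` and `config_list` keyword arguments shown are assumptions about the constructor interface and may differ across versions:
+```python
+from flaml import oai
+from flaml.autogen.agent import AssistantAgent
+
+# create an assistant whose system message steers it toward writing code;
+# extra keyword arguments (assumed) are forwarded as LLM inference configuration
+assistant = AssistantAgent(
+    name="coding_assistant",
+    system_message="You are a helpful assistant. Solve tasks by writing Python code.",
+    config_list=oai.config_list_gpt4_gpt35(key_file_path="test/autogen"),
+)
+```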
+
+### `UserProxyAgent`
+`UserProxyAgent` is an agent class that serves as a proxy for the human user. Upon receiving a message, the `UserProxyAgent` either solicits the human user's input or prepares an automatically generated reply. Which action it takes depends on the `human_input_mode` and `max_consecutive_auto_reply` settings specified when the `UserProxyAgent` instance is constructed, and on whether human input is available.
+
+Currently, the automatically generated reply is based on automatic code execution: the `UserProxyAgent` triggers code execution when it detects an executable code block in the received message and no human input is provided. We plan to add more capabilities to `UserProxyAgent` beyond code execution. One can also extend it by overriding its `auto_reply` function to add or modify responses to specific types of messages from the `AssistantAgent` (a sketch is shown after the example walkthrough below). For example, one can extend it to execute function calls to external APIs, which is especially useful with the newly added [function calling capability of OpenAI's Chat Completions API](https://openai.com/blog/function-calling-and-other-api-updates?ref=upstract.com). This auto-reply capability allows for more autonomous user-agent communication while retaining the possibility of human intervention.
+
+Example usage of the agents to solve a task with code:
+```python
+from flaml.autogen.agent import AssistantAgent, UserProxyAgent
+
+# create an AssistantAgent instance named "assistant"
+assistant = AssistantAgent(name="assistant")
+
+# create a UserProxyAgent instance named "user_proxy"
+user_proxy = UserProxyAgent(
+    name="user_proxy",
+    human_input_mode="NEVER",  # in this mode, the agent never solicits human input and always auto-replies
+    max_consecutive_auto_reply=10,  # the maximum number of consecutive auto replies
+    # the function that determines whether a message is a termination message
+    is_termination_msg=lambda x: x.rstrip().endswith("TERMINATE") or x.rstrip().endswith('"TERMINATE".'),
+    work_dir=".",
+)
+
+# the assistant receives a message from the user, which contains the task description
+assistant.receive(
+    """What date is today? Which big tech stock has the largest year-to-date gain this year? How much is the gain?""",
+    user_proxy,
+)
+```
+In the example above, we create an `AssistantAgent` named "assistant" to serve as the assistant and a `UserProxyAgent` named "user_proxy" to serve as a proxy for the human user.
+1. The assistant receives a message from the user_proxy containing the task description.
+2. The assistant then tries to write Python code to solve the task and sends the response to the user_proxy.
+3. Once the user_proxy receives a response from the assistant, it tries to reply by either soliciting human input or preparing an automatically generated reply. Since `human_input_mode` is set to `"NEVER"` in this example, the user_proxy does not solicit human input; instead, it executes the received code and uses the execution result as the auto reply.
+4. The assistant then generates a further response for the user_proxy. The user_proxy can then decide whether to terminate the conversation. If not, steps 3 and 4 are repeated.
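+
+As referenced above, the following is a minimal sketch of overriding `auto_reply` to handle function calls. The exact `auto_reply` signature is version-dependent, and the `CALL:` message convention and `query_external_api` helper are hypothetical, introduced only for illustration:
+```python
+from flaml.autogen.agent import UserProxyAgent
+
+
+class ApiCallingUserProxyAgent(UserProxyAgent):
+    def auto_reply(self, message, sender, default_reply=""):
+        # intercept messages that request an external API call
+        # (the "CALL:" prefix is a made-up convention for this sketch)
+        if message.strip().startswith("CALL:"):
+            result = query_external_api(message.strip()[len("CALL:"):])  # hypothetical helper
+            self._send(str(result), sender)
+        else:
+            # fall back to the built-in behavior, e.g., automatic code execution
+            super().auto_reply(message, sender, default_reply)
+```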
+
+Please find below a visual illustration of how `UserProxyAgent` and `AssistantAgent` collaboratively solve the task above:
+![Agent Example](images/agent_example.png)
+
+Notes:
+- Under `human_input_mode="NEVER"`, the multi-turn conversation between the assistant and the user_proxy stops when the number of auto replies reaches the upper limit specified by `max_consecutive_auto_reply`, or when the received message is a termination message according to `is_termination_msg`.
+- When `human_input_mode` is set to `"ALWAYS"`, the user proxy agent solicits human input every time a message is received, and the conversation stops when the human input is "exit", or when the received message is a termination message and no human input is provided.
+- When `human_input_mode` is set to `"TERMINATE"`, the user proxy agent solicits human input only when a termination message is received or when the number of auto replies reaches `max_consecutive_auto_reply`.
+
+*Interested in trying it yourself? Please check the following notebook examples:*
+* [Interactive LLM Agent with Auto Feedback from Code Execution](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agent_auto_feedback_from_code_execution.ipynb)
+* [Interactive LLM Agent with Human Feedback](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agent_human_feedback.ipynb)
+* [Interactive LLM Agent Dealing with Web Info](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agent_web_info.ipynb)
 
 ## Utilities for Applications
diff --git a/website/docs/Use-Cases/images/agent_example.png b/website/docs/Use-Cases/images/agent_example.png
new file mode 100644
index 0000000000..5bce17cb26
Binary files /dev/null and b/website/docs/Use-Cases/images/agent_example.png differ