From 6e6b891a2f165e81ebf42a0c5a86573c471c9569 Mon Sep 17 00:00:00 2001 From: wangchongshi Date: Fri, 7 Jun 2024 16:21:59 +0800 Subject: [PATCH 01/12] feat: add reAct agent in agentUniverse. --- .../agent/default/react_agent/__init__.py | 7 +++ .../react_agent/default_cn_prompt.yaml | 35 +++++++++++++++ .../react_agent/default_en_prompt.yaml | 31 +++++++++++++ .../agent/default/react_agent/react_agent.py | 44 +++++++++++++++++++ .../default/react_agent/react_agent.yaml | 17 +++++++ .../plan/planner/react_planner/__init__.py | 7 +++ .../planner/react_planner/react_planner.yaml | 6 +++ .../app/core/tool/google_search_tool.yaml | 4 +- .../app/test/test_react_agent.py | 28 ++++++++++++ sample_standard_app/config/config.toml | 2 +- 10 files changed, 179 insertions(+), 2 deletions(-) create mode 100644 agentuniverse/agent/default/react_agent/__init__.py create mode 100644 agentuniverse/agent/default/react_agent/default_cn_prompt.yaml create mode 100644 agentuniverse/agent/default/react_agent/default_en_prompt.yaml create mode 100644 agentuniverse/agent/default/react_agent/react_agent.py create mode 100644 agentuniverse/agent/default/react_agent/react_agent.yaml create mode 100644 agentuniverse/agent/plan/planner/react_planner/__init__.py create mode 100644 agentuniverse/agent/plan/planner/react_planner/react_planner.yaml create mode 100644 sample_standard_app/app/test/test_react_agent.py diff --git a/agentuniverse/agent/default/react_agent/__init__.py b/agentuniverse/agent/default/react_agent/__init__.py new file mode 100644 index 00000000..89d328f4 --- /dev/null +++ b/agentuniverse/agent/default/react_agent/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/6/4 21:21 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/agentuniverse/agent/default/react_agent/default_cn_prompt.yaml b/agentuniverse/agent/default/react_agent/default_cn_prompt.yaml new file mode 100644 index 
00000000..dfdf0057 --- /dev/null +++ b/agentuniverse/agent/default/react_agent/default_cn_prompt.yaml @@ -0,0 +1,35 @@ +introduction: 你是一位精通信息分析的ai助手。 +target: 你的目标是根据用户的问题以及给出的背景信息给出答案。 +instruction: | + 你必须优先选择使用提供的工具回答用户提出的问题,若用户没有提供工具可以根据你的通识能力解决问题。 + 你在回答时问题必须使用中文回答。 + 你必须从多个角度、维度分析用户的问题,帮助用户获取最全面的信息,需要根据背景和问题,决定搜索哪些信息可以回答问题。 + 请注意: 你在给出最终答案时需要从多角度给出更详细的原因,而不是一个简单的结论。 + + 您可以使用以下工具: + {tools} + + 使用以下格式: + + Question: 您必须回答的问题 + Thought: 你应该经常想想该怎么做 + Action: 要采取的行动应该是 one of [{tool_names}] + Action Input: 行动的输入 + Observation: 行动的结果 + ... (Thought/Action/Action Input/Observation 的过程可以重复 N 次) + Thought: 我现在知道最终答案了 + Final Answer: 原输入问题的最终答案 + + 之前的对话: + {chat_history} + + 背景信息是: + {background} + + 开始! + + Question: {input} + Thought:{agent_scratchpad} +metadata: + type: 'PROMPT' + version: 'default_react_agent.cn' diff --git a/agentuniverse/agent/default/react_agent/default_en_prompt.yaml b/agentuniverse/agent/default/react_agent/default_en_prompt.yaml new file mode 100644 index 00000000..3f7bff67 --- /dev/null +++ b/agentuniverse/agent/default/react_agent/default_en_prompt.yaml @@ -0,0 +1,31 @@ +introduction: You are an AI assistant who is proficient in information analysis. +target: Your goal is to give answers based on the user's question and the context given. +instruction: | + Answer the following questions as best you can. You have access to the following tools: + + {tools} + + Use the following format: + + Question: the input question you must answer + Thought: you should always think about what to do + Action: the action to take, should be one of [{tool_names}] + Action Input: the input to the action + Observation: the result of the action + ... (this Thought/Action/Action Input/Observation can repeat N times) + Thought: I now know the final answer + Final Answer: the final answer to the original input question + + Previous conversion: + {chat_history} + + Background: + {background} + + Begin! 
+ + Question: {input} + Thought:{agent_scratchpad} +metadata: + type: 'PROMPT' + version: 'default_react_agent.en' diff --git a/agentuniverse/agent/default/react_agent/react_agent.py b/agentuniverse/agent/default/react_agent/react_agent.py new file mode 100644 index 00000000..f50e7057 --- /dev/null +++ b/agentuniverse/agent/default/react_agent/react_agent.py @@ -0,0 +1,44 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/5/31 21:22 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: react_agent.py +from agentuniverse.agent.agent import Agent +from agentuniverse.agent.input_object import InputObject + + +class ReActAgent(Agent): + """ReAct Agent class.""" + + def input_keys(self) -> list[str]: + """Return the input keys of the Agent.""" + return ['input'] + + def output_keys(self) -> list[str]: + """Return the output keys of the Agent.""" + return ['output'] + + def parse_input(self, input_object: InputObject, agent_input: dict) -> dict: + """Agent parameter parsing. + + Args: + input_object (InputObject): input parameters passed by the user. + agent_input (dict): agent input preparsed by the agent. + Returns: + dict: agent input parsed from `input_object` by the user. + """ + agent_input['input'] = input_object.get_data('input') + self.agent_model.profile.setdefault('prompt_version', 'default_react_agent.cn') + return agent_input + + def parse_result(self, planner_result: dict) -> dict: + """Planner result parser. + + Args: + planner_result(dict): Planner result + Returns: + dict: Agent result object. 
+ """ + return planner_result diff --git a/agentuniverse/agent/default/react_agent/react_agent.yaml b/agentuniverse/agent/default/react_agent/react_agent.yaml new file mode 100644 index 00000000..48309fc5 --- /dev/null +++ b/agentuniverse/agent/default/react_agent/react_agent.yaml @@ -0,0 +1,17 @@ +info: + name: 'react_agent' + description: 'react agent' +profile: + llm_model: + name: 'default_openai_llm' + model_name: 'gpt-4o' +action: + tool: + - '' +plan: + planner: + name: 'react_planner' +metadata: + type: 'AGENT' + module: 'agentuniverse.agent.default.react_agent.react_agent' + class: 'ReActAgent' \ No newline at end of file diff --git a/agentuniverse/agent/plan/planner/react_planner/__init__.py b/agentuniverse/agent/plan/planner/react_planner/__init__.py new file mode 100644 index 00000000..233deb77 --- /dev/null +++ b/agentuniverse/agent/plan/planner/react_planner/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/6/4 21:00 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/agentuniverse/agent/plan/planner/react_planner/react_planner.yaml b/agentuniverse/agent/plan/planner/react_planner/react_planner.yaml new file mode 100644 index 00000000..8af5eefb --- /dev/null +++ b/agentuniverse/agent/plan/planner/react_planner/react_planner.yaml @@ -0,0 +1,6 @@ +name: 'react_planner' +description: 'react planner' +metadata: + type: 'PLANNER' + module: 'agentuniverse.agent.plan.planner.react_planner.react_planner' + class: 'ReActPlanner' \ No newline at end of file diff --git a/sample_standard_app/app/core/tool/google_search_tool.yaml b/sample_standard_app/app/core/tool/google_search_tool.yaml index 93ca5659..7a8f323a 100644 --- a/sample_standard_app/app/core/tool/google_search_tool.yaml +++ b/sample_standard_app/app/core/tool/google_search_tool.yaml @@ -1,5 +1,7 @@ name: 'google_search_tool' -description: 'demo google search tool' +description: | + tool used for 
google search, the tool is passed in the following way: + input='xxxx' tool_type: 'api' input_keys: ['input'] metadata: diff --git a/sample_standard_app/app/test/test_react_agent.py b/sample_standard_app/app/test/test_react_agent.py new file mode 100644 index 00000000..3f0179c6 --- /dev/null +++ b/sample_standard_app/app/test/test_react_agent.py @@ -0,0 +1,28 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/6/4 21:27 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: test_react_agent.py +import unittest + +from agentuniverse.agent.agent import Agent +from agentuniverse.agent.agent_manager import AgentManager +from agentuniverse.agent.output_object import OutputObject +from agentuniverse.base.agentuniverse import AgentUniverse + + +class ReActAgentTest(unittest.TestCase): + + def setUp(self) -> None: + AgentUniverse().start(config_path='../../config/config.toml') + + def test_react_agent(self): + """Test demo reAct agent.""" + instance: Agent = AgentManager().get_instance_obj('react_agent') + output_object: OutputObject = instance.run(input='分析下巴菲特减持比亚迪的原因') + + +if __name__ == '__main__': + unittest.main() diff --git a/sample_standard_app/config/config.toml b/sample_standard_app/config/config.toml index 4a662064..6abfc733 100644 --- a/sample_standard_app/config/config.toml +++ b/sample_standard_app/config/config.toml @@ -28,7 +28,7 @@ sqldb_wrapper = ['sample_standard_app.app.core.sqldb_wrapper'] # Log config file path, an absolute path or a relative path based on the dir where the current config file is located. log_config_path = './log_config.toml' # Custom key file path, use to save your own secret key like open ai or sth else. REMEMBER TO ADD IT TO .gitignore. -#custom_key_path = './custom_key.toml' +custom_key_path = './custom_key.toml' [DB] # A sqlalchemy db uri used for storing various info, for example, service request, generated during application running. 
From 6e0e9966ac0cee3fcdf5f46c468682abea85521a Mon Sep 17 00:00:00 2001 From: wangchongshi Date: Fri, 7 Jun 2024 16:22:33 +0800 Subject: [PATCH 02/12] feat: add reAct agent in agentUniverse. --- .../planner/react_planner/react_planner.py | 107 ++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 agentuniverse/agent/plan/planner/react_planner/react_planner.py diff --git a/agentuniverse/agent/plan/planner/react_planner/react_planner.py b/agentuniverse/agent/plan/planner/react_planner/react_planner.py new file mode 100644 index 00000000..de623a78 --- /dev/null +++ b/agentuniverse/agent/plan/planner/react_planner/react_planner.py @@ -0,0 +1,107 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/5/31 21:22 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: react_planner.py +from langchain_core.chat_history import InMemoryChatMessageHistory +from langchain.agents import AgentExecutor, create_react_agent +from langchain.tools import Tool as LangchainTool + +from agentuniverse.agent.action.tool.tool import Tool +from agentuniverse.agent.action.tool.tool_manager import ToolManager +from agentuniverse.agent.agent_model import AgentModel +from agentuniverse.agent.input_object import InputObject +from agentuniverse.agent.memory.chat_memory import ChatMemory +from agentuniverse.agent.plan.planner.planner import Planner +from agentuniverse.base.util.prompt_util import process_llm_token +from agentuniverse.llm.llm import LLM +from agentuniverse.prompt.chat_prompt import ChatPrompt +from agentuniverse.prompt.prompt import Prompt +from agentuniverse.prompt.prompt_manager import PromptManager +from agentuniverse.prompt.prompt_model import AgentPromptModel + + +class ReActPlanner(Planner): + """ReAct planner class.""" + + tools: list[LangchainTool] = None + + def invoke(self, agent_model: AgentModel, planner_input: dict, + input_object: InputObject) -> dict: + """Invoke the planner. 
+ + Args: + agent_model (AgentModel): Agent model object. + planner_input (dict): Planner input object. + input_object (InputObject): The input parameters passed by the user. + Returns: + dict: The planner result. + """ + + memory: ChatMemory = self.handle_memory(agent_model, planner_input) + + llm: LLM = self.handle_llm(agent_model) + + self.tools = self.acquire_tools(agent_model.action) + + prompt: Prompt = self.handle_prompt(agent_model, planner_input) + process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input) + + chat_history = memory.as_langchain().chat_memory if memory else InMemoryChatMessageHistory() + + agent = create_react_agent(llm.as_langchain(), self.tools, prompt.as_langchain()) + + agent_executor = AgentExecutor(agent=agent, tools=self.tools, verbose=True) + + return agent_executor.invoke(input=planner_input, memory=memory.as_langchain() if memory else None) + + @staticmethod + def acquire_tools(action) -> list[LangchainTool]: + tool_names: list = action.get('tool') or list() + lc_tools: list[LangchainTool] = list() + for tool_name in tool_names: + tool: Tool = ToolManager().get_instance_obj(tool_name) + lc_tools.append(tool.as_langchain()) + return lc_tools + + def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> Prompt: + """Prompt module processing. + + Args: + agent_model (AgentModel): Agent model object. + planner_input (dict): Planner input object. + Returns: + ChatPrompt: The chat prompt instance. 
+ """ + lc_tools_str: str = '' + for lc_tool in self.tools: + lc_tools_str += "tool name:" + lc_tool.name + " " + "tool description:" + lc_tool.description + '\n' + lc_tool_names = "|".join([lc_tool.name for lc_tool in self.tools]) + planner_input['tool_names'] = lc_tool_names + planner_input['tools'] = lc_tools_str + planner_input['agent_scratchpad'] = '' + + profile: dict = agent_model.profile + + profile_prompt_model: AgentPromptModel = AgentPromptModel(introduction=profile.get('introduction'), + target=profile.get('target'), + instruction=profile.get('instruction')) + + # get the prompt by the prompt version + prompt_version: str = profile.get('prompt_version') + version_prompt: Prompt = PromptManager().get_instance_obj(prompt_version) + + if version_prompt is None and not profile_prompt_model: + raise Exception("Either the `prompt_version` or `introduction & target & instruction`" + " in agent profile configuration should be provided.") + if version_prompt: + version_prompt_model: AgentPromptModel = AgentPromptModel( + introduction=getattr(version_prompt, 'introduction', ''), + target=getattr(version_prompt, 'target', ''), + instruction=getattr(version_prompt, 'instruction', '')) + profile_prompt_model = profile_prompt_model + version_prompt_model + + prompt = Prompt().build_prompt(profile_prompt_model, self.prompt_assemble_order) + return prompt From 2d0277d9426e4d824df42de49e459ebb505ab5c5 Mon Sep 17 00:00:00 2001 From: weizjajj Date: Wed, 12 Jun 2024 14:28:34 +0800 Subject: [PATCH 03/12] add react --- .../knowledge/embedding/openai_embedding.py | 1 + .../knowledge/embedding/qwen_embedding.py | 82 +++++++++++++ agentuniverse/agent/action/tool/tool.py | 12 +- .../default/nl2api_agent/nl2api_agent.py | 45 +++++++ .../default/nl2api_agent/nl2api_agent.yaml | 17 +++ .../agent/default/react_agent/react_agent.py | 1 + .../planner/nl2api_planner/nl2api_planner.py | 111 ++++++++++++++++++ .../nl2api_planner/nl2api_planner.yaml | 6 + 
.../planner/react_planner/react_planner.py | 17 +-- .../llm/openai_style_langchain_instance.py | 109 ++++++++++++++++- agentuniverse/llm/openai_style_llm.py | 2 + .../app/bootstrap/server_application.py | 8 +- .../agent/nl2api_agent_case/nl2api_agent.yaml | 21 ++++ .../app/core/tool/mock_search_tool.py | 2 +- .../app/core/tool/mul_simple_tool.yaml | 8 ++ .../app/core/tool/search_api_tool.py | 67 +++++++++++ .../app/core/tool/simple_math_tool.py | 39 ++++++ .../app/core/tool/sub_simple_tool.yaml | 8 ++ .../app/test/test_nl2api_agent.py | 29 +++++ .../app/test/test_playwight.py | 10 ++ .../app/test/test_react_agent.py | 4 +- .../app/test/test_search_tool.py | 23 ++++ 22 files changed, 604 insertions(+), 18 deletions(-) create mode 100644 agentuniverse/agent/action/knowledge/embedding/qwen_embedding.py create mode 100644 agentuniverse/agent/default/nl2api_agent/nl2api_agent.py create mode 100644 agentuniverse/agent/default/nl2api_agent/nl2api_agent.yaml create mode 100644 agentuniverse/agent/plan/planner/nl2api_planner/nl2api_planner.py create mode 100644 agentuniverse/agent/plan/planner/nl2api_planner/nl2api_planner.yaml create mode 100644 sample_standard_app/app/core/agent/nl2api_agent_case/nl2api_agent.yaml create mode 100644 sample_standard_app/app/core/tool/mul_simple_tool.yaml create mode 100644 sample_standard_app/app/core/tool/search_api_tool.py create mode 100644 sample_standard_app/app/core/tool/simple_math_tool.py create mode 100644 sample_standard_app/app/core/tool/sub_simple_tool.yaml create mode 100644 sample_standard_app/app/test/test_nl2api_agent.py create mode 100644 sample_standard_app/app/test/test_playwight.py create mode 100644 sample_standard_app/app/test/test_search_tool.py diff --git a/agentuniverse/agent/action/knowledge/embedding/openai_embedding.py b/agentuniverse/agent/action/knowledge/embedding/openai_embedding.py index 7fa660cc..f54bbd0d 100644 --- a/agentuniverse/agent/action/knowledge/embedding/openai_embedding.py +++ 
b/agentuniverse/agent/action/knowledge/embedding/openai_embedding.py @@ -5,6 +5,7 @@ # @Author : wangchongshi # @Email : wangchongshi.wcs@antgroup.com # @FileName: openai_embedding.py + from typing import List, Optional, Any from langchain_community.embeddings.openai import OpenAIEmbeddings diff --git a/agentuniverse/agent/action/knowledge/embedding/qwen_embedding.py b/agentuniverse/agent/action/knowledge/embedding/qwen_embedding.py new file mode 100644 index 00000000..74455f9a --- /dev/null +++ b/agentuniverse/agent/action/knowledge/embedding/qwen_embedding.py @@ -0,0 +1,82 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/6/11 16:30 +# @Author : weizjajj +# @Email : weizhongjie.wzj@antgroup.com +# @FileName: qwen_embedding.py + + +from typing import List, Optional + +import dashscope +from langchain_community.embeddings import DashScopeEmbeddings +from pydantic import Field + +from agentuniverse.agent.action.knowledge.embedding.embedding import Embedding +from agentuniverse.base.util.env_util import get_from_env + + +class QwenEmbedding(Embedding): + """The openai embedding class.""" + + dashscope_api_key: Optional[str] = Field(default_factory=lambda: get_from_env("DASHSCOPE_API_KEY")) + """The DashScope client.""" + model: Optional[str] = "text-embedding-v1" + + def get_embeddings(self, texts: List[str]) -> List[List[float]]: + """Get the OpenAI embeddings. + + Note: + The `embedding_model_name` parameter of the openai embedding class must be provided. + The `dimensions` parameter of the openai embedding class is optional. + + Args: + texts (List[str]): A list of texts that need to be embedded. + + Returns: + List[List[float]]: Each text gets a float list, and the result is a list of the results for each text. + + Raises: + ValueError: If texts exceed the embedding model token limit or missing some required parameters. 
+ """ + result = [] + resp = dashscope.TextEmbedding.call( + model=self.model, + input=texts, + api_key=self.dashscope_api_key + ) + if resp.status_code == 200: + result = [item['embedding'] for item in resp.output.get('embeddings')] + elif resp.status_code in [400, 401]: + raise ValueError( + f"status_code: {resp.status_code} \n " + f"code: {resp.code} \n message: {resp.message}" + ) + else: + raise Exception(f"status_code: {resp.status_code} \n code: {resp.code} \n message: {resp.message}") + return result + + async def async_get_embeddings(self, texts: List[str]) -> List[List[float]]: + """Asynchronously get the OpenAI embeddings. + + Note: + The `embedding_model_name` parameter of the openai embedding class must be provided. + The `dimensions` parameter of the openai embedding class is optional. + + Args: + texts (List[str]): A list of texts that need to be embedded. + + Returns: + List[List[float]]: Each text gets a float list, and the result is a list of the results for each text. + Raises: + ValueError: If texts exceed the embedding model token limit or missing some required parameters. 
+ """ + raise NotImplementedError + + def as_langchain(self) -> DashScopeEmbeddings: + """Convert the agentUniverse(aU) openai embedding class to the langchain openai embedding class.""" + return DashScopeEmbeddings( + model=self.model, + dashscope_api_key=self.dashscope_api_key, + ) diff --git a/agentuniverse/agent/action/tool/tool.py b/agentuniverse/agent/action/tool/tool.py index 2190592b..457bee94 100644 --- a/agentuniverse/agent/action/tool/tool.py +++ b/agentuniverse/agent/action/tool/tool.py @@ -64,8 +64,7 @@ def __init__(self, **kwargs): def run(self, **kwargs): """The callable method that runs the tool.""" self.input_check(kwargs) - tool_input = ToolInput(kwargs) - return self.execute(tool_input) + return self.execute(**kwargs) def input_check(self, kwargs: dict) -> None: """Check whether the input parameters of the tool contain input keys of the tool""" @@ -73,14 +72,19 @@ def input_check(self, kwargs: dict) -> None: if key not in kwargs.keys(): raise Exception(f'{self.get_instance_code()} - The input must include key: {key}.') + def langchain_run(self, *args, callbacks=None, **kwargs): + """The callable method that runs the tool.""" + kwargs["callbacks"] = callbacks + return self.execute(*args, **kwargs) + @abstractmethod - def execute(self, tool_input: ToolInput): + def execute(self, *args, **kwargs): raise NotImplementedError def as_langchain(self) -> LangchainTool: """Convert the agentUniverse(aU) tool class to the langchain tool class.""" return LangchainTool(name=self.name, - func=self.run, + func=self.langchain_run, description=self.description) def get_instance_code(self) -> str: diff --git a/agentuniverse/agent/default/nl2api_agent/nl2api_agent.py b/agentuniverse/agent/default/nl2api_agent/nl2api_agent.py new file mode 100644 index 00000000..a88892a8 --- /dev/null +++ b/agentuniverse/agent/default/nl2api_agent/nl2api_agent.py @@ -0,0 +1,45 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/6/11 17:14 +# @Author : weizjajj +# 
@Email : weizhongjie.wzj@antgroup.com +# @FileName: nl2api_agent.py + +from agentuniverse.agent.agent import Agent +from agentuniverse.agent.input_object import InputObject + + +class Nl2ApiAgent(Agent): + """ReAct Agent class.""" + + def input_keys(self) -> list[str]: + """Return the input keys of the Agent.""" + return ['input'] + + def output_keys(self) -> list[str]: + """Return the output keys of the Agent.""" + return ['output'] + + def parse_input(self, input_object: InputObject, agent_input: dict) -> dict: + """Agent parameter parsing. + + Args: + input_object (InputObject): input parameters passed by the user. + agent_input (dict): agent input preparsed by the agent. + Returns: + dict: agent input parsed from `input_object` by the user. + """ + agent_input['input'] = input_object.get_data('input') + self.agent_model.profile.setdefault('prompt_version', 'default_nl2api_agent.cn') + return agent_input + + def parse_result(self, planner_result: dict) -> dict: + """Planner result parser. + + Args: + planner_result(dict): Planner result + Returns: + dict: Agent result object. 
+ """ + return planner_result diff --git a/agentuniverse/agent/default/nl2api_agent/nl2api_agent.yaml b/agentuniverse/agent/default/nl2api_agent/nl2api_agent.yaml new file mode 100644 index 00000000..79245c65 --- /dev/null +++ b/agentuniverse/agent/default/nl2api_agent/nl2api_agent.yaml @@ -0,0 +1,17 @@ +info: + name: 'nl2api_agent' + description: 'nl2api agent' +profile: + llm_model: + name: 'default_openai_llm' + model_name: 'gpt-4o' +action: + tool: + - '' +plan: + planner: + name: 'nl2api_planner' +metadata: + type: 'AGENT' + module: 'agentuniverse.agent.default.nl2api_agent.nl2api_agent' + class: 'Nl2ApiAgent' \ No newline at end of file diff --git a/agentuniverse/agent/default/react_agent/react_agent.py b/agentuniverse/agent/default/react_agent/react_agent.py index f50e7057..50de90c4 100644 --- a/agentuniverse/agent/default/react_agent/react_agent.py +++ b/agentuniverse/agent/default/react_agent/react_agent.py @@ -1,5 +1,6 @@ # !/usr/bin/env python3 # -*- coding:utf-8 -*- +from langchain.agents import create_openai_tools_agent # @Time : 2024/5/31 21:22 # @Author : wangchongshi diff --git a/agentuniverse/agent/plan/planner/nl2api_planner/nl2api_planner.py b/agentuniverse/agent/plan/planner/nl2api_planner/nl2api_planner.py new file mode 100644 index 00000000..55d7d8d7 --- /dev/null +++ b/agentuniverse/agent/plan/planner/nl2api_planner/nl2api_planner.py @@ -0,0 +1,111 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/3/18 10:50 +# @Author : heji +# @Email : lc299034@antgroup.com +# @FileName: expressing_planner.py +"""Expressing planner module.""" +import asyncio + +from langchain_core.chat_history import InMemoryChatMessageHistory +from langchain_core.output_parsers import JsonOutputParser +from langchain_core.runnables.history import RunnableWithMessageHistory +from langchain.tools import Tool as LangchainTool + +from agentuniverse.agent.action.tool.tool import Tool +from agentuniverse.agent.action.tool.tool_manager import ToolManager +from 
agentuniverse.agent.agent_model import AgentModel +from agentuniverse.agent.input_object import InputObject +from agentuniverse.agent.memory.chat_memory import ChatMemory +from agentuniverse.agent.plan.planner.planner import Planner +from agentuniverse.base.util.memory_util import generate_memories +from agentuniverse.base.util.prompt_util import process_llm_token +from agentuniverse.llm.llm import LLM +from agentuniverse.prompt.prompt import Prompt +from agentuniverse.prompt.prompt_manager import PromptManager +from agentuniverse.prompt.prompt_model import AgentPromptModel + + +class Nl2ApiPlanner(Planner): + """Expressing planner class.""" + + def invoke(self, agent_model: AgentModel, planner_input: dict, input_object: InputObject) -> dict: + """Invoke the planner. + + Args: + agent_model (AgentModel): Agent model object. + planner_input (dict): Planner input object. + input_object (InputObject): The input parameters passed by the user. + Returns: + dict: The planner result. + """ + memory: ChatMemory = self.handle_memory(agent_model, planner_input) + + llm: LLM = self.handle_llm(agent_model) + + prompt: Prompt = self.handle_prompt(agent_model, planner_input) + + process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input) + + chat_history = memory.as_langchain().chat_memory if memory else InMemoryChatMessageHistory() + + chain_with_history = RunnableWithMessageHistory( + prompt.as_langchain() | llm.as_langchain(), + lambda session_id: chat_history, + history_messages_key="chat_history", + input_messages_key=self.input_key, + ) | JsonOutputParser() + res = asyncio.run( + chain_with_history.ainvoke(input=planner_input, config={"configurable": {"session_id": "unused"}})) + return {**planner_input, self.output_key: res, 'chat_history': generate_memories(chat_history)} + + @staticmethod + def acquire_tools(action) -> list[LangchainTool]: + tool_names: list = action.get('tool') or list() + lc_tools: list[LangchainTool] = list() + for tool_name in 
tool_names: + tool: Tool = ToolManager().get_instance_obj(tool_name) + lc_tools.append(tool.as_langchain()) + return lc_tools + + def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> Prompt: + """ + Prompt module processing. + + Args: + agent_model (AgentModel): Agent model object. + planner_input (dict): Planner input object. + Returns: + ChatPrompt: The chat prompt instance. + """ + tools = self.acquire_tools(action=agent_model.action) + lc_tools_str: str = '' + for lc_tool in tools: + lc_tools_str += "tool name:" + lc_tool.name + " " + "tool description:" + lc_tool.description + '\n' + lc_tool_names = "|".join([lc_tool.name for lc_tool in tools]) + planner_input['tool_names'] = lc_tool_names + planner_input['tools'] = lc_tools_str + planner_input['agent_scratchpad'] = '' + # + profile: dict = agent_model.profile + # + profile_prompt_model: AgentPromptModel = AgentPromptModel(introduction=profile.get('introduction'), + target=profile.get('target'), + instruction=profile.get('instruction')) + + # get the prompt by the prompt version + prompt_version: str = profile.get('prompt_version') + version_prompt: Prompt = PromptManager().get_instance_obj(prompt_version) + + if version_prompt is None and not profile_prompt_model: + raise Exception("Either the `prompt_version` or `introduction & target & instruction`" + " in agent profile configuration should be provided.") + if version_prompt: + version_prompt_model: AgentPromptModel = AgentPromptModel( + introduction=getattr(version_prompt, 'introduction', ''), + target=getattr(version_prompt, 'target', ''), + instruction=getattr(version_prompt, 'instruction', '')) + profile_prompt_model = profile_prompt_model + version_prompt_model + + prompt = Prompt().build_prompt(profile_prompt_model, self.prompt_assemble_order) + return prompt \ No newline at end of file diff --git a/agentuniverse/agent/plan/planner/nl2api_planner/nl2api_planner.yaml 
b/agentuniverse/agent/plan/planner/nl2api_planner/nl2api_planner.yaml new file mode 100644 index 00000000..1de5943f --- /dev/null +++ b/agentuniverse/agent/plan/planner/nl2api_planner/nl2api_planner.yaml @@ -0,0 +1,6 @@ +name: 'nl2api_planner' +description: 'nl2api planner' +metadata: + type: 'PLANNER' + module: 'agentuniverse.agent.plan.planner.nl2api_planner.nl2api_planner' + class: 'Nl2ApiPlanner' \ No newline at end of file diff --git a/agentuniverse/agent/plan/planner/react_planner/react_planner.py b/agentuniverse/agent/plan/planner/react_planner/react_planner.py index de623a78..cb8c4314 100644 --- a/agentuniverse/agent/plan/planner/react_planner/react_planner.py +++ b/agentuniverse/agent/plan/planner/react_planner/react_planner.py @@ -6,7 +6,7 @@ # @Email : wangchongshi.wcs@antgroup.com # @FileName: react_planner.py from langchain_core.chat_history import InMemoryChatMessageHistory -from langchain.agents import AgentExecutor, create_react_agent +from langchain.agents import AgentExecutor, create_react_agent, create_structured_chat_agent from langchain.tools import Tool as LangchainTool from agentuniverse.agent.action.tool.tool import Tool @@ -44,18 +44,18 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, llm: LLM = self.handle_llm(agent_model) - self.tools = self.acquire_tools(agent_model.action) + tools = self.acquire_tools(agent_model.action) prompt: Prompt = self.handle_prompt(agent_model, planner_input) process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input) chat_history = memory.as_langchain().chat_memory if memory else InMemoryChatMessageHistory() - agent = create_react_agent(llm.as_langchain(), self.tools, prompt.as_langchain()) + agent = create_react_agent(llm.as_langchain(), tools, prompt.as_langchain()) + agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) - agent_executor = AgentExecutor(agent=agent, tools=self.tools, verbose=True) - - return agent_executor.invoke(input=planner_input, 
memory=memory.as_langchain() if memory else None) + return agent_executor.invoke(input=planner_input, memory=memory.as_langchain() if memory else None, + chat_history=chat_history) @staticmethod def acquire_tools(action) -> list[LangchainTool]: @@ -76,9 +76,10 @@ def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> Prompt: ChatPrompt: The chat prompt instance. """ lc_tools_str: str = '' - for lc_tool in self.tools: + tools = self.acquire_tools(agent_model.action) + for lc_tool in self.acquire_tools(agent_model.action): lc_tools_str += "tool name:" + lc_tool.name + " " + "tool description:" + lc_tool.description + '\n' - lc_tool_names = "|".join([lc_tool.name for lc_tool in self.tools]) + lc_tool_names = "|".join([lc_tool.name for lc_tool in tools]) planner_input['tool_names'] = lc_tool_names planner_input['tools'] = lc_tools_str planner_input['agent_scratchpad'] = '' diff --git a/agentuniverse/llm/openai_style_langchain_instance.py b/agentuniverse/llm/openai_style_langchain_instance.py index 8b9506ce..f17e01e8 100644 --- a/agentuniverse/llm/openai_style_langchain_instance.py +++ b/agentuniverse/llm/openai_style_langchain_instance.py @@ -6,11 +6,12 @@ # @Email : weizhongjie.wzj@antgroup.com # @FileName: langchain_openai_style_instance.py -from typing import Any, List, Optional, AsyncIterator +from typing import Any, List, Optional, AsyncIterator, Iterator from langchain.callbacks.manager import AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun from langchain.schema import BaseMessage, ChatResult -from langchain_community.chat_models.openai import _convert_delta_to_message_chunk +from langchain_community.chat_models.openai import _convert_delta_to_message_chunk, _create_retry_decorator +from langchain_community.utils.openai import is_openai_v1 from langchain_core.language_models.chat_models import generate_from_stream, agenerate_from_stream from langchain_core.messages import AIMessageChunk, get_buffer_string from langchain_core.outputs 
import ChatGenerationChunk @@ -19,6 +20,25 @@ from agentuniverse.llm.llm import LLM +async def acompletion_with_retry( + llm: ChatOpenAI, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, +) -> Any: + """Use tenacity to retry the async completion call.""" + if is_openai_v1(): + return await llm.llm.acall(**kwargs) + + retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) + + @retry_decorator + async def _completion_with_retry(**kwargs: Any) -> Any: + # Use OpenAI's async api https://github.com/openai/openai-python#async-api + return await llm.llm.acall(**kwargs) + + return await _completion_with_retry(**kwargs) + + class LangchainOpenAIStyleInstance(ChatOpenAI): """Langchain OpenAI LLM wrapper.""" @@ -134,3 +154,88 @@ async def as_langchain_achunk(stream_iterator: AsyncIterator, run_manager=None) def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: messages_str = get_buffer_string(messages) return self.llm.get_num_tokens(messages_str) + + def completion_with_retry( + self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any + ) -> Any: + """Use tenacity to retry the completion call.""" + if is_openai_v1(): + return self.llm.call(**kwargs) + + retry_decorator = _create_retry_decorator(self, run_manager=run_manager) + + @retry_decorator + def _completion_with_retry(**kwargs: Any) -> Any: + return self.llm.call(**kwargs) + + return _completion_with_retry(**kwargs) + + def _stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + message_dicts, params = self._create_message_dicts(messages, stop) + params = {**params, **kwargs, "stream": True} + + default_chunk_class = AIMessageChunk + for chunk in self.completion_with_retry( + messages=message_dicts, run_manager=run_manager, **params + ): + chunk = chunk.raw + if not isinstance(chunk, dict): + 
chunk = chunk.dict() + if len(chunk["choices"]) == 0: + continue + choice = chunk["choices"][0] + chunk = _convert_delta_to_message_chunk( + choice["delta"], default_chunk_class + ) + finish_reason = choice.get("finish_reason") + generation_info = ( + dict(finish_reason=finish_reason) if finish_reason is not None else None + ) + default_chunk_class = chunk.__class__ + cg_chunk = ChatGenerationChunk( + message=chunk, generation_info=generation_info + ) + if run_manager: + run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk) + yield cg_chunk + + async def _astream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[ChatGenerationChunk]: + message_dicts, params = self._create_message_dicts(messages, stop) + params = {**params, **kwargs, "stream": True} + + default_chunk_class = AIMessageChunk + async for chunk in await acompletion_with_retry( + self, messages=message_dicts, run_manager=run_manager, **params + ): + chunk = chunk.raw + if not isinstance(chunk, dict): + chunk = chunk.dict() + if len(chunk["choices"]) == 0: + continue + choice = chunk["choices"][0] + chunk = _convert_delta_to_message_chunk( + choice["delta"], default_chunk_class + ) + finish_reason = choice.get("finish_reason") + generation_info = ( + dict(finish_reason=finish_reason) if finish_reason is not None else None + ) + default_chunk_class = chunk.__class__ + cg_chunk = ChatGenerationChunk( + message=chunk, generation_info=generation_info + ) + if run_manager: + await run_manager.on_llm_new_token(token=cg_chunk.text, chunk=cg_chunk) + yield cg_chunk diff --git a/agentuniverse/llm/openai_style_llm.py b/agentuniverse/llm/openai_style_llm.py index c4a84da4..2d5a3ea0 100644 --- a/agentuniverse/llm/openai_style_llm.py +++ b/agentuniverse/llm/openai_style_llm.py @@ -71,6 +71,8 @@ def call(self, messages: list, **kwargs: Any) -> Union[LLMOutput, Iterator[LLMOu **kwargs: 
Arbitrary keyword arguments. """ streaming = kwargs.pop("streaming") if "streaming" in kwargs else self.streaming + if 'stream' in kwargs: + streaming = kwargs.pop('stream') self.client = self._new_client() client = self.client chat_completion = client.chat.completions.create( diff --git a/sample_standard_app/app/bootstrap/server_application.py b/sample_standard_app/app/bootstrap/server_application.py index 3f59fb94..7399f367 100644 --- a/sample_standard_app/app/bootstrap/server_application.py +++ b/sample_standard_app/app/bootstrap/server_application.py @@ -21,4 +21,10 @@ def start(cls): if __name__ == "__main__": - ServerApplication.start() + from langchain_community.document_loaders import AsyncHtmlLoader + + urls = ["https://www.espn.com", "https://lilianweng.github.io/posts/2023-06-23-agent/"] + loader = AsyncHtmlLoader(urls) + docs = loader.load() + print(docs) +# ServerApplication.start() diff --git a/sample_standard_app/app/core/agent/nl2api_agent_case/nl2api_agent.yaml b/sample_standard_app/app/core/agent/nl2api_agent_case/nl2api_agent.yaml new file mode 100644 index 00000000..26dfd2a2 --- /dev/null +++ b/sample_standard_app/app/core/agent/nl2api_agent_case/nl2api_agent.yaml @@ -0,0 +1,21 @@ +info: + name: 'demo_nl2api_agent' + description: 'demo nl2api agent' +profile: + llm_model: + name: 'default_openai_llm' + model_name: 'gpt-4o' +action: + tool: + - 'add' + - 'sub' + - 'mul' + - 'div' + - 'google_search_tool' +plan: + planner: + name: 'nl2api_planner' +metadata: + type: 'AGENT' + module: 'agentuniverse.agent.default.nl2api_agent.nl2api_agent' + class: 'Nl2ApiAgent' \ No newline at end of file diff --git a/sample_standard_app/app/core/tool/mock_search_tool.py b/sample_standard_app/app/core/tool/mock_search_tool.py index ebd78516..33deb9aa 100644 --- a/sample_standard_app/app/core/tool/mock_search_tool.py +++ b/sample_standard_app/app/core/tool/mock_search_tool.py @@ -40,6 +40,6 @@ class MockSearchTool(Tool): We recommend that you configure your 
`SERPER_API_KEY` and use google_search_tool to get information. """ - def execute(self, tool_input: ToolInput): + def execute(self, input:str): """Demonstrates the execute method of the Tool class.""" return MOCK_SEARCH_RESULT diff --git a/sample_standard_app/app/core/tool/mul_simple_tool.yaml b/sample_standard_app/app/core/tool/mul_simple_tool.yaml new file mode 100644 index 00000000..4278bc4e --- /dev/null +++ b/sample_standard_app/app/core/tool/mul_simple_tool.yaml @@ -0,0 +1,8 @@ +name: 'mul' +description: 'Multiplying two float number, input_params with 2 num spilt with comma, return result. For example, a = 2, 1 , the result = 2' +tool_type: 'api' +input_keys: +metadata: + type: 'TOOL' + module: 'sample_standard_app.app.core.tool.simple_math_tool' + class: 'MultiplyTool' \ No newline at end of file diff --git a/sample_standard_app/app/core/tool/search_api_tool.py b/sample_standard_app/app/core/tool/search_api_tool.py new file mode 100644 index 00000000..78ab009d --- /dev/null +++ b/sample_standard_app/app/core/tool/search_api_tool.py @@ -0,0 +1,67 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +import os +# @Time : 2024/6/12 09:44 +# @Author : weizjajj +# @Email : weizhongjie.wzj@antgroup.com +# @FileName: search_api_tool.py + + +from typing import Optional + +from langchain_community.utilities import SearchApiAPIWrapper +from pydantic import Field + +from agentuniverse.agent.action.tool.tool import Tool +from agentuniverse.base.config.component_configer.configers.tool_configer import ToolConfiger +from agentuniverse.base.util.env_util import get_from_env + + +class SearchAPITool(Tool): + """ + The demo search tool. + + Implement the execute method of demo google search tool, using the `SearchApiAPIWrapper` to implement a simple search. + + Note: + You need to sign up for a free account at https://www.searchapi.io/ and get the SEARCHAPI_API_KEY api key (100 free queries). 
+ + Args: + search_api_key: Optional[str] = Field(default_factory=lambda: get_from_env("SEARCHAPI_API_KEY")), + engine: str = "google" engine type you want to use + search_params: dict = {} engine search parameters + search_type: str = "common" result type you want to get ,common string or json + """ + + search_api_key: Optional[str] = Field(default_factory=lambda: get_from_env("SEARCHAPI_API_KEY")) + engine: str = "google" + search_params: dict = {} + search_api_wrapper: Optional[SearchApiAPIWrapper] = None + search_type: str = "common" + + def _load_api_wapper(self): + if not self.search_api_key: + raise ValueError("Please set the SEARCHAPI_API_KEY environment variable.") + if not self.search_api_wrapper: + self.search_api_wrapper = SearchApiAPIWrapper(searchapi_api_key=self.search_api_key, engine=self.engine) + return self.search_api_wrapper + + def execute(self, input: str, **kwargs): + self._load_api_wapper() + search_params = {} + for k, v in self.search_params.items(): + if k in kwargs: + search_params[k] = kwargs[k] + continue + search_params[k] = v + if self.search_type == "json": + return self.search_api_wrapper.results(query=input, **search_params) + return self.search_api_wrapper.run(query=input, **search_params) + + def initialize_by_component_configer(self, component_configer: ToolConfiger) -> 'Tool': + """Initialize the tool by the component configer.""" + super().initialize_by_component_configer(component_configer) + self.engine = component_configer.configer.value.get('engine', 'google') + self.search_params = component_configer.configer.value.get('search_params', {}) + self.search_type = component_configer.configer.value.get('search_type', 'common') + return self diff --git a/sample_standard_app/app/core/tool/simple_math_tool.py b/sample_standard_app/app/core/tool/simple_math_tool.py new file mode 100644 index 00000000..3559208a --- /dev/null +++ b/sample_standard_app/app/core/tool/simple_math_tool.py @@ -0,0 +1,39 @@ +# !/usr/bin/env python3 +# 
-*- coding:utf-8 -*- +from langchain_core.utils.json import parse_json_markdown + +# @Time : 2024/6/11 09:49 +# @Author : weizjajj +# @Email : weizhongjie.wzj@antgroup.com +# @FileName: simple_math_tool.py + + +from agentuniverse.agent.action.tool.tool import Tool + + +class AddTool(Tool): + def execute(self, input_params, **kwargs): + a, b = input_params.split(',') + result = float(a) + float(b) + return result + + +class SubtractTool(Tool): + def execute(self, input_params, **kwargs): + a, b = input_params.split(',') + result = float(a) - float(b) + return result + + +class MultiplyTool(Tool): + def execute(self, input_params, **kwargs): + a, b = input_params.split(',') + result = float(a) * float(b) + return result + + +class DivideTool(Tool): + def execute(self, input_params, **kwargs): + a, b = input_params.split(',') + result = float(a) / float(b) + return result diff --git a/sample_standard_app/app/core/tool/sub_simple_tool.yaml b/sample_standard_app/app/core/tool/sub_simple_tool.yaml new file mode 100644 index 00000000..0fb7097e --- /dev/null +++ b/sample_standard_app/app/core/tool/sub_simple_tool.yaml @@ -0,0 +1,8 @@ +name: 'sub' +description: 'Subtracting two float number, input_params with 2 num spilt with comma, return result. 
For example, input_params = 2, 1 , the result = 1' +tool_type: 'api' +input_keys: +metadata: + type: 'TOOL' + module: 'sample_standard_app.app.core.tool.simple_math_tool' + class: 'SubtractTool' \ No newline at end of file diff --git a/sample_standard_app/app/test/test_nl2api_agent.py b/sample_standard_app/app/test/test_nl2api_agent.py new file mode 100644 index 00000000..6ab794c8 --- /dev/null +++ b/sample_standard_app/app/test/test_nl2api_agent.py @@ -0,0 +1,29 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/6/4 21:27 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: test_react_agent.py +import unittest + +from agentuniverse.agent.agent import Agent +from agentuniverse.agent.agent_manager import AgentManager +from agentuniverse.agent.output_object import OutputObject +from agentuniverse.base.agentuniverse import AgentUniverse + + +class ReActAgentTest(unittest.TestCase): + + def setUp(self) -> None: + AgentUniverse().start(config_path='../../config/config.toml') + + def test_react_agent(self): + """Test demo reAct agent.""" + instance: Agent = AgentManager().get_instance_obj('demo_nl2api_agent') + output_object: OutputObject = instance.run(input='1+3/2+10-4*3等于多少') + print(output_object.to_dict()) + + +if __name__ == '__main__': + unittest.main() diff --git a/sample_standard_app/app/test/test_playwight.py b/sample_standard_app/app/test/test_playwight.py new file mode 100644 index 00000000..964bc2dd --- /dev/null +++ b/sample_standard_app/app/test/test_playwight.py @@ -0,0 +1,10 @@ + +from langchain_community.agent_toolkits import PlayWrightBrowserToolkit +from langchain_community.tools.playwright.utils import create_async_playwright_browser + +if __name__ == '__main__': + from langchain_community.document_loaders import AsyncHtmlLoader + + urls = ["https://www.espn.com", "https://lilianweng.github.io/posts/2023-06-23-agent/"] + loader = AsyncHtmlLoader(urls) + docs = loader.load() \ No newline at end of file 
diff --git a/sample_standard_app/app/test/test_react_agent.py b/sample_standard_app/app/test/test_react_agent.py index 3f0179c6..fad03b9b 100644 --- a/sample_standard_app/app/test/test_react_agent.py +++ b/sample_standard_app/app/test/test_react_agent.py @@ -20,8 +20,8 @@ def setUp(self) -> None: def test_react_agent(self): """Test demo reAct agent.""" - instance: Agent = AgentManager().get_instance_obj('react_agent') - output_object: OutputObject = instance.run(input='分析下巴菲特减持比亚迪的原因') + instance: Agent = AgentManager().get_instance_obj('demo_react_agent') + output_object: OutputObject = instance.run(input='黄金和沪深300的价格分别是多少') if __name__ == '__main__': diff --git a/sample_standard_app/app/test/test_search_tool.py b/sample_standard_app/app/test/test_search_tool.py new file mode 100644 index 00000000..f442fad5 --- /dev/null +++ b/sample_standard_app/app/test/test_search_tool.py @@ -0,0 +1,23 @@ +import os +import unittest + +from agentuniverse.agent.action.tool.tool_manager import ToolManager +from agentuniverse.base.agentuniverse import AgentUniverse + + +class SearchToolTest(unittest.TestCase): + """ + Test cases for the rag agent + """ + + def setUp(self) -> None: + AgentUniverse().start(config_path='../../config/config.toml') + + def test_rag_agent(self): + """Test demo rag agent.""" + res = ToolManager().get_instance_obj("baidu_search_tool").run(input="今日黄金价格") + print(res) + + +if __name__ == '__main__': + unittest.main() From eb87a8ef8eef81945ef6dd1521b4f1f545250b22 Mon Sep 17 00:00:00 2001 From: weizjajj Date: Wed, 12 Jun 2024 14:28:40 +0800 Subject: [PATCH 04/12] add react --- .../agent/default/nl2api_agent/__init__.py | 0 .../nl2api_agent/default_cn_prompt.yaml | 27 +++++++++++++++++++ .../nl2api_agent/default_en_prompt.yaml | 26 ++++++++++++++++++ .../plan/planner/nl2api_planner/__init__.py | 0 .../core/agent/nl2api_agent_case/__init__.py | 0 .../core/agent/react_agent_case/__init__.py | 0 .../react_agent_case/demo_react_agent.yaml | 23 ++++++++++++++++ 
.../app/core/tool/add_simple_tool.yaml | 8 ++++++ .../app/core/tool/baidu_search_tool.yaml | 12 +++++++++ .../app/core/tool/bing_search_tool.yaml | 12 +++++++++ .../app/core/tool/div_simple_tool.yaml | 8 ++++++ .../app/core/tool/google_search_tool.py | 8 +++--- 12 files changed, 120 insertions(+), 4 deletions(-) create mode 100644 agentuniverse/agent/default/nl2api_agent/__init__.py create mode 100644 agentuniverse/agent/default/nl2api_agent/default_cn_prompt.yaml create mode 100644 agentuniverse/agent/default/nl2api_agent/default_en_prompt.yaml create mode 100644 agentuniverse/agent/plan/planner/nl2api_planner/__init__.py create mode 100644 sample_standard_app/app/core/agent/nl2api_agent_case/__init__.py create mode 100644 sample_standard_app/app/core/agent/react_agent_case/__init__.py create mode 100644 sample_standard_app/app/core/agent/react_agent_case/demo_react_agent.yaml create mode 100644 sample_standard_app/app/core/tool/add_simple_tool.yaml create mode 100644 sample_standard_app/app/core/tool/baidu_search_tool.yaml create mode 100644 sample_standard_app/app/core/tool/bing_search_tool.yaml create mode 100644 sample_standard_app/app/core/tool/div_simple_tool.yaml diff --git a/agentuniverse/agent/default/nl2api_agent/__init__.py b/agentuniverse/agent/default/nl2api_agent/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/agentuniverse/agent/default/nl2api_agent/default_cn_prompt.yaml b/agentuniverse/agent/default/nl2api_agent/default_cn_prompt.yaml new file mode 100644 index 00000000..b2ec805f --- /dev/null +++ b/agentuniverse/agent/default/nl2api_agent/default_cn_prompt.yaml @@ -0,0 +1,27 @@ +introduction: 你是一位精通工具选择ai助手。 +target: 你的目标是根据用户的问题选择出合适的工具。 +instruction: | + 你需要根据问题和用户提供的工具,选择其中的一个或几个工具用来回答用户提出的问题。 + 你必须从多个角度、维度分析用户的问题,需要根据背景和问题,决定使用哪些工具可以回答用户问题。 + + 您可以使用以下工具: + {tools} + + 之前的对话: + {chat_history} + + 背景信息是: + {background} + + 回答必须是按照以下格式化的Json代码片段。 + 1. tools字段代表选择的几个工具的完整名称,列表格式。例如:[add, sub, mul, div] + 2. 
thought字段代表选择工具的思考过程和原因。 + ```{{ + "tools": list, + "thought": string + }}``` + + 当前的问题:{input} +metadata: + type: 'PROMPT' + version: 'default_nl2api_agent.cn' diff --git a/agentuniverse/agent/default/nl2api_agent/default_en_prompt.yaml b/agentuniverse/agent/default/nl2api_agent/default_en_prompt.yaml new file mode 100644 index 00000000..31f8b411 --- /dev/null +++ b/agentuniverse/agent/default/nl2api_agent/default_en_prompt.yaml @@ -0,0 +1,26 @@ +introduction: You are an AI assistant proficient in tool selection. +target: Your goal is to select the appropriate tools based on the user's questions. +instruction: | + Your task is to select one or several tools from those provided by the user, based on their question and the context, in order to answer the user's query. + You must analyze the user's problem from multiple angles and dimensions, taking into account the background and context of the question, and decide which tools can be used to answer the user's question. + You may use the following tools: + {tools} + + Previous conversation: + {chat_history} + + The background information is: + {background} + + The response must follow the format below as a formatted JSON code snippet. + 1. The tools field represents the full names of the selected tools in a list format, such as:[add, sub, mul, div] + 2. The thought field represents the thinking process and reasons behind the selection of tools. 
+ ```{{ + "tools": list, + "thought": string + }}``` + + Question: {input} +metadata: + type: 'PROMPT' + version: 'default_nl2api_agent.en' diff --git a/agentuniverse/agent/plan/planner/nl2api_planner/__init__.py b/agentuniverse/agent/plan/planner/nl2api_planner/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/sample_standard_app/app/core/agent/nl2api_agent_case/__init__.py b/sample_standard_app/app/core/agent/nl2api_agent_case/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/sample_standard_app/app/core/agent/react_agent_case/__init__.py b/sample_standard_app/app/core/agent/react_agent_case/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/sample_standard_app/app/core/agent/react_agent_case/demo_react_agent.yaml b/sample_standard_app/app/core/agent/react_agent_case/demo_react_agent.yaml new file mode 100644 index 00000000..175d44c7 --- /dev/null +++ b/sample_standard_app/app/core/agent/react_agent_case/demo_react_agent.yaml @@ -0,0 +1,23 @@ +info: + name: 'demo_react_agent' + description: 'react agent' +profile: + llm_model: + name: 'default_qwen_llm' +# model_name: 'qwen1.5-72b-chat' +action: + tool: +# - 'add' +# - 'sub' +# - 'mul' +# - 'div' + - 'google_search_tool' + - 'gold_search_tool' + - 'stock_search_tool' +plan: + planner: + name: 'react_planner' +metadata: + type: 'AGENT' + module: 'agentuniverse.agent.default.react_agent.react_agent' + class: 'ReActAgent' \ No newline at end of file diff --git a/sample_standard_app/app/core/tool/add_simple_tool.yaml b/sample_standard_app/app/core/tool/add_simple_tool.yaml new file mode 100644 index 00000000..1a1c2026 --- /dev/null +++ b/sample_standard_app/app/core/tool/add_simple_tool.yaml @@ -0,0 +1,8 @@ +name: 'add' +description: 'Adding two float number, input_params with 2 num spilt with comma, return result. 
For example, input_params = 1, 2 , the result = 3' +tool_type: 'api' +input_keys: +metadata: + type: 'TOOL' + module: 'sample_standard_app.app.core.tool.simple_math_tool' + class: 'AddTool' \ No newline at end of file diff --git a/sample_standard_app/app/core/tool/baidu_search_tool.yaml b/sample_standard_app/app/core/tool/baidu_search_tool.yaml new file mode 100644 index 00000000..ead9d4b6 --- /dev/null +++ b/sample_standard_app/app/core/tool/baidu_search_tool.yaml @@ -0,0 +1,12 @@ +name: 'baidu_search_tool' +description: '百度(必应)搜索工具,输入为一个要搜索内容的字符串,例如:input=黄金价格是多少' +tool_type: 'api' +input_keys: ['input'] +engine: 'baidu' +search_type: 'json' +search_params: + num: 10 +metadata: + type: 'TOOL' + module: 'sample_standard_app.app.core.tool.search_api_tool' + class: 'SearchAPITool' \ No newline at end of file diff --git a/sample_standard_app/app/core/tool/bing_search_tool.yaml b/sample_standard_app/app/core/tool/bing_search_tool.yaml new file mode 100644 index 00000000..45f16424 --- /dev/null +++ b/sample_standard_app/app/core/tool/bing_search_tool.yaml @@ -0,0 +1,12 @@ +name: 'bing_search_tool' +description: 'Bing(必应)搜索工具,输入为一个要搜索内容的字符串,例如:input=黄金价格是多少' +tool_type: 'api' +input_keys: ['input'] +engine: 'bing' +search_type: 'common' +search_params: + num: 10 +metadata: + type: 'TOOL' + module: 'sample_standard_app.app.core.tool.search_api_tool' + class: 'SearchAPITool' \ No newline at end of file diff --git a/sample_standard_app/app/core/tool/div_simple_tool.yaml b/sample_standard_app/app/core/tool/div_simple_tool.yaml new file mode 100644 index 00000000..69b9560f --- /dev/null +++ b/sample_standard_app/app/core/tool/div_simple_tool.yaml @@ -0,0 +1,8 @@ +name: 'div' +description: 'Dividing two float number, input_params with 2 num spilt with comma, return result. 
For example, input_params = 6, 3 , the result = 2' +tool_type: 'api' +input_keys: +metadata: + type: 'TOOL' + module: 'sample_standard_app.app.core.tool.simple_math_tool' + class: 'DivideTool' \ No newline at end of file diff --git a/sample_standard_app/app/core/tool/google_search_tool.py b/sample_standard_app/app/core/tool/google_search_tool.py index 50b14d7e..2ab955aa 100644 --- a/sample_standard_app/app/core/tool/google_search_tool.py +++ b/sample_standard_app/app/core/tool/google_search_tool.py @@ -9,6 +9,7 @@ from pydantic import Field from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper from agentuniverse.agent.action.tool.tool import Tool, ToolInput +from agentuniverse.base.config.component_configer.configers.tool_configer import ToolConfiger from agentuniverse.base.util.env_util import get_from_env from sample_standard_app.app.core.tool.mock_search_tool import MockSearchTool @@ -25,10 +26,9 @@ class GoogleSearchTool(Tool): serper_api_key: Optional[str] = Field(default_factory=lambda: get_from_env("SERPER_API_KEY")) - def execute(self, tool_input: ToolInput): + def execute(self, input: str, **kwargs): if self.serper_api_key is None: - return MockSearchTool().execute(tool_input) - query = tool_input.get_data("input") + return MockSearchTool().execute(input=input) # get top10 results from Google search. 
search = GoogleSerperAPIWrapper(serper_api_key=self.serper_api_key, k=10, gl="us", hl="en", type="search") - return search.run(query=query) + return search.run(query=input) \ No newline at end of file From 40ffa4b89f998e5ec122f139fe4d56621353a3ed Mon Sep 17 00:00:00 2001 From: weizjajj Date: Thu, 13 Jun 2024 15:03:44 +0800 Subject: [PATCH 05/12] add ollama support react --- .../react_agent/default_cn_prompt.yaml | 18 ++-- .../react_agent/default_en_prompt.yaml | 16 +-- .../agent/default/react_agent/react_agent.py | 1 - .../planner/react_planner/react_planner.py | 18 +++- .../planner/react_planner/stream_callback.py | 64 +++++++++++ .../llm/default/default_ollama_llm.py | 100 ++++++++++++++++++ .../llm/default/default_ollama_llm.yaml | 9 ++ agentuniverse/llm/llm.py | 6 +- .../llm/ollama_langchain_instance.py | 39 +++++++ .../app/bootstrap/server_application.py | 9 +- .../react_agent_case/demo_react_agent.yaml | 12 +-- .../app/core/prompt/demo_react_prompt.yaml | 40 +++++++ .../app/core/service/demo_react_service.yaml | 5 + .../app/core/tool/google_search_tool.yaml | 6 +- .../app/core/tool/python_repl.py | 40 +++++++ .../app/core/tool/python_repl_tool.yaml | 19 ++++ ...h_tool.yaml => search_api_baidu_tool.yaml} | 0 ...ch_tool.yaml => search_api_bing_tool.yaml} | 0 18 files changed, 361 insertions(+), 41 deletions(-) create mode 100644 agentuniverse/agent/plan/planner/react_planner/stream_callback.py create mode 100644 agentuniverse/llm/default/default_ollama_llm.py create mode 100644 agentuniverse/llm/default/default_ollama_llm.yaml create mode 100644 agentuniverse/llm/ollama_langchain_instance.py create mode 100644 sample_standard_app/app/core/prompt/demo_react_prompt.yaml create mode 100644 sample_standard_app/app/core/service/demo_react_service.yaml create mode 100644 sample_standard_app/app/core/tool/python_repl.py create mode 100644 sample_standard_app/app/core/tool/python_repl_tool.yaml rename sample_standard_app/app/core/tool/{baidu_search_tool.yaml => 
search_api_baidu_tool.yaml} (100%) rename sample_standard_app/app/core/tool/{bing_search_tool.yaml => search_api_bing_tool.yaml} (100%) diff --git a/agentuniverse/agent/default/react_agent/default_cn_prompt.yaml b/agentuniverse/agent/default/react_agent/default_cn_prompt.yaml index dfdf0057..df8d8659 100644 --- a/agentuniverse/agent/default/react_agent/default_cn_prompt.yaml +++ b/agentuniverse/agent/default/react_agent/default_cn_prompt.yaml @@ -9,16 +9,16 @@ instruction: | 您可以使用以下工具: {tools} - 使用以下格式: + 你的回答必须严格使用以下格式: - Question: 您必须回答的问题 - Thought: 你应该经常想想该怎么做 - Action: 要采取的行动应该是 one of [{tool_names}] - Action Input: 行动的输入 - Observation: 行动的结果 - ... (Thought/Action/Action Input/Observation 的过程可以重复 N 次) - Thought: 我现在知道最终答案了 - Final Answer: 原输入问题的最终答案 + Question: 您必须回答的问题 + Thought: 你应该经常想想该怎么做 + Action: 要采取的行动应该是 one of [{tool_names}] + Action Input: 行动的输入 + Observation: 行动的结果 + ... (Thought/Action/Action Input/Observation 的过程可以重复 N 次) + Thought: 我现在知道最终答案了 + Final Answer: 原输入问题的最终答案 之前的对话: {chat_history} diff --git a/agentuniverse/agent/default/react_agent/default_en_prompt.yaml b/agentuniverse/agent/default/react_agent/default_en_prompt.yaml index 3f7bff67..92137f90 100644 --- a/agentuniverse/agent/default/react_agent/default_en_prompt.yaml +++ b/agentuniverse/agent/default/react_agent/default_en_prompt.yaml @@ -7,14 +7,14 @@ instruction: | Use the following format: - Question: the input question you must answer - Thought: you should always think about what to do - Action: the action to take, should be one of [{tool_names}] - Action Input: the input to the action - Observation: the result of the action - ... 
(this Thought/Action/Action Input/Observation can repeat N times) - Thought: I now know the final answer - Final Answer: the final answer to the original input question + Question: the input question you must answer + Thought: you should always think about what to do + Action: the action to take, should be one of [{tool_names}] + Action Input: the input to the action + Observation: the result of the action + ... (this Thought/Action/Action Input/Observation can repeat N times) + Thought: I now know the final answer + Final Answer: the final answer to the original input question Previous conversion: {chat_history} diff --git a/agentuniverse/agent/default/react_agent/react_agent.py b/agentuniverse/agent/default/react_agent/react_agent.py index 50de90c4..f50e7057 100644 --- a/agentuniverse/agent/default/react_agent/react_agent.py +++ b/agentuniverse/agent/default/react_agent/react_agent.py @@ -1,6 +1,5 @@ # !/usr/bin/env python3 # -*- coding:utf-8 -*- -from langchain.agents import create_openai_tools_agent # @Time : 2024/5/31 21:22 # @Author : wangchongshi diff --git a/agentuniverse/agent/plan/planner/react_planner/react_planner.py b/agentuniverse/agent/plan/planner/react_planner/react_planner.py index cb8c4314..7e2fa2e7 100644 --- a/agentuniverse/agent/plan/planner/react_planner/react_planner.py +++ b/agentuniverse/agent/plan/planner/react_planner/react_planner.py @@ -5,9 +5,11 @@ # @Author : wangchongshi # @Email : wangchongshi.wcs@antgroup.com # @FileName: react_planner.py + from langchain_core.chat_history import InMemoryChatMessageHistory -from langchain.agents import AgentExecutor, create_react_agent, create_structured_chat_agent +from langchain.agents import AgentExecutor, create_react_agent from langchain.tools import Tool as LangchainTool +from langchain_core.runnables import RunnableConfig from agentuniverse.agent.action.tool.tool import Tool from agentuniverse.agent.action.tool.tool_manager import ToolManager @@ -15,6 +17,7 @@ from 
agentuniverse.agent.input_object import InputObject from agentuniverse.agent.memory.chat_memory import ChatMemory from agentuniverse.agent.plan.planner.planner import Planner +from agentuniverse.agent.plan.planner.react_planner.stream_callback import StreamOutPutCallbackHandler from agentuniverse.base.util.prompt_util import process_llm_token from agentuniverse.llm.llm import LLM from agentuniverse.prompt.chat_prompt import ChatPrompt @@ -52,10 +55,19 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, chat_history = memory.as_langchain().chat_memory if memory else InMemoryChatMessageHistory() agent = create_react_agent(llm.as_langchain(), tools, prompt.as_langchain()) - agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) + agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True) return agent_executor.invoke(input=planner_input, memory=memory.as_langchain() if memory else None, - chat_history=chat_history) + chat_history=chat_history, config=self.get_run_config(input_object)) + + @staticmethod + def get_run_config(input_object: InputObject) -> RunnableConfig: + config = RunnableConfig() + callbacks = [] + output_stream = input_object.get_data('output_stream') + callbacks.append(StreamOutPutCallbackHandler(output_stream)) + config.setdefault("callbacks", callbacks) + return config @staticmethod def acquire_tools(action) -> list[LangchainTool]: diff --git a/agentuniverse/agent/plan/planner/react_planner/stream_callback.py b/agentuniverse/agent/plan/planner/react_planner/stream_callback.py new file mode 100644 index 00000000..87ec6acd --- /dev/null +++ b/agentuniverse/agent/plan/planner/react_planner/stream_callback.py @@ -0,0 +1,64 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/6/13 11:03 +# @Author : weizjajj +# @Email : weizhongjie.wzj@antgroup.com +# @FileName: stream_callback.py + +import asyncio +from typing import Optional, Dict, Any + +from langchain_core.agents 
import AgentAction, AgentFinish +from langchain_core.callbacks import BaseCallbackHandler + + +class StreamOutPutCallbackHandler(BaseCallbackHandler): + """Callback Handler that prints to std out.""" + + def __init__(self, queue_stream: asyncio.Queue, color: Optional[str] = None) -> None: + """Initialize callback handler.""" + self.queueStream = queue_stream + self.color = color + + def on_chain_start( + self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any + ) -> None: + return + + def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: + """Print out that we finished a chain.""" + + def on_agent_action( + self, action: AgentAction, color: Optional[str] = None, **kwargs: Any + ) -> Any: + self.queueStream.put_nowait("Thought:"+action.log) + + def on_tool_end( + self, + output: str, + color: Optional[str] = None, + observation_prefix: Optional[str] = None, + llm_prefix: Optional[str] = None, + **kwargs: Any, + ) -> None: + """If not the final action, print out observation.""" + if observation_prefix is not None: + self.queueStream.put_nowait(observation_prefix + output) + else: + self.queueStream.put_nowait('Observation:'+output) + + def on_text( + self, + text: str, + color: Optional[str] = None, + end: str = "", + **kwargs: Any, + ) -> None: + """Run when agent ends.""" + + def on_agent_finish( + self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any + ) -> None: + """Run on agent end.""" + self.queueStream.put_nowait("Thought:" + finish.log + "\n") diff --git a/agentuniverse/llm/default/default_ollama_llm.py b/agentuniverse/llm/default/default_ollama_llm.py new file mode 100644 index 00000000..e8cec875 --- /dev/null +++ b/agentuniverse/llm/default/default_ollama_llm.py @@ -0,0 +1,100 @@ +import json +from typing import Any, Union, AsyncIterator, Iterator, Optional, List, Sequence + +import tiktoken +from langchain_core.language_models import BaseLanguageModel +from ollama import Options +from pydantic import Field 
+ +from agentuniverse.base.annotation.trace import trace_llm +from agentuniverse.base.config.component_configer.configers.llm_configer import LLMConfiger +from agentuniverse.base.util.env_util import get_from_env +from agentuniverse.llm.llm import LLM +from agentuniverse.llm.llm_output import LLMOutput +from agentuniverse.llm.ollama_langchain_instance import OllamaLangchainInstance + + +class OllamaLLM(LLM): + base_url: Optional[str] = Field( + default_factory=lambda: get_from_env("OLLAMA_BASE_URL") if get_from_env( + "OLLAMA_BASE_URL") else "http://localhost:11434") + """Base url the model is hosted under.""" + + streaming: bool = True + + def _new_client(self): + if self.client: + return self.client + from ollama import Client + return Client( + host=self.base_url, + ) + + def _new_async_client(self): + if self.async_client: + return self.async_client + from ollama import AsyncClient + return AsyncClient( + host=self.base_url, + ) + + def _options(self): + return Options(**{ + "num_ctx": self.max_context_length(), + "num_predict": self.max_tokens, + "temperature": self.temperature, + "timeout": self.request_timeout, + **(self.ext_info if self.ext_info else {}), + }) + + @trace_llm + def call(self, messages, stop=None, **kwargs) -> Union[LLMOutput, Iterator[LLMOutput]]: + should_stream = kwargs.pop("stream", self.streaming) + client = self._new_client() + options = self._options() + options.setdefault("stop", stop) + res = client.chat(model=self.model_name, messages=messages, options=options, stream=should_stream) + if should_stream: + return self.generate_result(res) + else: + return LLMOutput(text=res.get("message").get('content'), raw=json.dumps(res)) + + @trace_llm + async def acall(self, messages, stop=None, **kwargs) -> Union[LLMOutput, AsyncIterator[LLMOutput]]: + client = self._new_async_client() + should_stream = kwargs.pop("stream", self.streaming) + options = self._options() + options.setdefault("stop", stop) + res = await 
client.chat(model=self.model_name, messages=messages, options=options, stream=should_stream) + if not should_stream: + return LLMOutput(text=res.get("message").get('content'), raw=json.dumps(res)) + if should_stream: + return self.agenerate_result(res) + + def generate_result(self, data): + for line in data: + yield LLMOutput(text=line.get("message").get('content'), raw=json.dumps(line)) + + async def agenerate_result(self, data): + async for line in data: + yield LLMOutput(text=line.get("message").get('content'), raw=json.dumps(line)) + + def as_langchain(self) -> BaseLanguageModel: + return OllamaLangchainInstance( + self + ) + + def initialize_by_component_configer(self, component_configer: LLMConfiger) -> 'LLM': + super().initialize_by_component_configer(component_configer) + if 'base_url' in component_configer.configer.value: + self.base_url = component_configer.configer.value['base_url'] + if 'max_context_length' in component_configer.configer.value: + self._max_context_length = component_configer.configer.value['max_context_length'] + return self + + def get_num_tokens(self, text: str) -> int: + try: + encoding = tiktoken.encoding_for_model(self.model_name) + except KeyError: + encoding = tiktoken.get_encoding("cl100k_base") + return len(encoding.encode(text)) diff --git a/agentuniverse/llm/default/default_ollama_llm.yaml b/agentuniverse/llm/default/default_ollama_llm.yaml new file mode 100644 index 00000000..84b7a284 --- /dev/null +++ b/agentuniverse/llm/default/default_ollama_llm.yaml @@ -0,0 +1,9 @@ +name: 'default_ollama_llm' +description: 'default openai llm with spi' +model_name: 'qwen2:7b' +max_tokens: 1000 +max_context_length: 32000 +metadata: + type: 'LLM' + module: 'agentuniverse.llm.default.default_ollama_llm' + class: 'OllamaLLM' \ No newline at end of file diff --git a/agentuniverse/llm/llm.py b/agentuniverse/llm/llm.py index 736e7f67..53c4fd67 100644 --- a/agentuniverse/llm/llm.py +++ b/agentuniverse/llm/llm.py @@ -45,7 +45,7 @@ class 
LLM(ComponentBase): streaming: Optional[bool] = False ext_info: Optional[dict] = None tracing: Optional[bool] = None - __max_context_length: Optional[int] = None + _max_context_length: Optional[int] = None def __init__(self, **kwargs): """Initialize the llm.""" @@ -120,14 +120,14 @@ def set_by_agent_model(self, **kwargs) -> None: if 'streaming' in kwargs and kwargs['streaming']: self.streaming = kwargs['streaming'] if 'max_context_length' in kwargs and kwargs['max_context_length']: - self.__max_context_length = kwargs['max_context_length'] + self._max_context_length = kwargs['max_context_length'] def max_context_length(self) -> int: """Max context length. The total length of input tokens and generated tokens is limited by the model's context length. """ - return self.__max_context_length + return self._max_context_length @abstractmethod def get_num_tokens(self, text: str) -> int: diff --git a/agentuniverse/llm/ollama_langchain_instance.py b/agentuniverse/llm/ollama_langchain_instance.py new file mode 100644 index 00000000..845ffa64 --- /dev/null +++ b/agentuniverse/llm/ollama_langchain_instance.py @@ -0,0 +1,39 @@ +from typing import List, Optional, Iterator, Any, AsyncIterator + +from langchain_community.chat_models import ChatOllama +from langchain_core.messages import BaseMessage + +from agentuniverse.llm.llm import LLM + + +class OllamaLangchainInstance(ChatOllama): + llm: LLM = None + + def __init__(self, llm: LLM): + super().__init__() + self.llm = llm + self.model = llm.model_name + + def _create_chat_stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + **kwargs: Any, + ) -> Iterator[str]: + data = self.llm.call( + messages=self._convert_messages_to_ollama_messages(messages), stop=stop, **kwargs + ) + for llm_output in data: + yield llm_output.raw + + async def _acreate_chat_stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + **kwargs: Any, + ) -> AsyncIterator[str]: + data = await 
self.llm.acall( + messages=self._convert_messages_to_ollama_messages(messages), stop=stop, **kwargs + ) + async for llm_output in data: + yield llm_output.raw diff --git a/sample_standard_app/app/bootstrap/server_application.py b/sample_standard_app/app/bootstrap/server_application.py index 7399f367..cb5cb908 100644 --- a/sample_standard_app/app/bootstrap/server_application.py +++ b/sample_standard_app/app/bootstrap/server_application.py @@ -5,6 +5,7 @@ # @Author : jerry.zzw # @Email : jerry.zzw@antgroup.com # @FileName: server_application.py + from agentuniverse.agent_serve.web.web_booster import start_web_server from agentuniverse.base.agentuniverse import AgentUniverse @@ -21,10 +22,4 @@ def start(cls): if __name__ == "__main__": - from langchain_community.document_loaders import AsyncHtmlLoader - - urls = ["https://www.espn.com", "https://lilianweng.github.io/posts/2023-06-23-agent/"] - loader = AsyncHtmlLoader(urls) - docs = loader.load() - print(docs) -# ServerApplication.start() + ServerApplication.start() diff --git a/sample_standard_app/app/core/agent/react_agent_case/demo_react_agent.yaml b/sample_standard_app/app/core/agent/react_agent_case/demo_react_agent.yaml index 175d44c7..5ce49200 100644 --- a/sample_standard_app/app/core/agent/react_agent_case/demo_react_agent.yaml +++ b/sample_standard_app/app/core/agent/react_agent_case/demo_react_agent.yaml @@ -2,18 +2,14 @@ info: name: 'demo_react_agent' description: 'react agent' profile: + prompt_version: qwen_react_agent.cn llm_model: - name: 'default_qwen_llm' -# model_name: 'qwen1.5-72b-chat' + name: 'default_ollama_llm' + temperature: 0 action: tool: -# - 'add' -# - 'sub' -# - 'mul' -# - 'div' - 'google_search_tool' - - 'gold_search_tool' - - 'stock_search_tool' + - 'python_repl' plan: planner: name: 'react_planner' diff --git a/sample_standard_app/app/core/prompt/demo_react_prompt.yaml b/sample_standard_app/app/core/prompt/demo_react_prompt.yaml new file mode 100644 index 00000000..4282da89 --- 
/dev/null +++ b/sample_standard_app/app/core/prompt/demo_react_prompt.yaml @@ -0,0 +1,40 @@ +introduction: 你是一个精通工具使用的AI助手。 +target: 你的目标是根据用户的问题以及给出的背景信息,使用工具回答用户的问题。尤其擅长使用搜索或者使用执行python代码解决问题 +instruction: | + 你必须优先选择使用提供的工具回答用户提出的问题,若用户没有提供工具可以根据你的通识能力解决问题。 + 你在回答时问题必须使用中文回答。 + 你必须从多个角度、维度分析用户的问题,帮助用户获取最全面的信息,需要根据背景和问题,决定搜索哪些信息可以回答问题。 + 你必须把大问题拆解为多个小问题,并规划解决步骤。 + + 您可以使用以下工具: + {tools} + + 你的回答必须严格使用以下格式: + + Question: 您必须回答的问题 + Thought: 我这一步应该做什么,为什么要这么做,我现在要使用一个工具, 不允许回答Final Answer + Action: 要使用的工具应该,值必须是 [{tool_names}] 之一 + Action Input: 工具的输入 + Observation: 工具的执行结果 + ... (Thought/Action/Action Input/Observation 的过程可以重复 N 次) + Thought: 我现在知道所有问题的最终答案了 + Final Answer: 所有问题的最终答案 + + 之前的对话: + {chat_history} + + 背景信息是: + {background} + + 开始! + 注意: + 1.你的回答必须是(Thought/Action/Observation)与(Thought/Final Answer)两种格式之一 + 2.你现在必须根据上一步Observation的结果(成功、失败、报错,信息不完整),判断下一步要执行的动作 + + Question: {input} + Thought: {agent_scratchpad} + + +metadata: + type: 'PROMPT' + version: 'qwen_react_agent.cn' diff --git a/sample_standard_app/app/core/service/demo_react_service.yaml b/sample_standard_app/app/core/service/demo_react_service.yaml new file mode 100644 index 00000000..a8718c13 --- /dev/null +++ b/sample_standard_app/app/core/service/demo_react_service.yaml @@ -0,0 +1,5 @@ +name: 'demo_react_service' +description: 'demo react service of demo agent' +agent: 'demo_react_agent' +metadata: + type: 'SERVICE' \ No newline at end of file diff --git a/sample_standard_app/app/core/tool/google_search_tool.yaml b/sample_standard_app/app/core/tool/google_search_tool.yaml index 7a8f323a..c8ed4af3 100644 --- a/sample_standard_app/app/core/tool/google_search_tool.yaml +++ b/sample_standard_app/app/core/tool/google_search_tool.yaml @@ -1,7 +1,9 @@ name: 'google_search_tool' description: | - tool used for google search, the tool is passed in the following way: - input='xxxx' + 该工具可以用来进行谷歌搜索,工具的输入是你想搜索的内容。 + 工具输入示例: + 示例1: 你想要搜索上海的天气时,工具的输入应该是:上海今天的天气 + 示例2: 你想要搜索日本的天气时,工具的输入应该是:日本的天气 
tool_type: 'api' input_keys: ['input'] metadata: diff --git a/sample_standard_app/app/core/tool/python_repl.py b/sample_standard_app/app/core/tool/python_repl.py new file mode 100644 index 00000000..ae1a6e29 --- /dev/null +++ b/sample_standard_app/app/core/tool/python_repl.py @@ -0,0 +1,40 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/6/12 16:36 +# @Author : weizjajj +# @Email : weizhongjie.wzj@antgroup.com +# @FileName: python_repl.py + +import re + +from langchain_community.utilities import PythonREPL +from pydantic import Field +from agentuniverse.agent.action.tool.tool import Tool + + +class PythonREPLTool(Tool): + """The mock search tool. + + In this tool, we mocked the search engine's answers to search for information about BYD and Warren Buffett. + + Note: + The tool is only suitable for users searching for Buffett or BYD related queries. + We recommend that you configure your `SERPER_API_KEY` and use google_search_tool to get information. + """ + client: PythonREPL = Field(default_factory=lambda: PythonREPL()) + + def execute(self, input: str,**kwargs): + """Demonstrates the execute method of the Tool class.""" + pattern = re.compile(r"```python(.*?)``", re.DOTALL) + matches = pattern.findall(input) + if len(matches) == 0: + pattern = re.compile(r"```py(.*?)``", re.DOTALL) + matches = pattern.findall(input) + if len(matches) == 0: + return self.client.run(input) + res = self.client.run(matches[0]) + if res == "" or res is None: + return "ERROR: 你的python代码中没有使用print输出任何内容,请参考工具示例" + else: + return res diff --git a/sample_standard_app/app/core/tool/python_repl_tool.yaml b/sample_standard_app/app/core/tool/python_repl_tool.yaml new file mode 100644 index 00000000..e50183f6 --- /dev/null +++ b/sample_standard_app/app/core/tool/python_repl_tool.yaml @@ -0,0 +1,19 @@ +name: 'python_repl' +description: '使用该工具可以执行python代码.工具的输入必须时一段有效的python代码. 
如何你想要查看工具的执行结果, 必须在python代码中使用print(...)打印你想查看的内容。 + 工具输入示例: + 你想要计算1+3等于多少时,工具的输入应该是: + ```py + print(1+3) + ``` + 你想要获取百度页面的信息时,工具的输入应该是: + ```py + import requests + resp=requests.get("https://www.baidu.com") + print(resp.content) + ```' +tool_type: 'api' +input_keys: +metadata: + type: 'TOOL' + module: 'sample_standard_app.app.core.tool.python_repl' + class: 'PythonREPLTool' \ No newline at end of file diff --git a/sample_standard_app/app/core/tool/baidu_search_tool.yaml b/sample_standard_app/app/core/tool/search_api_baidu_tool.yaml similarity index 100% rename from sample_standard_app/app/core/tool/baidu_search_tool.yaml rename to sample_standard_app/app/core/tool/search_api_baidu_tool.yaml diff --git a/sample_standard_app/app/core/tool/bing_search_tool.yaml b/sample_standard_app/app/core/tool/search_api_bing_tool.yaml similarity index 100% rename from sample_standard_app/app/core/tool/bing_search_tool.yaml rename to sample_standard_app/app/core/tool/search_api_bing_tool.yaml From 7e3120b75034719d6edb2646f35b96ea80045e80 Mon Sep 17 00:00:00 2001 From: weizjajj Date: Thu, 13 Jun 2024 15:09:57 +0800 Subject: [PATCH 06/12] fix config --- sample_standard_app/config/config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sample_standard_app/config/config.toml b/sample_standard_app/config/config.toml index 6abfc733..4a662064 100644 --- a/sample_standard_app/config/config.toml +++ b/sample_standard_app/config/config.toml @@ -28,7 +28,7 @@ sqldb_wrapper = ['sample_standard_app.app.core.sqldb_wrapper'] # Log config file path, an absolute path or a relative path based on the dir where the current config file is located. log_config_path = './log_config.toml' # Custom key file path, use to save your own secret key like open ai or sth else. REMEMBER TO ADD IT TO .gitignore. 
-custom_key_path = './custom_key.toml' +#custom_key_path = './custom_key.toml' [DB] # A sqlalchemy db uri used for storing various info, for example, service request, generated during application running. From b530f63c935db6a6e2e646ad83b2ca323e9eee30 Mon Sep 17 00:00:00 2001 From: weizjajj Date: Thu, 13 Jun 2024 15:31:25 +0800 Subject: [PATCH 07/12] fix tool problem --- agentuniverse/agent/action/tool/tool.py | 9 ++++++--- .../core/agent/rag_agent_case/demo_rag_agent.yaml | 4 ++-- .../app/core/tool/add_simple_tool.yaml | 2 +- .../app/core/tool/div_simple_tool.yaml | 2 +- .../app/core/tool/google_search_tool.py | 3 ++- .../app/core/tool/mock_search_tool.py | 3 ++- .../app/core/tool/mul_simple_tool.yaml | 2 +- sample_standard_app/app/core/tool/python_repl.py | 5 +++-- .../app/core/tool/python_repl_tool.yaml | 2 +- .../app/core/tool/search_api_tool.py | 9 +++++---- .../app/core/tool/simple_math_tool.py | 14 +++++++++----- .../app/core/tool/sub_simple_tool.yaml | 2 +- sample_standard_app/app/test/test_react_agent.py | 2 +- 13 files changed, 35 insertions(+), 24 deletions(-) diff --git a/agentuniverse/agent/action/tool/tool.py b/agentuniverse/agent/action/tool/tool.py index 457bee94..b64b661b 100644 --- a/agentuniverse/agent/action/tool/tool.py +++ b/agentuniverse/agent/action/tool/tool.py @@ -64,7 +64,8 @@ def __init__(self, **kwargs): def run(self, **kwargs): """The callable method that runs the tool.""" self.input_check(kwargs) - return self.execute(**kwargs) + tool_input = ToolInput(kwargs) + return self.execute(tool_input) def input_check(self, kwargs: dict) -> None: """Check whether the input parameters of the tool contain input keys of the tool""" @@ -75,10 +76,12 @@ def input_check(self, kwargs: dict) -> None: def langchain_run(self, *args, callbacks=None, **kwargs): """The callable method that runs the tool.""" kwargs["callbacks"] = callbacks - return self.execute(*args, **kwargs) + tool_input = ToolInput(kwargs) + tool_input.add_data(self.input_keys[0], 
args[0]) + return self.execute(tool_input) @abstractmethod - def execute(self, *args, **kwargs): + def execute(self, tool_input: ToolInput): raise NotImplementedError def as_langchain(self) -> LangchainTool: diff --git a/sample_standard_app/app/core/agent/rag_agent_case/demo_rag_agent.yaml b/sample_standard_app/app/core/agent/rag_agent_case/demo_rag_agent.yaml index 8457d2df..efef0526 100644 --- a/sample_standard_app/app/core/agent/rag_agent_case/demo_rag_agent.yaml +++ b/sample_standard_app/app/core/agent/rag_agent_case/demo_rag_agent.yaml @@ -24,8 +24,8 @@ profile: 需要回答的问题是: {input} llm_model: - name: 'demo_llm' - model_name: 'gpt-4o' + name: 'default_ollama_llm' +# model_name: 'gpt-4o' plan: planner: name: 'rag_planner' diff --git a/sample_standard_app/app/core/tool/add_simple_tool.yaml b/sample_standard_app/app/core/tool/add_simple_tool.yaml index 1a1c2026..d5dda037 100644 --- a/sample_standard_app/app/core/tool/add_simple_tool.yaml +++ b/sample_standard_app/app/core/tool/add_simple_tool.yaml @@ -1,7 +1,7 @@ name: 'add' description: 'Adding two float number, input_params with 2 num spilt with comma, return result. For example, input_params = 1, 2 , the result = 3' tool_type: 'api' -input_keys: +input_keys: ['input'] metadata: type: 'TOOL' module: 'sample_standard_app.app.core.tool.simple_math_tool' diff --git a/sample_standard_app/app/core/tool/div_simple_tool.yaml b/sample_standard_app/app/core/tool/div_simple_tool.yaml index 69b9560f..eea3425d 100644 --- a/sample_standard_app/app/core/tool/div_simple_tool.yaml +++ b/sample_standard_app/app/core/tool/div_simple_tool.yaml @@ -1,7 +1,7 @@ name: 'div' description: 'Dividing two float number, input_params with 2 num spilt with comma, return result. 
For example, input_params = 6, 3 , the result = 2' tool_type: 'api' -input_keys: +input_keys: ['input'] metadata: type: 'TOOL' module: 'sample_standard_app.app.core.tool.simple_math_tool' diff --git a/sample_standard_app/app/core/tool/google_search_tool.py b/sample_standard_app/app/core/tool/google_search_tool.py index 2ab955aa..7211ca71 100644 --- a/sample_standard_app/app/core/tool/google_search_tool.py +++ b/sample_standard_app/app/core/tool/google_search_tool.py @@ -26,7 +26,8 @@ class GoogleSearchTool(Tool): serper_api_key: Optional[str] = Field(default_factory=lambda: get_from_env("SERPER_API_KEY")) - def execute(self, input: str, **kwargs): + def execute(self, tool_input: ToolInput): + input = tool_input.get_data("input") if self.serper_api_key is None: return MockSearchTool().execute(input=input) # get top10 results from Google search. diff --git a/sample_standard_app/app/core/tool/mock_search_tool.py b/sample_standard_app/app/core/tool/mock_search_tool.py index 33deb9aa..f0b85660 100644 --- a/sample_standard_app/app/core/tool/mock_search_tool.py +++ b/sample_standard_app/app/core/tool/mock_search_tool.py @@ -40,6 +40,7 @@ class MockSearchTool(Tool): We recommend that you configure your `SERPER_API_KEY` and use google_search_tool to get information. """ - def execute(self, input:str): + def execute(self, tool_input: ToolInput): + input = tool_input.get_data("input") """Demonstrates the execute method of the Tool class.""" return MOCK_SEARCH_RESULT diff --git a/sample_standard_app/app/core/tool/mul_simple_tool.yaml b/sample_standard_app/app/core/tool/mul_simple_tool.yaml index 4278bc4e..ca5e95c7 100644 --- a/sample_standard_app/app/core/tool/mul_simple_tool.yaml +++ b/sample_standard_app/app/core/tool/mul_simple_tool.yaml @@ -1,7 +1,7 @@ name: 'mul' description: 'Multiplying two float number, input_params with 2 num spilt with comma, return result. 
For example, a = 2, 1 , the result = 2' tool_type: 'api' -input_keys: +input_keys: ['input'] metadata: type: 'TOOL' module: 'sample_standard_app.app.core.tool.simple_math_tool' diff --git a/sample_standard_app/app/core/tool/python_repl.py b/sample_standard_app/app/core/tool/python_repl.py index ae1a6e29..04af11c1 100644 --- a/sample_standard_app/app/core/tool/python_repl.py +++ b/sample_standard_app/app/core/tool/python_repl.py @@ -10,7 +10,7 @@ from langchain_community.utilities import PythonREPL from pydantic import Field -from agentuniverse.agent.action.tool.tool import Tool +from agentuniverse.agent.action.tool.tool import Tool, ToolInput class PythonREPLTool(Tool): @@ -24,7 +24,8 @@ class PythonREPLTool(Tool): """ client: PythonREPL = Field(default_factory=lambda: PythonREPL()) - def execute(self, input: str,**kwargs): + def execute(self, tool_input: ToolInput): + input = tool_input.get_data("input") """Demonstrates the execute method of the Tool class.""" pattern = re.compile(r"```python(.*?)``", re.DOTALL) matches = pattern.findall(input) diff --git a/sample_standard_app/app/core/tool/python_repl_tool.yaml b/sample_standard_app/app/core/tool/python_repl_tool.yaml index e50183f6..acb732f5 100644 --- a/sample_standard_app/app/core/tool/python_repl_tool.yaml +++ b/sample_standard_app/app/core/tool/python_repl_tool.yaml @@ -12,7 +12,7 @@ description: '使用该工具可以执行python代码.工具的输入必须时 print(resp.content) ```' tool_type: 'api' -input_keys: +input_keys: ['input'] metadata: type: 'TOOL' module: 'sample_standard_app.app.core.tool.python_repl' diff --git a/sample_standard_app/app/core/tool/search_api_tool.py b/sample_standard_app/app/core/tool/search_api_tool.py index 78ab009d..20032ca1 100644 --- a/sample_standard_app/app/core/tool/search_api_tool.py +++ b/sample_standard_app/app/core/tool/search_api_tool.py @@ -12,7 +12,7 @@ from langchain_community.utilities import SearchApiAPIWrapper from pydantic import Field -from agentuniverse.agent.action.tool.tool import Tool +from 
agentuniverse.agent.action.tool.tool import Tool, ToolInput from agentuniverse.base.config.component_configer.configers.tool_configer import ToolConfiger from agentuniverse.base.util.env_util import get_from_env @@ -46,14 +46,15 @@ def _load_api_wapper(self): self.search_api_wrapper = SearchApiAPIWrapper(searchapi_api_key=self.search_api_key, engine=self.engine) return self.search_api_wrapper - def execute(self, input: str, **kwargs): + def execute(self, tool_input: ToolInput): self._load_api_wapper() search_params = {} for k, v in self.search_params.items(): - if k in kwargs: - search_params[k] = kwargs[k] + if k in tool_input.to_dict(): + search_params[k] = tool_input.get_data(k) continue search_params[k] = v + input = tool_input.get_data("input") if self.search_type == "json": return self.search_api_wrapper.results(query=input, **search_params) return self.search_api_wrapper.run(query=input, **search_params) diff --git a/sample_standard_app/app/core/tool/simple_math_tool.py b/sample_standard_app/app/core/tool/simple_math_tool.py index 3559208a..bca1e0e2 100644 --- a/sample_standard_app/app/core/tool/simple_math_tool.py +++ b/sample_standard_app/app/core/tool/simple_math_tool.py @@ -8,32 +8,36 @@ # @FileName: simple_math_tool.py -from agentuniverse.agent.action.tool.tool import Tool +from agentuniverse.agent.action.tool.tool import Tool, ToolInput class AddTool(Tool): - def execute(self, input_params, **kwargs): + def execute(self, tool_input: ToolInput): + input_params = tool_input.get_data('input') a, b = input_params.split(',') result = float(a) + float(b) return result class SubtractTool(Tool): - def execute(self, input_params, **kwargs): + def execute(self, tool_input: ToolInput): + input_params = tool_input.get_data('input') a, b = input_params.split(',') result = float(a) - float(b) return result class MultiplyTool(Tool): - def execute(self, input_params, **kwargs): + def execute(self, tool_input: ToolInput): + input_params = tool_input.get_data('input') 
a, b = input_params.split(',') result = float(a) * float(b) return result class DivideTool(Tool): - def execute(self, input_params, **kwargs): + def execute(self, tool_input: ToolInput): + input_params = tool_input.get_data('input') a, b = input_params.split(',') result = float(a) / float(b) return result diff --git a/sample_standard_app/app/core/tool/sub_simple_tool.yaml b/sample_standard_app/app/core/tool/sub_simple_tool.yaml index 0fb7097e..2a2e6706 100644 --- a/sample_standard_app/app/core/tool/sub_simple_tool.yaml +++ b/sample_standard_app/app/core/tool/sub_simple_tool.yaml @@ -1,7 +1,7 @@ name: 'sub' description: 'Subtracting two float number, input_params with 2 num spilt with comma, return result. For example, input_params = 2, 1 , the result = 1' tool_type: 'api' -input_keys: +input_keys: ['input'] metadata: type: 'TOOL' module: 'sample_standard_app.app.core.tool.simple_math_tool' diff --git a/sample_standard_app/app/test/test_react_agent.py b/sample_standard_app/app/test/test_react_agent.py index fad03b9b..77534dbd 100644 --- a/sample_standard_app/app/test/test_react_agent.py +++ b/sample_standard_app/app/test/test_react_agent.py @@ -21,7 +21,7 @@ def setUp(self) -> None: def test_react_agent(self): """Test demo reAct agent.""" instance: Agent = AgentManager().get_instance_obj('demo_react_agent') - output_object: OutputObject = instance.run(input='黄金和沪深300的价格分别是多少') + output_object: OutputObject = instance.run(input='请给出一段python代码,可以计算三数之和,给出之前必须验证代码是否可以运行,最少验证三次') if __name__ == '__main__': From 7b3ce110207c0cfd95ee4842fa7184260e614239 Mon Sep 17 00:00:00 2001 From: weizjajj Date: Thu, 13 Jun 2024 15:44:11 +0800 Subject: [PATCH 08/12] fix tool input --- agentuniverse/agent/action/tool/tool.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/agentuniverse/agent/action/tool/tool.py b/agentuniverse/agent/action/tool/tool.py index b64b661b..a005a41a 100644 --- a/agentuniverse/agent/action/tool/tool.py +++ 
b/agentuniverse/agent/action/tool/tool.py @@ -77,9 +77,20 @@ def langchain_run(self, *args, callbacks=None, **kwargs): """The callable method that runs the tool.""" kwargs["callbacks"] = callbacks tool_input = ToolInput(kwargs) - tool_input.add_data(self.input_keys[0], args[0]) + parse_result = self.parse_react_input(args[0]) + for key in self.input_keys: + tool_input.add_data(key, parse_result[key]) return self.execute(tool_input) + def parse_react_input(self, input_str: str): + """ + parse react string to you input + you can define your own logic here by override this function + """ + return { + self.input_keys[0]: input_str + } + @abstractmethod def execute(self, tool_input: ToolInput): raise NotImplementedError From dda5a4ecfc70ebb14b029eba54a879d3106b31d6 Mon Sep 17 00:00:00 2001 From: weizjajj Date: Thu, 13 Jun 2024 16:01:01 +0800 Subject: [PATCH 09/12] support config react max_iterations --- .../agent/plan/planner/react_planner/react_planner.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/agentuniverse/agent/plan/planner/react_planner/react_planner.py b/agentuniverse/agent/plan/planner/react_planner/react_planner.py index 7e2fa2e7..ef1f76f9 100644 --- a/agentuniverse/agent/plan/planner/react_planner/react_planner.py +++ b/agentuniverse/agent/plan/planner/react_planner/react_planner.py @@ -51,11 +51,13 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, prompt: Prompt = self.handle_prompt(agent_model, planner_input) process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input) - chat_history = memory.as_langchain().chat_memory if memory else InMemoryChatMessageHistory() agent = create_react_agent(llm.as_langchain(), tools, prompt.as_langchain()) - agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True) + agent_executor = AgentExecutor(agent=agent, tools=tools, + verbose=True, + handle_parsing_errors=True, + 
max_iterations=agent_model.plan.get('planner').get("max_iterations", 15)) return agent_executor.invoke(input=planner_input, memory=memory.as_langchain() if memory else None, chat_history=chat_history, config=self.get_run_config(input_object)) From abf3d20d68b4b4ef21fce149d6397f5a87c11bff Mon Sep 17 00:00:00 2001 From: weizjajj Date: Thu, 13 Jun 2024 20:38:37 +0800 Subject: [PATCH 10/12] add claude support --- .../llm/claude_langchain_instance.py | 93 +++++++++++ agentuniverse/llm/default/claude_llm.py | 154 ++++++++++++++++++ agentuniverse/llm/default/claude_llm.yaml | 8 + docs/guidebook/en/3_1_2_Claude_LLM_Use.md | 34 ++++ .../3_1_2_Claude\344\275\277\347\224\250.md" | 34 ++++ pyproject.toml | 2 + 6 files changed, 325 insertions(+) create mode 100644 agentuniverse/llm/claude_langchain_instance.py create mode 100644 agentuniverse/llm/default/claude_llm.py create mode 100644 agentuniverse/llm/default/claude_llm.yaml create mode 100644 docs/guidebook/en/3_1_2_Claude_LLM_Use.md create mode 100644 "docs/guidebook/zh/3_1_2_Claude\344\275\277\347\224\250.md" diff --git a/agentuniverse/llm/claude_langchain_instance.py b/agentuniverse/llm/claude_langchain_instance.py new file mode 100644 index 00000000..e7876543 --- /dev/null +++ b/agentuniverse/llm/claude_langchain_instance.py @@ -0,0 +1,93 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/5/22 17:07 +# @Author : weizjajj +# @Email : weizhongjie.wzj@antgroup.com +# @FileName: claude_langchain_instance.py + +import warnings +from typing import List, Optional, Any + +from langchain_anthropic import ChatAnthropic +from langchain_anthropic.chat_models import _tools_in_params +from langchain_core.callbacks import CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun +from langchain_core.language_models.chat_models import generate_from_stream, agenerate_from_stream +from langchain_core.messages import BaseMessage, SystemMessage, HumanMessage +from langchain_core.outputs import ChatResult + +from 
agentuniverse.llm.llm import LLM + + +class ClaudeLangChainInstance(ChatAnthropic): + """ + This class wraps the Claude API into a LangChain API. + """ + llm: LLM = None + + def __init__(self, llm: LLM): + init_params = {} + init_params['model'] = llm.model_name if llm.model_name else 'Claude-Instant-V1.3' + init_params['temperature'] = llm.temperature if llm.temperature else 0.7 + init_params['default_request_timeout'] = llm.request_timeout + init_params['streaming'] = llm.streaming if llm.streaming else False + init_params['anthropic_api_key'] = llm.anthropic_api_key if llm.anthropic_api_key else 'blank' + init_params['max_tokens'] = llm.max_tokens + init_params['max_retries'] = llm.max_retries if llm.max_retries else 2 + init_params['anthropic_api_url'] = llm.anthropic_api_url + init_params['streaming'] = llm.streaming + init_params['llm'] = llm + super().__init__(**init_params) + + def _generate( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + if len(messages) > 1 and isinstance(messages[1], SystemMessage): + messages[1] = HumanMessage(content=messages[1].content) + params = self._format_params(messages=messages, stop=stop, **kwargs) + if self.streaming: + if _tools_in_params(params): + warnings.warn( + "stream: Tool use is not yet supported in streaming mode." 
+ ) + else: + stream_iter = self._stream( + messages, stop=stop, run_manager=run_manager, **kwargs + ) + return generate_from_stream(stream_iter) + if _tools_in_params(params): + data = self._client.beta.tools.messages.create(**params) + else: + data = self.llm.call(**params).raw + return self._format_output(data, **kwargs) + + async def _agenerate( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + if len(messages) > 1 and isinstance(messages[1], SystemMessage): + messages[1] = HumanMessage(content=messages[1].content) + params = self._format_params(messages=messages, stop=stop, **kwargs) + if self.streaming: + if _tools_in_params(params): + warnings.warn( + "stream: Tool use is not yet supported in streaming mode." + ) + else: + stream_iter = self._astream( + messages, stop=stop, run_manager=run_manager, **kwargs + ) + return await agenerate_from_stream(stream_iter) + if _tools_in_params(params): + data = await self._async_client.beta.tools.messages.create(**params) + else: + data = await self.llm.acall(**params) + data = data.raw + return self._format_output(data, **kwargs) diff --git a/agentuniverse/llm/default/claude_llm.py b/agentuniverse/llm/default/claude_llm.py new file mode 100644 index 00000000..bee1f820 --- /dev/null +++ b/agentuniverse/llm/default/claude_llm.py @@ -0,0 +1,154 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/5/21 17:49 +# @Author : weizjajj +# @Email : weizhongjie.wzj@antgroup.com +# @FileName: claude_llm.py + +import asyncio +from typing import Optional, Any, Union, Iterator, AsyncIterator + +import anthropic +import httpx +from langchain_core.language_models import BaseLanguageModel +from pydantic import Field + +from agentuniverse.base.annotation.trace import trace_llm +from agentuniverse.base.util.env_util import get_from_env +from agentuniverse.llm.claude_langchain_instance import 
ClaudeLangChainInstance +from agentuniverse.llm.llm import LLM +from agentuniverse.llm.llm_output import LLMOutput + +ClaudeMAXCONTETNLENGTH = { + "claude-3-opus-20240229": 200000, + "claude-3-sonnet-20240229": 200000, + "claude-3-haiku-20240307": 200000, + "claude-2.1": 200000, + "claude-2.0": 100000, + "claude-instant-1.2": 100000 +} + + +class ClaudeLLM(LLM): + """ + This class implements an interface for interacting with the Anthropic Claude model. + + Attribute: + anthropic_api_key (Optional[str]): The API key for the Anthropic API. Defaults to the value of the environment variable ANTHROPIC_API_KEY. + anthropic_api_url (Optional[str]): The URL for the Anthropic API. Defaults to the value of the environment variable ANTHROPIC_API_URL. + anthropic_proxy (Optional[str]): The proxy to use for the Anthropic API. Defaults to None. + connection_pool_limits (Optional[int]): The maximum number of connections to keep in a pool. Defaults to None. + """ + anthropic_api_key: Optional[str] = Field(default_factory=lambda: get_from_env('ANTHROPIC_API_KEY')) + anthropic_api_url: Optional[str] = Field(default_factory=lambda: get_from_env('ANTHROPIC_API_URL')) + anthropic_proxy: Optional[str] = None + connection_pool_limits: Optional[int] = None + + def _new_client(self): + client = anthropic.Anthropic( + api_key=self.anthropic_api_key, + base_url=self.anthropic_api_url, + timeout=self.request_timeout if self.request_timeout else 60, + max_retries=self.max_retries if self.max_retries else 2, + http_client=httpx.Client(proxy=self.anthropic_proxy) if self.anthropic_proxy else None, + connection_pool_limits=self.connection_pool_limits + ) + return client + + def _new_async_client(self): + client = anthropic.AsyncAnthropic( + api_key=self.anthropic_api_key, + base_url=self.anthropic_api_url, + timeout=self.request_timeout if self.request_timeout else 60, + max_retries=self.max_retries if self.max_retries else 2, + http_client=httpx.AsyncClient(proxy=self.anthropic_proxy) if 
self.anthropic_proxy else None, + connection_pool_limits=self.connection_pool_limits + ) + return client + @trace_llm + def call(self, messages: list, **kwargs: Any) -> Union[LLMOutput, Iterator[LLMOutput]]: + """Run the Claude LLM. + + Args: + messages (list): The messages to send to the LLM. + **kwargs: Arbitrary keyword arguments. + """ + streaming = kwargs.pop("streaming") if "streaming" in kwargs else self.streaming + self.client = self._new_client() + chat_completion = self.client.messages.create( + messages=messages, + model=kwargs.pop('model', self.model_name), + temperature=kwargs.pop('temperature', self.temperature), + stream=kwargs.pop('stream', streaming), + max_tokens=kwargs.pop('max_tokens', self.max_tokens), + **kwargs, + ) + if not streaming: + self.close() + return self.parse_result(chat_completion) + return self.generate_stream_result(chat_completion) + + @trace_llm + async def acall(self, messages: list, **kwargs: Any) -> Union[LLMOutput, AsyncIterator[LLMOutput]]: + streaming = kwargs.pop("streaming") if "streaming" in kwargs else self.streaming + self.client = self._new_async_client() + chat_completion = await self.client.messages.create( + messages=messages, + model=kwargs.pop('model', self.model_name), + temperature=kwargs.pop('temperature', self.temperature), + stream=kwargs.pop('stream', streaming), + max_tokens=kwargs.pop('max_tokens', self.max_tokens), + **kwargs, + ) + if not streaming: + await self.aclose() + return self.parse_result(chat_completion) + return self.agenerate_stream_result(chat_completion) + + @staticmethod + def parse_result(data): + text = data.content[0].text + if not text: + return + return LLMOutput(text=text, raw=data) + + def generate_stream_result(self, chat_completion: Iterator): + for chunk in chat_completion: + if chunk.type != 'content_block_delta': + continue + yield LLMOutput(text=chunk.delta.text, raw=chunk.model_dump()) + self.close() + + async def agenerate_stream_result(self, chat_completion: 
AsyncIterator): + async for chunk in chat_completion: + print(chunk) + if chunk.type != 'content_block_delta': + continue + yield LLMOutput(text=chunk.delta.text, raw=chunk.model_dump()) + await self.aclose() + + def as_langchain(self) -> BaseLanguageModel: + """ + Convert this instance into a LangChain compatible object + """ + return ClaudeLangChainInstance(self) + + def get_num_tokens(self, text: str) -> int: + encode = self._new_client().count_tokens(text) + return encode + + def max_context_length(self) -> int: + if super().max_context_length(): + return super().max_context_length() + return ClaudeMAXCONTETNLENGTH[self.model_name] + + def close(self): + """Close the client.""" + if hasattr(self, 'client') and self.client: + self.client.close() + + async def aclose(self): + """Async close the client.""" + if hasattr(self, 'async_client') and self.async_client: + await self.async_client.aclose() diff --git a/agentuniverse/llm/default/claude_llm.yaml b/agentuniverse/llm/default/claude_llm.yaml new file mode 100644 index 00000000..31538a3a --- /dev/null +++ b/agentuniverse/llm/default/claude_llm.yaml @@ -0,0 +1,8 @@ +name: 'default_claude_llm' +description: 'default claude llm with spi' +model_name: 'claude-3-opus-20240229' +max_tokens: 4096 +metadata: + type: 'LLM' + module: 'agentuniverse.llm.default.claude_llm' + class: 'ClaudeLLM' \ No newline at end of file diff --git a/docs/guidebook/en/3_1_2_Claude_LLM_Use.md b/docs/guidebook/en/3_1_2_Claude_LLM_Use.md new file mode 100644 index 00000000..b18de2fd --- /dev/null +++ b/docs/guidebook/en/3_1_2_Claude_LLM_Use.md @@ -0,0 +1,34 @@ +# Claude Usage +## 1. Create the relevant file. +Create a YAML file, for example, user_claude.yaml +Paste the following content into your user_claude.yaml file.
+```yaml +name: 'user_claude_llm' +description: 'user claude llm with spi' +model_name: 'claude-3-opus-20240229' +max_tokens: 4096 +metadata: + type: 'LLM' + module: 'agentuniverse.llm.default.claude_llm' + class: 'ClaudeLLM' +``` +## 2. Environment Setup +Must be configured: ANTHROPIC_API_KEY +Optional: ANTHROPIC_API_URL +### 2.1 Configure through Python code +```python +import os +os.environ['ANTHROPIC_API_KEY'] = 'sk-***' +os.environ['ANTHROPIC_API_URL'] = 'https://xxxxxx' +``` +### 2.2 Configure through the configuration file +In the custom_key.toml file under the config directory of the project, add the configuration: +```toml +ANTHROPIC_API_KEY="sk-******" +ANTHROPIC_API_URL="https://xxxxxx" +``` +## 3. Obtaining the ANTHROPIC API KEY +Reference Claude Official Documentation: https://docs.anthropic.com/zh-CN/docs/getting-access-to-claude + +## 4. Note +In agentuniverse, we have already created an llm with the name default_claude_llm. After configuring the ANTHROPIC_API_KEY, users can directly use it. \ No newline at end of file diff --git "a/docs/guidebook/zh/3_1_2_Claude\344\275\277\347\224\250.md" "b/docs/guidebook/zh/3_1_2_Claude\344\275\277\347\224\250.md" new file mode 100644 index 00000000..af62c192 --- /dev/null +++ "b/docs/guidebook/zh/3_1_2_Claude\344\275\277\347\224\250.md" @@ -0,0 +1,34 @@ +# Claude 使用 +## 1. 创建相关文件 +创建一个yaml文件,例如 user_claude.yaml +将以下内容粘贴到您的user_claude.yaml文件当中 +```yaml +name: 'user_claude_llm' +description: 'user claude llm with spi' +model_name: 'claude-3-opus-20240229' +max_tokens: 4096 +metadata: + type: 'LLM' + module: 'agentuniverse.llm.default.claude_llm' + class: 'ClaudeLLM' +``` +## 2. 环境设置 +必须配置:ANTHROPIC_API_KEY +选配:ANTHROPIC_API_URL +### 2.1 通过python代码配置 +```python +import os +os.environ['ANTHROPIC_API_KEY'] = 'sk-***' +os.environ['ANTHROPIC_API_URL'] = 'https://xxxxxx' +``` +### 2.2 通过配置文件配置 +在项目的config目录下的custom_key.toml当中,添加配置: +```toml +ANTHROPIC_API_KEY="sk-******" +ANTHROPIC_API_URL="https://xxxxxx" +``` +## 3. 
Claude API KEY 获取 +参考 Claude 官方文档:https://docs.anthropic.com/zh-CN/docs/getting-access-to-claude + +## 4. 注意 +在agentuniverse中,我们已经创建了一个name为default_claude_llm的llm,用户在配置ANTHROPIC_API_KEY之后可以直接使用 \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index d725b8f9..88d4103e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,6 +48,8 @@ myst-parser = "^2.0.0" qianfan = "^0.3.12" dashscope = "^1.19.1" anthropic = "^0.26.0" +ollama = '^0.2.1' +langchain-anthropic = '^0.1.13' [tool.poetry.extras] log_ext = ["aliyun-log-python-sdk"] From 5b22812c5b601eebd85c9fa19feca0d61c26ce84 Mon Sep 17 00:00:00 2001 From: weizjajj Date: Thu, 13 Jun 2024 21:05:06 +0800 Subject: [PATCH 11/12] fix support --- .../app/core/agent/nl2api_agent_case/nl2api_agent.yaml | 3 +-- .../app/core/agent/rag_agent_case/demo_rag_agent.yaml | 2 +- .../app/core/agent/react_agent_case/demo_react_agent.yaml | 4 ++-- sample_standard_app/app/core/tool/google_search_tool.py | 2 +- sample_standard_app/app/core/tool/python_repl_tool.yaml | 4 ++-- 5 files changed, 7 insertions(+), 8 deletions(-) diff --git a/sample_standard_app/app/core/agent/nl2api_agent_case/nl2api_agent.yaml b/sample_standard_app/app/core/agent/nl2api_agent_case/nl2api_agent.yaml index 26dfd2a2..2bf6c79d 100644 --- a/sample_standard_app/app/core/agent/nl2api_agent_case/nl2api_agent.yaml +++ b/sample_standard_app/app/core/agent/nl2api_agent_case/nl2api_agent.yaml @@ -3,8 +3,7 @@ info: description: 'demo nl2api agent' profile: llm_model: - name: 'default_openai_llm' - model_name: 'gpt-4o' + name: 'default_qwen_llm' action: tool: - 'add' diff --git a/sample_standard_app/app/core/agent/rag_agent_case/demo_rag_agent.yaml b/sample_standard_app/app/core/agent/rag_agent_case/demo_rag_agent.yaml index efef0526..fecdd274 100644 --- a/sample_standard_app/app/core/agent/rag_agent_case/demo_rag_agent.yaml +++ b/sample_standard_app/app/core/agent/rag_agent_case/demo_rag_agent.yaml @@ -24,7 +24,7 @@ profile: 需要回答的问题是: {input} llm_model: 
- name: 'default_ollama_llm' + name: 'default_qwen_llm' # model_name: 'gpt-4o' plan: planner: diff --git a/sample_standard_app/app/core/agent/react_agent_case/demo_react_agent.yaml b/sample_standard_app/app/core/agent/react_agent_case/demo_react_agent.yaml index 5ce49200..8a38838d 100644 --- a/sample_standard_app/app/core/agent/react_agent_case/demo_react_agent.yaml +++ b/sample_standard_app/app/core/agent/react_agent_case/demo_react_agent.yaml @@ -4,12 +4,12 @@ info: profile: prompt_version: qwen_react_agent.cn llm_model: - name: 'default_ollama_llm' + name: 'default_qwen_llm' temperature: 0 action: tool: - 'google_search_tool' - - 'python_repl' + - 'python_runner' plan: planner: name: 'react_planner' diff --git a/sample_standard_app/app/core/tool/google_search_tool.py b/sample_standard_app/app/core/tool/google_search_tool.py index 7211ca71..22da460a 100644 --- a/sample_standard_app/app/core/tool/google_search_tool.py +++ b/sample_standard_app/app/core/tool/google_search_tool.py @@ -29,7 +29,7 @@ class GoogleSearchTool(Tool): def execute(self, tool_input: ToolInput): input = tool_input.get_data("input") if self.serper_api_key is None: - return MockSearchTool().execute(input=input) + return MockSearchTool().execute(tool_input=tool_input) # get top10 results from Google search. search = GoogleSerperAPIWrapper(serper_api_key=self.serper_api_key, k=10, gl="us", hl="en", type="search") return search.run(query=input) \ No newline at end of file diff --git a/sample_standard_app/app/core/tool/python_repl_tool.yaml b/sample_standard_app/app/core/tool/python_repl_tool.yaml index acb732f5..5ec69b46 100644 --- a/sample_standard_app/app/core/tool/python_repl_tool.yaml +++ b/sample_standard_app/app/core/tool/python_repl_tool.yaml @@ -1,5 +1,5 @@ -name: 'python_repl' -description: '使用该工具可以执行python代码.工具的输入必须时一段有效的python代码. 如何你想要查看工具的执行结果, 必须在python代码中使用print(...)打印你想查看的内容。 +name: 'python_runner' +description: '使用该工具可以执行python代码,可以在pycharm中直接运行的代码.工具的输入必须时一段有效的python代码. 
如何你想要查看工具的执行结果, 必须在python代码中使用print(...)打印你想查看的内容。 工具输入示例: 你想要计算1+3等于多少时,工具的输入应该是: ```py From 05c032314248ee01bacf63f6c2b4c429a0247ab2 Mon Sep 17 00:00:00 2001 From: weizjajj Date: Thu, 13 Jun 2024 21:18:57 +0800 Subject: [PATCH 12/12] remove used file --- sample_standard_app/app/test/test_playwight.py | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 sample_standard_app/app/test/test_playwight.py diff --git a/sample_standard_app/app/test/test_playwight.py b/sample_standard_app/app/test/test_playwight.py deleted file mode 100644 index 964bc2dd..00000000 --- a/sample_standard_app/app/test/test_playwight.py +++ /dev/null @@ -1,10 +0,0 @@ - -from langchain_community.agent_toolkits import PlayWrightBrowserToolkit -from langchain_community.tools.playwright.utils import create_async_playwright_browser - -if __name__ == '__main__': - from langchain_community.document_loaders import AsyncHtmlLoader - - urls = ["https://www.espn.com", "https://lilianweng.github.io/posts/2023-06-23-agent/"] - loader = AsyncHtmlLoader(urls) - docs = loader.load() \ No newline at end of file