Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Dev tool doc #96

Merged
merged 7 commits into from
Jun 20, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 38 additions & 1 deletion agentuniverse/agent/action/knowledge/knowledge.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,10 @@
# @Author : wangchongshi
# @Email : wangchongshi.wcs@antgroup.com
# @FileName: knowledge.py
from typing import Optional, Dict, List, Any
from typing import Optional, Dict, List

from langchain_core.utils.json import parse_json_markdown
from langchain.tools import Tool as LangchainTool

from agentuniverse.agent.action.knowledge.reader.reader import Reader
from agentuniverse.agent.action.knowledge.store.document import Document
Expand Down Expand Up @@ -77,3 +80,37 @@ def initialize_by_component_configer(self, component_configer: KnowledgeConfiger
if component_configer.ext_info:
self.ext_info = component_configer.ext_info
return self

def langchain_query(self, query: str) -> str:
    """Execute a knowledge-store query issued by a LangChain agent.

    Args:
        query (str): a JSON (or JSON-markdown) string whose fields are the
            keyword arguments of ``Query`` (e.g. ``query_str``, ``top_k``).

    Returns:
        str: the retrieved document texts joined by a separator line, or an
        error message when ``query`` is not a valid JSON string.
    """
    try:
        query_kwargs = parse_json_markdown(query)
    except Exception as e:
        # Mirror Agent.langchain_run: report bad tool input back to the
        # agent instead of raising and aborting the ReAct loop.
        return f"Error, your Action Input is not a valid JSON string ({str(e)})"
    # Use distinct names instead of rebinding the `query` parameter.
    store_query = Query(**query_kwargs)
    documents = self.store.query(store_query)
    res = ['This is Query Result']
    for doc in documents:
        res.append(doc.text)
    return "\n=========================================\n".join(res)

def as_langchain_tool(self) -> LangchainTool:
    """Convert this Knowledge object to a LangChain tool.

    Returns:
        LangchainTool: a tool whose ``func`` delegates to ``langchain_query``.
    """
    # The trailing comma after "top_k" was removed from the example: it
    # taught the model to emit invalid JSON that json.loads rejects.
    args_description = """
This is a knowledge base tool, which stores the content you may need. To use this tool, you need to give a json string with the following format:
```json
{
    "query_str": "<your query here>",
    "top_k": <number of results to return>
}
```
"""
    return LangchainTool(
        name=self.name,
        description=self.description + args_description,
        func=self.langchain_query,
    )
36 changes: 36 additions & 0 deletions agentuniverse/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,13 @@
# @Email : lc299034@antgroup.com
# @FileName: agent.py
"""The definition of agent paradigm."""
import json
from abc import abstractmethod
from datetime import datetime
from typing import Optional

from langchain_core.utils.json import parse_json_markdown

from agentuniverse.agent.agent_model import AgentModel
from agentuniverse.agent.input_object import InputObject
from agentuniverse.agent.output_object import OutputObject
Expand All @@ -21,6 +24,7 @@
import ApplicationConfigManager
from agentuniverse.base.config.component_configer.configers.agent_configer \
import AgentConfiger
from agentuniverse.base.util.logging.logging_util import LOGGER
from agentuniverse.llm.llm import LLM


Expand Down Expand Up @@ -156,3 +160,35 @@ def initialize_by_component_configer(self, component_configer: AgentConfiger) ->
plan=plan, memory=memory, action=action)
self.agent_model = agent_model
return self

def langchain_run(self, input: str, callbacks=None, **kwargs):
    """Run the agent from a LangChain tool invocation.

    Args:
        input (str): a JSON (or JSON-markdown) string mapping the agent's
            input keys to their values.
        callbacks: LangChain callbacks forwarded to ``run``.

    Returns:
        dict | str: the agent's output keys mapped to their values, or an
        error message when ``input`` is not a valid JSON string.
    """
    # NOTE: `input` shadows the builtin, but the name is part of the
    # public interface (callers may pass it by keyword), so it is kept.
    try:
        parse_result = parse_json_markdown(input)
    except Exception as e:
        LOGGER.error(f"langchain run parse_json_markdown error,input(parse_result) error({str(e)})")
        return "Error , Your Action Input is not a valid JSON string"
    output_object = self.run(**parse_result, callbacks=callbacks, **kwargs)
    # Collect every declared output key from the OutputObject.
    return {key: output_object.get_data(key) for key in self.output_keys()}

def as_langchain_tool(self):
    """Convert this agent to a LangChain tool.

    Returns:
        Tool: a LangChain tool whose ``func`` delegates to ``langchain_run``.
    """
    # Imported locally to avoid a hard langchain dependency at module
    # import time (and potential circular imports).
    from langchain.agents.tools import Tool
    # Build a JSON skeleton listing every required input key; a dict
    # comprehension replaces the original setdefault loop.
    format_str = json.dumps({key: "input val" for key in self.input_keys()})

    args_description = f"""
    to use this tool,your input must be a json string,must contain all keys of {self.input_keys()},
    and the value of the key must be a json string,the format of the json string is as follows:
    ```{format_str}```
    """
    return Tool(
        name=self.agent_model.info.get("name"),
        func=self.langchain_run,
        description=self.agent_model.info.get("description") + args_description
    )
13 changes: 13 additions & 0 deletions agentuniverse/agent/plan/planner/react_planner/react_planner.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,12 @@
from langchain.tools import Tool as LangchainTool
from langchain_core.runnables import RunnableConfig

from agentuniverse.agent.action.knowledge.knowledge import Knowledge
from agentuniverse.agent.action.knowledge.knowledge_manager import KnowledgeManager
from agentuniverse.agent.action.tool.tool import Tool
from agentuniverse.agent.action.tool.tool_manager import ToolManager
from agentuniverse.agent.agent import Agent
from agentuniverse.agent.agent_manager import AgentManager
from agentuniverse.agent.agent_model import AgentModel
from agentuniverse.agent.input_object import InputObject
from agentuniverse.agent.memory.chat_memory import ChatMemory
Expand Down Expand Up @@ -78,6 +82,15 @@ def acquire_tools(action) -> list[LangchainTool]:
for tool_name in tool_names:
tool: Tool = ToolManager().get_instance_obj(tool_name)
lc_tools.append(tool.as_langchain())
knowledge: list = action.get('knowledge') or list()
for knowledge_name in knowledge:
knowledge_tool: Knowledge = KnowledgeManager().get_instance_obj(knowledge_name)
lc_tools.append(knowledge_tool.as_langchain_tool())

agents: list = action.get('agent') or list()
for agent_name in agents:
agent_tool: Agent = AgentManager().get_instance_obj(agent_name)
lc_tools.append(agent_tool.as_langchain_tool())
return lc_tools

def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> Prompt:
Expand Down
70 changes: 70 additions & 0 deletions agentuniverse/llm/default/deep_seek_openai_style_llm.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
# !/usr/bin/env python3
# -*- coding:utf-8 -*-

# @Time : 2024/5/21 17:49
# @Author : weizjajj
# @Email : weizhongjie.wzj@antgroup.com
# @FileName: deep_seek_openai_style_llm.py

from typing import Optional, Any, Union, Iterator, AsyncIterator

from pydantic import Field

from agentuniverse.base.annotation.trace import trace_llm
from agentuniverse.base.util.env_util import get_from_env
from agentuniverse.llm.llm_output import LLMOutput
from agentuniverse.llm.openai_style_llm import OpenAIStyleLLM

# Maximum context window (in tokens) per supported model name.
# NOTE(review): the claude-* entries look like copy-paste residue from
# claude_llm.py; kept so existing lookups keep working — confirm and remove.
DEEPSEEK_MAX_CONTEXT_LENGTH = {
    "deepseek-chat": 32000,
    "deepseek-coder": 32000,
    "claude-3-haiku-20240307": 200000,
    "claude-2.1": 200000,
    "claude-2.0": 100000,
    "claude-instant-1.2": 100000
}

# Backward-compatible alias for the original (misspelled) constant name.
DEEpSEEkMAXCONTETNLENGTH = DEEPSEEK_MAX_CONTEXT_LENGTH


class DefaultDeepSeekLLM(OpenAIStyleLLM):
    """The agentUniverse default DeepSeek LLM module.

    LLM parameters, such as name/description/model_name/max_tokens,
    are injected into this class by the deep_seek_openai_style_llm.yaml
    configuration.
    """

    # Credentials and endpoint are read from the environment by default.
    api_key: Optional[str] = Field(default_factory=lambda: get_from_env("DEEPSEEK_API_KEY"))
    organization: Optional[str] = Field(default_factory=lambda: get_from_env("DEEPSEEK_ORGANIZATION"))
    api_base: Optional[str] = Field(default_factory=lambda: get_from_env("DEEPSEEK_API_BASE"))
    proxy: Optional[str] = Field(default_factory=lambda: get_from_env("DEEPSEEK_PROXY"))

    @trace_llm
    def call(self, messages: list, **kwargs: Any) -> Union[LLMOutput, Iterator[LLMOutput]]:
        """The call method of the LLM.

        Users can customize how the model interacts by overriding call method of the LLM class.

        Args:
            messages (list): The messages to send to the LLM.
            **kwargs: Arbitrary keyword arguments.
        """
        return super().call(messages, **kwargs)

    @trace_llm
    async def acall(self, messages: list, **kwargs: Any) -> Union[LLMOutput, AsyncIterator[LLMOutput]]:
        """The async call method of the LLM.

        Users can customize how the model interacts by overriding acall method of the LLM class.

        Args:
            messages (list): The messages to send to the LLM.
            **kwargs: Arbitrary keyword arguments.
        """
        return await super().acall(messages, **kwargs)

    def max_context_length(self) -> int:
        """Return the max context length for the configured model.

        Falls back to 4096 tokens for unknown model names.
        """
        return DEEPSEEK_MAX_CONTEXT_LENGTH.get(self.model_name, 4096)

8 changes: 8 additions & 0 deletions agentuniverse/llm/default/deep_seek_openai_style_llm.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Default DeepSeek LLM component definition (OpenAI-compatible API style).
name: 'default_deepseek_llm'
# NOTE(review): "default default_" reads duplicated — confirm intended wording.
description: 'default default_deepseek_llm llm with spi'
model_name: 'deepseek-chat'
max_tokens: 1000
# Component registration metadata consumed by the agentUniverse SPI loader.
metadata:
  type: 'LLM'
  module: 'agentuniverse.llm.default.deep_seek_openai_style_llm'
  class: 'DefaultDeepSeekLLM'
2 changes: 2 additions & 0 deletions agentuniverse/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,8 @@ def initialize_by_component_configer(self, component_configer: LLMConfiger) -> '
if component_configer.ext_info:
self.ext_info = component_configer.ext_info
self.tracing = component_configer.tracing
if 'max_context_length' in component_configer.configer.value:
self._max_context_length = component_configer.configer.value['max_context_length']
return self

def set_by_agent_model(self, **kwargs) -> None:
Expand Down
19 changes: 19 additions & 0 deletions agentuniverse/llm/openai_style_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@
from langchain_core.language_models.base import BaseLanguageModel
from openai import OpenAI, AsyncOpenAI

from agentuniverse.base.config.component_configer.configers.llm_configer import LLMConfiger
from agentuniverse.base.util.env_util import get_from_env
from agentuniverse.llm.llm import LLM, LLMOutput
from agentuniverse.llm.openai_style_langchain_instance import LangchainOpenAIStyleInstance

Expand Down Expand Up @@ -161,6 +163,17 @@ async def agenerate_stream_result(self, stream: AsyncIterator) -> AsyncIterator[

await self.aclose()

def initialize_by_component_configer(self, component_configer: LLMConfiger) -> 'LLM':
    """Initialize the LLM endpoint and credentials from its configuration.

    ``api_base``/``api_key`` may be given literally in the config, or
    indirectly via ``api_base_env``/``api_key_env`` naming an environment
    variable; the literal form takes precedence.

    Args:
        component_configer (LLMConfiger): the component configuration.

    Returns:
        LLM: self, after the parent class finishes initialization.
    """
    # Hoist the repeated attribute chain (also fixes the stray space in
    # `component_configer .configer.value`).
    config: dict = component_configer.configer.value
    if 'api_base' in config:
        self.api_base = config.get('api_base')
    elif 'api_base_env' in config:
        self.api_base = get_from_env(config.get('api_base_env'))
    if 'api_key' in config:
        self.api_key = config.get('api_key')
    elif 'api_key_env' in config:
        self.api_key = get_from_env(config.get('api_key_env'))
    return super().initialize_by_component_configer(component_configer)

def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text.

Expand All @@ -187,3 +200,9 @@ async def aclose(self):
"""Async close the client."""
if hasattr(self, 'async_client') and self.async_client:
await self.async_client.close()

def max_context_length(self) -> int:
    """Return the maximum length of the context.

    Falls back to 4000 when the parent class reports no (or zero)
    context length for the configured model.
    """
    # Call the parent only once instead of twice.
    parent_length = super().max_context_length()
    return parent_length if parent_length else 4000
Loading