diff --git a/docs/tools.md b/docs/tools.md
index 4b40e78818..7205faddba 100644
--- a/docs/tools.md
+++ b/docs/tools.md
@@ -770,7 +770,7 @@ from pydantic_ai.ext.langchain import LangChainToolset
 
 
 toolkit = SlackToolkit()
-toolset = LangChainToolset(toolkit.get_tools())
+toolset = LangChainToolset('slack', toolkit.get_tools())
 
 agent = Agent('openai:gpt-4o', toolsets=[toolset])
 # ...
@@ -818,6 +818,7 @@ from pydantic_ai.ext.aci import ACIToolset
 
 
 toolset = ACIToolset(
+    'open_weather_map',
     [
         'OPEN_WEATHER_MAP__CURRENT_WEATHER',
         'OPEN_WEATHER_MAP__FORECAST',
diff --git a/docs/toolsets.md b/docs/toolsets.md
index 5caac22c09..7a8cbaf9d2 100644
--- a/docs/toolsets.md
+++ b/docs/toolsets.md
@@ -29,9 +29,9 @@ def override_tool():
     return "I override all other tools"
 
 
-agent_toolset = FunctionToolset(tools=[agent_tool])  # (1)!
-extra_toolset = FunctionToolset(tools=[extra_tool])
-override_toolset = FunctionToolset(tools=[override_tool])
+agent_toolset = FunctionToolset('agent', tools=[agent_tool])  # (1)!
+extra_toolset = FunctionToolset('extra', tools=[extra_tool])
+override_toolset = FunctionToolset('override', tools=[override_tool])
 
 test_model = TestModel()  # (2)!
 agent = Agent(test_model, toolsets=[agent_toolset])
@@ -84,7 +84,7 @@ def temperature_fahrenheit(city: str) -> float:
     return 69.8
 
 
-weather_toolset = FunctionToolset(tools=[temperature_celsius, temperature_fahrenheit])
+weather_toolset = FunctionToolset('weather', tools=[temperature_celsius, temperature_fahrenheit])
 
 
 @weather_toolset.tool
@@ -95,7 +95,7 @@ def conditions(ctx: RunContext, city: str) -> str:
         return "It's raining"
 
 
-datetime_toolset = FunctionToolset()
+datetime_toolset = FunctionToolset('datetime')
 datetime_toolset.add_function(lambda: datetime.now(), name='now')
 
 test_model = TestModel()  # (1)!
@@ -417,7 +417,7 @@ test_model = TestModel()  # (1)!
 agent = Agent(
     test_model,
     deps_type=WrapperToolset,  # (2)!
-    toolsets=[togglable_toolset, FunctionToolset([toggle])]
+    toolsets=[togglable_toolset, FunctionToolset('toggle', [toggle])]
 )
 result = agent.run_sync('Toggle the toolset', deps=togglable_toolset)
 print([t.name for t in test_model.last_model_request_parameters.function_tools])  # (3)!
@@ -462,7 +462,7 @@ from pydantic import BaseModel
 from pydantic_ai import Agent
 from pydantic_ai.toolsets.function import FunctionToolset
 
-toolset = FunctionToolset()
+toolset = FunctionToolset('user_info')
 
 
 @toolset.tool
@@ -502,7 +502,7 @@ from pydantic_ai.messages import ModelMessage
 def run_agent(
     messages: list[ModelMessage] = [], frontend_tools: list[ToolDefinition] = {}
 ) -> tuple[Union[PersonalizedGreeting, DeferredToolCalls], list[ModelMessage]]:
-    deferred_toolset = DeferredToolset(frontend_tools)
+    deferred_toolset = DeferredToolset('frontend', frontend_tools)
     result = agent.run_sync(
         toolsets=[deferred_toolset],  # (1)!
         output_type=[agent.output_type, DeferredToolCalls],  # (2)!
@@ -609,7 +609,7 @@ from pydantic_ai.ext.langchain import LangChainToolset
 
 
 toolkit = SlackToolkit()
-toolset = LangChainToolset(toolkit.get_tools())
+toolset = LangChainToolset('slack', toolkit.get_tools())
 
 agent = Agent('openai:gpt-4o', toolsets=[toolset])
 # ...
@@ -629,6 +629,7 @@ from pydantic_ai.ext.aci import ACIToolset
 
 
 toolset = ACIToolset(
+    'open_weather_map',
     [
         'OPEN_WEATHER_MAP__CURRENT_WEATHER',
         'OPEN_WEATHER_MAP__FORECAST',
diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py
index a43d8bda49..cd266daa18 100644
--- a/pydantic_ai_slim/pydantic_ai/ag_ui.py
+++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py
@@ -266,14 +266,15 @@ async def run(
             # Pydantic AI events and actual AG-UI tool names, preventing the tool from being called. If any
             # conflicts arise, the AG-UI tool should be renamed or a `PrefixedToolset` used for local toolsets.
             toolset = DeferredToolset[AgentDepsT](
-                [
+                name='AG-UI frontend tools',
+                tool_defs=[
                     ToolDefinition(
                         name=tool.name,
                         description=tool.description,
                         parameters_json_schema=tool.parameters,
                     )
                     for tool in run_input.tools
-                ]
+                ],
             )
 
             toolsets = [*toolsets, toolset] if toolsets else [toolset]
diff --git a/pydantic_ai_slim/pydantic_ai/agent.py b/pydantic_ai_slim/pydantic_ai/agent.py
index b2e1667ddc..0ae2a3bf6d 100644
--- a/pydantic_ai_slim/pydantic_ai/agent.py
+++ b/pydantic_ai_slim/pydantic_ai/agent.py
@@ -420,7 +420,7 @@ def __init__(
         if self._output_toolset:
             self._output_toolset.max_retries = self._max_result_retries
 
-        self._function_toolset = FunctionToolset(tools, max_retries=retries)
+        self._function_toolset = FunctionToolset('Agent tools', tools, max_retries=retries)
         self._user_toolsets = toolsets or ()
 
         self.history_processors = history_processors or []
diff --git a/pydantic_ai_slim/pydantic_ai/ext/aci.py b/pydantic_ai_slim/pydantic_ai/ext/aci.py
index 6cd43402a1..8c228cf2de 100644
--- a/pydantic_ai_slim/pydantic_ai/ext/aci.py
+++ b/pydantic_ai_slim/pydantic_ai/ext/aci.py
@@ -71,5 +71,5 @@ def implementation(*args: Any, **kwargs: Any) -> str:
 class ACIToolset(FunctionToolset):
     """A toolset that wraps ACI.dev tools."""
 
-    def __init__(self, aci_functions: Sequence[str], linked_account_owner_id: str):
-        super().__init__([tool_from_aci(aci_function, linked_account_owner_id) for aci_function in aci_functions])
+    def __init__(self, name: str, aci_functions: Sequence[str], linked_account_owner_id: str):
+        super().__init__(name, [tool_from_aci(aci_function, linked_account_owner_id) for aci_function in aci_functions])
diff --git a/pydantic_ai_slim/pydantic_ai/ext/langchain.py b/pydantic_ai_slim/pydantic_ai/ext/langchain.py
index 3fb4079386..575c539c22 100644
--- a/pydantic_ai_slim/pydantic_ai/ext/langchain.py
+++ b/pydantic_ai_slim/pydantic_ai/ext/langchain.py
@@ -65,5 +65,5 @@ def proxy(*args: Any, **kwargs: Any) -> str:
 class LangChainToolset(FunctionToolset):
     """A toolset that wraps LangChain tools."""
 
-    def __init__(self, tools: list[LangChainTool]):
-        super().__init__([tool_from_langchain(tool) for tool in tools])
+    def __init__(self, name: str, tools: list[LangChainTool]):
+        super().__init__(name, [tool_from_langchain(tool) for tool in tools])
diff --git a/pydantic_ai_slim/pydantic_ai/mcp.py b/pydantic_ai_slim/pydantic_ai/mcp.py
index 2ca7950b3e..83b79fdead 100644
--- a/pydantic_ai_slim/pydantic_ai/mcp.py
+++ b/pydantic_ai_slim/pydantic_ai/mcp.py
@@ -61,10 +61,12 @@ class MCPServer(AbstractToolset[Any], ABC):
     timeout: float = 5
     process_tool_call: ProcessToolCallback | None = None
     allow_sampling: bool = True
-    max_retries: int = 1
     sampling_model: models.Model | None = None
+    max_retries: int = 1
     # } end of "abstract fields"
 
+    _name: str
+
     _enter_lock: Lock = field(compare=False)
     _running_count: int
     _exit_stack: AsyncExitStack | None
@@ -73,7 +75,26 @@ class MCPServer(AbstractToolset[Any], ABC):
     _read_stream: MemoryObjectReceiveStream[SessionMessage | Exception]
     _write_stream: MemoryObjectSendStream[SessionMessage]
 
-    def __post_init__(self):
+    def __init__(
+        self,
+        tool_prefix: str | None = None,
+        log_level: mcp_types.LoggingLevel | None = None,
+        log_handler: LoggingFnT | None = None,
+        timeout: float = 5,
+        process_tool_call: ProcessToolCallback | None = None,
+        allow_sampling: bool = True,
+        sampling_model: models.Model | None = None,
+        max_retries: int = 1,
+    ):
+        self.tool_prefix = tool_prefix
+        self.log_level = log_level
+        self.log_handler = log_handler
+        self.timeout = timeout
+        self.process_tool_call = process_tool_call
+        self.allow_sampling = allow_sampling
+        self.sampling_model = sampling_model
+        self.max_retries = max_retries
+
         self._enter_lock = Lock()
         self._running_count = 0
         self._exit_stack = None
@@ -94,7 +115,7 @@ async def client_streams(
 
     @property
     def name(self) -> str:
-        return repr(self)
+        return self._name
 
     @property
     def tool_name_conflict_hint(self) -> str:
@@ -294,7 +315,7 @@ def _map_tool_result_part(
         assert_never(part)
 
 
-@dataclass
+@dataclass(init=False)
 class MCPServerStdio(MCPServer):
     """Runs an MCP server in a subprocess and communicates with it over stdin/stdout.
 
@@ -378,11 +399,62 @@ async def main():
     allow_sampling: bool = True
     """Whether to allow MCP sampling through this client."""
 
+    sampling_model: models.Model | None = None
+    """The model to use for sampling."""
+
     max_retries: int = 1
     """The maximum number of times to retry a tool call."""
 
-    sampling_model: models.Model | None = None
-    """The model to use for sampling."""
+    def __init__(
+        self,
+        command: str,
+        args: Sequence[str],
+        env: dict[str, str] | None = None,
+        cwd: str | Path | None = None,
+        name: str | None = None,
+        tool_prefix: str | None = None,
+        log_level: mcp_types.LoggingLevel | None = None,
+        log_handler: LoggingFnT | None = None,
+        timeout: float = 5,
+        process_tool_call: ProcessToolCallback | None = None,
+        allow_sampling: bool = True,
+        sampling_model: models.Model | None = None,
+        max_retries: int = 1,
+    ):
+        """Build a new MCP server.
+
+        Args:
+            command: The command to run.
+            args: The arguments to pass to the command.
+            env: The environment variables to set in the subprocess.
+            cwd: The working directory to use when spawning the process.
+            name: The unique name of the MCP server.
+            tool_prefix: A prefix to add to all tools that are registered with the server.
+            log_level: The log level to set when connecting to the server, if any.
+            log_handler: A handler for logging messages from the server.
+            timeout: The timeout in seconds to wait for the client to initialize.
+            process_tool_call: Hook to customize tool calling and optionally pass extra metadata.
+            allow_sampling: Whether to allow MCP sampling through this client.
+            sampling_model: The model to use for sampling.
+            max_retries: The maximum number of times to retry a tool call.
+ """ + self.command = command + self.args = args + self.env = env + self.cwd = cwd + + self._name = name or tool_prefix or ' '.join([command, *args]) + + super().__init__( + tool_prefix, + log_level, + log_handler, + timeout, + process_tool_call, + allow_sampling, + sampling_model, + max_retries, + ) @asynccontextmanager async def client_streams( @@ -401,7 +473,7 @@ def __repr__(self) -> str: return f'MCPServerStdio(command={self.command!r}, args={self.args!r}, tool_prefix={self.tool_prefix!r})' -@dataclass +@dataclass(init=False) class _MCPServerHTTP(MCPServer): url: str """The URL of the endpoint on the MCP server.""" @@ -479,11 +551,62 @@ class _MCPServerHTTP(MCPServer): allow_sampling: bool = True """Whether to allow MCP sampling through this client.""" + sampling_model: models.Model | None = None + """The model to use for sampling.""" + max_retries: int = 1 """The maximum number of times to retry a tool call.""" - sampling_model: models.Model | None = None - """The model to use for sampling.""" + def __init__( + self, + url: str, + headers: dict[str, Any] | None = None, + http_client: httpx.AsyncClient | None = None, + sse_read_timeout: float = 5 * 60, + name: str | None = None, + tool_prefix: str | None = None, + log_level: mcp_types.LoggingLevel | None = None, + log_handler: LoggingFnT | None = None, + timeout: float = 5, + process_tool_call: ProcessToolCallback | None = None, + allow_sampling: bool = True, + sampling_model: models.Model | None = None, + max_retries: int = 1, + ): + """Build a new MCP server. + + Args: + url: The URL of the endpoint on the MCP server. + headers: Optional HTTP headers to be sent with each request to the endpoint. + http_client: An `httpx.AsyncClient` to use with the endpoint. + sse_read_timeout: Maximum time in seconds to wait for new SSE messages before timing out. + name: The unique name of the MCP server. + tool_prefix: A prefix to add to all tools that are registered with the server. + log_level: The log level to set when connecting to the server, if any. + log_handler: A handler for logging messages from the server. + timeout: The timeout in seconds to wait for the client to initialize. + process_tool_call: Hook to customize tool calling and optionally pass extra metadata. + allow_sampling: Whether to allow MCP sampling through this client. + sampling_model: The model to use for sampling. + max_retries: The maximum number of times to retry a tool call. + """ + self.url = url + self.headers = headers + self.http_client = http_client + self.sse_read_timeout = sse_read_timeout + + self._name = name or tool_prefix or url + + super().__init__( + tool_prefix, + log_level, + log_handler, + timeout, + process_tool_call, + allow_sampling, + sampling_model, + max_retries, + ) @property @abstractmethod @@ -583,7 +706,6 @@ def _transport_client(self): @deprecated('The `MCPServerHTTP` class is deprecated, use `MCPServerSSE` instead.') -@dataclass class MCPServerHTTP(MCPServerSSE): """An MCP server that connects over HTTP using the old SSE transport. @@ -612,7 +734,6 @@ async def main(): """ -@dataclass class MCPServerStreamableHTTP(_MCPServerHTTP): """An MCP server that connects over HTTP using the Streamable HTTP transport. 
diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/abstract.py b/pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
index 455336418f..d3a7e883ef 100644
--- a/pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
+++ b/pydantic_ai_slim/pydantic_ai/toolsets/abstract.py
@@ -71,8 +71,11 @@ class AbstractToolset(ABC, Generic[AgentDepsT]):
 
     @property
     def name(self) -> str:
-        """The name of the toolset for use in error messages."""
-        return self.__class__.__name__.replace('Toolset', ' toolset')
+        """A unique name for the toolset.
+
+        If you're defining a subclass that can be instantiated by a user, you should let them pass a custom name to the constructor and return that here.
+        """
+        raise NotImplementedError()
 
     @property
     def tool_name_conflict_hint(self) -> str:
diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/combined.py b/pydantic_ai_slim/pydantic_ai/toolsets/combined.py
index 4b1511fae1..fdd29e4a00 100644
--- a/pydantic_ai_slim/pydantic_ai/toolsets/combined.py
+++ b/pydantic_ai_slim/pydantic_ai/toolsets/combined.py
@@ -40,6 +40,10 @@ def __post_init__(self):
         self._entered_count = 0
         self._exit_stack = None
 
+    @property
+    def name(self) -> str:
+        return f'{self.__class__.__name__}({", ".join(toolset.name for toolset in self.toolsets)})'
+
     async def __aenter__(self) -> Self:
         async with self._enter_lock:
             if self._entered_count == 0:
diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/deferred.py b/pydantic_ai_slim/pydantic_ai/toolsets/deferred.py
index 3ad2e976ba..ede0f5e08f 100644
--- a/pydantic_ai_slim/pydantic_ai/toolsets/deferred.py
+++ b/pydantic_ai_slim/pydantic_ai/toolsets/deferred.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from dataclasses import dataclass, replace
+from dataclasses import replace
 from typing import Any
 
 from pydantic_core import SchemaValidator, core_schema
@@ -12,7 +12,6 @@
 TOOL_SCHEMA_VALIDATOR = SchemaValidator(schema=core_schema.any_schema())
 
 
-@dataclass
 class DeferredToolset(AbstractToolset[AgentDepsT]):
     """A toolset that holds deferred tools whose results will be produced outside of the Pydantic AI agent run in which they were called.
 
@@ -21,6 +20,14 @@ class DeferredToolset(AbstractToolset[AgentDepsT]):
 
     tool_defs: list[ToolDefinition]
 
+    def __init__(self, name: str, tool_defs: list[ToolDefinition]):
+        self._name = name
+        self.tool_defs = tool_defs
+
+    @property
+    def name(self) -> str:
+        return self._name
+
     async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
         return {
             tool_def.name: ToolsetTool(
diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/function.py b/pydantic_ai_slim/pydantic_ai/toolsets/function.py
index 63f44a1f0c..b31b6bc030 100644
--- a/pydantic_ai_slim/pydantic_ai/toolsets/function.py
+++ b/pydantic_ai_slim/pydantic_ai/toolsets/function.py
@@ -36,13 +36,17 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
     max_retries: int = field(default=1)
     tools: dict[str, Tool[Any]] = field(default_factory=dict)
 
-    def __init__(self, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = [], max_retries: int = 1):
+    def __init__(
+        self, name: str, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = [], max_retries: int = 1
+    ):
         """Build a new function toolset.
 
         Args:
+            name: The unique name of the toolset.
             tools: The tools to add to the toolset.
             max_retries: The maximum number of retries for each tool during a run.
""" + self._name = name self.max_retries = max_retries self.tools = {} for tool in tools: @@ -51,6 +55,10 @@ def __init__(self, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, else: self.add_function(tool) + @property + def name(self) -> str: + return self._name + @overload def tool(self, func: ToolFuncEither[AgentDepsT, ToolParams], /) -> ToolFuncEither[AgentDepsT, ToolParams]: ... @@ -96,7 +104,7 @@ def tool( from pydantic_ai import Agent, RunContext from pydantic_ai.toolsets.function import FunctionToolset - toolset = FunctionToolset() + toolset = FunctionToolset('foobar_spam') @toolset.tool def foobar(ctx: RunContext[int], x: int) -> int: diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/wrapper.py b/pydantic_ai_slim/pydantic_ai/toolsets/wrapper.py index 8440f1c466..0c5a839cef 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/wrapper.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/wrapper.py @@ -18,6 +18,10 @@ class WrapperToolset(AbstractToolset[AgentDepsT]): wrapped: AbstractToolset[AgentDepsT] + @property + def name(self) -> str: + return f'{self.__class__.__name__}({self.wrapped.name})' + async def __aenter__(self) -> Self: await self.wrapped.__aenter__() return self diff --git a/tests/ext/test_langchain.py b/tests/ext/test_langchain.py index 926a228194..f337fb82ce 100644 --- a/tests/ext/test_langchain.py +++ b/tests/ext/test_langchain.py @@ -77,7 +77,7 @@ def test_langchain_tool_conversion(): def test_langchain_toolset(): - toolset = LangChainToolset([langchain_tool]) + toolset = LangChainToolset('langchain', [langchain_tool]) agent = Agent('test', toolsets=[toolset], retries=7) result = agent.run_sync('foobar') assert result.output == snapshot("{\"file_search\":\"I was called with {'dir_path': '.', 'pattern': 'a'}\"}") diff --git a/tests/test_agent.py b/tests/test_agent.py index a5a9285d52..b29cae2130 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -3652,7 +3652,7 @@ def test_deprecated_kwargs_mixed_valid_invalid(): def test_override_toolsets(): - foo_toolset = FunctionToolset() + foo_toolset = FunctionToolset('foo') @foo_toolset.tool def foo() -> str: @@ -3675,7 +3675,7 @@ def baz() -> str: assert available_tools[-1] == snapshot(['baz', 'foo']) assert result.output == snapshot('{"baz":"Hello from baz","foo":"Hello from foo"}') - bar_toolset = FunctionToolset() + bar_toolset = FunctionToolset('bar') @bar_toolset.tool def bar() -> str: @@ -3702,7 +3702,7 @@ def bar() -> str: def test_adding_tools_during_run(): - toolset = FunctionToolset() + toolset = FunctionToolset('foo') def foo() -> str: return 'Hello from foo' diff --git a/tests/test_tools.py b/tests/test_tools.py index c72cd1e086..13d94a77c5 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -605,7 +605,7 @@ def tool(x: int) -> int: def foo_tool(x: str) -> str: return x + 'foo' # pragma: no cover - function_toolset = FunctionToolset([tool]) + function_toolset = FunctionToolset('toolset', [tool]) prefixed_toolset = PrefixedToolset(function_toolset, 'foo') Agent('test', tools=[foo_tool], toolsets=[prefixed_toolset]).run_sync('') @@ -1199,13 +1199,14 @@ def infinite_retry_tool(ctx: RunContext[None]) -> int: def test_deferred_tool(): deferred_toolset = DeferredToolset( + 'deferred', [ ToolDefinition( name='my_tool', description='', parameters_json_schema={'type': 'object', 'properties': {'x': {'type': 'integer'}}, 'required': ['x']}, ), - ] + ], ) agent = Agent(TestModel(), output_type=[str, DeferredToolCalls], toolsets=[deferred_toolset]) @@ -1234,13 +1235,14 @@ class MyModel(BaseModel): 
         foo: str
 
     deferred_toolset = DeferredToolset(
+        'deferred',
         [
            ToolDefinition(
                name='my_tool',
                description='',
                parameters_json_schema={'type': 'object', 'properties': {'x': {'type': 'integer'}}, 'required': ['x']},
            ),
-        ]
+        ],
     )
 
     agent = Agent(TestModel(call_tools=[]), output_type=[MyModel, DeferredToolCalls], toolsets=[deferred_toolset])
@@ -1253,13 +1255,14 @@ class MyModel(BaseModel):
         foo: str
 
     deferred_toolset = DeferredToolset(
+        'deferred',
         [
            ToolDefinition(
                name='my_tool',
                description='',
                parameters_json_schema={'type': 'object', 'properties': {'x': {'type': 'integer'}}, 'required': ['x']},
            ),
-        ]
+        ],
     )
     agent = Agent(
         TestModel(call_tools=[]),
@@ -1273,13 +1276,14 @@ class MyModel(BaseModel):
 
 async def test_deferred_tool_without_output_type():
     deferred_toolset = DeferredToolset(
+        'deferred',
         [
            ToolDefinition(
                name='my_tool',
                description='',
                parameters_json_schema={'type': 'object', 'properties': {'x': {'type': 'integer'}}, 'required': ['x']},
            ),
-        ]
+        ],
     )
 
     agent = Agent(TestModel(), toolsets=[deferred_toolset])
diff --git a/tests/test_toolsets.py b/tests/test_toolsets.py
index eac0dc78a7..92003abb74 100644
--- a/tests/test_toolsets.py
+++ b/tests/test_toolsets.py
@@ -41,7 +41,7 @@ async def test_function_toolset():
     class PrefixDeps:
         prefix: str | None = None
 
-    toolset = FunctionToolset[PrefixDeps]()
+    toolset = FunctionToolset[PrefixDeps]('tools')
 
     async def prepare_add_prefix(ctx: RunContext[PrefixDeps], tool_def: ToolDefinition) -> ToolDefinition | None:
         if ctx.deps.prefix is None:
@@ -127,7 +127,7 @@ def subtract(a: int, b: int) -> int:
 async def test_prepared_toolset_user_error_add_new_tools():
     """Test that PreparedToolset raises UserError when prepare function tries to add new tools."""
     context = build_run_context(None)
-    base_toolset = FunctionToolset[None]()
+    base_toolset = FunctionToolset[None]('tools')
 
     @base_toolset.tool
     def add(a: int, b: int) -> int:
@@ -162,7 +162,7 @@ async def prepare_add_new_tool(ctx: RunContext[None], tool_defs: list[ToolDefini
 async def test_prepared_toolset_user_error_change_tool_names():
     """Test that PreparedToolset raises UserError when prepare function tries to change tool names."""
     context = build_run_context(None)
-    base_toolset = FunctionToolset[None]()
+    base_toolset = FunctionToolset[None]('tools')
 
     @base_toolset.tool
     def add(a: int, b: int) -> int:
@@ -204,7 +204,7 @@ class TestDeps:
     enable_advanced: bool = True
 
     # Create first FunctionToolset with basic math operations
-    math_toolset = FunctionToolset[TestDeps]()
+    math_toolset = FunctionToolset[TestDeps]('math')
 
     @math_toolset.tool
     def add(a: int, b: int) -> int:
@@ -222,7 +222,7 @@ def multiply(a: int, b: int) -> int:
         return a * b  # pragma: no cover
 
     # Create second FunctionToolset with string operations
-    string_toolset = FunctionToolset[TestDeps]()
+    string_toolset = FunctionToolset[TestDeps]('string')
 
     @string_toolset.tool
     def concat(s1: str, s2: str) -> str:
@@ -240,7 +240,7 @@ def reverse(text: str) -> str:
         return text[::-1]  # pragma: no cover
 
     # Create third FunctionToolset with advanced operations
-    advanced_toolset = FunctionToolset[TestDeps]()
+    advanced_toolset = FunctionToolset[TestDeps]('advanced')
 
     @advanced_toolset.tool
     def power(base: int, exponent: int) -> int:
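
Reviewer note (not part of the patch): a minimal usage sketch of the named-toolset API introduced above, assuming only the constructor signatures shown in this diff; the toolset name, tool body, and the 'test' model are illustrative.

from pydantic_ai import Agent
from pydantic_ai.toolsets.function import FunctionToolset

# After this change, every toolset takes a required, unique name as its first
# argument, exposed through the `name` property (previously derived from the class name).
weather_toolset = FunctionToolset('weather')


@weather_toolset.tool
def temperature_celsius(city: str) -> float:
    return 21.0  # illustrative value


agent = Agent('test', toolsets=[weather_toolset])
result = agent.run_sync('What is the temperature in London?')
print(result.output)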