From f62479306a5842f4561073d55ae595c0a4e8e21c Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 16 Jul 2024 13:37:53 -0700 Subject: [PATCH] Update custom tool use in docs (#6091) --- examples/src/agents/custom_tool.ts | 141 ------------- examples/src/agents/handle_parsing_error.ts | 83 -------- examples/src/agents/openai_tools_runnable.ts | 77 ------- .../src/agents/structured_chat_runnable.ts | 189 ------------------ .../src/models/chat/chat_mistralai_agents.ts | 6 +- .../src/models/chat/chat_vertexai_agents.ts | 6 +- ...ntegration_anthropic_tool_calling_agent.ts | 5 +- examples/src/use_cases/tool_use/agents.ts | 78 -------- .../src/use_cases/tool_use/multiple_tools.ts | 96 --------- examples/src/use_cases/tool_use/parallel.ts | 101 ---------- .../use_cases/tool_use/quickstart_agents.ts | 77 ------- .../use_cases/tool_use/quickstart_chains.ts | 62 ------ .../tool_use/tool_error_handling_fallbacks.ts | 74 ------- .../tool_use/tool_error_handling_intro.ts | 50 ----- 14 files changed, 8 insertions(+), 1037 deletions(-) delete mode 100644 examples/src/agents/custom_tool.ts delete mode 100644 examples/src/agents/handle_parsing_error.ts delete mode 100644 examples/src/agents/openai_tools_runnable.ts delete mode 100644 examples/src/agents/structured_chat_runnable.ts delete mode 100644 examples/src/use_cases/tool_use/agents.ts delete mode 100644 examples/src/use_cases/tool_use/multiple_tools.ts delete mode 100644 examples/src/use_cases/tool_use/parallel.ts delete mode 100644 examples/src/use_cases/tool_use/quickstart_agents.ts delete mode 100644 examples/src/use_cases/tool_use/quickstart_chains.ts delete mode 100644 examples/src/use_cases/tool_use/tool_error_handling_fallbacks.ts delete mode 100644 examples/src/use_cases/tool_use/tool_error_handling_intro.ts diff --git a/examples/src/agents/custom_tool.ts b/examples/src/agents/custom_tool.ts deleted file mode 100644 index c344ad988c47..000000000000 --- a/examples/src/agents/custom_tool.ts +++ /dev/null @@ -1,141 +0,0 @@ -import { ChatOpenAI } from "@langchain/openai"; -import type { ChatPromptTemplate } from "@langchain/core/prompts"; -import { createOpenAIFunctionsAgent, AgentExecutor } from "langchain/agents"; -import { pull } from "langchain/hub"; - -import { z } from "zod"; -import { DynamicTool, DynamicStructuredTool } from "@langchain/core/tools"; - -const llm = new ChatOpenAI({ - model: "gpt-3.5-turbo", - temperature: 0, -}); - -const tools = [ - new DynamicTool({ - name: "FOO", - description: - "call this to get the value of foo. 
input should be an empty string.", - func: async () => "baz", - }), - new DynamicStructuredTool({ - name: "random-number-generator", - description: "generates a random number between two input numbers", - schema: z.object({ - low: z.number().describe("The lower bound of the generated number"), - high: z.number().describe("The upper bound of the generated number"), - }), - func: async ({ low, high }) => - (Math.random() * (high - low) + low).toString(), // Outputs still must be strings - }), -]; - -// Get the prompt to use - you can modify this!\ -// If you want to see the prompt in full, you can at: -// https://smith.langchain.com/hub/hwchase17/openai-functions-agent -const prompt = await pull( - "hwchase17/openai-functions-agent" -); - -const agent = await createOpenAIFunctionsAgent({ - llm, - tools, - prompt, -}); - -const agentExecutor = new AgentExecutor({ - agent, - tools, - verbose: true, -}); - -const result = await agentExecutor.invoke({ - input: `What is the value of foo?`, -}); - -console.log(`Got output ${result.output}`); - -/* - [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { - "input": "What is the value of foo?" - } - [agent/action] [1:chain:AgentExecutor] Agent selected action: { - "tool": "FOO", - "toolInput": {}, - "log": "Invoking \"FOO\" with {}\n", - "messageLog": [ - { - "lc": 1, - "type": "constructor", - "id": [ - "langchain_core", - "messages", - "AIMessage" - ], - "kwargs": { - "content": "", - "additional_kwargs": { - "function_call": { - "name": "FOO", - "arguments": "{}" - } - } - } - } - ] - } - [tool/start] [1:chain:AgentExecutor > 8:tool:FOO] Entering Tool run with input: "undefined" - [tool/end] [1:chain:AgentExecutor > 8:tool:FOO] [113ms] Exiting Tool run with output: "baz" - [chain/end] [1:chain:AgentExecutor] [3.36s] Exiting Chain run with output: { - "input": "What is the value of foo?", - "output": "The value of foo is \"baz\"." - } - Got output The value of foo is "baz". -*/ - -const result2 = await agentExecutor.invoke({ - input: `Generate a random number between 1 and 10.`, -}); - -console.log(`Got output ${result2.output}`); - -/* - [chain/start] [1:chain:AgentExecutor] Entering Chain run with input: { - "input": "Generate a random number between 1 and 10." - } - [agent/action] [1:chain:AgentExecutor] Agent selected action: { - "tool": "random-number-generator", - "toolInput": { - "low": 1, - "high": 10 - }, - "log": "Invoking \"random-number-generator\" with {\n \"low\": 1,\n \"high\": 10\n}\n", - "messageLog": [ - { - "lc": 1, - "type": "constructor", - "id": [ - "langchain_core", - "messages", - "AIMessage" - ], - "kwargs": { - "content": "", - "additional_kwargs": { - "function_call": { - "name": "random-number-generator", - "arguments": "{\n \"low\": 1,\n \"high\": 10\n}" - } - } - } - } - ] - } - [tool/start] [1:chain:AgentExecutor > 8:tool:random-number-generator] Entering Tool run with input: "{"low":1,"high":10}" - [tool/end] [1:chain:AgentExecutor > 8:tool:random-number-generator] [58ms] Exiting Tool run with output: "2.4757639017769293" - [chain/end] [1:chain:AgentExecutor] [3.32s] Exiting Chain run with output: { - "input": "Generate a random number between 1 and 10.", - "output": "The random number generated between 1 and 10 is 2.476." - } - Got output The random number generated between 1 and 10 is 2.476. 
-*/ diff --git a/examples/src/agents/handle_parsing_error.ts b/examples/src/agents/handle_parsing_error.ts deleted file mode 100644 index 2cdd838e363d..000000000000 --- a/examples/src/agents/handle_parsing_error.ts +++ /dev/null @@ -1,83 +0,0 @@ -import { z } from "zod"; -import type { ChatPromptTemplate } from "@langchain/core/prompts"; -import { ChatOpenAI } from "@langchain/openai"; -import { AgentExecutor, createOpenAIFunctionsAgent } from "langchain/agents"; -import { pull } from "langchain/hub"; -import { DynamicStructuredTool } from "@langchain/core/tools"; - -const model = new ChatOpenAI({ temperature: 0.1 }); -const tools = [ - new DynamicStructuredTool({ - name: "task-scheduler", - description: "Schedules tasks", - schema: z - .object({ - tasks: z - .array( - z.object({ - title: z - .string() - .describe("The title of the tasks, reminders and alerts"), - due_date: z - .string() - .describe("Due date. Must be a valid JavaScript date string"), - task_type: z - .enum([ - "Call", - "Message", - "Todo", - "In-Person Meeting", - "Email", - "Mail", - "Text", - "Open House", - ]) - .describe("The type of task"), - }) - ) - .describe("The JSON for task, reminder or alert to create"), - }) - .describe("JSON definition for creating tasks, reminders and alerts"), - func: async (input: { tasks: object }) => JSON.stringify(input), - }), -]; - -// Get the prompt to use - you can modify this! -// If you want to see the prompt in full, you can at: -// https://smith.langchain.com/hub/hwchase17/openai-functions-agent -const prompt = await pull( - "hwchase17/openai-functions-agent" -); - -const agent = await createOpenAIFunctionsAgent({ - llm: model, - tools, - prompt, -}); - -const agentExecutor = new AgentExecutor({ - agent, - tools, - verbose: true, - handleParsingErrors: - "Please try again, paying close attention to the allowed enum values", -}); - -console.log("Loaded agent."); - -const input = `Set a reminder to renew our online property ads next week.`; - -console.log(`Executing with input "${input}"...`); - -const result = await agentExecutor.invoke({ input }); - -console.log({ result }); - -/* - { - result: { - input: 'Set a reminder to renew our online property ads next week.', - output: 'I have set a reminder for you to renew your online property ads on October 10th, 2022.' 
- } - } -*/ diff --git a/examples/src/agents/openai_tools_runnable.ts b/examples/src/agents/openai_tools_runnable.ts deleted file mode 100644 index d1619d6af374..000000000000 --- a/examples/src/agents/openai_tools_runnable.ts +++ /dev/null @@ -1,77 +0,0 @@ -import { z } from "zod"; -import { ChatOpenAI } from "@langchain/openai"; -import { Calculator } from "@langchain/community/tools/calculator"; -import { AgentExecutor } from "langchain/agents"; -import { formatToOpenAIToolMessages } from "langchain/agents/format_scratchpad/openai_tools"; -import { convertToOpenAITool } from "@langchain/core/utils/function_calling"; -import { - OpenAIToolsAgentOutputParser, - type ToolsAgentStep, -} from "langchain/agents/openai/output_parser"; -import { - ChatPromptTemplate, - MessagesPlaceholder, -} from "@langchain/core/prompts"; -import { RunnableSequence } from "@langchain/core/runnables"; -import { DynamicStructuredTool } from "@langchain/core/tools"; - -const model = new ChatOpenAI({ - model: "gpt-3.5-turbo-1106", - temperature: 0, -}); - -const weatherTool = new DynamicStructuredTool({ - name: "get_current_weather", - description: "Get the current weather in a given location", - func: async ({ location }) => { - if (location.toLowerCase().includes("tokyo")) { - return JSON.stringify({ location, temperature: "10", unit: "celsius" }); - } else if (location.toLowerCase().includes("san francisco")) { - return JSON.stringify({ - location, - temperature: "72", - unit: "fahrenheit", - }); - } else { - return JSON.stringify({ location, temperature: "22", unit: "celsius" }); - } - }, - schema: z.object({ - location: z.string().describe("The city and state, e.g. San Francisco, CA"), - unit: z.enum(["celsius", "fahrenheit"]), - }), -}); - -const tools = [new Calculator(), weatherTool]; - -// Convert to OpenAI tool format -const modelWithTools = model.bind({ tools: tools.map(convertToOpenAITool) }); - -const prompt = ChatPromptTemplate.fromMessages([ - ["ai", "You are a helpful assistant"], - ["human", "{input}"], - new MessagesPlaceholder("agent_scratchpad"), -]); - -const runnableAgent = RunnableSequence.from([ - { - input: (i: { input: string; steps: ToolsAgentStep[] }) => i.input, - agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) => - formatToOpenAIToolMessages(i.steps), - }, - prompt, - modelWithTools, - new OpenAIToolsAgentOutputParser(), -]).withConfig({ runName: "OpenAIToolsAgent" }); - -const executor = AgentExecutor.fromAgentAndTools({ - agent: runnableAgent, - tools, -}); - -const res = await executor.invoke({ - input: - "What is the sum of the current temperature in San Francisco, New York, and Tokyo?", -}); - -console.log(res); diff --git a/examples/src/agents/structured_chat_runnable.ts b/examples/src/agents/structured_chat_runnable.ts deleted file mode 100644 index aeddfb30e312..000000000000 --- a/examples/src/agents/structured_chat_runnable.ts +++ /dev/null @@ -1,189 +0,0 @@ -import { z } from "zod"; -import { ChatOpenAI } from "@langchain/openai"; -import { - AgentExecutor, - StructuredChatOutputParserWithRetries, -} from "langchain/agents"; -import { Calculator } from "@langchain/community/tools/calculator"; -import { renderTextDescriptionAndArgs } from "langchain/tools/render"; -import { formatLogToString } from "langchain/agents/format_scratchpad/log"; -import { - ChatPromptTemplate, - HumanMessagePromptTemplate, - PromptTemplate, - SystemMessagePromptTemplate, -} from "@langchain/core/prompts"; -import { RunnableSequence } from "@langchain/core/runnables"; -import { 
AgentStep } from "@langchain/core/agents"; -import { DynamicStructuredTool } from "@langchain/core/tools"; - -/** - * Need: - * memory - * multi input tools - */ - -/** Define the chat model. */ -const model = new ChatOpenAI({ temperature: 0 }).bind({ - stop: ["\nObservation:"], -}); -/** Define your list of tools, including the `DynamicStructuredTool` */ -const tools = [ - new Calculator(), // Older existing single input tools will still work - new DynamicStructuredTool({ - name: "random-number-generator", - description: "generates a random number between two input numbers", - schema: z.object({ - low: z.number().describe("The lower bound of the generated number"), - high: z.number().describe("The upper bound of the generated number"), - }), - func: async ({ low, high }) => - (Math.random() * (high - low) + low).toString(), // Outputs still must be strings - returnDirect: false, // This is an option that allows the tool to return the output directly - }), -]; -const toolNames = tools.map((tool) => tool.name); - -/** - * Create your prompt. - * Here we'll use three prompt strings: prefix, format instructions and suffix. - * With these we'll format the prompt with the tool schemas and names. - */ -const PREFIX = `Answer the following questions truthfully and as best you can.`; -const AGENT_ACTION_FORMAT_INSTRUCTIONS = `Output a JSON markdown code snippet containing a valid JSON blob (denoted below by $JSON_BLOB). -This $JSON_BLOB must have a "action" key (with the name of the tool to use) and an "action_input" key (tool input). - -Valid "action" values: "Final Answer" (which you must use when giving your final response to the user), or one of [{tool_names}]. - -The $JSON_BLOB must be valid, parseable JSON and only contain a SINGLE action. Here is an example of an acceptable output: - -\`\`\`json -{{ - "action": $TOOL_NAME, - "action_input": $INPUT -}} -\`\`\` - -Remember to include the surrounding markdown code snippet delimiters (begin with "\`\`\`" json and close with "\`\`\`")! -`; -const FORMAT_INSTRUCTIONS = `You have access to the following tools. -You must format your inputs to these tools to match their "JSON schema" definitions below. - -"JSON Schema" is a declarative language that allows you to annotate and validate JSON documents. - -For example, the example "JSON Schema" instance {{"properties": {{"foo": {{"description": "a list of test words", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}}} -would match an object with one required property, "foo". The "type" property specifies "foo" must be an "array", and the "description" property semantically describes it as "a list of test words". The items within "foo" must be strings. -Thus, the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of this example "JSON Schema". The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted. - -Here are the JSON Schema instances for the tools you have access to: - -{tool_schemas} - -The way you use the tools is as follows: - ------------------------- - -${AGENT_ACTION_FORMAT_INSTRUCTIONS} - -If you are using a tool, "action_input" must adhere to the tool's input schema, given above. - ------------------------- - -ALWAYS use the following format: - -Question: the input question you must answer -Thought: you should always think about what to do -Action: -\`\`\`json -$JSON_BLOB -\`\`\` -Observation: the result of the action -... 
(this Thought/Action/Observation can repeat N times) -Thought: I now know the final answer -Action: -\`\`\`json -{{ - "action": "Final Answer", - "action_input": "Final response to human" -}} -\`\`\``; -const SUFFIX = `Begin! Reminder to ALWAYS use the above format, and to use tools if appropriate.`; -const inputVariables = ["input", "agent_scratchpad"]; -const template = [ - PREFIX, - FORMAT_INSTRUCTIONS, - SUFFIX, - `Thoughts: {agent_scratchpad}`, -].join("\n\n"); -const humanMessageTemplate = "{input}"; -const messages = [ - new SystemMessagePromptTemplate( - new PromptTemplate({ - template, - inputVariables, - partialVariables: { - tool_schemas: renderTextDescriptionAndArgs(tools), - tool_names: toolNames.join(", "), - }, - }) - ), - new HumanMessagePromptTemplate( - new PromptTemplate({ - template: humanMessageTemplate, - inputVariables, - }) - ), -]; -const prompt = ChatPromptTemplate.fromMessages(messages); - -/** - * Now we can create our output parser. - * For this, we'll use the pre-built `StructuredChatOutputParserWithRetries` - * - * @important This step is very important and not to be overlooked for one main reason: retries. - * If the agent fails to produce a valid output, it will preform retries to try and coerce the agent - * into producing a valid output. - * - * @important You can not pass in the same model we're using in the executor since it has stop tokens - * bound to it, and the implementation of `StructuredChatOutputParserWithRetries.fromLLM` does not accept - * LLMs of this type. - */ -const outputParser = StructuredChatOutputParserWithRetries.fromLLM( - new ChatOpenAI({ temperature: 0 }), - { - toolNames, - } -); - -/** - * Finally, construct the runnable agent using a - * `RunnableSequence` and pass it to the agent executor - */ -const runnableAgent = RunnableSequence.from([ - { - input: (i: { input: string; steps: AgentStep[] }) => i.input, - agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => - formatLogToString(i.steps), - }, - prompt, - model, - outputParser, -]); - -const executor = AgentExecutor.fromAgentAndTools({ - agent: runnableAgent, - tools, -}); - -console.log("Loaded agent."); - -const input = `What is a random number between 5 and 10 raised to the second power?`; -console.log(`Executing with input "${input}"...`); -const result = await executor.invoke({ input }); -console.log(result); - -/* -Loaded agent. -Executing with input "What is a random number between 5 and 10 raised to the second power?"... -{ output: '67.02412461717323' } -*/ diff --git a/examples/src/models/chat/chat_mistralai_agents.ts b/examples/src/models/chat/chat_mistralai_agents.ts index 1ac89ce6762d..11c69ab44639 100644 --- a/examples/src/models/chat/chat_mistralai_agents.ts +++ b/examples/src/models/chat/chat_mistralai_agents.ts @@ -1,7 +1,7 @@ import { z } from "zod"; import { ChatMistralAI } from "@langchain/mistralai"; -import { DynamicStructuredTool } from "@langchain/core/tools"; +import { tool } from "@langchain/core/tools"; import { AgentExecutor, createToolCallingAgent } from "langchain/agents"; import { ChatPromptTemplate } from "@langchain/core/prompts"; @@ -19,13 +19,13 @@ const prompt = ChatPromptTemplate.fromMessages([ ["placeholder", "{agent_scratchpad}"], ]); -const currentWeatherTool = new DynamicStructuredTool({ +// Mocked tool +const currentWeatherTool = tool(async () => "28 °C", { name: "get_current_weather", description: "Get the current weather in a given location", schema: z.object({ location: z.string().describe("The city and state, e.g. 
San Francisco, CA"), }), - func: async () => Promise.resolve("28 °C"), }); const agent = await createToolCallingAgent({ diff --git a/examples/src/models/chat/chat_vertexai_agents.ts b/examples/src/models/chat/chat_vertexai_agents.ts index 46fc96fe35de..7bbe9d7a23d2 100644 --- a/examples/src/models/chat/chat_vertexai_agents.ts +++ b/examples/src/models/chat/chat_vertexai_agents.ts @@ -1,6 +1,6 @@ import { z } from "zod"; -import { DynamicStructuredTool } from "@langchain/core/tools"; +import { tool } from "@langchain/core/tools"; import { AgentExecutor, createToolCallingAgent } from "langchain/agents"; import { ChatPromptTemplate } from "@langchain/core/prompts"; @@ -20,13 +20,13 @@ const prompt = ChatPromptTemplate.fromMessages([ ["placeholder", "{agent_scratchpad}"], ]); -const currentWeatherTool = new DynamicStructuredTool({ +// Mocked tool +const currentWeatherTool = tool(async () => "28 °C", { name: "get_current_weather", description: "Get the current weather in a given location", schema: z.object({ location: z.string().describe("The city and state, e.g. San Francisco, CA"), }), - func: async () => Promise.resolve("28 °C"), }); const agent = await createToolCallingAgent({ diff --git a/examples/src/models/chat/integration_anthropic_tool_calling_agent.ts b/examples/src/models/chat/integration_anthropic_tool_calling_agent.ts index 522bba75dcd2..ed95269a33df 100644 --- a/examples/src/models/chat/integration_anthropic_tool_calling_agent.ts +++ b/examples/src/models/chat/integration_anthropic_tool_calling_agent.ts @@ -1,7 +1,7 @@ import { z } from "zod"; import { ChatAnthropic } from "@langchain/anthropic"; -import { DynamicStructuredTool } from "@langchain/core/tools"; +import { tool } from "@langchain/core/tools"; import { AgentExecutor, createToolCallingAgent } from "langchain/agents"; import { ChatPromptTemplate } from "@langchain/core/prompts"; @@ -19,13 +19,12 @@ const prompt = ChatPromptTemplate.fromMessages([ ["placeholder", "{agent_scratchpad}"], ]); -const currentWeatherTool = new DynamicStructuredTool({ +const currentWeatherTool = tool(async () => "28 °C", { name: "get_current_weather", description: "Get the current weather in a given location", schema: z.object({ location: z.string().describe("The city and state, e.g. 
San Francisco, CA"), }), - func: async () => Promise.resolve("28 °C"), }); const agent = await createToolCallingAgent({ diff --git a/examples/src/use_cases/tool_use/agents.ts b/examples/src/use_cases/tool_use/agents.ts deleted file mode 100644 index 7ff0bf671ba5..000000000000 --- a/examples/src/use_cases/tool_use/agents.ts +++ /dev/null @@ -1,78 +0,0 @@ -/* eslint-disable import/first */ -/* eslint-disable arrow-body-style */ - -import { z } from "zod"; -import { DynamicStructuredTool } from "@langchain/core/tools"; - -const addTool = new DynamicStructuredTool({ - name: "add", - description: "Add two integers together.", - schema: z.object({ - firstInt: z.number(), - secondInt: z.number(), - }), - func: async ({ firstInt, secondInt }) => { - return (firstInt + secondInt).toString(); - }, -}); - -const multiplyTool = new DynamicStructuredTool({ - name: "multiply", - description: "Multiply two integers together.", - schema: z.object({ - firstInt: z.number(), - secondInt: z.number(), - }), - func: async ({ firstInt, secondInt }) => { - return (firstInt * secondInt).toString(); - }, -}); - -const exponentiateTool = new DynamicStructuredTool({ - name: "exponentiate", - description: "Exponentiate the base to the exponent power.", - schema: z.object({ - base: z.number(), - exponent: z.number(), - }), - func: async ({ base, exponent }) => { - return (base ** exponent).toString(); - }, -}); - -const tools = [addTool, multiplyTool, exponentiateTool]; - -import { pull } from "langchain/hub"; -import type { ChatPromptTemplate } from "@langchain/core/prompts"; - -// Get the prompt to use - you can modify this! -// You can also see the full prompt at: -// https://smith.langchain.com/hub/hwchase17/openai-tools-agent -const prompt = await pull("hwchase17/openai-tools-agent"); - -import { ChatOpenAI } from "@langchain/openai"; -import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents"; - -const model = new ChatOpenAI({ - model: "gpt-3.5-turbo-1106", - temperature: 0, -}); - -const agent = await createOpenAIToolsAgent({ - llm: model, - tools, - prompt, -}); - -const agentExecutor = new AgentExecutor({ - agent, - tools, - verbose: true, -}); - -console.log( - await agentExecutor.invoke({ - input: - "Take 3 to the fifth power and multiply that by the sum of twelve and three, then square the whole result", - }) -); diff --git a/examples/src/use_cases/tool_use/multiple_tools.ts b/examples/src/use_cases/tool_use/multiple_tools.ts deleted file mode 100644 index 3b2fd5ab5287..000000000000 --- a/examples/src/use_cases/tool_use/multiple_tools.ts +++ /dev/null @@ -1,96 +0,0 @@ -/* eslint-disable import/first */ -/* eslint-disable arrow-body-style */ - -import { z } from "zod"; -import { DynamicStructuredTool } from "@langchain/core/tools"; - -const addTool = new DynamicStructuredTool({ - name: "add", - description: "Add two integers together.", - schema: z.object({ - firstInt: z.number(), - secondInt: z.number(), - }), - func: async ({ firstInt, secondInt }) => { - return (firstInt + secondInt).toString(); - }, -}); - -const multiplyTool = new DynamicStructuredTool({ - name: "multiply", - description: "Multiply two integers together.", - schema: z.object({ - firstInt: z.number(), - secondInt: z.number(), - }), - func: async ({ firstInt, secondInt }) => { - return (firstInt * secondInt).toString(); - }, -}); - -const exponentiateTool = new DynamicStructuredTool({ - name: "exponentiate", - description: "Exponentiate the base to the exponent power.", - schema: z.object({ - base: z.number(), - exponent: 
z.number(), - }), - func: async ({ base, exponent }) => { - return (base ** exponent).toString(); - }, -}); - -import { ChatOpenAI } from "@langchain/openai"; -import { convertToOpenAITool } from "@langchain/core/utils/function_calling"; -import { - RunnableLambda, - RunnablePassthrough, - RunnableSequence, -} from "@langchain/core/runnables"; -import { JsonOutputToolsParser } from "@langchain/core/output_parsers/openai_tools"; - -const model = new ChatOpenAI({ - model: "gpt-3.5-turbo-1106", -}); - -const tools = [multiplyTool, exponentiateTool, addTool]; - -const toolMap: Record = { - multiply: multiplyTool, - exponentiate: exponentiateTool, - add: addTool, -}; - -const modelWithTools = model.bind({ - tools: tools.map(convertToOpenAITool), -}); - -const callSelectedTool = RunnableLambda.from( - (toolInvocation: Record) => { - const selectedTool = toolMap[toolInvocation.type]; - if (!selectedTool) { - throw new Error( - `No matching tool available for requested type "${toolInvocation.type}".` - ); - } - const toolCallChain = RunnableSequence.from([ - (toolInvocation) => toolInvocation.args, - selectedTool, - ]); - return RunnablePassthrough.assign({ - output: toolCallChain, - }); - } -); - -const chain = RunnableSequence.from([ - modelWithTools, - new JsonOutputToolsParser(), - callSelectedTool.map(), -]); - -console.log(await chain.invoke("What's 23 times 7")); - -console.log(await chain.invoke("add a million plus a billion")); - -console.log(await chain.invoke("cube thirty-seven")); diff --git a/examples/src/use_cases/tool_use/parallel.ts b/examples/src/use_cases/tool_use/parallel.ts deleted file mode 100644 index c9bd769b9d06..000000000000 --- a/examples/src/use_cases/tool_use/parallel.ts +++ /dev/null @@ -1,101 +0,0 @@ -/* eslint-disable import/first */ -/* eslint-disable arrow-body-style */ - -import { z } from "zod"; -import { DynamicStructuredTool } from "@langchain/core/tools"; - -const addTool = new DynamicStructuredTool({ - name: "add", - description: "Add two integers together.", - schema: z.object({ - firstInt: z.number(), - secondInt: z.number(), - }), - func: async ({ firstInt, secondInt }) => { - return (firstInt + secondInt).toString(); - }, -}); - -const multiplyTool = new DynamicStructuredTool({ - name: "multiply", - description: "Multiply two integers together.", - schema: z.object({ - firstInt: z.number(), - secondInt: z.number(), - }), - func: async ({ firstInt, secondInt }) => { - return (firstInt * secondInt).toString(); - }, -}); - -const exponentiateTool = new DynamicStructuredTool({ - name: "exponentiate", - description: "Exponentiate the base to the exponent power.", - schema: z.object({ - base: z.number(), - exponent: z.number(), - }), - func: async ({ base, exponent }) => { - return (base ** exponent).toString(); - }, -}); - -import { ChatOpenAI } from "@langchain/openai"; -import { convertToOpenAITool } from "@langchain/core/utils/function_calling"; -import { - RunnableLambda, - RunnablePassthrough, - RunnableSequence, -} from "@langchain/core/runnables"; -import { JsonOutputToolsParser } from "@langchain/core/output_parsers/openai_tools"; - -const model = new ChatOpenAI({ - model: "gpt-3.5-turbo-1106", -}); - -const tools = [multiplyTool, exponentiateTool, addTool]; - -const toolMap: Record = { - multiply: multiplyTool, - exponentiate: exponentiateTool, - add: addTool, -}; - -const modelWithTools = model.bind({ - tools: tools.map(convertToOpenAITool), -}); - -// Function for dynamically constructing the end of the chain based on the model-selected tool. 
-const callSelectedTool = RunnableLambda.from( - (toolInvocation: Record) => { - const selectedTool = toolMap[toolInvocation.type]; - if (!selectedTool) { - throw new Error( - `No matching tool available for requested type "${toolInvocation.type}".` - ); - } - const toolCallChain = RunnableSequence.from([ - (toolInvocation) => toolInvocation.args, - selectedTool, - ]); - // We use `RunnablePassthrough.assign` here to return the intermediate `toolInvocation` params - // as well, but you can omit if you only care about the answer. - return RunnablePassthrough.assign({ - output: toolCallChain, - }); - } -); - -const chain = RunnableSequence.from([ - modelWithTools, - new JsonOutputToolsParser(), - // .map() allows us to apply a function for each item in a list of inputs. - // Required because the model can call multiple tools at once. - callSelectedTool.map(), -]); - -console.log( - await chain.invoke( - "What's 23 times 7, and what's five times 18 and add a million plus a billion and cube thirty-seven" - ) -); diff --git a/examples/src/use_cases/tool_use/quickstart_agents.ts b/examples/src/use_cases/tool_use/quickstart_agents.ts deleted file mode 100644 index a61ace957db8..000000000000 --- a/examples/src/use_cases/tool_use/quickstart_agents.ts +++ /dev/null @@ -1,77 +0,0 @@ -/* eslint-disable import/first */ -/* eslint-disable arrow-body-style */ - -import { pull } from "langchain/hub"; -import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents"; -import { ChatOpenAI } from "@langchain/openai"; -import type { ChatPromptTemplate } from "@langchain/core/prompts"; - -// Get the prompt to use - you can modify this! -// You can also see the full prompt at: -// https://smith.langchain.com/hub/hwchase17/openai-tools-agent -const prompt = await pull("hwchase17/openai-tools-agent"); - -import { z } from "zod"; -import { DynamicStructuredTool } from "@langchain/core/tools"; - -const addTool = new DynamicStructuredTool({ - name: "add", - description: "Add two integers together.", - schema: z.object({ - firstInt: z.number(), - secondInt: z.number(), - }), - func: async ({ firstInt, secondInt }) => { - return (firstInt + secondInt).toString(); - }, -}); - -const multiplyTool = new DynamicStructuredTool({ - name: "multiply", - description: "Multiply two integers together.", - schema: z.object({ - firstInt: z.number(), - secondInt: z.number(), - }), - func: async ({ firstInt, secondInt }) => { - return (firstInt * secondInt).toString(); - }, -}); - -const exponentiateTool = new DynamicStructuredTool({ - name: "exponentiate", - description: "Exponentiate the base to the exponent power.", - schema: z.object({ - base: z.number(), - exponent: z.number(), - }), - func: async ({ base, exponent }) => { - return (base ** exponent).toString(); - }, -}); - -const model = new ChatOpenAI({ - model: "gpt-3.5-turbo-1106", - temperature: 0, -}); - -const tools = [addTool, multiplyTool, exponentiateTool]; - -const agent = await createOpenAIToolsAgent({ - llm: model, - tools, - prompt, -}); - -const agentExecutor = new AgentExecutor({ - agent, - tools, - verbose: true, -}); - -console.log( - await agentExecutor.invoke({ - input: - "Take 3 to the fifth power and multiply that by the sum of twelve and three, then square the whole result", - }) -); diff --git a/examples/src/use_cases/tool_use/quickstart_chains.ts b/examples/src/use_cases/tool_use/quickstart_chains.ts deleted file mode 100644 index 1381ce58e481..000000000000 --- a/examples/src/use_cases/tool_use/quickstart_chains.ts +++ /dev/null @@ -1,62 +0,0 @@ 
-/* eslint-disable import/first */ -/* eslint-disable arrow-body-style */ - -import { z } from "zod"; -import { DynamicStructuredTool } from "@langchain/core/tools"; -import { RunnableSequence } from "@langchain/core/runnables"; -import { - JsonOutputToolsParser, - JsonOutputKeyToolsParser, -} from "@langchain/core/output_parsers/openai_tools"; - -const multiplyTool = new DynamicStructuredTool({ - name: "multiply", - description: "Multiply two integers together.", - schema: z.object({ - firstInt: z.number(), - secondInt: z.number(), - }), - func: async ({ firstInt, secondInt }) => { - return (firstInt * secondInt).toString(); - }, -}); - -console.log(await multiplyTool.invoke({ firstInt: 4, secondInt: 5 })); - -import { ChatOpenAI } from "@langchain/openai"; - -const model = new ChatOpenAI({ - model: "gpt-3.5-turbo-1106", -}); - -import { convertToOpenAITool } from "@langchain/core/utils/function_calling"; - -const formattedTools = [convertToOpenAITool(multiplyTool)]; - -console.log(JSON.stringify(formattedTools, null, 2)); - -const modelWithTools = model.bind({ - tools: formattedTools, - // We specify tool_choice to enforce that the 'multiply' function is called by the model. - tool_choice: { - type: "function", - function: { name: "multiply" }, - }, -}); -const chain = modelWithTools.pipe(new JsonOutputToolsParser()); - -console.log(await chain.invoke("What's 4 times 23?")); - -const chain2 = modelWithTools.pipe( - new JsonOutputKeyToolsParser({ keyName: "multiply", returnSingle: true }) -); - -console.log(await chain2.invoke("What's 4 times 23?")); - -const chain3 = RunnableSequence.from([ - modelWithTools, - new JsonOutputKeyToolsParser({ keyName: "multiply", returnSingle: true }), - multiplyTool, -]); - -console.log(await chain3.invoke("What's 4 times 23?")); diff --git a/examples/src/use_cases/tool_use/tool_error_handling_fallbacks.ts b/examples/src/use_cases/tool_use/tool_error_handling_fallbacks.ts deleted file mode 100644 index bd7d09c191f5..000000000000 --- a/examples/src/use_cases/tool_use/tool_error_handling_fallbacks.ts +++ /dev/null @@ -1,74 +0,0 @@ -/* eslint-disable import/first */ -import { z } from "zod"; -import { DynamicStructuredTool } from "@langchain/core/tools"; - -const complexTool = new DynamicStructuredTool({ - name: "complex_tool", - description: "Do something complex with a complex tool.", - schema: z.object({ - intArg: z.number(), - floatArg2: z.number(), - dictArg: z.object({}), - }), - func: async ({ intArg, floatArg2, dictArg }) => { - // Unused for demo purposes - console.log(dictArg); - return (intArg * floatArg2).toString(); - }, -}); - -import { ChatOpenAI } from "@langchain/openai"; - -const model = new ChatOpenAI({ - model: "gpt-3.5-turbo-1106", - temperature: 0, -}); - -import { convertToOpenAITool } from "@langchain/core/utils/function_calling"; - -const formattedTools = [convertToOpenAITool(complexTool)]; - -const modelWithTools = model.bind({ - tools: formattedTools, - // We specify tool_choice to enforce that the 'multiply' function is called by the model. 
- tool_choice: { - type: "function", - function: { name: "complex_tool" }, - }, -}); -import { RunnableSequence } from "@langchain/core/runnables"; -import { JsonOutputKeyToolsParser } from "@langchain/core/output_parsers/openai_tools"; - -const chain = RunnableSequence.from([ - modelWithTools, - new JsonOutputKeyToolsParser({ keyName: "complex_tool", returnSingle: true }), - complexTool, -]); - -const betterModel = new ChatOpenAI({ - model: "gpt-4-1106-preview", - temperature: 0, -}).bind({ - tools: formattedTools, - // We specify tool_choice to enforce that the 'multiply' function is called by the model. - tool_choice: { - type: "function", - function: { name: "complex_tool" }, - }, -}); - -const betterChain = RunnableSequence.from([ - betterModel, - new JsonOutputKeyToolsParser({ keyName: "complex_tool", returnSingle: true }), - complexTool, -]); - -const chainWithFallback = chain.withFallbacks({ - fallbacks: [betterChain], -}); - -console.log( - await chainWithFallback.invoke( - "use complex tool. the args are 5, 2.1, potato." - ) -); diff --git a/examples/src/use_cases/tool_use/tool_error_handling_intro.ts b/examples/src/use_cases/tool_use/tool_error_handling_intro.ts deleted file mode 100644 index 68525c5c1695..000000000000 --- a/examples/src/use_cases/tool_use/tool_error_handling_intro.ts +++ /dev/null @@ -1,50 +0,0 @@ -/* eslint-disable import/first */ -import { z } from "zod"; -import { DynamicStructuredTool } from "@langchain/core/tools"; - -const complexTool = new DynamicStructuredTool({ - name: "complex_tool", - description: "Do something complex with a complex tool.", - schema: z.object({ - intArg: z.number(), - floatArg: z.number(), - dictArg: z.object({}), - }), - func: async ({ intArg, floatArg, dictArg }) => { - // Unused for demo purposes - console.log(dictArg); - return (intArg * floatArg).toString(); - }, -}); - -import { ChatOpenAI } from "@langchain/openai"; - -const model = new ChatOpenAI({ - model: "gpt-3.5-turbo", - temperature: 0, -}); - -import { convertToOpenAITool } from "@langchain/core/utils/function_calling"; - -const formattedTools = [convertToOpenAITool(complexTool)]; - -const modelWithTools = model.bind({ - tools: formattedTools, - // We specify tool_choice to enforce that the 'multiply' function is called by the model. - tool_choice: { - type: "function", - function: { name: "complex_tool" }, - }, -}); -import { RunnableSequence } from "@langchain/core/runnables"; -import { JsonOutputKeyToolsParser } from "@langchain/core/output_parsers/openai_tools"; - -const chain = RunnableSequence.from([ - modelWithTools, - new JsonOutputKeyToolsParser({ keyName: "complex_tool", returnSingle: true }), - complexTool, -]); - -console.log( - await chain.invoke("use complex tool. the args are 5, 2.1, potato.") -);