diff --git a/docs/core_docs/docs/integrations/tools/aiplugin-tool.mdx b/docs/core_docs/docs/integrations/tools/aiplugin-tool.mdx
index 2f296b4e7e61..d6f13289ee31 100644
--- a/docs/core_docs/docs/integrations/tools/aiplugin-tool.mdx
+++ b/docs/core_docs/docs/integrations/tools/aiplugin-tool.mdx
@@ -1,5 +1,6 @@
---
hide_table_of_contents: true
+sidebar_class_name: hidden
---
import CodeBlock from "@theme/CodeBlock";
@@ -7,6 +8,10 @@ import Example from "@examples/agents/aiplugin-tool.ts";
# ChatGPT Plugins
+:::warning
+OpenAI has [deprecated plugins](https://openai.com/index/chatgpt-plugins/).
+:::
+
This example shows how to use ChatGPT Plugins within LangChain abstractions.
Note 1: This currently only works for plugins with no auth.
diff --git a/docs/core_docs/docs/integrations/tools/duckduckgo_search.ipynb b/docs/core_docs/docs/integrations/tools/duckduckgo_search.ipynb
index 939a9471be48..e0053c4cd9b1 100644
--- a/docs/core_docs/docs/integrations/tools/duckduckgo_search.ipynb
+++ b/docs/core_docs/docs/integrations/tools/duckduckgo_search.ipynb
@@ -29,9 +29,9 @@
"\n",
"### Integration details\n",
"\n",
- "| Class | Package | Serializable | [PY support](https://python.langchain.com/docs/integrations/tools/ddg/) | Package latest |\n",
+ "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/tools/ddg/) | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: |\n",
- "| [DuckDuckGoSearch](https://api.js.langchain.com/classes/langchain_community_tools_duckduckgo_search.DuckDuckGoSearch.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_tools_duckduckgo_search.html) | ❌ | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n",
+ "| [DuckDuckGoSearch](https://api.js.langchain.com/classes/langchain_community_tools_duckduckgo_search.DuckDuckGoSearch.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n",
"\n",
"## Setup\n",
"\n",
@@ -65,7 +65,7 @@
"source": [
"## Instantiation\n",
"\n",
- "Here we show how to insatiate an instance of the `DuckDuckGoSearch` tool, with "
+ "You can instantiate an instance of the `DuckDuckGoSearch` tool like this:"
]
},
{
@@ -100,12 +100,12 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "[{\"title\":\"Anthropic forecasts more than $850 mln in annualized revenue rate by ...\",\"link\":\"https://www.reuters.com/technology/anthropic-forecasts-more-than-850-mln-annualized-revenue-rate-by-2024-end-report-2023-12-26/\",\"snippet\":\"Dec 26 (Reuters) - Artificial intelligence startup Anthropic has projected it will generate more than $850 million in annualized revenue by the end of 2024, the Information reported on Tuesday ...\"}]\n"
+ "[{\"title\":\"San Francisco, CA Current Weather | AccuWeather\",\"link\":\"https://www.accuweather.com/en/us/san-francisco/94103/current-weather/347629\",\"snippet\":\"Current weather in San Francisco, CA. Check current conditions in San Francisco, CA with radar, hourly, and more.\"}]\n"
]
}
],
"source": [
- "await tool.invoke(\"What is Anthropic's estimated revenue for 2024?\")"
+ "await tool.invoke(\"what is the current weather in sf?\")"
]
},
{
@@ -129,7 +129,7 @@
"output_type": "stream",
"text": [
"ToolMessage {\n",
- " \"content\": \"[{\\\"title\\\":\\\"Anthropic forecasts more than $850 mln in annualized revenue rate by ...\\\",\\\"link\\\":\\\"https://www.reuters.com/technology/anthropic-forecasts-more-than-850-mln-annualized-revenue-rate-by-2024-end-report-2023-12-26/\\\",\\\"snippet\\\":\\\"Dec 26 (Reuters) - Artificial intelligence startup Anthropic has projected it will generate more than $850 million in annualized revenue by the end of 2024, the Information reported on Tuesday ...\\\"}]\",\n",
+ " \"content\": \"[{\\\"title\\\":\\\"San Francisco, CA Weather Conditions | Weather Underground\\\",\\\"link\\\":\\\"https://www.wunderground.com/weather/us/ca/san-francisco\\\",\\\"snippet\\\":\\\"San Francisco Weather Forecasts. Weather Underground provides local & long-range weather forecasts, weatherreports, maps & tropical weather conditions for the San Francisco area.\\\"}]\",\n",
" \"name\": \"duckduckgo-search\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {},\n",
@@ -142,7 +142,7 @@
"// This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n",
"const modelGeneratedToolCall = {\n",
" args: {\n",
- " input: \"What is Anthropic's estimated revenue for 2024?\"\n",
+ " input: \"what is the current weather in sf?\"\n",
" },\n",
" id: \"tool_call_id\",\n",
" name: tool.name,\n",
@@ -185,43 +185,47 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 10,
"id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95",
"metadata": {},
"outputs": [],
"source": [
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n",
- "import { RunnableConfig } from \"@langchain/core/runnables\"\n",
- "import { AIMessage } from \"@langchain/core/messages\"\n",
+ "import { HumanMessage } from \"@langchain/core/messages\";\n",
+ "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
+ "import { RunnableLambda } from \"@langchain/core/runnables\";\n",
"\n",
"const prompt = ChatPromptTemplate.fromMessages(\n",
" [\n",
" [\"system\", \"You are a helpful assistant.\"],\n",
- " [\"human\", \"{user_input}\"],\n",
" [\"placeholder\", \"{messages}\"],\n",
" ]\n",
")\n",
"\n",
- "// specifying tool_choice will force the model to call this tool.\n",
- "const llmWithTools = llm.bindTools([tool], {\n",
- " tool_choice: tool.name\n",
- "})\n",
+ "const llmWithTools = llm.bindTools([tool]);\n",
"\n",
- "const llmChain = prompt.pipe(llmWithTools);\n",
+ "const chain = prompt.pipe(llmWithTools);\n",
"\n",
- "const toolChain = async (userInput: string, config?: RunnableConfig): Promise => {\n",
- " const input_ = { user_input: userInput };\n",
- " const aiMsg = await llmChain.invoke(input_, config);\n",
- " const toolMsgs = await tool.batch(aiMsg.tool_calls, config);\n",
- " return llmChain.invoke({ ...input_, messages: [aiMsg, ...toolMsgs] }, config);\n",
- "};\n",
+ "const toolChain = RunnableLambda.from(\n",
+ " async (userInput: string, config) => {\n",
+ " const humanMessage = new HumanMessage(userInput,);\n",
+ " const aiMsg = await chain.invoke({\n",
+ " messages: [new HumanMessage(userInput)],\n",
+ " }, config);\n",
+ " const toolMsgs = await tool.batch(aiMsg.tool_calls, config);\n",
+ " return chain.invoke({\n",
+ " messages: [humanMessage, aiMsg, ...toolMsgs],\n",
+ " }, config);\n",
+ " }\n",
+ ");\n",
"\n",
- "const toolChainResult = await toolChain(\"What is Anthropic's estimated revenue for 2024?\");"
+ "const toolChainResult = await toolChain.invoke(\"how many people have climbed mount everest?\");"
]
},
{
"cell_type": "code",
- "execution_count": 13,
+ "execution_count": 11,
"id": "28448fe2",
"metadata": {},
"outputs": [
@@ -230,17 +232,8 @@
"output_type": "stream",
"text": [
"AIMessage {\n",
- " \"tool_calls\": [\n",
- " {\n",
- " \"name\": \"duckduckgo-search\",\n",
- " \"args\": {\n",
- " \"input\": \"Anthropic revenue 2024 forecast\"\n",
- " },\n",
- " \"type\": \"tool_call\",\n",
- " \"id\": \"call_E22L1T1bI6xPrMtL8wrKW5C5\"\n",
- " }\n",
- " ],\n",
- " \"content\": \"\"\n",
+ " \"tool_calls\": [],\n",
+ " \"content\": \"As of December 2023, a total of 6,664 different people have reached the summit of Mount Everest.\"\n",
"}\n"
]
}
@@ -250,81 +243,18 @@
"\n",
"console.log(\"AIMessage\", JSON.stringify({\n",
" tool_calls,\n",
- " content\n",
- "}, null, 2))"
+ " content,\n",
+ "}, null, 2));"
]
},
{
"cell_type": "markdown",
- "id": "71ceafa7",
- "metadata": {},
- "source": [
- "## With an agent\n",
- "\n",
- "We can also pass the `DuckDuckGoSearch` tool to an agent. First, ensure you have the LangGraph package installed.\n",
- "\n",
- "```{=mdx}\n",
- "\n",
- " @langchain/langgraph\n",
- "\n",
- "```"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "77a05a61",
- "metadata": {},
- "outputs": [],
- "source": [
- "import { DuckDuckGoSearch } from \"@langchain/community/tools/duckduckgo_search\";\n",
- "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n",
- "\n",
- "// Define the tools the agent will have access to.\n",
- "const toolsForAgent = [new DuckDuckGoSearch({ maxResults: 1 })];\n",
- "\n",
- "const agentExecutor = createReactAgent({ llm, tools: toolsForAgent });"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "3130e547",
+ "id": "570f4662",
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[\n",
- " {\n",
- " name: 'duckduckgo-search',\n",
- " args: { input: 'Anthropic estimated revenue 2024' },\n",
- " type: 'tool_call',\n",
- " id: 'call_eZwbyemMAu8tgQw4VqJs65hF'\n",
- " }\n",
- "]\n",
- "[{\"title\":\"Anthropic forecasts more than $850 mln in annualized revenue rate by ...\",\"link\":\"https://www.reuters.com/technology/anthropic-forecasts-more-than-850-mln-annualized-revenue-rate-by-2024-end-report-2023-12-26/\",\"snippet\":\"Dec 26 (Reuters) - Artificial intelligence startup Anthropic has projected it will generate more than $850 million in annualized revenue by the end of 2024, the Information reported on Tuesday ...\"}]\n",
- "Anthropic is projected to generate more than $850 million in annualized revenue by the end of 2024.\n"
- ]
- }
- ],
"source": [
- "const exampleQuery = \"What is Anthropic's estimated revenue for 2024?\"\n",
- "\n",
- "const events = await agentExecutor.stream(\n",
- " { messages: [[\"user\", exampleQuery]]},\n",
- " { streamMode: \"values\", }\n",
- ")\n",
+ "## Agents\n",
"\n",
- "for await (const event of events) {\n",
- " const lastMsg = event.messages[event.messages.length - 1];\n",
- " if (lastMsg.tool_calls?.length) {\n",
- " console.dir(lastMsg.tool_calls, { depth: null });\n",
- " } else if (lastMsg.content) {\n",
- " console.log(lastMsg.content);\n",
- " }\n",
- "}"
+ "For guides on how to use LangChain tools in agents, see the [LangGraph.js](https://langchain-ai.github.io/langgraphjs/) docs."
]
},
{
diff --git a/docs/core_docs/docs/integrations/tools/tavily_search.ipynb b/docs/core_docs/docs/integrations/tools/tavily_search.ipynb
new file mode 100644
index 000000000000..0f00521cddc8
--- /dev/null
+++ b/docs/core_docs/docs/integrations/tools/tavily_search.ipynb
@@ -0,0 +1,308 @@
+{
+ "cells": [
+ {
+ "cell_type": "raw",
+ "id": "10238e62-3465-4973-9279-606cbb7ccf16",
+ "metadata": {
+ "vscode": {
+ "languageId": "raw"
+ }
+ },
+ "source": [
+ "---\n",
+ "sidebar_label: Tavily Search\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a6f91f20",
+ "metadata": {},
+ "source": [
+ "# TavilySearchResults\n",
+ "\n",
+ "[Tavily](https://tavily.com/) Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience.\n",
+ "\n",
+ "This guide provides a quick overview for getting started with the Tavily search results [tool](/docs/integrations/tools/). For detailed documentation of all `TavilySearchResults` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_tools_tavily_search.TavilySearchResults.html).\n",
+ "\n",
+ "## Overview\n",
+ "\n",
+ "### Integration details\n",
+ "\n",
+ "| Class | Package | [PY support](https://python.langchain.com/v0.2/docs/integrations/tools/tavily_search/) | Package latest |\n",
+ "| :--- | :--- | :---: | :---: | :---: |\n",
+ "| [TavilySearchResults](https://api.js.langchain.com/classes/langchain_community_tools_tavily_search.TavilySearchResults.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/__package_name__?style=flat-square&label=%20&) |\n",
+ "\n",
+ "## Setup\n",
+ "\n",
+ "The integration lives in the `@langchain/community` package, which you can install as shown below:\n",
+ "\n",
+ "```{=mdx}\n",
+ "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n",
+ "import Npm2Yarn from \"@theme/Npm2Yarn\";\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " @langchain/community\n",
+ "\n",
+ "```\n",
+ "\n",
+ "### Credentials\n",
+ "\n",
+ "Set up an API key [here](https://app.tavily.com) and set it as an environment variable named `TAVILY_API_KEY`.\n",
+ "\n",
+ "```typescript\n",
+ "process.env.TAVILY_API_KEY = \"YOUR_API_KEY\"\n",
+ "```\n",
+ "\n",
+ "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:\n",
+ "\n",
+ "```typescript\n",
+ "process.env.LANGCHAIN_TRACING_V2=\"true\"\n",
+ "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f",
+ "metadata": {},
+ "source": [
+ "## Instantiation\n",
+ "\n",
+ "You can import and instantiate an instance of the `TavilySearchResults` tool like this:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import { TavilySearchResults } from \"@langchain/community/tools/tavily_search\";\n",
+ "\n",
+ "const tool = new TavilySearchResults({\n",
+ " maxResults: 2,\n",
+ " // ...\n",
+ "});"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "74147a1a",
+ "metadata": {},
+ "source": [
+ "## Invocation\n",
+ "\n",
+ "### [Invoke directly with args](/docs/concepts/#invoke-with-just-the-arguments)\n",
+ "\n",
+ "You can invoke the tool directly like this:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[{\"title\":\"San Francisco, CA Current Weather | AccuWeather\",\"url\":\"https://www.accuweather.com/en/us/san-francisco/94103/current-weather/347629\",\"content\":\"Current weather in San Francisco, CA. Check current conditions in San Francisco, CA with radar, hourly, and more.\",\"score\":0.9428234,\"raw_content\":null},{\"title\":\"National Weather Service\",\"url\":\"https://forecast.weather.gov/zipcity.php?inputstring=San+Francisco,CA\",\"content\":\"NOAA National Weather Service. Current conditions at SAN FRANCISCO DOWNTOWN (SFOC1) Lat: 37.77056°NLon: 122.42694°WElev: 150.0ft.\",\"score\":0.94261247,\"raw_content\":null}]\n"
+ ]
+ }
+ ],
+ "source": [
+ "await tool.invoke({\n",
+ " input: \"what is the current weather in SF?\"\n",
+ "});"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d6e73897",
+ "metadata": {},
+ "source": [
+ "### [Invoke with ToolCall](/docs/concepts/#invoke-with-toolcall)\n",
+ "\n",
+ "We can also invoke the tool with a model-generated `ToolCall`, in which case a `ToolMessage` will be returned:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "f90e33a7",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "ToolMessage {\n",
+ " \"content\": \"[{\\\"title\\\":\\\"Weather in San Francisco\\\",\\\"url\\\":\\\"https://www.weatherapi.com/\\\",\\\"content\\\":\\\"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.78, 'lon': -122.42, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1722967498, 'localtime': '2024-08-06 11:04'}, 'current': {'last_updated_epoch': 1722967200, 'last_updated': '2024-08-06 11:00', 'temp_c': 18.4, 'temp_f': 65.2, 'is_day': 1, 'condition': {'text': 'Sunny', 'icon': '//cdn.weatherapi.com/weather/64x64/day/113.png', 'code': 1000}, 'wind_mph': 2.9, 'wind_kph': 4.7, 'wind_degree': 275, 'wind_dir': 'W', 'pressure_mb': 1015.0, 'pressure_in': 29.97, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 64, 'cloud': 2, 'feelslike_c': 18.5, 'feelslike_f': 65.2, 'windchill_c': 18.5, 'windchill_f': 65.2, 'heatindex_c': 18.4, 'heatindex_f': 65.2, 'dewpoint_c': 11.7, 'dewpoint_f': 53.1, 'vis_km': 10.0, 'vis_miles': 6.0, 'uv': 5.0, 'gust_mph': 4.3, 'gust_kph': 7.0}}\\\",\\\"score\\\":0.9983156,\\\"raw_content\\\":null},{\\\"title\\\":\\\"Weather in San Francisco in June 2024 - Detailed Forecast\\\",\\\"url\\\":\\\"https://www.easeweather.com/north-america/united-states/california/city-and-county-of-san-francisco/san-francisco/june\\\",\\\"content\\\":\\\"Until now, June 2024 in San Francisco is slightly cooler than the historical average by -0.6 ° C.. The forecast for June 2024 in San Francisco predicts the temperature to closely align with the historical average at 17.7 ° C. 17.7 ° C.\\\",\\\"score\\\":0.9905143,\\\"raw_content\\\":null}]\",\n",
+ " \"name\": \"tavily_search_results_json\",\n",
+ " \"additional_kwargs\": {},\n",
+ " \"response_metadata\": {},\n",
+ " \"tool_call_id\": \"1\"\n",
+ "}\n"
+ ]
+ }
+ ],
+ "source": [
+ "// This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n",
+ "const modelGeneratedToolCall = {\n",
+ " args: {\n",
+ " input: \"what is the current weather in SF?\"\n",
+ " },\n",
+ " id: \"1\",\n",
+ " name: tool.name,\n",
+ " type: \"tool_call\",\n",
+ "}\n",
+ "\n",
+ "await tool.invoke(modelGeneratedToolCall)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd",
+ "metadata": {},
+ "source": [
+ "## Chaining\n",
+ "\n",
+ "We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling/) and then calling it:\n",
+ "\n",
+ "```{=mdx}\n",
+ "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
+ "\n",
+ "\n",
+ "```\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "af3123ad-7a02-40e5-b58e-7d56e23e5830",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "// @lc-docs-hide-cell\n",
+ "\n",
+ "import { ChatOpenAI } from \"@langchain/openai\"\n",
+ "\n",
+ "const llm = new ChatOpenAI({\n",
+ " model: \"gpt-4o\",\n",
+ " temperature: 0,\n",
+ "})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import { HumanMessage } from \"@langchain/core/messages\";\n",
+ "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
+ "import { RunnableLambda } from \"@langchain/core/runnables\";\n",
+ "\n",
+ "const prompt = ChatPromptTemplate.fromMessages(\n",
+ " [\n",
+ " [\"system\", \"You are a helpful assistant.\"],\n",
+ " [\"placeholder\", \"{messages}\"],\n",
+ " ]\n",
+ ")\n",
+ "\n",
+ "const llmWithTools = llm.bindTools([tool]);\n",
+ "\n",
+ "const chain = prompt.pipe(llmWithTools);\n",
+ "\n",
+ "const toolChain = RunnableLambda.from(\n",
+ " async (userInput: string, config) => {\n",
+ " const humanMessage = new HumanMessage(userInput,);\n",
+ " const aiMsg = await chain.invoke({\n",
+ " messages: [new HumanMessage(userInput)],\n",
+ " }, config);\n",
+ " const toolMsgs = await tool.batch(aiMsg.tool_calls, config);\n",
+ " return chain.invoke({\n",
+ " messages: [humanMessage, aiMsg, ...toolMsgs],\n",
+ " }, config);\n",
+ " }\n",
+ ");\n",
+ "\n",
+ "const toolChainResult = await toolChain.invoke(\"what is the current weather in sf?\");"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "9ac188a2",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "AIMessage {\n",
+ " \"tool_calls\": [],\n",
+ " \"content\": \"The current weather in San Francisco is as follows:\\n\\n- **Condition:** Sunny\\n- **Temperature:** 18.4°C (65.2°F)\\n- **Wind:** 2.9 mph (4.7 kph) from the west\\n- **Humidity:** 64%\\n- **Visibility:** 10 km (6 miles)\\n- **UV Index:** 5\\n\\n![Sunny](//cdn.weatherapi.com/weather/64x64/day/113.png)\\n\\nFor more detailed information, you can visit [WeatherAPI](https://www.weatherapi.com/).\"\n",
+ "}\n"
+ ]
+ }
+ ],
+ "source": [
+ "const { tool_calls, content } = toolChainResult;\n",
+ "\n",
+ "console.log(\"AIMessage\", JSON.stringify({\n",
+ " tool_calls,\n",
+ " content,\n",
+ "}, null, 2));"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "573fb391",
+ "metadata": {},
+ "source": [
+ "## Agents\n",
+ "\n",
+ "For guides on how to use LangChain tools in agents, see the [LangGraph.js](https://langchain-ai.github.io/langgraphjs/) docs."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4ac8146c",
+ "metadata": {},
+ "source": [
+ "## API reference\n",
+ "\n",
+ "For detailed documentation of all `TavilySearchResults` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_tools_tavily_search.TavilySearchResults.html"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "TypeScript",
+ "language": "typescript",
+ "name": "tslab"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "mode": "typescript",
+ "name": "javascript",
+ "typescript": true
+ },
+ "file_extension": ".ts",
+ "mimetype": "text/typescript",
+ "name": "typescript",
+ "version": "3.7.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/core_docs/docs/integrations/tools/tavily_search.mdx b/docs/core_docs/docs/integrations/tools/tavily_search.mdx
deleted file mode 100644
index c61aaf953057..000000000000
--- a/docs/core_docs/docs/integrations/tools/tavily_search.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
----
-hide_table_of_contents: true
----
-
-import CodeBlock from "@theme/CodeBlock";
-
-# Tavily Search
-
-Tavily Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience.
-
-## Setup
-
-Set up an API key [here](https://app.tavily.com) and set it as an environment variable named `TAVILY_API_KEY`.
-
-You'll also need to install the `@langchain/community` package:
-
-import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
-
-
-
-```bash npm2yarn
-npm install @langchain/openai @langchain/community
-```
-
-## Usage
-
-import ToolExample from "@examples/tools/tavily_search.ts";
-
-{ToolExample}
-
-## Related
-
-- Tool [conceptual guide](/docs/concepts/#tools)
-- Tool [how-to guides](/docs/how_to/#tools)
diff --git a/libs/langchain-scripts/src/cli/docs/templates/tools.ipynb b/libs/langchain-scripts/src/cli/docs/templates/tools.ipynb
index e51469f89fc9..e43a446f0909 100644
--- a/libs/langchain-scripts/src/cli/docs/templates/tools.ipynb
+++ b/libs/langchain-scripts/src/cli/docs/templates/tools.ipynb
@@ -33,15 +33,14 @@
"\n",
"- TODO: Make sure links and features are correct\n",
"\n",
- "| Class | Package | Serializable | [PY support](__python_doc_url__) | Package latest |\n",
+ "| Class | Package | [PY support](__python_doc_url__) | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: |\n",
- "| [__module_name__](__api_ref_module__) | [__package_name__](__api_ref_package__) | __serializable__ | __py_support__ | ![NPM - Version](https://img.shields.io/npm/v/__package_name__?style=flat-square&label=%20&) |\n",
+ "| [__module_name__](__api_ref_module__) | [__package_name__](__api_ref_package__) | __py_support__ | ![NPM - Version](https://img.shields.io/npm/v/__package_name__?style=flat-square&label=%20&) |\n",
"\n",
"### Tool features\n",
"\n",
"- TODO: Add feature table if it makes sense\n",
"\n",
- "\n",
"## Setup\n",
"\n",
"- TODO: Add any additional deps\n",
@@ -84,7 +83,7 @@
"\n",
"- TODO: Fill in instantiation params\n",
"\n",
- "Here we show how to insatiate an instance of the `__module_name__` tool, with "
+ "You can import and instantiate an instance of the `__module_name__` tool like this:"
]
},
{
@@ -214,33 +214,37 @@
},
"outputs": [],
"source": [
- "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n",
- "import { RunnableConfig } from \"@langchain/core/runnables\"\n",
- "import { AIMessage } from \"@langchain/core/messages\"\n",
+ "import { HumanMessage } from \"@langchain/core/messages\";\n",
+ "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
+ "import { RunnableLambda } from \"@langchain/core/runnables\";\n",
"\n",
"const prompt = ChatPromptTemplate.fromMessages(\n",
" [\n",
" [\"system\", \"You are a helpful assistant.\"],\n",
- " [\"human\", \"{user_input}\"],\n",
" [\"placeholder\", \"{messages}\"],\n",
" ]\n",
")\n",
"\n",
- "// specifying tool_choice will force the model to call this tool.\n",
- "const llmWithTools = llm.bindTools([tool], {\n",
- " tool_choice: tool.name\n",
- "})\n",
+ "const llmWithTools = llm.bindTools([tool]);\n",
"\n",
- "const llmChain = prompt.pipe(llmWithTools);\n",
+ "const chain = prompt.pipe(llmWithTools);\n",
"\n",
- "const toolChain = async (userInput: string, config?: RunnableConfig): Promise => {\n",
- " const input_ = { user_input: userInput };\n",
- " const aiMsg = await llmChain.invoke(input_, config);\n",
- " const toolMsgs = await tool.batch(aiMsg.tool_calls, config);\n",
- " return llmChain.invoke({ ...input_, messages: [aiMsg, ...toolMsgs] }, config);\n",
- "};\n",
+ "const toolChain = RunnableLambda.from(\n",
+ " async (userInput: string, config) => {\n",
+ " const humanMessage = new HumanMessage(userInput,);\n",
+ " const aiMsg = await chain.invoke({\n",
+ " messages: [new HumanMessage(userInput)],\n",
+ " }, config);\n",
+ " const toolMsgs = await tool.batch(aiMsg.tool_calls, config);\n",
+ " return chain.invoke({\n",
+ " messages: [humanMessage, aiMsg, ...toolMsgs],\n",
+ " }, config);\n",
+ " }\n",
+ ");\n",
"\n",
- "const toolChainResult = await toolChain(\"What is Anthropic's estimated revenue for 2024?\");"
+ "const toolChainResult = await toolChain.invoke(\"what is the current weather in sf?\");"
]
},
{
@@ -258,8 +259,18 @@
"\n",
"console.log(\"AIMessage\", JSON.stringify({\n",
" tool_calls,\n",
- " content\n",
- "}, null, 2))"
+ " content,\n",
+ "}, null, 2));"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "93848b02",
+ "metadata": {},
+ "source": [
+ "## Agents\n",
+ "\n",
+ "For guides on how to use LangChain tools in agents, see the [LangGraph.js](https://langchain-ai.github.io/langgraphjs/) docs."
]
},
{