From 86ac8b7bc2c60a55dedab20a28cea132509d930e Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Tue, 25 Jul 2023 14:52:42 -0700 Subject: [PATCH 01/15] Delete Examples --- .../examples/agent_vectordb_sota_pg.ipynb | 511 --------- .../data_augmented_question_answering.ipynb | 554 +++++----- .../evaluation/examples/openapi_eval.ipynb | 975 ------------------ .../examples/qa_benchmarking_pg.ipynb | 372 ------- .../examples/qa_benchmarking_sota.ipynb | 385 ------- .../evaluation/examples/qa_generation.ipynb | 118 --- .../examples/question_answering.ipynb | 355 ++----- .../sql_qa_benchmarking_chinook.ipynb | 428 -------- .../examples/state_of_the_union.txt | 723 +++++++++++++ .../guides/evaluation/string/Untitled.ipynb | 318 ++++++ .../modules/chains/foundational/llm_chain.mdx | 2 +- .../langchain/evaluation/qa/generate_chain.py | 10 + .../evaluation/qa/generate_prompt.py | 5 +- 13 files changed, 1415 insertions(+), 3341 deletions(-) delete mode 100644 docs/extras/guides/evaluation/examples/agent_vectordb_sota_pg.ipynb delete mode 100644 docs/extras/guides/evaluation/examples/openapi_eval.ipynb delete mode 100644 docs/extras/guides/evaluation/examples/qa_benchmarking_pg.ipynb delete mode 100644 docs/extras/guides/evaluation/examples/qa_benchmarking_sota.ipynb delete mode 100644 docs/extras/guides/evaluation/examples/qa_generation.ipynb delete mode 100644 docs/extras/guides/evaluation/examples/sql_qa_benchmarking_chinook.ipynb create mode 100644 docs/extras/guides/evaluation/examples/state_of_the_union.txt create mode 100644 docs/extras/guides/evaluation/string/Untitled.ipynb diff --git a/docs/extras/guides/evaluation/examples/agent_vectordb_sota_pg.ipynb b/docs/extras/guides/evaluation/examples/agent_vectordb_sota_pg.ipynb deleted file mode 100644 index ca812019043a7..0000000000000 --- a/docs/extras/guides/evaluation/examples/agent_vectordb_sota_pg.ipynb +++ /dev/null @@ -1,511 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "984169ca", - "metadata": {}, - "source": [ - "# Agent VectorDB Question Answering Benchmarking\n", - "\n", - "Here we go over how to benchmark performance on a question answering task using an agent to route between multiple vectordatabases.\n", - "\n", - "It is highly recommended that you do any evaluation/benchmarking with tracing enabled. See [here](https://python.langchain.com/guides/tracing/) for an explanation of what tracing is and how to set it up." - ] - }, - { - "cell_type": "markdown", - "id": "8a16b75d", - "metadata": {}, - "source": [ - "## Loading the data\n", - "First, let's load the data." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "5b2d5e98", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Found cached dataset json (/Users/qt/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--agent-vectordb-qa-sota-pg-d3ae24016b514f92/0.0.0/fe5dd6ea2639a6df622901539cb550cf8797e5a6b2dd7af1cf934bed8e233e6e)\n", - "100%|██████████| 1/1 [00:00<00:00, 414.42it/s]\n" - ] - } - ], - "source": [ - "from langchain.evaluation.loading import load_dataset\n", - "\n", - "dataset = load_dataset(\"agent-vectordb-qa-sota-pg\")" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "61375342", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'question': 'What is the purpose of the NATO Alliance?',\n", - " 'answer': 'The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.',\n", - " 'steps': [{'tool': 'State of Union QA System', 'tool_input': None},\n", - " {'tool': None, 'tool_input': 'What is the purpose of the NATO Alliance?'}]}" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "dataset[0]" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "02500304", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'question': 'What is the purpose of YC?',\n", - " 'answer': 'The purpose of YC is to cause startups to be founded that would not otherwise have existed.',\n", - " 'steps': [{'tool': 'Paul Graham QA System', 'tool_input': None},\n", - " {'tool': None, 'tool_input': 'What is the purpose of YC?'}]}" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "dataset[-1]" - ] - }, - { - "cell_type": "markdown", - "id": "4ab6a716", - "metadata": {}, - "source": [ - "## Setting up a chain\n", - "Now we need to create some pipelines for doing question answering. Step one in that is creating indexes over the data in question." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "c18680b5", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.document_loaders import TextLoader\n", - "\n", - "loader = TextLoader(\"../../modules/state_of_the_union.txt\")" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "7f0de2b3", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.indexes import VectorstoreIndexCreator" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "ef84ff99", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Using embedded DuckDB without persistence: data will be transient\n" - ] - } - ], - "source": [ - "vectorstore_sota = (\n", - " VectorstoreIndexCreator(vectorstore_kwargs={\"collection_name\": \"sota\"})\n", - " .from_loaders([loader])\n", - " .vectorstore\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "f0b5d8f6", - "metadata": {}, - "source": [ - "Now we can create a question answering chain." 
- ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "8843cb0c", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.chains import RetrievalQA\n", - "from langchain.llms import OpenAI" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "573719a0", - "metadata": {}, - "outputs": [], - "source": [ - "chain_sota = RetrievalQA.from_chain_type(\n", - " llm=OpenAI(temperature=0),\n", - " chain_type=\"stuff\",\n", - " retriever=vectorstore_sota.as_retriever(),\n", - " input_key=\"question\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e48b03d8", - "metadata": {}, - "source": [ - "Now we do the same for the Paul Graham data." - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "c2dbb014", - "metadata": {}, - "outputs": [], - "source": [ - "loader = TextLoader(\"../../modules/paul_graham_essay.txt\")" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "98d16f08", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Using embedded DuckDB without persistence: data will be transient\n" - ] - } - ], - "source": [ - "vectorstore_pg = (\n", - " VectorstoreIndexCreator(vectorstore_kwargs={\"collection_name\": \"paul_graham\"})\n", - " .from_loaders([loader])\n", - " .vectorstore\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "ec0aab02", - "metadata": {}, - "outputs": [], - "source": [ - "chain_pg = RetrievalQA.from_chain_type(\n", - " llm=OpenAI(temperature=0),\n", - " chain_type=\"stuff\",\n", - " retriever=vectorstore_pg.as_retriever(),\n", - " input_key=\"question\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "76b5f8fb", - "metadata": {}, - "source": [ - "We can now set up an agent to route between them." - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "ade1aafa", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType\n", - "\n", - "tools = [\n", - " Tool(\n", - " name=\"State of Union QA System\",\n", - " func=chain_sota.run,\n", - " description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\",\n", - " ),\n", - " Tool(\n", - " name=\"Paul Graham System\",\n", - " func=chain_pg.run,\n", - " description=\"useful for when you need to answer questions about Paul Graham. Input should be a fully formed question.\",\n", - " ),\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "id": "104853f8", - "metadata": {}, - "outputs": [], - "source": [ - "agent = initialize_agent(\n", - " tools,\n", - " OpenAI(temperature=0),\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", - " max_iterations=4,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "7f036641", - "metadata": {}, - "source": [ - "## Make a prediction\n", - "\n", - "First, we can make predictions one datapoint at a time. 
Doing it at this level of granularity allows use to explore the outputs in detail, and also is a lot cheaper than running over multiple datapoints" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "id": "4664e79f", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.'" - ] - }, - "execution_count": 35, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "agent.run(dataset[0][\"question\"])" - ] - }, - { - "cell_type": "markdown", - "id": "d0c16cd7", - "metadata": {}, - "source": [ - "## Make many predictions\n", - "Now we can make predictions" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "id": "799f6c17", - "metadata": {}, - "outputs": [], - "source": [ - "predictions = []\n", - "predicted_dataset = []\n", - "error_dataset = []\n", - "for data in dataset:\n", - " new_data = {\"input\": data[\"question\"], \"answer\": data[\"answer\"]}\n", - " try:\n", - " predictions.append(agent(new_data))\n", - " predicted_dataset.append(new_data)\n", - " except Exception:\n", - " error_dataset.append(new_data)" - ] - }, - { - "cell_type": "markdown", - "id": "49d969fb", - "metadata": {}, - "source": [ - "## Evaluate performance\n", - "Now we can evaluate the predictions. The first thing we can do is look at them by eye." - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "id": "1d583f03", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'input': 'What is the purpose of the NATO Alliance?',\n", - " 'answer': 'The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.',\n", - " 'output': 'The purpose of the NATO Alliance is to secure peace and stability in Europe after World War 2.'}" - ] - }, - "execution_count": 37, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "predictions[0]" - ] - }, - { - "cell_type": "markdown", - "id": "4783344b", - "metadata": {}, - "source": [ - "Next, we can use a language model to score them programatically" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "id": "d0a9341d", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.evaluation.qa import QAEvalChain" - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "id": "1612dec1", - "metadata": {}, - "outputs": [], - "source": [ - "llm = OpenAI(temperature=0)\n", - "eval_chain = QAEvalChain.from_llm(llm)\n", - "graded_outputs = eval_chain.evaluate(\n", - " predicted_dataset, predictions, question_key=\"input\", prediction_key=\"output\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "79587806", - "metadata": {}, - "source": [ - "We can add in the graded output to the `predictions` dict and then get a count of the grades." 
- ] - }, - { - "cell_type": "code", - "execution_count": 40, - "id": "2a689df5", - "metadata": {}, - "outputs": [], - "source": [ - "for i, prediction in enumerate(predictions):\n", - " prediction[\"grade\"] = graded_outputs[i][\"text\"]" - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "id": "27b61215", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Counter({' CORRECT': 28, ' INCORRECT': 5})" - ] - }, - "execution_count": 41, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from collections import Counter\n", - "\n", - "Counter([pred[\"grade\"] for pred in predictions])" - ] - }, - { - "cell_type": "markdown", - "id": "12fe30f4", - "metadata": {}, - "source": [ - "We can also filter the datapoints to the incorrect examples and look at them." - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "id": "47c692a1", - "metadata": {}, - "outputs": [], - "source": [ - "incorrect = [pred for pred in predictions if pred[\"grade\"] == \" INCORRECT\"]" - ] - }, - { - "cell_type": "code", - "execution_count": 43, - "id": "0ef976c1", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'input': 'What are the four common sense steps that the author suggests to move forward safely?',\n", - " 'answer': 'The four common sense steps suggested by the author to move forward safely are: stay protected with vaccines and treatments, prepare for new variants, end the shutdown of schools and businesses, and stay vigilant.',\n", - " 'output': 'The four common sense steps suggested in the most recent State of the Union address are: cutting the cost of prescription drugs, providing a pathway to citizenship for Dreamers, revising laws so businesses have the workers they need and families don’t wait decades to reunite, and protecting access to health care and preserving a woman’s right to choose.',\n", - " 'grade': ' INCORRECT'}" - ] - }, - "execution_count": 43, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "incorrect[0]" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/docs/extras/guides/evaluation/examples/data_augmented_question_answering.ipynb b/docs/extras/guides/evaluation/examples/data_augmented_question_answering.ipynb index ad19388d9cd00..478939de41d5c 100644 --- a/docs/extras/guides/evaluation/examples/data_augmented_question_answering.ipynb +++ b/docs/extras/guides/evaluation/examples/data_augmented_question_answering.ipynb @@ -9,50 +9,120 @@ "\n", "This notebook uses some generic prompts/language models to evaluate an question answering system that uses other sources of data besides what is in the model. For example, this can be used to evaluate a question answering system over your proprietary data.\n", "\n", + "The overall steps to do this are:\n", + "1. Define your chain for the Q&A system\n", + "2. Define a dataset (as a list of examples)\n", + "3. Evaluate the chain on the dataset\n", + "\n", "## Setup\n", - "Let's set up an example with our favorite example - the state of the union address." 
+ "\n", + "Let's set up an example with our favorite example - the state of the union address. This will be done by:\n", + "1. Loading the text data\n", + "2. Chunking and storing data in the vectorstore\n", + "3. Creating the retriever from the vectorstore\n", + "4. Creating the Q&A chain using an LLM and retriever\n", + "\n", + "First, fetch the example data from the langchain repo." ] }, { "cell_type": "code", "execution_count": 1, - "id": "ab4a6931", - "metadata": {}, + "id": "abd606ab", + "metadata": { + "tags": [] + }, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.vectorstores import Chroma\n", - "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.llms import OpenAI\n", - "from langchain.chains import RetrievalQA" + "import requests\n", + "\n", + "state_of_the_union_url = \"https://raw.githubusercontent.com/langchain-ai/langchain/76102971c056bb277bf394068c98fb05ee2fb07d/docs/extras/modules/state_of_the_union.txt\"\n", + "with open(\"state_of_the_union.txt\", \"w\") as f:\n", + " f.write(requests.get(state_of_the_union_url).text)" + ] + }, + { + "cell_type": "markdown", + "id": "7b37ee26-cfa8-4a14-9c77-e6393eeed94e", + "metadata": {}, + "source": [ + "#### Chunk the text data\n", + "\n", + "Use the `CharacterTextSplitter` to chunk the text data using naive character-length splitting." ] }, { "cell_type": "code", "execution_count": 2, - "id": "4fdc211d", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running Chroma using direct local API.\n", - "Using DuckDB in-memory for database. Data will be transient.\n" - ] - } - ], + "id": "185406b0-204b-479b-bc16-a0192a27b3cb", + "metadata": { + "tags": [] + }, + "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", + "from langchain.text_splitter import CharacterTextSplitter\n", "\n", - "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", - "documents = loader.load()\n", + "loader = TextLoader(\"state_of_the_union.txt\")\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", - "texts = text_splitter.split_documents(documents)\n", + "texts = text_splitter.split_documents(loader.load())" + ] + }, + { + "cell_type": "markdown", + "id": "0f4419eb-f00c-4367-89a5-8208f19e2cae", + "metadata": {}, + "source": [ + "#### Create Retriever\n", + "\n", + "Select the `embeddings` to use for vectorizing the text chunks, and select the vectorstore to drive the retriever used for question answering." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "5e740bb9-9d5f-49c6-8f91-a0a1c42bc900", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.vectorstores import Chroma\n", + "\n", "\n", "embeddings = OpenAIEmbeddings()\n", "docsearch = Chroma.from_documents(texts, embeddings)\n", - "qa = RetrievalQA.from_llm(llm=OpenAI(), retriever=docsearch.as_retriever())" + "retriever = docsearch.as_retriever()" + ] + }, + { + "cell_type": "markdown", + "id": "bc159f62-d1b7-4307-8e82-d2bcd192052d", + "metadata": {}, + "source": [ + "#### Create QA Chain\n", + "\n", + "We will use GPT turbo for this example." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "4fdc211d", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.chains import RetrievalQA\n", + "\n", + "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n", + "qa = RetrievalQA.from_llm(\n", + " llm=llm,\n", + " retriever=retriever,\n", + ")" ] }, { @@ -61,17 +131,22 @@ "metadata": {}, "source": [ "## Examples\n", - "Now we need some examples to evaluate. We can do this in two ways:\n", + "\n", + "Now we need some examples to evaluate. There are two basic ways to do this:\n", "\n", "1. Hard code some examples ourselves\n", - "2. Generate examples automatically, using a language model" + "2. Generate examples automatically, using a language model\n", + "\n", + "If you have example data from prior usage, this is often the best. When you're just starting out, you can bootstrap a dataset using the `QAGenerationChain` or your own custom `LLMChain`." ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 17, "id": "3459b001", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [], "source": [ "# Hard-coded examples\n", @@ -86,62 +161,41 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 6, "id": "b9c3fa75", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [], "source": [ "# Generated examples\n", "from langchain.evaluation.qa import QAGenerateChain\n", "\n", - "example_gen_chain = QAGenerateChain.from_llm(OpenAI())" + "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n", + "example_gen_chain = QAGenerateChain.from_llm(llm)" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 16, "id": "c24543a9", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [], "source": [ - "new_examples = example_gen_chain.apply_and_parse([{\"doc\": t} for t in texts[:5]])" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "a2d27560", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'query': 'According to the document, what did Vladimir Putin miscalculate?',\n", - " 'answer': 'He miscalculated that he could roll into Ukraine and the world would roll over.'},\n", - " {'query': 'Who is the Ukrainian Ambassador to the United States?',\n", - " 'answer': 'The Ukrainian Ambassador to the United States is here tonight.'},\n", - " {'query': 'How many countries were part of the coalition formed to confront Putin?',\n", - " 'answer': '27 members of the European Union, France, Germany, Italy, the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland.'},\n", - " {'query': 'What action is the U.S. Department of Justice taking to target Russian oligarchs?',\n", - " 'answer': 'The U.S. 
Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs and joining with European allies to find and seize their yachts, luxury apartments, and private jets.'},\n", - " {'query': 'How much direct assistance is the United States providing to Ukraine?',\n", - " 'answer': 'The United States is providing more than $1 Billion in direct assistance to Ukraine.'}]" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "new_examples" + "new_examples = [\n", + " ex[example_gen_chain.output_key] for ex in example_gen_chain.apply([{\"doc\": t} for t in texts[:5]])\n", + "]" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 19, "id": "558da6f3", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [], "source": [ "# Combine examples\n", @@ -154,113 +208,146 @@ "metadata": {}, "source": [ "## Evaluate\n", - "Now that we have examples, we can use the question answering evaluator to evaluate our question answering chain." + "\n", + "Now that we have examples, it's time to evaluate the chain. Generate predictions and then use an evaluator to grade its performance." ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 20, "id": "782169a5", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [], "source": [ - "from langchain.evaluation.qa import QAEvalChain" + "predictions = qa.apply(examples)" + ] + }, + { + "cell_type": "markdown", + "id": "b7485d36-aafc-40c1-b335-0f67b43ff052", + "metadata": { + "tags": [] + }, + "source": [ + "Use the [qa](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.qa.eval_chain.QAEvalChain.html#langchain.evaluation.qa.eval_chain.QAEvalChain) evaluator to grade correctness of the question answering chain. 
For more information on evaluators, check out the [reference docs](https://api.python.langchain.com/en/latest/api_reference.html#module-langchain.evaluation)" ] }, { "cell_type": "code", - "execution_count": 9, - "id": "1bb77416", - "metadata": {}, + "execution_count": 37, + "id": "f718e437-f410-4d95-a288-73fe01cb5822", + "metadata": { + "tags": [] + }, "outputs": [], "source": [ - "predictions = qa.apply(examples)" + "from langchain.evaluation import load_evaluator\n", + "\n", + "qa_evaluator = load_evaluator(\"qa\")" ] }, { - "cell_type": "code", - "execution_count": 10, - "id": "bcd0ad7f", + "cell_type": "markdown", + "id": "eb4d739c-2169-40ba-b19b-44d27d900788", "metadata": {}, - "outputs": [], "source": [ - "llm = OpenAI(temperature=0)\n", - "eval_chain = QAEvalChain.from_llm(llm)" + "***Use the `tabulate` package for pretty printing the results.***" ] }, { "cell_type": "code", - "execution_count": 11, - "id": "2e6af79a", + "execution_count": null, + "id": "0c6dfbfc-eb2a-4356-a91b-76e8494ee794", "metadata": {}, "outputs": [], "source": [ - "graded_outputs = eval_chain.evaluate(examples, predictions)" + "# %pip install tabulate" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 45, "id": "32fac2dc", - "metadata": {}, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from tqdm import tqdm\n", + "from tabulate import tabulate\n", + "\n", + "def truncate(s, n):\n", + " \"\"\"Truncate `s` to `n` characters.\"\"\"\n", + " return (s[:n] + '..') if len(s) > n else s\n", + "\n", + "def print_results(examples, predictions, evaluators):\n", + " max_length = 80\n", + " table = [(\"Example\", \"Evaluator\", \"Value\", \"Score\", \"Query\", \"Prediction\", \"Answer\")]\n", + " for i, (eg, pred) in tqdm(enumerate(zip(examples, predictions))):\n", + " for evaluator in evaluators:\n", + " verdict = evaluator.evaluate_strings(\n", + " input=eg['query'],\n", + " prediction=pred['result'],\n", + " reference=eg['answer'],\n", + " )\n", + " table.append(\n", + " (f\"{i}\",\n", + " f\"{evaluator.evaluation_name}\",\n", + " f\"{verdict['value']}\",\n", + " f\"{verdict['score']}\",\n", + " f\"{truncate(eg['query'], max_length)}\",\n", + " f\"{truncate(pred['result'], max_length)}\",\n", + " f\"{truncate(eg['answer'], max_length)}\")\n", + " )\n", + " print(tabulate(table, headers=\"firstrow\", tablefmt='grid'))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "id": "7722b9e6-c915-4c8a-9c65-38c8778011fe", + "metadata": { + "tags": [] + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Example 0:\n", - "Question: What did the president say about Ketanji Brown Jackson\n", - "Real Answer: He praised her legal ability and said he nominated her for the supreme court.\n", - "Predicted Answer: The president said that she is one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, and from a family of public school educators and police officers. 
He also said that she is a consensus builder and that she has received a broad range of support from the Fraternal Order of Police to former judges appointed by both Democrats and Republicans.\n", - "Predicted Grade: CORRECT\n", - "\n", - "Example 1:\n", - "Question: What did the president say about Michael Jackson\n", - "Real Answer: Nothing\n", - "Predicted Answer: The president did not mention Michael Jackson in this speech.\n", - "Predicted Grade: CORRECT\n", - "\n", - "Example 2:\n", - "Question: According to the document, what did Vladimir Putin miscalculate?\n", - "Real Answer: He miscalculated that he could roll into Ukraine and the world would roll over.\n", - "Predicted Answer: Putin miscalculated that the world would roll over when he rolled into Ukraine.\n", - "Predicted Grade: CORRECT\n", - "\n", - "Example 3:\n", - "Question: Who is the Ukrainian Ambassador to the United States?\n", - "Real Answer: The Ukrainian Ambassador to the United States is here tonight.\n", - "Predicted Answer: I don't know.\n", - "Predicted Grade: INCORRECT\n", - "\n", - "Example 4:\n", - "Question: How many countries were part of the coalition formed to confront Putin?\n", - "Real Answer: 27 members of the European Union, France, Germany, Italy, the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland.\n", - "Predicted Answer: The coalition included freedom-loving nations from Europe and the Americas to Asia and Africa, 27 members of the European Union including France, Germany, Italy, the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland.\n", - "Predicted Grade: INCORRECT\n", - "\n", - "Example 5:\n", - "Question: What action is the U.S. Department of Justice taking to target Russian oligarchs?\n", - "Real Answer: The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs and joining with European allies to find and seize their yachts, luxury apartments, and private jets.\n", - "Predicted Answer: The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs and to find and seize their yachts, luxury apartments, and private jets.\n", - "Predicted Grade: INCORRECT\n", - "\n", - "Example 6:\n", - "Question: How much direct assistance is the United States providing to Ukraine?\n", - "Real Answer: The United States is providing more than $1 Billion in direct assistance to Ukraine.\n", - "Predicted Answer: The United States is providing more than $1 billion in direct assistance to Ukraine.\n", - "Predicted Grade: CORRECT\n", - "\n" + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| Example | Evaluator | Value | Score | Query | Prediction | Answer |\n", + "+===========+=============+===========+=========+====================================================================================+====================================================================================+====================================================================================+\n", + "| 0 | correctness | CORRECT | 1 | What did the president say about Ketanji Brown Jackson | The president said that Ketanji Brown Jackson is one of our nation's top legal m.. 
| He praised her legal ability and said he nominated her for the supreme court. |\n", + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 1 | correctness | CORRECT | 1 | What did the president say about Michael Jackson | There is no mention of Michael Jackson in the provided context. | Nothing |\n", + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 2 | correctness | CORRECT | 1 | Who did Russia's Vladimir Putin seek to shake the foundations of? | Russia's Vladimir Putin sought to shake the foundations of the free world. | Russia's Vladimir Putin sought to shake the foundations of the free world. |\n", + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 3 | correctness | CORRECT | 1 | According to the document, why was the NATO Alliance created? | The NATO Alliance was created to secure peace and stability in Europe after Worl.. | The NATO Alliance was created to secure peace and stability in Europe after Worl.. |\n", + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 4 | correctness | CORRECT | 1 | What actions did the United States take to confront Putin's aggression against U.. | The United States took several actions to confront Putin's aggression against Uk.. | The United States spent months building a coalition of other freedom-loving nati.. |\n", + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 5 | correctness | CORRECT | 1 | What actions is the United States taking to put pressure on Russia and support U.. | According to the document, the United States is taking several actions to put pr.. | The United States is enforcing powerful economic sanctions, cutting off Russia's.. |\n", + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 6 | correctness | INCORRECT | 0 | According to the document, what actions will the United States take to further i.. | According to the document, the United States will take the following actions to .. | The United States will join its allies in closing off American air space to all .. 
|\n", + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 7 | correctness | CORRECT | 1 | Who did Russia's Vladimir Putin seek to shake the foundations of? | Russia's Vladimir Putin sought to shake the foundations of the free world. | Russia's Vladimir Putin sought to shake the foundations of the free world. |\n", + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 8 | correctness | CORRECT | 1 | According to the document, why was the NATO Alliance created? | The NATO Alliance was created to secure peace and stability in Europe after Worl.. | The NATO Alliance was created to secure peace and stability in Europe after Worl.. |\n", + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 9 | correctness | CORRECT | 1 | What actions did the United States take to confront Putin's aggression against U.. | The United States took several actions to confront Putin's aggression against Uk.. | The United States spent months building a coalition of other freedom-loving nati.. |\n", + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 10 | correctness | CORRECT | 1 | What actions is the United States taking to put pressure on Russia and support U.. | According to the document, the United States is taking several actions to put pr.. | The United States is enforcing powerful economic sanctions, cutting off Russia's.. |\n", + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 11 | correctness | INCORRECT | 0 | According to the document, what actions will the United States take to further i.. | According to the document, the United States will take the following actions to .. | The United States will join its allies in closing off American air space to all .. 
|\n", + "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n" ] } ], "source": [ - "for i, eg in enumerate(examples):\n", - " print(f\"Example {i}:\")\n", - " print(\"Question: \" + predictions[i][\"query\"])\n", - " print(\"Real Answer: \" + predictions[i][\"answer\"])\n", - " print(\"Predicted Answer: \" + predictions[i][\"result\"])\n", - " print(\"Predicted Grade: \" + graded_outputs[i][\"text\"])\n", - " print()" + "print_results(examples, predictions, [qa_evaluator])" ] }, { @@ -270,154 +357,97 @@ "source": [ "## Evaluate with Other Metrics\n", "\n", - "In addition to predicting whether the answer is correct or incorrect using a language model, we can also use other metrics to get a more nuanced view on the quality of the answers. To do so, we can use the [Critique](https://docs.inspiredco.ai/critique/) library, which allows for simple calculation of various metrics over generated text.\n", + "In addition to predicting whether the answer is correct or incorrect using a language model, we can also use other evalutors, such as the [labeled_criteria](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.criteria.eval_chain.LabeledCriteriaEvalChain.html#langchain.evaluation.criteria.eval_chain.LabeledCriteriaEvalChain) evaluator.\n", "\n", - "First you can get an API key from the [Inspired Cognition Dashboard](https://dashboard.inspiredco.ai) and do some setup:\n", - "\n", - "```bash\n", - "export INSPIREDCO_API_KEY=\"...\"\n", - "pip install inspiredco\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "bd0b01dc", - "metadata": {}, - "outputs": [], - "source": [ - "import inspiredco.critique\n", - "import os\n", - "\n", - "critique = inspiredco.critique.Critique(api_key=os.environ[\"INSPIREDCO_API_KEY\"])" - ] - }, - { - "cell_type": "markdown", - "id": "4f52629e", - "metadata": {}, - "source": [ - "Then run the following code to set up the configuration and calculate the [ROUGE](https://docs.inspiredco.ai/critique/metric_rouge.html), [chrf](https://docs.inspiredco.ai/critique/metric_chrf.html), [BERTScore](https://docs.inspiredco.ai/critique/metric_bert_score.html), and [UniEval](https://docs.inspiredco.ai/critique/metric_uni_eval.html) (you can choose [other metrics](https://docs.inspiredco.ai/critique/metrics.html) too):" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "84a0ba21", - "metadata": {}, - "outputs": [], - "source": [ - "metrics = {\n", - " \"rouge\": {\n", - " \"metric\": \"rouge\",\n", - " \"config\": {\"variety\": \"rouge_l\"},\n", - " },\n", - " \"chrf\": {\n", - " \"metric\": \"chrf\",\n", - " \"config\": {},\n", - " },\n", - " \"bert_score\": {\n", - " \"metric\": \"bert_score\",\n", - " \"config\": {\"model\": \"bert-base-uncased\"},\n", - " },\n", - " \"uni_eval\": {\n", - " \"metric\": \"uni_eval\",\n", - " \"config\": {\"task\": \"summarization\", \"evaluation_aspect\": \"relevance\"},\n", - " },\n", - "}" + "Let's evaluate based on conciseness and a custom 'pedagogical skill'." 
] }, { "cell_type": "code", - "execution_count": 15, - "id": "3b9a4056", - "metadata": {}, + "execution_count": 33, + "id": "0cb558f6-449c-4f2b-9b14-31868581473a", + "metadata": { + "tags": [] + }, "outputs": [], "source": [ - "critique_data = [\n", - " {\"target\": pred[\"result\"], \"references\": [pred[\"answer\"]]} for pred in predictions\n", - "]\n", - "eval_results = {\n", - " k: critique.evaluate(dataset=critique_data, metric=v[\"metric\"], config=v[\"config\"])\n", - " for k, v in metrics.items()\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "6f0ae799", - "metadata": {}, - "source": [ - "Finally, we can print out the results. We can see that overall the scores are higher when the output is semantically correct, and also when the output closely matches with the gold-standard answer." + "evaluators = [\n", + " load_evaluator(\"labeled_criteria\", criteria=\"conciseness\"),\n", + " load_evaluator(\"labeled_criteria\", criteria={\n", + " \"pedagogical skill\": \"Did the submission propertly interpret inquiries, generate informative and understandable responses,\"\n", + " \" and present information in a manner that promotes strong thinking and problem-solving.\"\n", + " }),\n", + "]" ] }, { "cell_type": "code", - "execution_count": 16, - "id": "b51edcf4", - "metadata": {}, + "execution_count": 47, + "id": "11c1b474-90ce-49c5-8f4a-6ebca5669438", + "metadata": { + "tags": [] + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Example 0:\n", - "Question: What did the president say about Ketanji Brown Jackson\n", - "Real Answer: He praised her legal ability and said he nominated her for the supreme court.\n", - "Predicted Answer: The president said that she is one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, and from a family of public school educators and police officers. 
He also said that she is a consensus builder and that she has received a broad range of support from the Fraternal Order of Police to former judges appointed by both Democrats and Republicans.\n", - "Predicted Scores: rouge=0.0941, chrf=0.2001, bert_score=0.5219, uni_eval=0.9043\n", - "\n", - "Example 1:\n", - "Question: What did the president say about Michael Jackson\n", - "Real Answer: Nothing\n", - "Predicted Answer: The president did not mention Michael Jackson in this speech.\n", - "Predicted Scores: rouge=0.0000, chrf=0.1087, bert_score=0.3486, uni_eval=0.7802\n", - "\n", - "Example 2:\n", - "Question: According to the document, what did Vladimir Putin miscalculate?\n", - "Real Answer: He miscalculated that he could roll into Ukraine and the world would roll over.\n", - "Predicted Answer: Putin miscalculated that the world would roll over when he rolled into Ukraine.\n", - "Predicted Scores: rouge=0.5185, chrf=0.6955, bert_score=0.8421, uni_eval=0.9578\n", - "\n", - "Example 3:\n", - "Question: Who is the Ukrainian Ambassador to the United States?\n", - "Real Answer: The Ukrainian Ambassador to the United States is here tonight.\n", - "Predicted Answer: I don't know.\n", - "Predicted Scores: rouge=0.0000, chrf=0.0375, bert_score=0.3159, uni_eval=0.7493\n", - "\n", - "Example 4:\n", - "Question: How many countries were part of the coalition formed to confront Putin?\n", - "Real Answer: 27 members of the European Union, France, Germany, Italy, the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland.\n", - "Predicted Answer: The coalition included freedom-loving nations from Europe and the Americas to Asia and Africa, 27 members of the European Union including France, Germany, Italy, the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland.\n", - "Predicted Scores: rouge=0.7419, chrf=0.8602, bert_score=0.8388, uni_eval=0.0669\n", - "\n", - "Example 5:\n", - "Question: What action is the U.S. Department of Justice taking to target Russian oligarchs?\n", - "Real Answer: The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs and joining with European allies to find and seize their yachts, luxury apartments, and private jets.\n", - "Predicted Answer: The U.S. 
Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs and to find and seize their yachts, luxury apartments, and private jets.\n", - "Predicted Scores: rouge=0.9412, chrf=0.8687, bert_score=0.9607, uni_eval=0.9718\n", - "\n", - "Example 6:\n", - "Question: How much direct assistance is the United States providing to Ukraine?\n", - "Real Answer: The United States is providing more than $1 Billion in direct assistance to Ukraine.\n", - "Predicted Answer: The United States is providing more than $1 billion in direct assistance to Ukraine.\n", - "Predicted Scores: rouge=1.0000, chrf=0.9483, bert_score=1.0000, uni_eval=0.9734\n", - "\n" + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| Example | Evaluator | Value | Score | Query | Prediction | Answer |\n", + "+===========+===================+=========+=========+====================================================================================+====================================================================================+====================================================================================+\n", + "| 0 | conciseness | Y | 1 | What did the president say about Ketanji Brown Jackson | The president said that Ketanji Brown Jackson is one of our nation's top legal m.. | He praised her legal ability and said he nominated her for the supreme court. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 0 | pedagogical skill | Y | 1 | What did the president say about Ketanji Brown Jackson | The president said that Ketanji Brown Jackson is one of our nation's top legal m.. | He praised her legal ability and said he nominated her for the supreme court. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 1 | conciseness | Y | 1 | What did the president say about Michael Jackson | There is no mention of Michael Jackson in the provided context. | Nothing |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 1 | pedagogical skill | N | 0 | What did the president say about Michael Jackson | There is no mention of Michael Jackson in the provided context. 
| Nothing |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 2 | conciseness | Y | 1 | Who did Russia's Vladimir Putin seek to shake the foundations of? | Russia's Vladimir Putin sought to shake the foundations of the free world. | Russia's Vladimir Putin sought to shake the foundations of the free world. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 2 | pedagogical skill | Y | 1 | Who did Russia's Vladimir Putin seek to shake the foundations of? | Russia's Vladimir Putin sought to shake the foundations of the free world. | Russia's Vladimir Putin sought to shake the foundations of the free world. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 3 | conciseness | Y | 1 | According to the document, why was the NATO Alliance created? | The NATO Alliance was created to secure peace and stability in Europe after Worl.. | The NATO Alliance was created to secure peace and stability in Europe after Worl.. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 3 | pedagogical skill | Y | 1 | According to the document, why was the NATO Alliance created? | The NATO Alliance was created to secure peace and stability in Europe after Worl.. | The NATO Alliance was created to secure peace and stability in Europe after Worl.. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 4 | conciseness | Y | 1 | What actions did the United States take to confront Putin's aggression against U.. | The United States took several actions to confront Putin's aggression against Uk.. | The United States spent months building a coalition of other freedom-loving nati.. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 4 | pedagogical skill | Y | 1 | What actions did the United States take to confront Putin's aggression against U.. | The United States took several actions to confront Putin's aggression against Uk.. 
| The United States spent months building a coalition of other freedom-loving nati.. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 5 | conciseness | N | 0 | What actions is the United States taking to put pressure on Russia and support U.. | According to the document, the United States is taking several actions to put pr.. | The United States is enforcing powerful economic sanctions, cutting off Russia's.. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 5 | pedagogical skill | Y | 1 | What actions is the United States taking to put pressure on Russia and support U.. | According to the document, the United States is taking several actions to put pr.. | The United States is enforcing powerful economic sanctions, cutting off Russia's.. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 6 | conciseness | N | 0 | According to the document, what actions will the United States take to further i.. | According to the document, the United States will take the following actions to .. | The United States will join its allies in closing off American air space to all .. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 6 | pedagogical skill | Y | 1 | According to the document, what actions will the United States take to further i.. | According to the document, the United States will take the following actions to .. | The United States will join its allies in closing off American air space to all .. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 7 | conciseness | Y | 1 | Who did Russia's Vladimir Putin seek to shake the foundations of? | Russia's Vladimir Putin sought to shake the foundations of the free world. | Russia's Vladimir Putin sought to shake the foundations of the free world. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 7 | pedagogical skill | Y | 1 | Who did Russia's Vladimir Putin seek to shake the foundations of? 
| Russia's Vladimir Putin sought to shake the foundations of the free world. | Russia's Vladimir Putin sought to shake the foundations of the free world. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 8 | conciseness | Y | 1 | According to the document, why was the NATO Alliance created? | The NATO Alliance was created to secure peace and stability in Europe after Worl.. | The NATO Alliance was created to secure peace and stability in Europe after Worl.. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 8 | pedagogical skill | Y | 1 | According to the document, why was the NATO Alliance created? | The NATO Alliance was created to secure peace and stability in Europe after Worl.. | The NATO Alliance was created to secure peace and stability in Europe after Worl.. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 9 | conciseness | N | 0 | What actions did the United States take to confront Putin's aggression against U.. | The United States took several actions to confront Putin's aggression against Uk.. | The United States spent months building a coalition of other freedom-loving nati.. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 9 | pedagogical skill | Y | 1 | What actions did the United States take to confront Putin's aggression against U.. | The United States took several actions to confront Putin's aggression against Uk.. | The United States spent months building a coalition of other freedom-loving nati.. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 10 | conciseness | N | 0 | What actions is the United States taking to put pressure on Russia and support U.. | According to the document, the United States is taking several actions to put pr.. | The United States is enforcing powerful economic sanctions, cutting off Russia's.. 
|\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 10 | pedagogical skill | Y | 1 | What actions is the United States taking to put pressure on Russia and support U.. | According to the document, the United States is taking several actions to put pr.. | The United States is enforcing powerful economic sanctions, cutting off Russia's.. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 11 | conciseness | N | 0 | According to the document, what actions will the United States take to further i.. | According to the document, the United States will take the following actions to .. | The United States will join its allies in closing off American air space to all .. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", + "| 11 | pedagogical skill | Y | 1 | According to the document, what actions will the United States take to further i.. | According to the document, the United States will take the following actions to .. | The United States will join its allies in closing off American air space to all .. |\n", + "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n" ] } ], "source": [ - "for i, eg in enumerate(examples):\n", - " score_string = \", \".join(\n", - " [f\"{k}={v['examples'][i]['value']:.4f}\" for k, v in eval_results.items()]\n", - " )\n", - " print(f\"Example {i}:\")\n", - " print(\"Question: \" + predictions[i][\"query\"])\n", - " print(\"Real Answer: \" + predictions[i][\"answer\"])\n", - " print(\"Predicted Answer: \" + predictions[i][\"result\"])\n", - " print(\"Predicted Scores: \" + score_string)\n", - " print()" + "print_results(examples, predictions, evaluators)" ] } ], diff --git a/docs/extras/guides/evaluation/examples/openapi_eval.ipynb b/docs/extras/guides/evaluation/examples/openapi_eval.ipynb deleted file mode 100644 index de65b553d4243..0000000000000 --- a/docs/extras/guides/evaluation/examples/openapi_eval.ipynb +++ /dev/null @@ -1,975 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "692f3256", - "metadata": {}, - "source": [ - "# Evaluating an OpenAPI Chain\n", - "\n", - "This notebook goes over ways to semantically evaluate an [OpenAPI Chain](/docs/modules/chains/additional/openapi.html), which calls an endpoint defined by the OpenAPI specification using purely natural language." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "a457106d", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.tools import OpenAPISpec, APIOperation\n", - "from langchain.chains import OpenAPIEndpointChain, LLMChain\n", - "from langchain.requests import Requests\n", - "from langchain.llms import OpenAI" - ] - }, - { - "cell_type": "markdown", - "id": "2c3b0954", - "metadata": {}, - "source": [ - "## Load the API Chain\n", - "\n", - "Load a wrapper of the spec (so we can work with it more easily). You can load from a url or from a local file." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "794142ba", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n" - ] - } - ], - "source": [ - "# Load and parse the OpenAPI Spec\n", - "spec = OpenAPISpec.from_url(\n", - " \"https://www.klarna.com/us/shopping/public/openai/v0/api-docs/\"\n", - ")\n", - "# Load a single endpoint operation\n", - "operation = APIOperation.from_openapi_spec(spec, \"/public/openai/v0/products\", \"get\")\n", - "verbose = False\n", - "# Select any LangChain LLM\n", - "llm = OpenAI(temperature=0, max_tokens=1000)\n", - "# Create the endpoint chain\n", - "api_chain = OpenAPIEndpointChain.from_api_operation(\n", - " operation,\n", - " llm,\n", - " requests=Requests(),\n", - " verbose=verbose,\n", - " return_intermediate_steps=True, # Return request and response text\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "6c05ba5b", - "metadata": {}, - "source": [ - "### *Optional*: Generate Input Questions and Request Ground Truth Queries\n", - "\n", - "See [Generating Test Datasets](#Generating-Test-Datasets) at the end of this notebook for more details." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "a0c0cb7e", - "metadata": {}, - "outputs": [], - "source": [ - "# import re\n", - "# from langchain.prompts import PromptTemplate\n", - "\n", - "# template = \"\"\"Below is a service description:\n", - "\n", - "# {spec}\n", - "\n", - "# Imagine you're a new user trying to use {operation} through a search bar. What are 10 different things you want to request?\n", - "# Wants/Questions:\n", - "# 1. \"\"\"\n", - "\n", - "# prompt = PromptTemplate.from_template(template)\n", - "\n", - "# generation_chain = LLMChain(llm=llm, prompt=prompt)\n", - "\n", - "# questions_ = generation_chain.run(spec=operation.to_typescript(), operation=operation.operation_id).split('\\n')\n", - "# # Strip preceding numeric bullets\n", - "# questions = [re.sub(r'^\\d+\\. 
', '', q).strip() for q in questions_]\n", - "# questions" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "f3d767ef", - "metadata": {}, - "outputs": [], - "source": [ - "# ground_truths = [\n", - "# {\"q\": ...} # What are the best queries for each input?\n", - "# ]" - ] - }, - { - "cell_type": "markdown", - "id": "81098a05", - "metadata": {}, - "source": [ - "## Run the API Chain\n", - "\n", - "The two simplest questions a user of the API Chain are:\n", - "- Did the chain succesfully access the endpoint?\n", - "- Did the action accomplish the correct result?\n" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "64bc7ed9", - "metadata": {}, - "outputs": [], - "source": [ - "from collections import defaultdict\n", - "\n", - "# Collect metrics to report at completion\n", - "scores = defaultdict(list)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "dfd2d09f", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Found cached dataset json (/Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--openapi-chain-klarna-products-get-5d03362007667626/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51)\n" - ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "10932c9c139941d1a8be1a798f29e923", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/1 [00:00 Question: {question}\n", - "\n", - "The query you know you should be executing against the API is:\n", - "\n", - "> Query: {truth_query}\n", - "\n", - "Is the following predicted query semantically the same (eg likely to produce the same answer)?\n", - "\n", - "> Predicted Query: {predict_query}\n", - "\n", - "Please give the Predicted Query a grade of either an A, B, C, D, or F, along with an explanation of why. End the evaluation with 'Final Grade: '\n", - "\n", - "> Explanation: Let's think step by step.\"\"\"\n", - "\n", - "prompt = PromptTemplate.from_template(template)\n", - "\n", - "eval_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "8cc1b1db", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[' The original query is asking for all iPhone models, so the \"q\" parameter is correct. The \"max_price\" parameter is also correct, as it is set to null, meaning that no maximum price is set. The predicted query adds two additional parameters, \"size\" and \"min_price\". The \"size\" parameter is not necessary, as it is not relevant to the question being asked. The \"min_price\" parameter is also not necessary, as it is not relevant to the question being asked and it is set to 0, which is the default value. Therefore, the predicted query is not semantically the same as the original query and is not likely to produce the same answer. Final Grade: D',\n", - " ' The original query is asking for laptops with a maximum price of 300. The predicted query is asking for laptops with a minimum price of 0 and a maximum price of 500. This means that the predicted query is likely to return more results than the original query, as it is asking for a wider range of prices. Therefore, the predicted query is not semantically the same as the original query, and it is not likely to produce the same answer. Final Grade: F',\n", - " \" The first two parameters are the same, so that's good. 
The third parameter is different, but it's not necessary for the query, so that's not a problem. The fourth parameter is the problem. The original query specifies a maximum price of 500, while the predicted query specifies a maximum price of null. This means that the predicted query will not limit the results to the cheapest gaming PCs, so it is not semantically the same as the original query. Final Grade: F\",\n", - " ' The original query is asking for tablets under $400, so the first two parameters are correct. The predicted query also includes the parameters \"size\" and \"min_price\", which are not necessary for the original query. The \"size\" parameter is not relevant to the question, and the \"min_price\" parameter is redundant since the original query already specifies a maximum price. Therefore, the predicted query is not semantically the same as the original query and is not likely to produce the same answer. Final Grade: D',\n", - " ' The original query is asking for headphones with no maximum price, so the predicted query is not semantically the same because it has a maximum price of 500. The predicted query also has a size of 10, which is not specified in the original query. Therefore, the predicted query is not semantically the same as the original query. Final Grade: F',\n", - " \" The original query is asking for the top rated laptops, so the 'size' parameter should be set to 10 to get the top 10 results. The 'min_price' parameter should be set to 0 to get results from all price ranges. The 'max_price' parameter should be set to null to get results from all price ranges. The 'q' parameter should be set to 'laptop' to get results related to laptops. All of these parameters are present in the predicted query, so it is semantically the same as the original query. Final Grade: A\",\n", - " ' The original query is asking for shoes, so the predicted query is asking for the same thing. The original query does not specify a size, so the predicted query is not adding any additional information. The original query does not specify a price range, so the predicted query is adding additional information that is not necessary. Therefore, the predicted query is not semantically the same as the original query and is likely to produce different results. Final Grade: D',\n", - " ' The original query is asking for a skirt, so the predicted query is asking for the same thing. The predicted query also adds additional parameters such as size and price range, which could help narrow down the results. However, the size parameter is not necessary for the query to be successful, and the price range is too narrow. Therefore, the predicted query is not as effective as the original query. Final Grade: C',\n", - " ' The first part of the query is asking for a Desktop PC, which is the same as the original query. The second part of the query is asking for a size of 10, which is not relevant to the original query. The third part of the query is asking for a minimum price of 0, which is not relevant to the original query. The fourth part of the query is asking for a maximum price of null, which is not relevant to the original query. Therefore, the Predicted Query does not semantically match the original query and is not likely to produce the same answer. Final Grade: F',\n", - " ' The original query is asking for cameras with a maximum price of 300. The predicted query is asking for cameras with a maximum price of 500. 
This means that the predicted query is likely to return more results than the original query, which may include cameras that are not within the budget range. Therefore, the predicted query is not semantically the same as the original query and does not answer the original question. Final Grade: F']" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "request_eval_results = []\n", - "for question, predict_query, truth_query in list(\n", - " zip(questions, predicted_queries, truth_queries)\n", - "):\n", - " eval_output = eval_chain.run(\n", - " question=question,\n", - " truth_query=truth_query,\n", - " predict_query=predict_query,\n", - " )\n", - " request_eval_results.append(eval_output)\n", - "request_eval_results" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "0d76f8ba", - "metadata": {}, - "outputs": [], - "source": [ - "import re\n", - "from typing import List\n", - "\n", - "\n", - "# Parse the evaluation chain responses into a rubric\n", - "def parse_eval_results(results: List[str]) -> List[float]:\n", - " rubric = {\"A\": 1.0, \"B\": 0.75, \"C\": 0.5, \"D\": 0.25, \"F\": 0}\n", - " return [rubric[re.search(r\"Final Grade: (\\w+)\", res).group(1)] for res in results]\n", - "\n", - "\n", - "parsed_results = parse_eval_results(request_eval_results)\n", - "# Collect the scores for a final evaluation table\n", - "scores[\"request_synthesizer\"].extend(parsed_results)" - ] - }, - { - "cell_type": "markdown", - "id": "6f3ee8ea", - "metadata": {}, - "source": [ - "## Evaluate the Response Chain\n", - "\n", - "The second component translated the structured API response to a natural language response.\n", - "Evaluate this against the user's original question." - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "8b97847c", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.prompts import PromptTemplate\n", - "\n", - "template = \"\"\"You are trying to answer the following question by querying an API:\n", - "\n", - "> Question: {question}\n", - "\n", - "The API returned a response of:\n", - "\n", - "> API result: {api_response}\n", - "\n", - "Your response to the user: {answer}\n", - "\n", - "Please evaluate the accuracy and utility of your response to the user's original question, conditioned on the information available.\n", - "Give a letter grade of either an A, B, C, D, or F, along with an explanation of why. End the evaluation with 'Final Grade: '\n", - "\n", - "> Explanation: Let's think step by step.\"\"\"\n", - "\n", - "prompt = PromptTemplate.from_template(template)\n", - "\n", - "eval_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "642852ce", - "metadata": {}, - "outputs": [], - "source": [ - "# Extract the API responses from the chain\n", - "api_responses = [\n", - " output[\"intermediate_steps\"][\"response_text\"] for output in chain_outputs\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "08a5eb4f", - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "data": { - "text/plain": [ - "[' The original query is asking for all iPhone models, so the \"q\" parameter is correct. The \"max_price\" parameter is also correct, as it is set to null, meaning that no maximum price is set. The predicted query adds two additional parameters, \"size\" and \"min_price\". The \"size\" parameter is not necessary, as it is not relevant to the question being asked. 
The \"min_price\" parameter is also not necessary, as it is not relevant to the question being asked and it is set to 0, which is the default value. Therefore, the predicted query is not semantically the same as the original query and is not likely to produce the same answer. Final Grade: D',\n", - " ' The original query is asking for laptops with a maximum price of 300. The predicted query is asking for laptops with a minimum price of 0 and a maximum price of 500. This means that the predicted query is likely to return more results than the original query, as it is asking for a wider range of prices. Therefore, the predicted query is not semantically the same as the original query, and it is not likely to produce the same answer. Final Grade: F',\n", - " \" The first two parameters are the same, so that's good. The third parameter is different, but it's not necessary for the query, so that's not a problem. The fourth parameter is the problem. The original query specifies a maximum price of 500, while the predicted query specifies a maximum price of null. This means that the predicted query will not limit the results to the cheapest gaming PCs, so it is not semantically the same as the original query. Final Grade: F\",\n", - " ' The original query is asking for tablets under $400, so the first two parameters are correct. The predicted query also includes the parameters \"size\" and \"min_price\", which are not necessary for the original query. The \"size\" parameter is not relevant to the question, and the \"min_price\" parameter is redundant since the original query already specifies a maximum price. Therefore, the predicted query is not semantically the same as the original query and is not likely to produce the same answer. Final Grade: D',\n", - " ' The original query is asking for headphones with no maximum price, so the predicted query is not semantically the same because it has a maximum price of 500. The predicted query also has a size of 10, which is not specified in the original query. Therefore, the predicted query is not semantically the same as the original query. Final Grade: F',\n", - " \" The original query is asking for the top rated laptops, so the 'size' parameter should be set to 10 to get the top 10 results. The 'min_price' parameter should be set to 0 to get results from all price ranges. The 'max_price' parameter should be set to null to get results from all price ranges. The 'q' parameter should be set to 'laptop' to get results related to laptops. All of these parameters are present in the predicted query, so it is semantically the same as the original query. Final Grade: A\",\n", - " ' The original query is asking for shoes, so the predicted query is asking for the same thing. The original query does not specify a size, so the predicted query is not adding any additional information. The original query does not specify a price range, so the predicted query is adding additional information that is not necessary. Therefore, the predicted query is not semantically the same as the original query and is likely to produce different results. Final Grade: D',\n", - " ' The original query is asking for a skirt, so the predicted query is asking for the same thing. The predicted query also adds additional parameters such as size and price range, which could help narrow down the results. However, the size parameter is not necessary for the query to be successful, and the price range is too narrow. Therefore, the predicted query is not as effective as the original query. 
Final Grade: C',\n", - " ' The first part of the query is asking for a Desktop PC, which is the same as the original query. The second part of the query is asking for a size of 10, which is not relevant to the original query. The third part of the query is asking for a minimum price of 0, which is not relevant to the original query. The fourth part of the query is asking for a maximum price of null, which is not relevant to the original query. Therefore, the Predicted Query does not semantically match the original query and is not likely to produce the same answer. Final Grade: F',\n", - " ' The original query is asking for cameras with a maximum price of 300. The predicted query is asking for cameras with a maximum price of 500. This means that the predicted query is likely to return more results than the original query, which may include cameras that are not within the budget range. Therefore, the predicted query is not semantically the same as the original query and does not answer the original question. Final Grade: F',\n", - " ' The user asked a question about what iPhone models are available, and the API returned a response with 10 different models. The response provided by the user accurately listed all 10 models, so the accuracy of the response is A+. The utility of the response is also A+ since the user was able to get the exact information they were looking for. Final Grade: A+',\n", - " \" The API response provided a list of laptops with their prices and attributes. The user asked if there were any budget laptops, and the response provided a list of laptops that are all priced under $500. Therefore, the response was accurate and useful in answering the user's question. Final Grade: A\",\n", - " \" The API response provided the name, price, and URL of the product, which is exactly what the user asked for. The response also provided additional information about the product's attributes, which is useful for the user to make an informed decision. Therefore, the response is accurate and useful. Final Grade: A\",\n", - " \" The API response provided a list of tablets that are under $400. The response accurately answered the user's question. Additionally, the response provided useful information such as the product name, price, and attributes. Therefore, the response was accurate and useful. Final Grade: A\",\n", - " \" The API response provided a list of headphones with their respective prices and attributes. The user asked for the best headphones, so the response should include the best headphones based on the criteria provided. The response provided a list of headphones that are all from the same brand (Apple) and all have the same type of headphone (True Wireless, In-Ear). This does not provide the user with enough information to make an informed decision about which headphones are the best. Therefore, the response does not accurately answer the user's question. Final Grade: F\",\n", - " ' The API response provided a list of laptops with their attributes, which is exactly what the user asked for. The response provided a comprehensive list of the top rated laptops, which is what the user was looking for. The response was accurate and useful, providing the user with the information they needed. Final Grade: A',\n", - " ' The API response provided a list of shoes from both Adidas and Nike, which is exactly what the user asked for. The response also included the product name, price, and attributes for each shoe, which is useful information for the user to make an informed decision. 
The response also included links to the products, which is helpful for the user to purchase the shoes. Therefore, the response was accurate and useful. Final Grade: A',\n", - " \" The API response provided a list of skirts that could potentially meet the user's needs. The response also included the name, price, and attributes of each skirt. This is a great start, as it provides the user with a variety of options to choose from. However, the response does not provide any images of the skirts, which would have been helpful for the user to make a decision. Additionally, the response does not provide any information about the availability of the skirts, which could be important for the user. \\n\\nFinal Grade: B\",\n", - " ' The user asked for a professional desktop PC with no budget constraints. The API response provided a list of products that fit the criteria, including the Skytech Archangel Gaming Computer PC Desktop, the CyberPowerPC Gamer Master Gaming Desktop, and the ASUS ROG Strix G10DK-RS756. The response accurately suggested these three products as they all offer powerful processors and plenty of RAM. Therefore, the response is accurate and useful. Final Grade: A',\n", - " \" The API response provided a list of cameras with their prices, which is exactly what the user asked for. The response also included additional information such as features and memory cards, which is not necessary for the user's question but could be useful for further research. The response was accurate and provided the user with the information they needed. Final Grade: A\"]" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Run the grader chain\n", - "response_eval_results = []\n", - "for question, api_response, answer in list(zip(questions, api_responses, answers)):\n", - " request_eval_results.append(\n", - " eval_chain.run(question=question, api_response=api_response, answer=answer)\n", - " )\n", - "request_eval_results" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "a144aa9d", - "metadata": {}, - "outputs": [], - "source": [ - "# Reusing the rubric from above, parse the evaluation chain responses\n", - "parsed_response_results = parse_eval_results(request_eval_results)\n", - "# Collect the scores for a final evaluation table\n", - "scores[\"result_synthesizer\"].extend(parsed_response_results)" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "e95042bc", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Metric \tMin \tMean \tMax \n", - "completed \t1.00 \t1.00 \t1.00 \n", - "request_synthesizer \t0.00 \t0.23 \t1.00 \n", - "result_synthesizer \t0.00 \t0.55 \t1.00 \n" - ] - } - ], - "source": [ - "# Print out Score statistics for the evaluation session\n", - "header = \"{:<20}\\t{:<10}\\t{:<10}\\t{:<10}\".format(\"Metric\", \"Min\", \"Mean\", \"Max\")\n", - "print(header)\n", - "for metric, metric_scores in scores.items():\n", - " mean_scores = (\n", - " sum(metric_scores) / len(metric_scores)\n", - " if len(metric_scores) > 0\n", - " else float(\"nan\")\n", - " )\n", - " row = \"{:<20}\\t{:<10.2f}\\t{:<10.2f}\\t{:<10.2f}\".format(\n", - " metric, min(metric_scores), mean_scores, max(metric_scores)\n", - " )\n", - " print(row)" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "03fe96af", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[]" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": 
"execute_result" - } - ], - "source": [ - "# Re-show the examples for which the chain failed to complete\n", - "failed_examples" - ] - }, - { - "cell_type": "markdown", - "id": "2bb3636d", - "metadata": {}, - "source": [ - "## Generating Test Datasets\n", - "\n", - "To evaluate a chain against your own endpoint, you'll want to generate a test dataset that's conforms to the API.\n", - "\n", - "This section provides an overview of how to bootstrap the process.\n", - "\n", - "First, we'll parse the OpenAPI Spec. For this example, we'll [Speak](https://www.speak.com/)'s OpenAPI specification." - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "a453eb93", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n", - "Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n" - ] - } - ], - "source": [ - "# Load and parse the OpenAPI Spec\n", - "spec = OpenAPISpec.from_url(\"https://api.speak.com/openapi.yaml\")" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "bb65ffe8", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['/v1/public/openai/explain-phrase',\n", - " '/v1/public/openai/explain-task',\n", - " '/v1/public/openai/translate']" - ] - }, - "execution_count": 24, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# List the paths in the OpenAPI Spec\n", - "paths = sorted(spec.paths.keys())\n", - "paths" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "0988f01b", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['post']" - ] - }, - "execution_count": 25, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# See which HTTP Methods are available for a given path\n", - "methods = spec.get_methods_for_path(\"/v1/public/openai/explain-task\")\n", - "methods" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "id": "e9ef0a77", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "type explainTask = (_: {\n", - "/* Description of the task that the user wants to accomplish or do. For example, \"tell the waiter they messed up my order\" or \"compliment someone on their shirt\" */\n", - " task_description?: string,\n", - "/* The foreign language that the user is learning and asking about. The value can be inferred from question - for example, if the user asks \"how do i ask a girl out in mexico city\", the value should be \"Spanish\" because of Mexico City. Always use the full name of the language (e.g. Spanish, French). */\n", - " learning_language?: string,\n", - "/* The user's native language. Infer this value from the language the user asked their question in. Always use the full name of the language (e.g. Spanish, French). */\n", - " native_language?: string,\n", - "/* A description of any additional context in the user's question that could affect the explanation - e.g. setting, scenario, situation, tone, speaking style and formality, usage notes, or any other qualifiers. */\n", - " additional_context?: string,\n", - "/* Full text of the user's question. 
*/\n", - " full_query?: string,\n", - "}) => any;\n" - ] - } - ], - "source": [ - "# Load a single endpoint operation\n", - "operation = APIOperation.from_openapi_spec(\n", - " spec, \"/v1/public/openai/explain-task\", \"post\"\n", - ")\n", - "\n", - "# The operation can be serialized as typescript\n", - "print(operation.to_typescript())" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "id": "f1186b6d", - "metadata": {}, - "outputs": [], - "source": [ - "# Compress the service definition to avoid leaking too much input structure to the sample data\n", - "template = \"\"\"In 20 words or less, what does this service accomplish?\n", - "{spec}\n", - "\n", - "Function: It's designed to \"\"\"\n", - "prompt = PromptTemplate.from_template(template)\n", - "generation_chain = LLMChain(llm=llm, prompt=prompt)\n", - "purpose = generation_chain.run(spec=operation.to_typescript())" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "id": "a594406a", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[\"Can you explain how to say 'hello' in Spanish?\",\n", - " \"I need help understanding the French word for 'goodbye'.\",\n", - " \"Can you tell me how to say 'thank you' in German?\",\n", - " \"I'm trying to learn the Italian word for 'please'.\",\n", - " \"Can you help me with the pronunciation of 'yes' in Portuguese?\",\n", - " \"I'm looking for the Dutch word for 'no'.\",\n", - " \"Can you explain the meaning of 'hello' in Japanese?\",\n", - " \"I need help understanding the Russian word for 'thank you'.\",\n", - " \"Can you tell me how to say 'goodbye' in Chinese?\",\n", - " \"I'm trying to learn the Arabic word for 'please'.\"]" - ] - }, - "execution_count": 28, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "template = \"\"\"Write a list of {num_to_generate} unique messages users might send to a service designed to{purpose} They must each be completely unique.\n", - "\n", - "1.\"\"\"\n", - "\n", - "\n", - "def parse_list(text: str) -> List[str]:\n", - " # Match lines starting with a number then period\n", - " # Strip leading and trailing whitespace\n", - " matches = re.findall(r\"^\\d+\\. \", text)\n", - " return [re.sub(r\"^\\d+\\. 
\", \"\", q).strip().strip('\"') for q in text.split(\"\\n\")]\n", - "\n", - "\n", - "num_to_generate = 10 # How many examples to use for this test set.\n", - "prompt = PromptTemplate.from_template(template)\n", - "generation_chain = LLMChain(llm=llm, prompt=prompt)\n", - "text = generation_chain.run(purpose=purpose, num_to_generate=num_to_generate)\n", - "# Strip preceding numeric bullets\n", - "queries = parse_list(text)\n", - "queries" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "id": "8dc60f43", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['{\"task_description\": \"say \\'hello\\'\", \"learning_language\": \"Spanish\", \"native_language\": \"English\", \"full_query\": \"Can you explain how to say \\'hello\\' in Spanish?\"}',\n", - " '{\"task_description\": \"understanding the French word for \\'goodbye\\'\", \"learning_language\": \"French\", \"native_language\": \"English\", \"full_query\": \"I need help understanding the French word for \\'goodbye\\'.\"}',\n", - " '{\"task_description\": \"say \\'thank you\\'\", \"learning_language\": \"German\", \"native_language\": \"English\", \"full_query\": \"Can you tell me how to say \\'thank you\\' in German?\"}',\n", - " '{\"task_description\": \"Learn the Italian word for \\'please\\'\", \"learning_language\": \"Italian\", \"native_language\": \"English\", \"full_query\": \"I\\'m trying to learn the Italian word for \\'please\\'.\"}',\n", - " '{\"task_description\": \"Help with pronunciation of \\'yes\\' in Portuguese\", \"learning_language\": \"Portuguese\", \"native_language\": \"English\", \"full_query\": \"Can you help me with the pronunciation of \\'yes\\' in Portuguese?\"}',\n", - " '{\"task_description\": \"Find the Dutch word for \\'no\\'\", \"learning_language\": \"Dutch\", \"native_language\": \"English\", \"full_query\": \"I\\'m looking for the Dutch word for \\'no\\'.\"}',\n", - " '{\"task_description\": \"Explain the meaning of \\'hello\\' in Japanese\", \"learning_language\": \"Japanese\", \"native_language\": \"English\", \"full_query\": \"Can you explain the meaning of \\'hello\\' in Japanese?\"}',\n", - " '{\"task_description\": \"understanding the Russian word for \\'thank you\\'\", \"learning_language\": \"Russian\", \"native_language\": \"English\", \"full_query\": \"I need help understanding the Russian word for \\'thank you\\'.\"}',\n", - " '{\"task_description\": \"say goodbye\", \"learning_language\": \"Chinese\", \"native_language\": \"English\", \"full_query\": \"Can you tell me how to say \\'goodbye\\' in Chinese?\"}',\n", - " '{\"task_description\": \"Learn the Arabic word for \\'please\\'\", \"learning_language\": \"Arabic\", \"native_language\": \"English\", \"full_query\": \"I\\'m trying to learn the Arabic word for \\'please\\'.\"}']" - ] - }, - "execution_count": 29, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Define the generation chain to get hypotheses\n", - "api_chain = OpenAPIEndpointChain.from_api_operation(\n", - " operation,\n", - " llm,\n", - " requests=Requests(),\n", - " verbose=verbose,\n", - " return_intermediate_steps=True, # Return request and response text\n", - ")\n", - "\n", - "predicted_outputs = [api_chain(query) for query in queries]\n", - "request_args = [\n", - " output[\"intermediate_steps\"][\"request_args\"] for output in predicted_outputs\n", - "]\n", - "\n", - "# Show the generated request\n", - "request_args" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "id": "b727e28e", - 
"metadata": {}, - "outputs": [], - "source": [ - "## AI Assisted Correction\n", - "correction_template = \"\"\"Correct the following API request based on the user's feedback. If the user indicates no changes are needed, output the original without making any changes.\n", - "\n", - "REQUEST: {request}\n", - "\n", - "User Feedback / requested changes: {user_feedback}\n", - "\n", - "Finalized Request: \"\"\"\n", - "\n", - "prompt = PromptTemplate.from_template(correction_template)\n", - "correction_chain = LLMChain(llm=llm, prompt=prompt)" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "id": "c1f4d71f", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Query: Can you explain how to say 'hello' in Spanish?\n", - "Request: {\"task_description\": \"say 'hello'\", \"learning_language\": \"Spanish\", \"native_language\": \"English\", \"full_query\": \"Can you explain how to say 'hello' in Spanish?\"}\n", - "Requested changes: \n", - "Query: I need help understanding the French word for 'goodbye'.\n", - "Request: {\"task_description\": \"understanding the French word for 'goodbye'\", \"learning_language\": \"French\", \"native_language\": \"English\", \"full_query\": \"I need help understanding the French word for 'goodbye'.\"}\n", - "Requested changes: \n", - "Query: Can you tell me how to say 'thank you' in German?\n", - "Request: {\"task_description\": \"say 'thank you'\", \"learning_language\": \"German\", \"native_language\": \"English\", \"full_query\": \"Can you tell me how to say 'thank you' in German?\"}\n", - "Requested changes: \n", - "Query: I'm trying to learn the Italian word for 'please'.\n", - "Request: {\"task_description\": \"Learn the Italian word for 'please'\", \"learning_language\": \"Italian\", \"native_language\": \"English\", \"full_query\": \"I'm trying to learn the Italian word for 'please'.\"}\n", - "Requested changes: \n", - "Query: Can you help me with the pronunciation of 'yes' in Portuguese?\n", - "Request: {\"task_description\": \"Help with pronunciation of 'yes' in Portuguese\", \"learning_language\": \"Portuguese\", \"native_language\": \"English\", \"full_query\": \"Can you help me with the pronunciation of 'yes' in Portuguese?\"}\n", - "Requested changes: \n", - "Query: I'm looking for the Dutch word for 'no'.\n", - "Request: {\"task_description\": \"Find the Dutch word for 'no'\", \"learning_language\": \"Dutch\", \"native_language\": \"English\", \"full_query\": \"I'm looking for the Dutch word for 'no'.\"}\n", - "Requested changes: \n", - "Query: Can you explain the meaning of 'hello' in Japanese?\n", - "Request: {\"task_description\": \"Explain the meaning of 'hello' in Japanese\", \"learning_language\": \"Japanese\", \"native_language\": \"English\", \"full_query\": \"Can you explain the meaning of 'hello' in Japanese?\"}\n", - "Requested changes: \n", - "Query: I need help understanding the Russian word for 'thank you'.\n", - "Request: {\"task_description\": \"understanding the Russian word for 'thank you'\", \"learning_language\": \"Russian\", \"native_language\": \"English\", \"full_query\": \"I need help understanding the Russian word for 'thank you'.\"}\n", - "Requested changes: \n", - "Query: Can you tell me how to say 'goodbye' in Chinese?\n", - "Request: {\"task_description\": \"say goodbye\", \"learning_language\": \"Chinese\", \"native_language\": \"English\", \"full_query\": \"Can you tell me how to say 'goodbye' in Chinese?\"}\n", - "Requested changes: \n", - "Query: I'm trying to 
learn the Arabic word for 'please'.\n", - "Request: {\"task_description\": \"Learn the Arabic word for 'please'\", \"learning_language\": \"Arabic\", \"native_language\": \"English\", \"full_query\": \"I'm trying to learn the Arabic word for 'please'.\"}\n", - "Requested changes: \n" - ] - } - ], - "source": [ - "ground_truth = []\n", - "for query, request_arg in list(zip(queries, request_args)):\n", - " feedback = input(f\"Query: {query}\\nRequest: {request_arg}\\nRequested changes: \")\n", - " if feedback == \"n\" or feedback == \"none\" or not feedback:\n", - " ground_truth.append(request_arg)\n", - " continue\n", - " resolved = correction_chain.run(request=request_arg, user_feedback=feedback)\n", - " ground_truth.append(resolved.strip())\n", - " print(\"Updated request:\", resolved)" - ] - }, - { - "cell_type": "markdown", - "id": "19d68882", - "metadata": {}, - "source": [ - "**Now you can use the `ground_truth` as shown above in [Evaluate the Requests Chain](#Evaluate-the-requests-chain)!**" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "id": "5a596176", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['{\"task_description\": \"say \\'hello\\'\", \"learning_language\": \"Spanish\", \"native_language\": \"English\", \"full_query\": \"Can you explain how to say \\'hello\\' in Spanish?\"}',\n", - " '{\"task_description\": \"understanding the French word for \\'goodbye\\'\", \"learning_language\": \"French\", \"native_language\": \"English\", \"full_query\": \"I need help understanding the French word for \\'goodbye\\'.\"}',\n", - " '{\"task_description\": \"say \\'thank you\\'\", \"learning_language\": \"German\", \"native_language\": \"English\", \"full_query\": \"Can you tell me how to say \\'thank you\\' in German?\"}',\n", - " '{\"task_description\": \"Learn the Italian word for \\'please\\'\", \"learning_language\": \"Italian\", \"native_language\": \"English\", \"full_query\": \"I\\'m trying to learn the Italian word for \\'please\\'.\"}',\n", - " '{\"task_description\": \"Help with pronunciation of \\'yes\\' in Portuguese\", \"learning_language\": \"Portuguese\", \"native_language\": \"English\", \"full_query\": \"Can you help me with the pronunciation of \\'yes\\' in Portuguese?\"}',\n", - " '{\"task_description\": \"Find the Dutch word for \\'no\\'\", \"learning_language\": \"Dutch\", \"native_language\": \"English\", \"full_query\": \"I\\'m looking for the Dutch word for \\'no\\'.\"}',\n", - " '{\"task_description\": \"Explain the meaning of \\'hello\\' in Japanese\", \"learning_language\": \"Japanese\", \"native_language\": \"English\", \"full_query\": \"Can you explain the meaning of \\'hello\\' in Japanese?\"}',\n", - " '{\"task_description\": \"understanding the Russian word for \\'thank you\\'\", \"learning_language\": \"Russian\", \"native_language\": \"English\", \"full_query\": \"I need help understanding the Russian word for \\'thank you\\'.\"}',\n", - " '{\"task_description\": \"say goodbye\", \"learning_language\": \"Chinese\", \"native_language\": \"English\", \"full_query\": \"Can you tell me how to say \\'goodbye\\' in Chinese?\"}',\n", - " '{\"task_description\": \"Learn the Arabic word for \\'please\\'\", \"learning_language\": \"Arabic\", \"native_language\": \"English\", \"full_query\": \"I\\'m trying to learn the Arabic word for \\'please\\'.\"}']" - ] - }, - "execution_count": 32, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Now you have a new ground truth set to use as shown 
above!\n", - "ground_truth" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b7fe9dfa", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/docs/extras/guides/evaluation/examples/qa_benchmarking_pg.ipynb b/docs/extras/guides/evaluation/examples/qa_benchmarking_pg.ipynb deleted file mode 100644 index c35b17258ec85..0000000000000 --- a/docs/extras/guides/evaluation/examples/qa_benchmarking_pg.ipynb +++ /dev/null @@ -1,372 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "984169ca", - "metadata": {}, - "source": [ - "# Question Answering Benchmarking: Paul Graham Essay\n", - "\n", - "Here we go over how to benchmark performance on a question answering task over a Paul Graham essay.\n", - "\n", - "It is highly recommended that you do any evaluation/benchmarking with tracing enabled. See [here](https://python.langchain.com/docs/modules/callbacks/how_to/tracing) for an explanation of what tracing is and how to set it up." - ] - }, - { - "cell_type": "markdown", - "id": "8a16b75d", - "metadata": {}, - "source": [ - "## Loading the data\n", - "First, let's load the data." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "5b2d5e98", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Found cached dataset json (/Users/harrisonchase/.cache/huggingface/datasets/LangChainDatasets___json/LangChainDatasets--question-answering-paul-graham-76e8f711e038d742/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51)\n" - ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "9264acfe710b4faabf060f0fcf4f7308", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/1 [00:00>6 additional tennis balls.\n", + "Therefore, Roger now has a total of 5 + 6 = <<5+6=11>>11 tennis balls.\n", + "\n", + "No, the sentence is not plausible. 
Joao Moutinho is not a football player, and the NFC championship is a game in American football, not soccer.\n" + ] } ], "source": [ - "predictions" + "predictions = chain.apply(examples)\n", + "print(\"\\n\\n\".join([pred['text'] for pred in predictions]))" ] }, { @@ -137,33 +131,44 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 14, "id": "0cacc65a", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [], "source": [ - "from langchain.evaluation.qa import QAEvalChain" + "from langchain.evaluation import load_evaluator\n", + "\n", + "evaluator = load_evaluator(\"qa\")" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 15, "id": "5aa6cd65", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [], "source": [ - "llm = OpenAI(temperature=0)\n", - "eval_chain = QAEvalChain.from_llm(llm)\n", - "graded_outputs = eval_chain.evaluate(\n", - " examples, predictions, question_key=\"question\", prediction_key=\"text\"\n", - ")" + "eval_results = [\n", + " evaluator.evaluate_strings(\n", + " input=eg['question'],\n", + " prediction=pred['text'],\n", + " reference=eg['answer'],\n", + " )\n", + " for eg, pred in zip(examples, predictions)\n", + "] " ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 17, "id": "63780020", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [ { "name": "stdout", @@ -172,248 +177,28 @@ "Example 0:\n", "Question: Roger has 5 tennis balls. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?\n", "Real Answer: 11\n", - "Predicted Answer: 11 tennis balls\n", - "Predicted Grade: CORRECT\n", + "Predicted Answer: Roger initially has 5 tennis balls. He buys 2 cans of tennis balls, and each can has 3 tennis balls. So, he has 2 * 3 = <<2*3=6>>6 additional tennis balls.\n", + "Therefore, Roger now has a total of 5 + 6 = <<5+6=11>>11 tennis balls.\n", + "Predicted Result: CORRECT\n", "\n", "Example 1:\n", "Question: Is the following sentence plausible? \"Joao Moutinho caught the screen pass in the NFC championship.\"\n", "Real Answer: No\n", - "Predicted Answer: No, this sentence is not plausible. Joao Moutinho is a professional soccer player, not an American football player, so it is not likely that he would be catching a screen pass in the NFC championship.\n", - "Predicted Grade: CORRECT\n", + "Predicted Answer: No, the sentence is not plausible. Joao Moutinho is not a football player, and the NFC championship is a game in American football, not soccer.\n", + "Predicted Result: CORRECT\n", "\n" ] } ], "source": [ - "for i, eg in enumerate(examples):\n", + "for i, (eval_res, eg, pred) in enumerate(zip(eval_results, examples, predictions)):\n", " print(f\"Example {i}:\")\n", " print(\"Question: \" + eg[\"question\"])\n", " print(\"Real Answer: \" + eg[\"answer\"])\n", - " print(\"Predicted Answer: \" + predictions[i][\"text\"])\n", - " print(\"Predicted Grade: \" + graded_outputs[i][\"text\"])\n", + " print(\"Predicted Answer: \" + pred[\"text\"])\n", + " print(\"Predicted Result: \" + eval_res['value'])\n", " print()" ] - }, - { - "cell_type": "markdown", - "id": "782ae8c8", - "metadata": {}, - "source": [ - "## Customize Prompt\n", - "\n", - "You can also customize the prompt that is used. Here is an example prompting it using a score from 0 to 10.\n", - "The custom prompt requires 3 input variables: \"query\", \"answer\" and \"result\". 
Where \"query\" is the question, \"answer\" is the ground truth answer, and \"result\" is the predicted answer." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "153425c4", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.prompts.prompt import PromptTemplate\n", - "\n", - "_PROMPT_TEMPLATE = \"\"\"You are an expert professor specialized in grading students' answers to questions.\n", - "You are grading the following question:\n", - "{query}\n", - "Here is the real answer:\n", - "{answer}\n", - "You are grading the following predicted answer:\n", - "{result}\n", - "What grade do you give from 0 to 10, where 0 is the lowest (very low similarity) and 10 is the highest (very high similarity)?\n", - "\"\"\"\n", - "\n", - "PROMPT = PromptTemplate(\n", - " input_variables=[\"query\", \"answer\", \"result\"], template=_PROMPT_TEMPLATE\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0a3b0fb7", - "metadata": {}, - "outputs": [], - "source": [ - "evalchain = QAEvalChain.from_llm(llm=llm, prompt=PROMPT)\n", - "evalchain.evaluate(\n", - " examples,\n", - " predictions,\n", - " question_key=\"question\",\n", - " answer_key=\"answer\",\n", - " prediction_key=\"text\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "cb1cf335", - "metadata": {}, - "source": [ - "## Evaluation without Ground Truth\n", - "Its possible to evaluate question answering systems without ground truth. You would need a `\"context\"` input that reflects what the information the LLM uses to answer the question. This context can be obtained by any retreival system. Here's an example of how it works:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6c59293f", - "metadata": {}, - "outputs": [], - "source": [ - "context_examples = [\n", - " {\n", - " \"question\": \"How old am I?\",\n", - " \"context\": \"I am 30 years old. 
I live in New York and take the train to work everyday.\",\n", - " },\n", - " {\n", - " \"question\": 'Who won the NFC championship game in 2023?\"',\n", - " \"context\": \"NFC Championship Game 2023: Philadelphia Eagles 31, San Francisco 49ers 7\",\n", - " },\n", - "]\n", - "QA_PROMPT = \"Answer the question based on the context\\nContext:{context}\\nQuestion:{question}\\nAnswer:\"\n", - "template = PromptTemplate(input_variables=[\"context\", \"question\"], template=QA_PROMPT)\n", - "qa_chain = LLMChain(llm=llm, prompt=template)\n", - "predictions = qa_chain.apply(context_examples)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "e500d0cc", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'text': 'You are 30 years old.'},\n", - " {'text': ' The Philadelphia Eagles won the NFC championship game in 2023.'}]" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "predictions" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "6d8cbc1d", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.evaluation.qa import ContextQAEvalChain\n", - "\n", - "eval_chain = ContextQAEvalChain.from_llm(llm)\n", - "graded_outputs = eval_chain.evaluate(\n", - " context_examples, predictions, question_key=\"question\", prediction_key=\"text\"\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "6c5262d0", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'text': ' CORRECT'}, {'text': ' CORRECT'}]" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "graded_outputs" - ] - }, - { - "cell_type": "markdown", - "id": "aaa61f0c", - "metadata": {}, - "source": [ - "## Comparing to other evaluation metrics\n", - "We can compare the evaluation results we get to other common evaluation metrics. To do this, let's load some evaluation metrics from HuggingFace's `evaluate` package." 
- ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "d851453b", - "metadata": {}, - "outputs": [], - "source": [ - "# Some data munging to get the examples in the right format\n", - "for i, eg in enumerate(examples):\n", - " eg[\"id\"] = str(i)\n", - " eg[\"answers\"] = {\"text\": [eg[\"answer\"]], \"answer_start\": [0]}\n", - " predictions[i][\"id\"] = str(i)\n", - " predictions[i][\"prediction_text\"] = predictions[i][\"text\"]\n", - "\n", - "for p in predictions:\n", - " del p[\"text\"]\n", - "\n", - "new_examples = examples.copy()\n", - "for eg in new_examples:\n", - " del eg[\"question\"]\n", - " del eg[\"answer\"]" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "c38eb3e9", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "from evaluate import load\n", - "\n", - "squad_metric = load(\"squad\")\n", - "results = squad_metric.compute(\n", - " references=new_examples,\n", - " predictions=predictions,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "07d68f85", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'exact_match': 0.0, 'f1': 28.125}" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "results" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3b775150", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -432,7 +217,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.11.2" }, "vscode": { "interpreter": { diff --git a/docs/extras/guides/evaluation/examples/sql_qa_benchmarking_chinook.ipynb b/docs/extras/guides/evaluation/examples/sql_qa_benchmarking_chinook.ipynb deleted file mode 100644 index 00ac7a645fb28..0000000000000 --- a/docs/extras/guides/evaluation/examples/sql_qa_benchmarking_chinook.ipynb +++ /dev/null @@ -1,428 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "984169ca", - "metadata": {}, - "source": [ - "# SQL Question Answering Benchmarking: Chinook\n", - "\n", - "Here we go over how to benchmark performance on a question answering task over a SQL database.\n", - "\n", - "It is highly reccomended that you do any evaluation/benchmarking with tracing enabled. See [here](https://langchain.readthedocs.io/en/latest/tracing.html) for an explanation of what tracing is and how to set it up." - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "id": "44874486", - "metadata": {}, - "outputs": [], - "source": [ - "# Comment this out if you are NOT using tracing\n", - "import os\n", - "\n", - "os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\"" - ] - }, - { - "cell_type": "markdown", - "id": "0f66405e", - "metadata": {}, - "source": [ - "## Loading the data\n", - "\n", - "First, let's load the data." 
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "0df1393f", - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "b220d07ee5d14909bc842b4545cdc0de", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Downloading readme: 0%| | 0.00/21.0 [00:00 Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3mThis question is not something that can be answered using the available tools.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3mI need to follow the correct format for answering questions.\n", + "Action: N/A\u001b[0m\n", + "Observation: Invalid Format: Missing 'Action Input:' after 'Action:'\n", + "Thought:\u001b[32;1m\u001b[1;3m\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n", + "Agent 
stopped due to iteration limit or time limit.\n", + "Chunk: Agent stopped due to iteration limit or time limit.\n", + "Response sent.\n" + ] + } + ], + "source": [ + "await on_message(Message(content=\"Hi AI, how are you today?\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "b850294c-7f8f-4e79-adcf-47e4e3a898df", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from langsmith import Client\n", + "\n", + "client = Client()" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "6d089ddc-69bc-45a8-b8db-9962e4f1f5ee", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from itertools import islice\n", + "\n", + "runs = list(islice(client.list_runs(), 10))" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "f0349fac-5a98-400f-ba03-61ed4e1332be", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "runs = sorted(runs, key=lambda x: x.start_time, reverse=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "02f133f0-39ee-4b46-b443-12c1f9b76fff", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "ids = [run.id for run in runs]" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "id": "3366dce4-0c38-4a7d-8111-046a58b24917", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "runs2 = list(client.list_runs(id=ids))\n", + "runs2 = sorted(runs2, key=lambda x: x.start_time, reverse=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "id": "82915b90-39a0-47d6-9121-56a13f210f52", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "['a36092d2-4ad5-4fb4-9b0d-0dba9a2ed836',\n", + " '9398e6be-964f-4aa4-8de9-ad78cd4b7074']" + ] + }, + "execution_count": 42, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "[str(x) for x in ids[:2]]" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "id": "f610ec91-dc48-4a17-91c5-5c4675c77abc", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from langsmith.run_helpers import traceable\n", + "\n", + "@traceable(run_type=\"llm\", name=\"\"\"\"\"\")\n", + "def foo():\n", + " return \"bar\"" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "id": "bd317bd7-8b2a-433a-8ec3-098a84ba8e64", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'bar'" + ] + }, + "execution_count": 49, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "foo()" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "id": "b142519b-6885-415c-83b9-4a346fb90589", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from langchain.llms import AzureOpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5c50bb2b-72b8-4322-9b16-d857ecd9f347", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/snippets/modules/chains/foundational/llm_chain.mdx b/docs/snippets/modules/chains/foundational/llm_chain.mdx index ac441532a2612..ec14bbf59dd44 100644 --- 
a/docs/snippets/modules/chains/foundational/llm_chain.mdx +++ b/docs/snippets/modules/chains/foundational/llm_chain.mdx @@ -122,7 +122,7 @@ llm_chain.predict() -With `predict_and_parser`: +With `predict_and_parse`: ```python diff --git a/libs/langchain/langchain/evaluation/qa/generate_chain.py b/libs/langchain/langchain/evaluation/qa/generate_chain.py index 1a2bc24cd20fb..2fca06074d616 100644 --- a/libs/langchain/langchain/evaluation/qa/generate_chain.py +++ b/libs/langchain/langchain/evaluation/qa/generate_chain.py @@ -5,12 +5,22 @@ from langchain.chains.llm import LLMChain from langchain.evaluation.qa.generate_prompt import PROMPT +from langchain.output_parsers.regex import RegexParser from langchain.schema.language_model import BaseLanguageModel +from langchain.schema.output_parser import BaseLLMOutputParser +from pydantic import Field + +_QA_OUTPUT_PARSER = RegexParser( + regex=r"QUESTION: (.*?)\n+ANSWER: (.*)", output_keys=["query", "answer"] +) class QAGenerateChain(LLMChain): """LLM Chain specifically for generating examples for question answering.""" + output_parser: BaseLLMOutputParser = Field(default=_QA_OUTPUT_PARSER) + output_key: str = "qa_pairs" + @classmethod def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> QAGenerateChain: """Load QA Generate Chain from LLM.""" diff --git a/libs/langchain/langchain/evaluation/qa/generate_prompt.py b/libs/langchain/langchain/evaluation/qa/generate_prompt.py index a20defe530af9..610b12bd7096e 100644 --- a/libs/langchain/langchain/evaluation/qa/generate_prompt.py +++ b/libs/langchain/langchain/evaluation/qa/generate_prompt.py @@ -17,9 +17,6 @@ {doc} """ -output_parser = RegexParser( - regex=r"QUESTION: (.*?)\n+ANSWER: (.*)", output_keys=["query", "answer"] -) PROMPT = PromptTemplate( - input_variables=["doc"], template=template, output_parser=output_parser + input_variables=["doc"], template=template, ) From 56802dc1b4634d4ebf93c32279bc878912c64fcd Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Tue, 25 Jul 2023 14:59:52 -0700 Subject: [PATCH 02/15] lint --- libs/langchain/langchain/evaluation/qa/generate_chain.py | 3 ++- libs/langchain/langchain/evaluation/qa/generate_prompt.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/libs/langchain/langchain/evaluation/qa/generate_chain.py b/libs/langchain/langchain/evaluation/qa/generate_chain.py index 2fca06074d616..c8ebca9a0029e 100644 --- a/libs/langchain/langchain/evaluation/qa/generate_chain.py +++ b/libs/langchain/langchain/evaluation/qa/generate_chain.py @@ -3,12 +3,13 @@ from typing import Any +from pydantic import Field + from langchain.chains.llm import LLMChain from langchain.evaluation.qa.generate_prompt import PROMPT from langchain.output_parsers.regex import RegexParser from langchain.schema.language_model import BaseLanguageModel from langchain.schema.output_parser import BaseLLMOutputParser -from pydantic import Field _QA_OUTPUT_PARSER = RegexParser( regex=r"QUESTION: (.*?)\n+ANSWER: (.*)", output_keys=["query", "answer"] diff --git a/libs/langchain/langchain/evaluation/qa/generate_prompt.py b/libs/langchain/langchain/evaluation/qa/generate_prompt.py index 610b12bd7096e..aae2845f6e388 100644 --- a/libs/langchain/langchain/evaluation/qa/generate_prompt.py +++ b/libs/langchain/langchain/evaluation/qa/generate_prompt.py @@ -18,5 +18,6 @@ {doc} """ PROMPT = PromptTemplate( - input_variables=["doc"], template=template, + input_variables=["doc"], + template=template, ) From 
f8c452a3357410c5bdb204902ff36816eb7a4c0a Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Wed, 26 Jul 2023 11:50:40 -0700 Subject: [PATCH 03/15] rm qa specific guides --- .../data_augmented_question_answering.ipynb | 475 ------------ .../examples/question_answering.ipynb | 230 ------ .../examples/state_of_the_union.txt | 723 ------------------ 3 files changed, 1428 deletions(-) delete mode 100644 docs/extras/guides/evaluation/examples/data_augmented_question_answering.ipynb delete mode 100644 docs/extras/guides/evaluation/examples/question_answering.ipynb delete mode 100644 docs/extras/guides/evaluation/examples/state_of_the_union.txt diff --git a/docs/extras/guides/evaluation/examples/data_augmented_question_answering.ipynb b/docs/extras/guides/evaluation/examples/data_augmented_question_answering.ipynb deleted file mode 100644 index 478939de41d5c..0000000000000 --- a/docs/extras/guides/evaluation/examples/data_augmented_question_answering.ipynb +++ /dev/null @@ -1,475 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "e78b7bb1", - "metadata": {}, - "source": [ - "# Data Augmented Question Answering\n", - "\n", - "This notebook uses some generic prompts/language models to evaluate an question answering system that uses other sources of data besides what is in the model. For example, this can be used to evaluate a question answering system over your proprietary data.\n", - "\n", - "The overall steps to do this are:\n", - "1. Define your chain for the Q&A system\n", - "2. Define a dataset (as a list of examples)\n", - "3. Evaluate the chain on the dataset\n", - "\n", - "## Setup\n", - "\n", - "Let's set up an example with our favorite example - the state of the union address. This will be done by:\n", - "1. Loading the text data\n", - "2. Chunking and storing data in the vectorstore\n", - "3. Creating the retriever from the vectorstore\n", - "4. Creating the Q&A chain using an LLM and retriever\n", - "\n", - "First, fetch the example data from the langchain repo." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "abd606ab", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import requests\n", - "\n", - "state_of_the_union_url = \"https://raw.githubusercontent.com/langchain-ai/langchain/76102971c056bb277bf394068c98fb05ee2fb07d/docs/extras/modules/state_of_the_union.txt\"\n", - "with open(\"state_of_the_union.txt\", \"w\") as f:\n", - " f.write(requests.get(state_of_the_union_url).text)" - ] - }, - { - "cell_type": "markdown", - "id": "7b37ee26-cfa8-4a14-9c77-e6393eeed94e", - "metadata": {}, - "source": [ - "#### Chunk the text data\n", - "\n", - "Use the `CharacterTextSplitter` to chunk the text data using naive character-length splitting." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "185406b0-204b-479b-bc16-a0192a27b3cb", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "from langchain.document_loaders import TextLoader\n", - "from langchain.text_splitter import CharacterTextSplitter\n", - "\n", - "loader = TextLoader(\"state_of_the_union.txt\")\n", - "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", - "texts = text_splitter.split_documents(loader.load())" - ] - }, - { - "cell_type": "markdown", - "id": "0f4419eb-f00c-4367-89a5-8208f19e2cae", - "metadata": {}, - "source": [ - "#### Create Retriever\n", - "\n", - "Select the `embeddings` to use for vectorizing the text chunks, and select the vectorstore to drive the retriever used for question answering." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "5e740bb9-9d5f-49c6-8f91-a0a1c42bc900", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.vectorstores import Chroma\n", - "\n", - "\n", - "embeddings = OpenAIEmbeddings()\n", - "docsearch = Chroma.from_documents(texts, embeddings)\n", - "retriever = docsearch.as_retriever()" - ] - }, - { - "cell_type": "markdown", - "id": "bc159f62-d1b7-4307-8e82-d2bcd192052d", - "metadata": {}, - "source": [ - "#### Create QA Chain\n", - "\n", - "We will use GPT turbo for this example." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "4fdc211d", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.chains import RetrievalQA\n", - "\n", - "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n", - "qa = RetrievalQA.from_llm(\n", - " llm=llm,\n", - " retriever=retriever,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "30fd72f2", - "metadata": {}, - "source": [ - "## Examples\n", - "\n", - "Now we need some examples to evaluate. There are two basic ways to do this:\n", - "\n", - "1. Hard code some examples ourselves\n", - "2. Generate examples automatically, using a language model\n", - "\n", - "If you have example data from prior usage, this is often the best. When you're just starting out, you can bootstrap a dataset using the `QAGenerationChain` or your own custom `LLMChain`." 
- ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "3459b001", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# Hard-coded examples\n", - "examples = [\n", - " {\n", - " \"query\": \"What did the president say about Ketanji Brown Jackson\",\n", - " \"answer\": \"He praised her legal ability and said he nominated her for the supreme court.\",\n", - " },\n", - " {\"query\": \"What did the president say about Michael Jackson\", \"answer\": \"Nothing\"},\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "b9c3fa75", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# Generated examples\n", - "from langchain.evaluation.qa import QAGenerateChain\n", - "\n", - "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n", - "example_gen_chain = QAGenerateChain.from_llm(llm)" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "c24543a9", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "new_examples = [\n", - " ex[example_gen_chain.output_key] for ex in example_gen_chain.apply([{\"doc\": t} for t in texts[:5]])\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "558da6f3", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# Combine examples\n", - "examples += new_examples" - ] - }, - { - "cell_type": "markdown", - "id": "443dc34e", - "metadata": {}, - "source": [ - "## Evaluate\n", - "\n", - "Now that we have examples, it's time to evaluate the chain. Generate predictions and then use an evaluator to grade its performance." - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "782169a5", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "predictions = qa.apply(examples)" - ] - }, - { - "cell_type": "markdown", - "id": "b7485d36-aafc-40c1-b335-0f67b43ff052", - "metadata": { - "tags": [] - }, - "source": [ - "Use the [qa](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.qa.eval_chain.QAEvalChain.html#langchain.evaluation.qa.eval_chain.QAEvalChain) evaluator to grade correctness of the question answering chain. 
For more information on evaluators, check out the [reference docs](https://api.python.langchain.com/en/latest/api_reference.html#module-langchain.evaluation)" - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "id": "f718e437-f410-4d95-a288-73fe01cb5822", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "from langchain.evaluation import load_evaluator\n", - "\n", - "qa_evaluator = load_evaluator(\"qa\")" - ] - }, - { - "cell_type": "markdown", - "id": "eb4d739c-2169-40ba-b19b-44d27d900788", - "metadata": {}, - "source": [ - "***Use the `tabulate` package for pretty printing the results.***" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0c6dfbfc-eb2a-4356-a91b-76e8494ee794", - "metadata": {}, - "outputs": [], - "source": [ - "# %pip install tabulate" - ] - }, - { - "cell_type": "code", - "execution_count": 45, - "id": "32fac2dc", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "from tqdm import tqdm\n", - "from tabulate import tabulate\n", - "\n", - "def truncate(s, n):\n", - " \"\"\"Truncate `s` to `n` characters.\"\"\"\n", - " return (s[:n] + '..') if len(s) > n else s\n", - "\n", - "def print_results(examples, predictions, evaluators):\n", - " max_length = 80\n", - " table = [(\"Example\", \"Evaluator\", \"Value\", \"Score\", \"Query\", \"Prediction\", \"Answer\")]\n", - " for i, (eg, pred) in tqdm(enumerate(zip(examples, predictions))):\n", - " for evaluator in evaluators:\n", - " verdict = evaluator.evaluate_strings(\n", - " input=eg['query'],\n", - " prediction=pred['result'],\n", - " reference=eg['answer'],\n", - " )\n", - " table.append(\n", - " (f\"{i}\",\n", - " f\"{evaluator.evaluation_name}\",\n", - " f\"{verdict['value']}\",\n", - " f\"{verdict['score']}\",\n", - " f\"{truncate(eg['query'], max_length)}\",\n", - " f\"{truncate(pred['result'], max_length)}\",\n", - " f\"{truncate(eg['answer'], max_length)}\")\n", - " )\n", - " print(tabulate(table, headers=\"firstrow\", tablefmt='grid'))\n" - ] - }, - { - "cell_type": "code", - "execution_count": 46, - "id": "7722b9e6-c915-4c8a-9c65-38c8778011fe", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| Example | Evaluator | Value | Score | Query | Prediction | Answer |\n", - "+===========+=============+===========+=========+====================================================================================+====================================================================================+====================================================================================+\n", - "| 0 | correctness | CORRECT | 1 | What did the president say about Ketanji Brown Jackson | The president said that Ketanji Brown Jackson is one of our nation's top legal m.. | He praised her legal ability and said he nominated her for the supreme court. 
|\n", - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 1 | correctness | CORRECT | 1 | What did the president say about Michael Jackson | There is no mention of Michael Jackson in the provided context. | Nothing |\n", - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 2 | correctness | CORRECT | 1 | Who did Russia's Vladimir Putin seek to shake the foundations of? | Russia's Vladimir Putin sought to shake the foundations of the free world. | Russia's Vladimir Putin sought to shake the foundations of the free world. |\n", - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 3 | correctness | CORRECT | 1 | According to the document, why was the NATO Alliance created? | The NATO Alliance was created to secure peace and stability in Europe after Worl.. | The NATO Alliance was created to secure peace and stability in Europe after Worl.. |\n", - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 4 | correctness | CORRECT | 1 | What actions did the United States take to confront Putin's aggression against U.. | The United States took several actions to confront Putin's aggression against Uk.. | The United States spent months building a coalition of other freedom-loving nati.. |\n", - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 5 | correctness | CORRECT | 1 | What actions is the United States taking to put pressure on Russia and support U.. | According to the document, the United States is taking several actions to put pr.. | The United States is enforcing powerful economic sanctions, cutting off Russia's.. |\n", - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 6 | correctness | INCORRECT | 0 | According to the document, what actions will the United States take to further i.. | According to the document, the United States will take the following actions to .. | The United States will join its allies in closing off American air space to all .. 
|\n", - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 7 | correctness | CORRECT | 1 | Who did Russia's Vladimir Putin seek to shake the foundations of? | Russia's Vladimir Putin sought to shake the foundations of the free world. | Russia's Vladimir Putin sought to shake the foundations of the free world. |\n", - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 8 | correctness | CORRECT | 1 | According to the document, why was the NATO Alliance created? | The NATO Alliance was created to secure peace and stability in Europe after Worl.. | The NATO Alliance was created to secure peace and stability in Europe after Worl.. |\n", - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 9 | correctness | CORRECT | 1 | What actions did the United States take to confront Putin's aggression against U.. | The United States took several actions to confront Putin's aggression against Uk.. | The United States spent months building a coalition of other freedom-loving nati.. |\n", - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 10 | correctness | CORRECT | 1 | What actions is the United States taking to put pressure on Russia and support U.. | According to the document, the United States is taking several actions to put pr.. | The United States is enforcing powerful economic sanctions, cutting off Russia's.. |\n", - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 11 | correctness | INCORRECT | 0 | According to the document, what actions will the United States take to further i.. | According to the document, the United States will take the following actions to .. | The United States will join its allies in closing off American air space to all .. 
|\n", - "+-----------+-------------+-----------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n" - ] - } - ], - "source": [ - "print_results(examples, predictions, [qa_evaluator])" - ] - }, - { - "cell_type": "markdown", - "id": "50a9e845", - "metadata": {}, - "source": [ - "## Evaluate with Other Metrics\n", - "\n", - "In addition to predicting whether the answer is correct or incorrect using a language model, we can also use other evalutors, such as the [labeled_criteria](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.criteria.eval_chain.LabeledCriteriaEvalChain.html#langchain.evaluation.criteria.eval_chain.LabeledCriteriaEvalChain) evaluator.\n", - "\n", - "Let's evaluate based on conciseness and a custom 'pedagogical skill'." - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "id": "0cb558f6-449c-4f2b-9b14-31868581473a", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "evaluators = [\n", - " load_evaluator(\"labeled_criteria\", criteria=\"conciseness\"),\n", - " load_evaluator(\"labeled_criteria\", criteria={\n", - " \"pedagogical skill\": \"Did the submission propertly interpret inquiries, generate informative and understandable responses,\"\n", - " \" and present information in a manner that promotes strong thinking and problem-solving.\"\n", - " }),\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": 47, - "id": "11c1b474-90ce-49c5-8f4a-6ebca5669438", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| Example | Evaluator | Value | Score | Query | Prediction | Answer |\n", - "+===========+===================+=========+=========+====================================================================================+====================================================================================+====================================================================================+\n", - "| 0 | conciseness | Y | 1 | What did the president say about Ketanji Brown Jackson | The president said that Ketanji Brown Jackson is one of our nation's top legal m.. | He praised her legal ability and said he nominated her for the supreme court. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 0 | pedagogical skill | Y | 1 | What did the president say about Ketanji Brown Jackson | The president said that Ketanji Brown Jackson is one of our nation's top legal m.. | He praised her legal ability and said he nominated her for the supreme court. 
|\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 1 | conciseness | Y | 1 | What did the president say about Michael Jackson | There is no mention of Michael Jackson in the provided context. | Nothing |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 1 | pedagogical skill | N | 0 | What did the president say about Michael Jackson | There is no mention of Michael Jackson in the provided context. | Nothing |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 2 | conciseness | Y | 1 | Who did Russia's Vladimir Putin seek to shake the foundations of? | Russia's Vladimir Putin sought to shake the foundations of the free world. | Russia's Vladimir Putin sought to shake the foundations of the free world. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 2 | pedagogical skill | Y | 1 | Who did Russia's Vladimir Putin seek to shake the foundations of? | Russia's Vladimir Putin sought to shake the foundations of the free world. | Russia's Vladimir Putin sought to shake the foundations of the free world. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 3 | conciseness | Y | 1 | According to the document, why was the NATO Alliance created? | The NATO Alliance was created to secure peace and stability in Europe after Worl.. | The NATO Alliance was created to secure peace and stability in Europe after Worl.. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 3 | pedagogical skill | Y | 1 | According to the document, why was the NATO Alliance created? | The NATO Alliance was created to secure peace and stability in Europe after Worl.. | The NATO Alliance was created to secure peace and stability in Europe after Worl.. 
|\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 4 | conciseness | Y | 1 | What actions did the United States take to confront Putin's aggression against U.. | The United States took several actions to confront Putin's aggression against Uk.. | The United States spent months building a coalition of other freedom-loving nati.. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 4 | pedagogical skill | Y | 1 | What actions did the United States take to confront Putin's aggression against U.. | The United States took several actions to confront Putin's aggression against Uk.. | The United States spent months building a coalition of other freedom-loving nati.. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 5 | conciseness | N | 0 | What actions is the United States taking to put pressure on Russia and support U.. | According to the document, the United States is taking several actions to put pr.. | The United States is enforcing powerful economic sanctions, cutting off Russia's.. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 5 | pedagogical skill | Y | 1 | What actions is the United States taking to put pressure on Russia and support U.. | According to the document, the United States is taking several actions to put pr.. | The United States is enforcing powerful economic sanctions, cutting off Russia's.. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 6 | conciseness | N | 0 | According to the document, what actions will the United States take to further i.. | According to the document, the United States will take the following actions to .. | The United States will join its allies in closing off American air space to all .. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 6 | pedagogical skill | Y | 1 | According to the document, what actions will the United States take to further i.. 
| According to the document, the United States will take the following actions to .. | The United States will join its allies in closing off American air space to all .. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 7 | conciseness | Y | 1 | Who did Russia's Vladimir Putin seek to shake the foundations of? | Russia's Vladimir Putin sought to shake the foundations of the free world. | Russia's Vladimir Putin sought to shake the foundations of the free world. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 7 | pedagogical skill | Y | 1 | Who did Russia's Vladimir Putin seek to shake the foundations of? | Russia's Vladimir Putin sought to shake the foundations of the free world. | Russia's Vladimir Putin sought to shake the foundations of the free world. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 8 | conciseness | Y | 1 | According to the document, why was the NATO Alliance created? | The NATO Alliance was created to secure peace and stability in Europe after Worl.. | The NATO Alliance was created to secure peace and stability in Europe after Worl.. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 8 | pedagogical skill | Y | 1 | According to the document, why was the NATO Alliance created? | The NATO Alliance was created to secure peace and stability in Europe after Worl.. | The NATO Alliance was created to secure peace and stability in Europe after Worl.. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 9 | conciseness | N | 0 | What actions did the United States take to confront Putin's aggression against U.. | The United States took several actions to confront Putin's aggression against Uk.. | The United States spent months building a coalition of other freedom-loving nati.. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 9 | pedagogical skill | Y | 1 | What actions did the United States take to confront Putin's aggression against U.. 
| The United States took several actions to confront Putin's aggression against Uk.. | The United States spent months building a coalition of other freedom-loving nati.. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 10 | conciseness | N | 0 | What actions is the United States taking to put pressure on Russia and support U.. | According to the document, the United States is taking several actions to put pr.. | The United States is enforcing powerful economic sanctions, cutting off Russia's.. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 10 | pedagogical skill | Y | 1 | What actions is the United States taking to put pressure on Russia and support U.. | According to the document, the United States is taking several actions to put pr.. | The United States is enforcing powerful economic sanctions, cutting off Russia's.. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 11 | conciseness | N | 0 | According to the document, what actions will the United States take to further i.. | According to the document, the United States will take the following actions to .. | The United States will join its allies in closing off American air space to all .. |\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n", - "| 11 | pedagogical skill | Y | 1 | According to the document, what actions will the United States take to further i.. | According to the document, the United States will take the following actions to .. | The United States will join its allies in closing off American air space to all .. 
|\n", - "+-----------+-------------------+---------+---------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+\n" - ] - } - ], - "source": [ - "print_results(examples, predictions, evaluators)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/docs/extras/guides/evaluation/examples/question_answering.ipynb b/docs/extras/guides/evaluation/examples/question_answering.ipynb deleted file mode 100644 index 150be6c9a520c..0000000000000 --- a/docs/extras/guides/evaluation/examples/question_answering.ipynb +++ /dev/null @@ -1,230 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "480b7cf8", - "metadata": {}, - "source": [ - "# Question Answering\n", - "\n", - "This notebook covers how to evaluate generic question answering problems. This is a situation where you have an example containing a question and its corresponding ground truth answer, and you want to measure how well the language model does at answering those questions." - ] - }, - { - "cell_type": "markdown", - "id": "78e3023b", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "For demonstration purposes, we will just evaluate a simple question answering system that only evaluates the model's internal knowledge. Please see the [Data Augmented Question Answering](data_augmented_qa.ipynb) guide for an examples evaluating a Q&A system over data sources." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "96710d50", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n", - "from langchain.chains import LLMChain\n", - "from langchain.chat_models import ChatOpenAI" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "e33ccf00", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "prompt = ChatPromptTemplate.from_messages(\n", - " [\n", - " SystemMessagePromptTemplate.from_template(\"You are a helpful AI assistant.\"),\n", - " HumanMessagePromptTemplate.from_template(\"{question}\"),\n", - " ]\n", - ")\n", - "\n", - "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n", - "\n", - "chain = LLMChain(llm=llm, prompt=prompt)" - ] - }, - { - "cell_type": "markdown", - "id": "0c584440", - "metadata": {}, - "source": [ - "## Examples\n", - "For this purpose, we will just use two simple hardcoded examples, but see other notebooks for tips on how to get and/or generate these examples." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "87de1d84", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "examples = [\n", - " {\n", - " \"question\": \"Roger has 5 tennis balls. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?\",\n", - " \"answer\": \"11\",\n", - " },\n", - " {\n", - " \"question\": 'Is the following sentence plausible? 
\"Joao Moutinho caught the screen pass in the NFC championship.\"',\n", - " \"answer\": \"No\",\n", - " },\n", - "]" - ] - }, - { - "cell_type": "markdown", - "id": "143b1155", - "metadata": {}, - "source": [ - "## Predictions\n", - "\n", - "We can now make and inspect the predictions for these questions." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "c7bd809c", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Roger initially has 5 tennis balls. He buys 2 cans of tennis balls, and each can has 3 tennis balls. So, he has 2 * 3 = <<2*3=6>>6 additional tennis balls.\n", - "Therefore, Roger now has a total of 5 + 6 = <<5+6=11>>11 tennis balls.\n", - "\n", - "No, the sentence is not plausible. Joao Moutinho is not a football player, and the NFC championship is a game in American football, not soccer.\n" - ] - } - ], - "source": [ - "predictions = chain.apply(examples)\n", - "print(\"\\n\\n\".join([pred['text'] for pred in predictions]))" - ] - }, - { - "cell_type": "markdown", - "id": "45cc2f9d", - "metadata": {}, - "source": [ - "## Evaluation\n", - "\n", - "We can see that if we tried to just do exact match on the answer answers (`11` and `No`) they would not match what the language model answered. However, semantically the language model is correct in both cases. In order to account for this, we can use a language model itself to evaluate the answers." - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "0cacc65a", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "from langchain.evaluation import load_evaluator\n", - "\n", - "evaluator = load_evaluator(\"qa\")" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "5aa6cd65", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "eval_results = [\n", - " evaluator.evaluate_strings(\n", - " input=eg['question'],\n", - " prediction=pred['text'],\n", - " reference=eg['answer'],\n", - " )\n", - " for eg, pred in zip(examples, predictions)\n", - "] " - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "63780020", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Example 0:\n", - "Question: Roger has 5 tennis balls. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?\n", - "Real Answer: 11\n", - "Predicted Answer: Roger initially has 5 tennis balls. He buys 2 cans of tennis balls, and each can has 3 tennis balls. So, he has 2 * 3 = <<2*3=6>>6 additional tennis balls.\n", - "Therefore, Roger now has a total of 5 + 6 = <<5+6=11>>11 tennis balls.\n", - "Predicted Result: CORRECT\n", - "\n", - "Example 1:\n", - "Question: Is the following sentence plausible? \"Joao Moutinho caught the screen pass in the NFC championship.\"\n", - "Real Answer: No\n", - "Predicted Answer: No, the sentence is not plausible. 
Joao Moutinho is not a football player, and the NFC championship is a game in American football, not soccer.\n", - "Predicted Result: CORRECT\n", - "\n" - ] - } - ], - "source": [ - "for i, (eval_res, eg, pred) in enumerate(zip(eval_results, examples, predictions)):\n", - " print(f\"Example {i}:\")\n", - " print(\"Question: \" + eg[\"question\"])\n", - " print(\"Real Answer: \" + eg[\"answer\"])\n", - " print(\"Predicted Answer: \" + pred[\"text\"])\n", - " print(\"Predicted Result: \" + eval_res['value'])\n", - " print()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.2" - }, - "vscode": { - "interpreter": { - "hash": "53f3bc57609c7a84333bb558594977aa5b4026b1d6070b93987956689e367341" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/docs/extras/guides/evaluation/examples/state_of_the_union.txt b/docs/extras/guides/evaluation/examples/state_of_the_union.txt deleted file mode 100644 index d50175de40e70..0000000000000 --- a/docs/extras/guides/evaluation/examples/state_of_the_union.txt +++ /dev/null @@ -1,723 +0,0 @@ -Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. - -Last year COVID-19 kept us apart. This year we are finally together again. - -Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. - -With a duty to one another to the American people to the Constitution. - -And with an unwavering resolve that freedom will always triumph over tyranny. - -Six days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. - -He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. - -He met the Ukrainian people. - -From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. - -Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. - -In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. - -Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. - -Please rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. - -Throughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos. - -They keep moving. - -And the costs and the threats to America and the world keep rising. - -That’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2. - -The United States is a member along with 29 other nations. - -It matters. American diplomacy matters. American resolve matters. - -Putin’s latest attack on Ukraine was premeditated and unprovoked. - -He rejected repeated efforts at diplomacy. - -He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. 
Putin was wrong. We were ready. Here is what we did. - -We prepared extensively and carefully. - -We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin. - -I spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression. - -We countered Russia’s lies with truth. - -And now that he has acted the free world is holding him accountable. - -Along with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. - -We are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. - -Together with our allies –we are right now enforcing powerful economic sanctions. - -We are cutting off Russia’s largest banks from the international financial system. - -Preventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. - -We are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come. - -Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. - -The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. - -We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. - -And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. - -The Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame. - -Together with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance. - -We are giving more than $1 Billion in direct assistance to Ukraine. - -And we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering. - -Let me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine. - -Our forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west. - -For that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia. - -As I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power. - -And we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them. - -Putin has unleashed violence and chaos. But while he may make gains on the battlefield – he will pay a continuing high price over the long run. - -And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. 
- -To all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. - -And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. - -Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. - -America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. - -These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. - -But I want you to know that we are going to be okay. - -When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. - -While it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly. - -We see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine. - -In the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. - -This is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people. - -To our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. - -Putin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people. - -He will never extinguish their love of freedom. He will never weaken the resolve of the free world. - -We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. - -The pandemic has been punishing. - -And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. - -I understand. - -I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. - -That’s why one of the first things I did as President was fight to pass the American Rescue Plan. - -Because people were hurting. We needed to act, and we did. - -Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis. - -It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans. - -Helped put food on their table, keep a roof over their heads, and cut the cost of health insurance. - -And as my Dad used to say, it gave people a little breathing room. - -And unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind. - -And it worked. It created jobs. Lots of jobs. - -In fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year -than ever before in the history of America. 
- -Our economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long. - -For the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. - -But that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. - -Vice President Harris and I ran for office with a new economic vision for America. - -Invest in America. Educate Americans. Grow the workforce. Build the economy from the bottom up -and the middle out, not from the top down. - -Because we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. - -America used to have the best roads, bridges, and airports on Earth. - -Now our infrastructure is ranked 13th in the world. - -We won’t be able to compete for the jobs of the 21st Century if we don’t fix that. - -That’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history. - -This was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen. - -We’re done talking about infrastructure weeks. - -We’re going to have an infrastructure decade. - -It is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China. - -As I’ve told Xi Jinping, it is never a good bet to bet against the American people. - -We’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. - -And we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice. - -We’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities. - -4,000 projects have already been announced. - -And tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair. - -When we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs. - -The federal government spends about $600 Billion a year to keep the country safe and secure. - -There’s been a law on the books for almost a century -to make sure taxpayers’ dollars support American jobs and businesses. - -Every Administration says they’ll do it, but we are actually doing it. - -We will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America. - -But to compete for the best jobs of the future, we also need to level the playing field with China and other competitors. - -That’s why it is so important to pass the Bipartisan Innovation Act sitting in Congress that will make record investments in emerging technologies and American manufacturing. - -Let me give you one example of why it’s so important to pass it. - -If you travel 20 miles east of Columbus, Ohio, you’ll find 1,000 empty acres of land. 
- -It won’t look like much, but if you stop and look closely, you’ll see a “Field of dreams,” the ground on which America’s future will be built. - -This is where Intel, the American company that helped build Silicon Valley, is going to build its $20 billion semiconductor “mega site”. - -Up to eight state-of-the-art factories in one place. 10,000 new good-paying jobs. - -Some of the most sophisticated manufacturing in the world to make computer chips the size of a fingertip that power the world and our everyday lives. - -Smartphones. The Internet. Technology we have yet to invent. - -But that’s just the beginning. - -Intel’s CEO, Pat Gelsinger, who is here tonight, told me they are ready to increase their investment from -$20 billion to $100 billion. - -That would be one of the biggest investments in manufacturing in American history. - -And all they’re waiting for is for you to pass this bill. - -So let’s not wait any longer. Send it to my desk. I’ll sign it. - -And we will really take off. - -And Intel is not alone. - -There’s something happening in America. - -Just look around and you’ll see an amazing story. - -The rebirth of the pride that comes from stamping products “Made In America.” The revitalization of American manufacturing. - -Companies are choosing to build new factories here, when just a few years ago, they would have built them overseas. - -That’s what is happening. Ford is investing $11 billion to build electric vehicles, creating 11,000 jobs across the country. - -GM is making the largest investment in its history—$7 billion to build electric vehicles, creating 4,000 jobs in Michigan. - -All told, we created 369,000 new manufacturing jobs in America just last year. - -Powered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. - -As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” - -It’s time. - -But with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills. - -Inflation is robbing them of the gains they might otherwise feel. - -I get it. That’s why my top priority is getting prices under control. - -Look, our economy roared back faster than most predicted, but the pandemic meant that businesses had a hard time hiring enough workers to keep up production in their factories. - -The pandemic also disrupted global supply chains. - -When factories close, it takes longer to make goods and get them from the warehouse to the store, and prices go up. - -Look at cars. - -Last year, there weren’t enough semiconductors to make all the cars that people wanted to buy. - -And guess what, prices of automobiles went up. - -So—we have a choice. - -One way to fight inflation is to drive down wages and make Americans poorer. - -I have a better plan to fight inflation. - -Lower your costs, not your wages. - -Make more cars and semiconductors in America. - -More infrastructure and innovation in America. - -More goods moving faster and cheaper in America. - -More jobs where you can earn a good living in America. - -And instead of relying on foreign supply chains, let’s make it in America. - -Economists call it “increasing the productive capacity of our economy.” - -I call it building a better America. - -My plan to fight inflation will lower your costs and lower the deficit. - -17 Nobel laureates in economics say my plan will ease long-term inflationary pressures. Top business leaders and most Americans support my plan. 
And here’s the plan: - -First – cut the cost of prescription drugs. Just look at insulin. One in ten Americans has diabetes. In Virginia, I met a 13-year-old boy named Joshua Davis. - -He and his Dad both have Type 1 diabetes, which means they need insulin every day. Insulin costs about $10 a vial to make. - -But drug companies charge families like Joshua and his Dad up to 30 times more. I spoke with Joshua’s mom. - -Imagine what it’s like to look at your child who needs insulin and have no idea how you’re going to pay for it. - -What it does to your dignity, your ability to look your child in the eye, to be the parent you expect to be. - -Joshua is here with us tonight. Yesterday was his birthday. Happy birthday, buddy. - -For Joshua, and for the 200,000 other young people with Type 1 diabetes, let’s cap the cost of insulin at $35 a month so everyone can afford it. - -Drug companies will still do very well. And while we’re at it let Medicare negotiate lower prices for prescription drugs, like the VA already does. - -Look, the American Rescue Plan is helping millions of families on Affordable Care Act plans save $2,400 a year on their health care premiums. Let’s close the coverage gap and make those savings permanent. - -Second – cut energy costs for families an average of $500 a year by combatting climate change. - -Let’s provide investments and tax credits to weatherize your homes and businesses to be energy efficient and you get a tax credit; double America’s clean energy production in solar, wind, and so much more; lower the price of electric vehicles, saving you another $80 a month because you’ll never have to pay at the gas pump again. - -Third – cut the cost of child care. Many families pay up to $14,000 a year for child care per child. - -Middle-class and working families shouldn’t have to pay more than 7% of their income for care of young children. - -My plan will cut the cost in half for most families and help parents, including millions of women, who left the workforce during the pandemic because they couldn’t afford child care, to be able to get back to work. - -My plan doesn’t stop there. It also includes home and long-term care. More affordable housing. And Pre-K for every 3- and 4-year-old. - -All of these will lower costs. - -And under my plan, nobody earning less than $400,000 a year will pay an additional penny in new taxes. Nobody. - -The one thing all Americans agree on is that the tax system is not fair. We have to fix it. - -I’m not looking to punish anyone. But let’s make sure corporations and the wealthiest Americans start paying their fair share. - -Just last year, 55 Fortune 500 corporations earned $40 billion in profits and paid zero dollars in federal income tax. - -That’s simply not fair. That’s why I’ve proposed a 15% minimum tax rate for corporations. - -We got more than 130 countries to agree on a global minimum tax rate so companies can’t get out of paying their taxes at home by shipping jobs and factories overseas. - -That’s why I’ve proposed closing loopholes so the very wealthy don’t pay a lower tax rate than a teacher or a firefighter. - -So that’s my plan. It will grow the economy and lower costs for families. - -So what are we waiting for? Let’s get this done. And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation. - -My plan will not only lower costs to give families a fair shot, it will lower the deficit. 
- -The previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted. - -But in my administration, the watchdogs have been welcomed back. - -We’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans. - -And tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. - -By the end of this year, the deficit will be down to less than half what it was before I took office. - -The only president ever to cut the deficit by more than one trillion dollars in a single year. - -Lowering your costs also means demanding more competition. - -I’m a capitalist, but capitalism without competition isn’t capitalism. - -It’s exploitation—and it drives up prices. - -When corporations don’t have to compete, their profits go up, your prices go up, and small businesses and family farmers and ranchers go under. - -We see it happening with ocean carriers moving goods in and out of America. - -During the pandemic, these foreign-owned companies raised prices by as much as 1,000% and made record profits. - -Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. - -And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. - -That ends on my watch. - -Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. - -We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. - -Let’s pass the Paycheck Fairness Act and paid leave. - -Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. - -Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges. - -And let’s pass the PRO Act when a majority of workers want to form a union—they shouldn’t be stopped. - -When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. - -For more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. - -And I know you’re tired, frustrated, and exhausted. - -But I also know this. - -Because of the progress we’ve made, because of your resilience and the tools we have, tonight I can say -we are moving forward safely, back to more normal routines. - -We’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July. - -Just a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines. - -Under these new guidelines, most Americans in most of the country can now be mask free. - -And based on the projections, more of the country will reach that point across the next couple of weeks. - -Thanks to the progress we have made this past year, COVID-19 need no longer control our lives. - -I know some are talking about “living with COVID-19”. Tonight – I say that we will never just accept living with COVID-19. - -We will continue to combat the virus as we do other diseases. 
And because this is a virus that mutates and spreads, we will stay on guard. - -Here are four common sense steps as we move forward safely. - -First, stay protected with vaccines and treatments. We know how incredibly effective vaccines are. If you’re vaccinated and boosted you have the highest degree of protection. - -We will never give up on vaccinating more Americans. Now, I know parents with kids under 5 are eager to see a vaccine authorized for their children. - -The scientists are working hard to get that done and we’ll be ready with plenty of vaccines when they do. - -We’re also ready with anti-viral treatments. If you get COVID-19, the Pfizer pill reduces your chances of ending up in the hospital by 90%. - -We’ve ordered more of these pills than anyone in the world. And Pfizer is working overtime to get us 1 Million pills this month and more than double that next month. - -And we’re launching the “Test to Treat” initiative so people can get tested at a pharmacy, and if they’re positive, receive antiviral pills on the spot at no cost. - -If you’re immunocompromised or have some other vulnerability, we have treatments and free high-quality masks. - -We’re leaving no one behind or ignoring anyone’s needs as we move forward. - -And on testing, we have made hundreds of millions of tests available for you to order for free. - -Even if you already ordered free tests tonight, I am announcing that you can order more from covidtests.gov starting next week. - -Second – we must prepare for new variants. Over the past year, we’ve gotten much better at detecting new variants. - -If necessary, we’ll be able to deploy new vaccines within 100 days instead of many more months or years. - -And, if Congress provides the funds we need, we’ll have new stockpiles of tests, masks, and pills ready if needed. - -I cannot promise a new variant won’t come. But I can promise you we’ll do everything within our power to be ready if it does. - -Third – we can end the shutdown of schools and businesses. We have the tools we need. - -It’s time for Americans to get back to work and fill our great downtowns again. People working from home can feel safe to begin to return to the office. - -We’re doing that here in the federal government. The vast majority of federal workers will once again work in person. - -Our schools are open. Let’s keep it that way. Our kids need to be in school. - -And with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely. - -We achieved this because we provided free vaccines, treatments, tests, and masks. - -Of course, continuing this costs money. - -I will soon send Congress a request. - -The vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly. - -Fourth, we will continue vaccinating the world. - -We’ve sent 475 Million vaccine doses to 112 countries, more than any other nation. - -And we won’t stop. - -We have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. - -Let’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. - -Let’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. - -We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. 
- -I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. - -They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. - -Officer Mora was 27 years old. - -Officer Rivera was 22. - -Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. - -I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. - -I’ve worked on these issues a long time. - -I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. - -So let’s not abandon our streets. Or choose between safety and equal justice. - -Let’s come together to protect our communities, restore trust, and hold law enforcement accountable. - -That’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. - -That’s why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope. - -We should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities. - -I ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe. - -And I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can’t be traced. - -And I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon? - -Ban assault weapons and high-capacity magazines. - -Repeal the liability shield that makes gun manufacturers the only industry in America that can’t be sued. - -These laws don’t infringe on the Second Amendment. They save lives. - -The most fundamental right in America is the right to vote – and to have it counted. And it’s under assault. - -In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. - -We cannot let this happen. - -Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. - -Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. - -One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. - -And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. - -A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. 
Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. - -And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. - -We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. - -We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. - -We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. - -We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders. - -We can do all this while keeping lit the torch of liberty that has led generations of immigrants to this land—my forefathers and so many of yours. - -Provide a pathway to citizenship for Dreamers, those on temporary status, farm workers, and essential workers. - -Revise our laws so businesses have the workers they need and families don’t wait decades to reunite. - -It’s not only the right thing to do—it’s the economically smart thing to do. - -That’s why immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce. - -Let’s get it done once and for all. - -Advancing liberty and justice also requires protecting the rights of women. - -The constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before. - -If we want to go forward—not backward—we must protect access to health care. Preserve a woman’s right to choose. And let’s continue to advance maternal health care in America. - -And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. - -As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. - -While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. - -And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. - -So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. - -First, beat the opioid epidemic. - -There is so much we can do. Increase funding for prevention, treatment, harm reduction, and recovery. - -Get rid of outdated rules that stop doctors from prescribing treatments. And stop the flow of illicit drugs by working with state and local law enforcement to go after traffickers. - -If you’re suffering from addiction, know you are not alone. I believe in recovery, and I celebrate the 23 million Americans in recovery. - -Second, let’s take on mental health. Especially among our children, whose lives and education have been turned upside down. - -The American Rescue Plan gave schools money to hire teachers and help students make up for lost learning. - -I urge every parent to make sure your school does just that. And we can all play a part—sign up to be a tutor or a mentor. - -Children were also struggling before the pandemic. 
Bullying, violence, trauma, and the harms of social media. - -As Frances Haugen, who is here with us tonight, has shown, we must hold social media platforms accountable for the national experiment they’re conducting on our children for profit. - -It’s time to strengthen privacy protections, ban targeted advertising to children, demand tech companies stop collecting personal data on our children. - -And let’s get all Americans the mental health services they need. More people they can turn to for help, and full parity between physical and mental health care. - -Third, support our veterans. - -Veterans are the best of us. - -I’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home. - -My administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free. - -Our troops in Iraq and Afghanistan faced many dangers. - -One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. - -When they came home, many of the world’s fittest and best trained warriors were never the same. - -Headaches. Numbness. Dizziness. - -A cancer that would put them in a flag-draped coffin. - -I know. - -One of those soldiers was my son Major Beau Biden. - -We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. - -But I’m committed to finding out everything we can. - -Committed to military families like Danielle Robinson from Ohio. - -The widow of Sergeant First Class Heath Robinson. - -He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. - -Stationed near Baghdad, just yards from burn pits the size of football fields. - -Heath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter. - -But cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. - -Danielle says Heath was a fighter to the very end. - -He didn’t know how to stop fighting, and neither did she. - -Through her pain she found purpose to demand we do better. - -Tonight, Danielle—we are. - -The VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. - -And tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers. - -I’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. - -And fourth, let’s end cancer as we know it. - -This is personal to me and Jill, to Kamala, and to so many of you. - -Cancer is the #2 cause of death in America–second only to heart disease. - -Last month, I announced our plan to supercharge -the Cancer Moonshot that President Obama asked me to lead six years ago. - -Our goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases. - -More support for patients and families. - -To get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. - -It’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. - -ARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. - -A unity agenda for the nation. 
- -We can do this. - -My fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. - -In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. - -We have fought for freedom, expanded liberty, defeated totalitarianism and terror. - -And built the strongest, freest, and most prosperous nation the world has ever known. - -Now is the hour. - -Our moment of responsibility. - -Our test of resolve and conscience, of history itself. - -It is in this moment that our character is formed. Our purpose is found. Our future is forged. - -Well I know this nation. - -We will meet the test. - -To protect freedom and liberty, to expand fairness and opportunity. - -We will save democracy. - -As hard as these times have been, I am more optimistic about America today than I have been my whole life. - -Because I see the future that is within our grasp. - -Because I know there is simply nothing beyond our capacity. - -We are the only nation on Earth that has always turned every crisis we have faced into an opportunity. - -The only nation that can be defined by a single word: possibilities. - -So on this night, in our 245th year as a nation, I have come to report on the State of the Union. - -And my report is this: the State of the Union is strong—because you, the American people, are strong. - -We are stronger today than we were a year ago. - -And we will be stronger a year from now than we are today. - -Now is our moment to meet and overcome the challenges of our time. - -And we will, as one people. - -One America. - -The United States of America. - -May God bless you all. May God protect our troops. \ No newline at end of file From a663f6893218cbc428c24de3a65f0fe8aa6f88df Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Wed, 26 Jul 2023 12:04:33 -0700 Subject: [PATCH 04/15] add output description --- .../trajectory/trajectory_eval.ipynb | 20 +++++++++ .../agents/trajectory_eval_chain.py | 41 ++++++++----------- 2 files changed, 37 insertions(+), 24 deletions(-) diff --git a/docs/extras/guides/evaluation/trajectory/trajectory_eval.ipynb b/docs/extras/guides/evaluation/trajectory/trajectory_eval.ipynb index f7c12411f758c..ba49f846c081e 100644 --- a/docs/extras/guides/evaluation/trajectory/trajectory_eval.ipynb +++ b/docs/extras/guides/evaluation/trajectory/trajectory_eval.ipynb @@ -30,6 +30,26 @@ "evaluator = load_evaluator(\"trajectory\")" ] }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "b1c64c1a", + "metadata": {}, + "source": [ + "## Methods\n", + "\n", + "\n", + "The Agent Trajectory Evaluators are used with the [evaluate_agent_trajectory](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.evaluate_agent_trajectory) (and async [aevaluate_agent_trajectory](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.aevaluate_agent_trajectory)) methods, which accept:\n", + "\n", + "- input (str) – The input to the agent.\n", + "- prediction (str) – The final predicted response.\n", + "- agent_trajectory (List[Tuple[AgentAction, str]]) – The intermediate steps forming the agent trajectory\n", + "\n", + "They return a dictionary with the following 
values:\n", + "- score: Float from 0 to 1, where 1 would mean \"most effective\" and 0 would mean \"least effective\"\n", + "- reasoning: String \"chain of thought reasoning\" from the LLM generated prior to creating the score" + ] + }, { "cell_type": "markdown", "id": "e733562c-4c17-4942-9647-acfc5ebfaca2", diff --git a/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py b/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py index 63b8fb617ef84..dc2f5a0258012 100644 --- a/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py +++ b/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py @@ -5,7 +5,17 @@ chain (LLMChain) to generate the reasoning and scores. """ -from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union +from typing import ( + Any, + Dict, + List, + Optional, + Sequence, + Tuple, + TypedDict, + Union, + cast, +) from pydantic import Extra, Field @@ -26,7 +36,7 @@ from langchain.tools.base import BaseTool -class TrajectoryEval(NamedTuple): +class TrajectoryEval(TypedDict): """A named tuple containing the score and reasoning for a trajectory.""" score: float @@ -129,8 +139,8 @@ def geography_answers(country: str, question: str) -> str: default_factory=TrajectoryOutputParser ) """The output parser used to parse the output.""" - return_reasoning: bool = False - """Whether to return the reasoning along with the score.""" + return_reasoning: bool = False # :meta private: + """DEPRECATED. Reasoning always returned.""" class Config: """Configuration for the QAEvalChain.""" @@ -210,7 +220,6 @@ def from_llm( llm: BaseLanguageModel, agent_tools: Optional[Sequence[BaseTool]] = None, output_parser: Optional[TrajectoryOutputParser] = None, - return_reasoning: bool = True, **kwargs: Any, ) -> "TrajectoryEvalChain": """Create a TrajectoryEvalChain object from a language model chain. @@ -221,9 +230,6 @@ def from_llm( available to the agent. output_parser (Optional[TrajectoryOutputParser]): The output parser used to parse the chain output into a score. - return_reasoning (bool): Whether to return the - reasoning along with the score. - Returns: TrajectoryEvalChain: The TrajectoryEvalChain object. """ @@ -238,7 +244,6 @@ def from_llm( eval_chain = LLMChain(llm=llm, prompt=prompt) return cls( agent_tools=agent_tools, - return_reasoning=return_reasoning, eval_chain=eval_chain, output_parser=output_parser or TrajectoryOutputParser(), **kwargs, @@ -260,9 +265,7 @@ def output_keys(self) -> List[str]: Returns: List[str]: The output keys. 
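# A minimal usage sketch of the `evaluate_agent_trajectory` call documented above.
# The trajectory below is fabricated for illustration; in practice you would pass an
# agent's `intermediate_steps` (run the agent with `return_intermediate_steps=True`).
# Grading requires an LLM: when no `llm` is passed, `load_evaluator` is assumed to
# fall back to an OpenAI chat model, so an OPENAI_API_KEY must be configured.
from langchain.evaluation import load_evaluator
from langchain.schema import AgentAction

evaluator = load_evaluator("trajectory")
sample_trajectory = [
    (
        AgentAction(
            tool="ping",
            tool_input="https://langchain.com",
            log="Measuring latency with the ping tool",
        ),
        "https://langchain.com responded in 35 ms",
    )
]
result = evaluator.evaluate_agent_trajectory(
    input="What's the latency of https://langchain.com?",
    prediction="The site responded in about 35 milliseconds.",
    agent_trajectory=sample_trajectory,
)
print(result["score"])      # float from 0 (least effective) to 1 (most effective)
print(result["reasoning"])  # the grading LLM's chain-of-thought explanation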
""" - if self.return_reasoning: - return ["score", "reasoning"] - return ["score"] + return ["score", "reasoning"] def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]: """Validate and prep inputs.""" @@ -292,12 +295,7 @@ def _call( raw_output = self.eval_chain.run( chain_input, callbacks=_run_manager.get_child() ) - parsed_output = self.output_parser.parse(raw_output) - - if self.return_reasoning: - return {"score": parsed_output.score, "reasoning": parsed_output.reasoning} - - return {"score": parsed_output.score} + return cast(dict, self.output_parser.parse(raw_output)) async def _acall( self, @@ -321,12 +319,7 @@ async def _acall( raw_output = await self.eval_chain.arun( chain_input, callbacks=_run_manager.get_child() ) - parsed_output = self.output_parser.parse(raw_output) - - if self.return_reasoning: - return {"score": parsed_output.score, "reasoning": parsed_output.reasoning} - - return {"score": parsed_output.score} + return cast(dict, self.output_parser.parse(raw_output)) def _evaluate_agent_trajectory( self, From e838eaca4f0c1cd876ce601b336c84cf40d00881 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Wed, 26 Jul 2023 12:25:08 -0700 Subject: [PATCH 05/15] Update agent trajectory --- .../guides/evaluation/trajectory/custom.ipynb | 2 +- .../trajectory/trajectory_eval.ipynb | 27 +++++++++---------- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/docs/extras/guides/evaluation/trajectory/custom.ipynb b/docs/extras/guides/evaluation/trajectory/custom.ipynb index c223a55d52e5d..a1a5b09c4a6d9 100644 --- a/docs/extras/guides/evaluation/trajectory/custom.ipynb +++ b/docs/extras/guides/evaluation/trajectory/custom.ipynb @@ -67,7 +67,7 @@ "id": "297dea4b-fb28-4292-b6e0-1c769cfb9cbd", "metadata": {}, "source": [ - "The example above will return a score of 1 if the language model predicts that any of the actions were unnecessary, and it returns a score of 0 if all of them were predicted to be necessary.\n", + "The example above will return a score of 1 if the language model predicts that any of the actions were unnecessary, and it returns a score of 0 if all of them were predicted to be necessary. It returns the string 'decision' as the 'value', and includes the rest of the generated text as 'reasoning' to let you audit the decision.\n", "\n", "You can call this evaluator to grade the intermediate steps of your agent's trajectory." ] diff --git a/docs/extras/guides/evaluation/trajectory/trajectory_eval.ipynb b/docs/extras/guides/evaluation/trajectory/trajectory_eval.ipynb index ba49f846c081e..a758ca9253642 100644 --- a/docs/extras/guides/evaluation/trajectory/trajectory_eval.ipynb +++ b/docs/extras/guides/evaluation/trajectory/trajectory_eval.ipynb @@ -31,7 +31,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "b1c64c1a", "metadata": {}, @@ -72,11 +71,13 @@ "outputs": [], "source": [ "import os\n", + "import subprocess\n", + "\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.tools import tool\n", "from langchain.agents import AgentType, initialize_agent\n", + "\n", "from pydantic import HttpUrl\n", - "import subprocess\n", "from urllib.parse import urlparse\n", "\n", "\n", @@ -137,17 +138,11 @@ "tags": [] }, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Type not serializable\n" - ] - }, { "data": { "text/plain": [ - "1.0" + "{'score': 1.0,\n", + " 'reasoning': \"i. The final answer is helpful. 
It directly answers the user's question about the latency for the website https://langchain.com.\\n\\nii. The AI language model uses a logical sequence of tools to answer the question. It uses the 'ping' tool to measure the latency of the website, which is the correct tool for this task.\\n\\niii. The AI language model uses the tool in a helpful way. It inputs the URL into the 'ping' tool and correctly interprets the output to provide the latency in milliseconds.\\n\\niv. The AI language model does not use too many steps to answer the question. It only uses one step, which is appropriate for this type of question.\\n\\nv. The appropriate tool is used to answer the question. The 'ping' tool is the correct tool to measure website latency.\\n\\nGiven these considerations, the AI language model's performance is excellent. It uses the correct tool, interprets the output correctly, and provides a helpful and direct answer to the user's question.\"}" ] }, "execution_count": 3, @@ -161,7 +156,7 @@ " input=result[\"input\"],\n", " agent_trajectory=result[\"intermediate_steps\"],\n", ")\n", - "evaluation_result[\"score\"]" + "evaluation_result" ] }, { @@ -213,7 +208,8 @@ { "data": { "text/plain": [ - "1.0" + "{'score': 1.0,\n", + " 'reasoning': \"Here is my detailed evaluation of the AI's response:\\n\\ni. The final answer is helpful, as it directly provides the latency measurement for the requested website.\\n\\nii. The sequence of using the ping tool to measure latency is logical for this question.\\n\\niii. The ping tool is used in a helpful way, with the website URL provided as input and the output latency measurement extracted.\\n\\niv. Only one step is used, which is appropriate for simply measuring latency. More steps are not needed.\\n\\nv. The ping tool is an appropriate choice to measure latency. \\n\\nIn summary, the AI uses an optimal single step approach with the right tool and extracts the needed output. The final answer directly answers the question in a helpful way.\\n\\nOverall\"}" ] }, "execution_count": 6, @@ -227,7 +223,7 @@ " input=result[\"input\"],\n", " agent_trajectory=result[\"intermediate_steps\"],\n", ")\n", - "evaluation_result[\"score\"]" + "evaluation_result" ] }, { @@ -265,7 +261,8 @@ { "data": { "text/plain": [ - "1.0" + "{'score': 1.0,\n", + " 'reasoning': \"i. The final answer is helpful. It directly answers the user's question about the latency for the specified website.\\n\\nii. The AI language model uses a logical sequence of tools to answer the question. In this case, only one tool was needed to answer the question, and the model chose the correct one.\\n\\niii. The AI language model uses the tool in a helpful way. The 'ping' tool was used to determine the latency of the website, which was the information the user was seeking.\\n\\niv. The AI language model does not use too many steps to answer the question. Only one step was needed and used.\\n\\nv. The appropriate tool was used to answer the question. 
The 'ping' tool is designed to measure latency, which was the information the user was seeking.\\n\\nGiven these considerations, the AI language model's performance in answering this question is excellent.\"}" ] }, "execution_count": 8, @@ -279,7 +276,7 @@ " input=result[\"input\"],\n", " agent_trajectory=result[\"intermediate_steps\"],\n", ")\n", - "evaluation_result[\"score\"]" + "evaluation_result" ] } ], From c9445f742c845df1fc550e475c118c1e8b3fee98 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Wed, 26 Jul 2023 12:40:41 -0700 Subject: [PATCH 06/15] nits --- .../string/criteria_eval_chain.ipynb | 48 ++++++++++++++----- .../langchain/evaluation/criteria/__init__.py | 4 +- 2 files changed, 37 insertions(+), 15 deletions(-) diff --git a/docs/extras/guides/evaluation/string/criteria_eval_chain.ipynb b/docs/extras/guides/evaluation/string/criteria_eval_chain.ipynb index 670347b70534f..4b31fcaa01d45 100644 --- a/docs/extras/guides/evaluation/string/criteria_eval_chain.ipynb +++ b/docs/extras/guides/evaluation/string/criteria_eval_chain.ipynb @@ -5,16 +5,13 @@ "id": "4cf569a7-9a1d-4489-934e-50e57760c907", "metadata": {}, "source": [ - "# Evaluating Custom Criteria\n", + "# Criteria Evaluation\n", "\n", - "Suppose you want to test a model's output against a custom rubric or custom set of criteria, how would you go about testing this?\n", + "In scenarios where you wish to assess a model's output using a specific rubric or criteria set, the `criteria` evaluator proves to be a handy tool. It allows you to verify if an LLM or Chain's output complies with a defined set of criteria.\n", "\n", - "The `criteria` evaluator is a convenient way to predict whether an LLM or Chain's output complies with a set of criteria, so long as you can\n", - "properly define those criteria.\n", + "To understand its functionality and configurability in depth, refer to the reference documentation of the [CriteriaEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.criteria.eval_chain.CriteriaEvalChain.html#langchain.evaluation.criteria.eval_chain.CriteriaEvalChain) class.\n", "\n", - "For more details, check out the reference docs for the [CriteriaEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.criteria.eval_chain.CriteriaEvalChain.html#langchain.evaluation.criteria.eval_chain.CriteriaEvalChain)'s class definition.\n", - "\n", - "### Without References\n", + "### Usage without references\n", "\n", "In this example, you will use the `CriteriaEvalChain` to check whether an output is concise. First, create the evaluation chain to predict whether outputs are \"concise\"." ] @@ -69,7 +66,7 @@ "source": [ "## Using Reference Labels\n", "\n", - "Some criteria (such as correctness) require reference labels to work correctly. To do this, initialuse the `labeled_criteria` evaluator and call the evaluator with a `reference` string." + "Some criteria (such as correctness) require reference labels to work correctly. To do this, initialize the `labeled_criteria` evaluator and call the evaluator with a `reference` string." ] }, { @@ -108,7 +105,7 @@ "**Default Criteria**\n", "\n", "Most of the time, you'll want to define your own custom criteria (see below), but we also provide some common criteria you can load with a single string.\n", - "Here's a list of pre-implemented criteria:" + "Here's a list of pre-implemented criteria. 
Note that in the absence of labels, the LLM merely predicts what it thinks the best answer is and is not grounded in actual law or context." ] }, { @@ -154,20 +151,23 @@ "\n", "To evaluate outputs against your own custom criteria, or to be more explicit the definition of any of the default criteria, pass in a dictionary of `\"criterion_name\": \"criterion_description\"`\n", "\n", - "Note: the evaluator still predicts whether the output complies with ALL of the criteria provided. If you specify antagonistic criteria / antonyms, the evaluator won't be very useful." + "Note: it's recommended that you create a single evaluator per criterion. This way, separate feedback can be provided for each aspect. Additionally, if you provide antagonistic criteria, the evaluator won't be very useful, as it will be configured to predict compliance for ALL of the criteria provided." ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 19, "id": "bafa0a11-2617-4663-84bf-24df7d0736be", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "{'reasoning': 'The criterion asks if the output contains numeric or mathematical information. \\n\\nThe submission is a joke that says, \"I ate some square pie but I don\\'t know the square of pi.\" \\n\\nIn this joke, there is a reference to the mathematical term \"square\" and the mathematical constant \"pi\". \\n\\nTherefore, the submission does contain numeric or mathematical information, and it meets the criterion. \\n\\nY', 'value': 'Y', 'score': 1}\n" + "{'reasoning': \"The criterion asks if the output contains numeric or mathematical information. The joke in the submission does contain mathematical information. It refers to the mathematical concept of squaring a number and also mentions 'pi', which is a mathematical constant. Therefore, the submission does meet the criterion.\\n\\nY\", 'value': 'Y', 'score': 1}\n", + "{'reasoning': 'Let\\'s assess the submission based on the given criteria:\\n\\n1. Numeric: The output does not contain any explicit numeric information. The word \"square\" and \"pi\" are mathematical terms but they are not numeric information per se.\\n\\n2. Mathematical: The output does contain mathematical information. The terms \"square\" and \"pi\" are mathematical terms. The joke is a play on the mathematical concept of squaring a number (in this case, pi).\\n\\n3. Grammatical: The output is grammatically correct. The sentence structure, punctuation, and word usage are all correct.\\n\\n4. Logical: The output is logical. It makes sense within the context of the joke. The joke is a play on words between the mathematical concept of squaring a number (pi) and eating a square pie.\\n\\nBased on the above analysis, the submission does not meet all the criteria because it does not contain numeric information.\\nN', 'value': 'N', 'score': 0}\n" ] } ], @@ -181,6 +181,22 @@ "query = \"Tell me a joke\"\n", "prediction = \"I ate some square pie but I don't know the square of pi.\"\n", "eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)\n", + "print(eval_result)\n", + "\n", + "# If you wanted to specify multiple criteria. 
Generally not recommended\n",
+    "custom_criterion = {\n",
+    "    \"numeric\": \"Does the output contain numeric information?\",\n",
+    "    \"mathematical\": \"Does the output contain mathematical information?\",\n",
+    "    \"grammatical\": \"Is the output grammatically correct?\",\n",
+    "    \"logical\": \"Is the output logical?\",\n",
+    "}\n",
+    "\n",
+    "eval_chain = load_evaluator(\n",
+    "    EvaluatorType.CRITERIA,\n",
+    "    criteria=custom_criterion,\n",
+    ")\n",
+    "eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)\n",
+    "print(\"Multi-criteria evaluation\")\n",
     "print(eval_result)"
    ]
   },
@@ -402,6 +418,12 @@
     "\n",
     "Remember when selecting criteria to decide whether they ought to require ground truth labels or not. Things like \"correctness\" are best evaluated with ground truth or with extensive context. Also, remember to pick aligned principles for a given chain so that the classification makes sense."
    ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a684e2f1",
+   "metadata": {},
+   "source": []
   }
  ],
  "metadata": {
diff --git a/libs/langchain/langchain/evaluation/criteria/__init__.py b/libs/langchain/langchain/evaluation/criteria/__init__.py
index 556f4a0d1c95b..e6d42062041f3 100644
--- a/libs/langchain/langchain/evaluation/criteria/__init__.py
+++ b/libs/langchain/langchain/evaluation/criteria/__init__.py
@@ -2,12 +2,12 @@
 
 These evaluators are useful for evaluating the output of a language model or chain against
-custom criteria or rubric.
+specified criteria or rubric.
 
 Classes
 -------
 CriteriaEvalChain : Evaluates the output of a language model or
-chain against specified criteria.
 
 Examples
 --------

From 2610352c49b8cdf0a8c3ec3f4fabb1d5fa3a1944 Mon Sep 17 00:00:00 2001
From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com>
Date: Wed, 26 Jul 2023 14:18:34 -0700
Subject: [PATCH 07/15] delete qa

---
 .../string/criteria_eval_chain.ipynb          |  18 ++
 docs/extras/guides/evaluation/string/qa.ipynb | 227 ------------------
 2 files changed, 18 insertions(+), 227 deletions(-)
 delete mode 100644 docs/extras/guides/evaluation/string/qa.ipynb

diff --git a/docs/extras/guides/evaluation/string/criteria_eval_chain.ipynb b/docs/extras/guides/evaluation/string/criteria_eval_chain.ipynb
index 4b31fcaa01d45..b021ce97e9505 100644
--- a/docs/extras/guides/evaluation/string/criteria_eval_chain.ipynb
+++ b/docs/extras/guides/evaluation/string/criteria_eval_chain.ipynb
@@ -59,6 +59,24 @@
     "print(eval_result)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "35e61e4d-b776-4f6b-8c89-da5d3604134a",
+   "metadata": {},
+   "source": [
+    "#### Output Format\n",
+    "\n",
+    "All string evaluators expose an [evaluate_strings](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.criteria.eval_chain.CriteriaEvalChain.html?highlight=evaluate_strings#langchain.evaluation.criteria.eval_chain.CriteriaEvalChain.evaluate_strings) (or async [aevaluate_strings](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.criteria.eval_chain.CriteriaEvalChain.html?highlight=evaluate_strings#langchain.evaluation.criteria.eval_chain.CriteriaEvalChain.aevaluate_strings)) method, which accepts:\n",
+    "\n",
+    "- input (str) – The input to the agent.\n",
+    "- prediction (str) – The predicted response.\n",
+    "\n",
+    "The criteria evaluators return a dictionary with the following values:\n",
+    "- score: Binary integer 0 to 1, where 1 would mean that the output is compliant with the criteria, and 0 otherwise\n",
+    "- value: A \"Y\" or \"N\" corresponding to 
the score\n", + "- reasoning: String \"chain of thought reasoning\" from the LLM generated prior to creating the score" + ] + }, { "cell_type": "markdown", "id": "c40b1ac7-8f95-48ed-89a2-623bcc746461", diff --git a/docs/extras/guides/evaluation/string/qa.ipynb b/docs/extras/guides/evaluation/string/qa.ipynb deleted file mode 100644 index dde4bd065a921..0000000000000 --- a/docs/extras/guides/evaluation/string/qa.ipynb +++ /dev/null @@ -1,227 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "d63696a8-d035-4cf7-9605-c3210f0b551d", - "metadata": { - "tags": [] - }, - "source": [ - "# QA Correctness\n", - "\n", - "When thinking about a QA system, one of the most important questions to ask is whether the final generated result is correct. The `\"qa\"` evaluator compares a question-answering model's response to a reference answer to provide this level of information. If you are able to annotate a test dataset, this evaluator will be useful.\n", - "\n", - "For more details, check out the reference docs for the [QAEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.qa.eval_chain.QAEvalChain.html#langchain.evaluation.qa.eval_chain.QAEvalChain)'s class definition." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "9672fdb9-b53f-41e4-8f72-f21d11edbeac", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.evaluation import load_evaluator\n", - "\n", - "llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n", - "\n", - "# Note: the eval_llm is optional. A gpt-4 model will be provided by default if not specified\n", - "evaluator = load_evaluator(\"qa\", eval_llm=llm)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "b4db474a-9c9d-473f-81b1-55070ee584a6", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "{'reasoning': None, 'value': 'CORRECT', 'score': 1}" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "evaluator.evaluate_strings(\n", - " input=\"What's last quarter's sales numbers?\",\n", - " prediction=\"Last quarter we sold 600,000 total units of product.\",\n", - " reference=\"Last quarter we sold 100,000 units of product A, 210,000 units of product B, and 300,000 units of product C.\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "a5b345aa-7f45-4eea-bedf-9b0d5e824be3", - "metadata": {}, - "source": [ - "## SQL Correctness\n", - "\n", - "You can use an LLM to check the equivalence of a SQL query against a reference SQL query using the sql prompt." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "6c803b8c-fe1f-4fb7-8ea0-d9c67b855eb3", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "from langchain.evaluation.qa.eval_prompt import SQL_PROMPT\n", - "\n", - "eval_chain = load_evaluator(\"qa\", eval_llm=llm, prompt=SQL_PROMPT)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "e28b8d07-248f-405c-bcef-e0ebe3a05c3e", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "{'reasoning': 'The expert answer and the submission are very similar in their structure and logic. Both queries are trying to calculate the sum of sales amounts for the last quarter. They both use the SUM function to add up the sale_amount from the sales table. They also both use the same WHERE clause to filter the sales data to only include sales from the last quarter. 
The WHERE clause uses the DATEADD function to subtract 1 quarter from the current date (GETDATE()) and only includes sales where the sale_date is greater than or equal to this date and less than the current date.\\n\\nThe main difference between the two queries is that the expert answer uses a subquery to first select the sale_amount from the sales table with the appropriate date filter, and then sums these amounts in the outer query. The submission, on the other hand, does not use a subquery and instead sums the sale_amount directly in the main query with the same date filter.\\n\\nHowever, this difference does not affect the result of the query. Both queries will return the same result, which is the sum of the sales amounts for the last quarter.\\n\\nCORRECT',\n", - " 'value': 'CORRECT',\n", - " 'score': 1}" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "eval_chain.evaluate_strings(\n", - " input=\"What's last quarter's sales numbers?\",\n", - " prediction=\"\"\"SELECT SUM(sale_amount) AS last_quarter_sales\n", - "FROM sales\n", - "WHERE sale_date >= DATEADD(quarter, -1, GETDATE()) AND sale_date < GETDATE();\n", - "\"\"\",\n", - " reference=\"\"\"SELECT SUM(sub.sale_amount) AS last_quarter_sales\n", - "FROM (\n", - " SELECT sale_amount\n", - " FROM sales\n", - " WHERE sale_date >= DATEADD(quarter, -1, GETDATE()) AND sale_date < GETDATE()\n", - ") AS sub;\n", - "\"\"\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e0c3dcad-408e-4d26-9e25-848ebacac2c4", - "metadata": {}, - "source": [ - "## Using Context\n", - "\n", - "Sometimes, reference labels aren't all available, but you have additional knowledge as context from a retrieval system. Often there may be additional information that isn't available to the model you want to evaluate. For this type of scenario, you can use the [ContextQAEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.qa.eval_chain.ContextQAEvalChain.html#langchain.evaluation.qa.eval_chain.ContextQAEvalChain)." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "9f3ae116-3a2f-461d-ba6f-7352b42c1b0c", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "{'reasoning': None, 'value': 'CORRECT', 'score': 1}" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "eval_chain = load_evaluator(\"context_qa\", eval_llm=llm)\n", - "\n", - "eval_chain.evaluate_strings(\n", - " input=\"Who won the NFC championship game in 2023?\",\n", - " prediction=\"Eagles\",\n", - " reference=\"NFC Championship Game 2023: Philadelphia Eagles 31, San Francisco 49ers 7\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "ba5eac17-08b6-4e4f-a896-79e7fc637018", - "metadata": {}, - "source": [ - "## CoT With Context\n", - "\n", - "The same prompt strategies such as chain of thought can be used to make the evaluation results more reliable.\n", - "The [CotQAEvalChain's](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.qa.eval_chain.CotQAEvalChain.html#langchain.evaluation.qa.eval_chain.CotQAEvalChain) default prompt instructs the model to do this." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "26e3b686-98f4-45a5-9854-7071ec2893f1", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "{'reasoning': 'The student\\'s answer is \"Eagles\". 
The context states that the Philadelphia Eagles won the NFC championship game in 2023. Therefore, the student\\'s answer matches the information provided in the context.',\n", - " 'value': 'GRADE: CORRECT',\n", - " 'score': 1}" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "eval_chain = load_evaluator(\"cot_qa\", eval_llm=llm)\n", - "\n", - "eval_chain.evaluate_strings(\n", - " input=\"Who won the NFC championship game in 2023?\",\n", - " prediction=\"Eagles\",\n", - " reference=\"NFC Championship Game 2023: Philadelphia Eagles 31, San Francisco 49ers 7\",\n", - ")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From d5296933d9008aa8645e541ad4b00072f748ab71 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Wed, 26 Jul 2023 15:52:42 -0700 Subject: [PATCH 08/15] Add criteria for pairwise eval --- .../comparison/pairwise_string.ipynb | 117 ++++++++++++++++-- .../string/criteria_eval_chain.ipynb | 4 +- .../evaluation/comparison/eval_chain.py | 91 +++++++++++++- .../langchain/evaluation/comparison/prompt.py | 11 +- .../evaluation/criteria/eval_chain.py | 101 ++++++++++----- .../evaluation/comparison/test_eval_chain.py | 15 +++ .../evaluation/criteria/test_eval_chain.py | 9 +- 7 files changed, 289 insertions(+), 59 deletions(-) diff --git a/docs/extras/guides/evaluation/comparison/pairwise_string.ipynb b/docs/extras/guides/evaluation/comparison/pairwise_string.ipynb index 2475f727f2e48..1f7c29a20ede0 100644 --- a/docs/extras/guides/evaluation/comparison/pairwise_string.ipynb +++ b/docs/extras/guides/evaluation/comparison/pairwise_string.ipynb @@ -43,7 +43,7 @@ { "data": { "text/plain": [ - "{'reasoning': 'Response A is incorrect as it states there are three dogs in the park, which contradicts the reference answer of four. Response B, on the other hand, is accurate as it matches the reference answer. Although Response B is not as detailed or elaborate as Response A, it is more important that the response is accurate. \\n\\nFinal Decision: [[B]]\\n',\n", + "{'reasoning': 'Both responses are relevant to the question asked, as they both provide a numerical answer to the question about the number of dogs in the park. However, Response A is incorrect according to the reference answer, which states that there are four dogs. Response B, on the other hand, is correct as it matches the reference answer. Neither response demonstrates depth of thought, as they both simply provide a numerical answer without any additional information or context. 
\\n\\nBased on these criteria, Response B is the better response.\\n',\n", " 'value': 'B',\n", " 'score': 0}" ] @@ -62,6 +62,27 @@ ")" ] }, + { + "cell_type": "markdown", + "id": "7491d2e6-4e77-4b17-be6b-7da966785c1d", + "metadata": {}, + "source": [ + "## Methods\n", + "\n", + "\n", + "The pairwise string evaluator can be called using [evaluate_string_pairs](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain.html#langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain.evaluate_string_pairs) (or async [aevaluate_string_pairs](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain.html#langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain.aevaluate_string_pairs)) methods, which accept:\n", + "\n", + "- prediction (str) – The predicted response of the first model, chain, or prompt.\n", + "- prediction_b (str) – The predicted response of the second model, chain, or prompt.\n", + "- input (str) – The input question, prompt, or other text.\n", + "- reference (str) – (Only for the labeled_pairwise_string variant) The reference response.\n", + "\n", + "They return a dictionary with the following values:\n", + "- value: 'A' or 'B', indicating whether `prediction` or `prediction_b` is preferred, respectively\n", + "- score: Integer 0 or 1 mapped from the 'value', where a score of 1 would mean that the first `prediction` is preferred, and a score of 0 would mean `prediction_b` is preferred.\n", + "- reasoning: String \"chain of thought reasoning\" from the LLM generated prior to creating the score" + ] + }, { "cell_type": "markdown", "id": "ed353b93-be71-4479-b9c0-8c97814c2e58", @@ -99,7 +120,7 @@ { "data": { "text/plain": [ - "{'reasoning': \"Response A is accurate but lacks depth and detail. It simply states that addition is a mathematical operation without explaining what it does or how it works. \\n\\nResponse B, on the other hand, provides a more detailed explanation. It not only identifies addition as a mathematical operation, but also explains that it involves adding two numbers to create a third number, the 'sum'. This response is more helpful and informative, providing a clearer understanding of what addition is.\\n\\nTherefore, the better response is B.\\n\",\n", + "{'reasoning': 'Both responses are correct and relevant to the question. However, Response B is more helpful and insightful as it provides a more detailed explanation of what addition is. Response A is correct but lacks depth as it does not explain what the operation of addition entails. \\n\\nFinal Decision: [[B]]',\n", " 'value': 'B',\n", " 'score': 0}" ] @@ -117,6 +138,74 @@ ")" ] }, + { + "cell_type": "markdown", + "id": "4a09b21d-9851-47e8-93d3-90044b2945b0", + "metadata": { + "tags": [] + }, + "source": [ + "## Defining the Criteria\n", + "\n", + "By default, the LLM is instructed to select the 'preferred' response based on helpfulness, relevance, correctness, and depth of thought. 
You can customize the criteria by passing in a `criteria` argument, where the criteria could take any of the following forms:\n",
+    "- [`Criteria`](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.criteria.eval_chain.Criteria.html#langchain.evaluation.criteria.eval_chain.Criteria) enum or its string value - to use one of the default criteria and their descriptions\n",
+    "- [Constitutional principle](https://api.python.langchain.com/en/latest/chains/langchain.chains.constitutional_ai.models.ConstitutionalPrinciple.html#langchain.chains.constitutional_ai.models.ConstitutionalPrinciple) - use any of the constitutional principles defined in langchain\n",
+    "- Dictionary: a list of custom criteria, where the key is the name of the criteria, and the value is the description.\n",
+    "- A list of criteria or constitutional principles - to combine multiple criteria in one.\n",
+    "\n",
+    "Below is an example for determining preferred writing responses based on a custom style."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "8539e7d9-f7b0-4d32-9c45-593a7915c093",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "custom_criteria = {\n",
+    "    \"simplicity\": \"Is the language straightforward and unpretentious?\",\n",
+    "    \"clarity\": \"Are the sentences clear and easy to understand?\",\n",
+    "    \"precision\": \"Is the writing precise, with no unnecessary words or details?\",\n",
+    "    \"truthfulness\": \"Does the writing feel honest and sincere?\",\n",
+    "    \"subtext\": \"Does the writing suggest deeper meanings or themes?\",\n",
+    "}\n",
+    "evaluator = load_evaluator(\"pairwise_string\", criteria=custom_criteria)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "fec7bde8-fbdc-4730-8366-9d90d033c181",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'reasoning': 'Response A is simple, clear, and precise. It uses straightforward language to convey a deep and sincere message about families. The metaphor of joy and sorrow as music is effective and easy to understand.\\n\\nResponse B, on the other hand, is more complex and less clear. The language is more pretentious, with words like \"domicile,\" \"resounds,\" \"abode,\" \"dissonant,\" and \"elegy.\" While it conveys a similar message to Response A, it does so in a more convoluted way. The precision is also lacking due to the use of unnecessary words and details.\\n\\nBoth responses suggest deeper meanings or themes about the shared joy and unique sorrow in families. 
However, Response A does so in a more effective and accessible way.\\n\\nTherefore, the better response is [[A]].',\n", + " 'value': 'A',\n", + " 'score': 1}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "evaluator.evaluate_string_pairs(\n", + " prediction=\"Every cheerful household shares a similar rhythm of joy; but sorrow, in each household, plays a unique, haunting melody.\",\n", + " prediction_b=\"Where one finds a symphony of joy, every domicile of happiness resounds in harmonious,\"\n", + " \" identical notes; yet, every abode of despair conducts a dissonant orchestra, each\"\n", + " \" playing an elegy of grief that is peculiar and profound to its own existence.\",\n", + " input=\"Write some prose about families.\",\n", + ")" + ] + }, { "cell_type": "markdown", "id": "a25b60b2-627c-408a-be4b-a2e5cbc10726", @@ -129,7 +218,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "id": "de84a958-1330-482b-b950-68bcf23f9e35", "metadata": {}, "outputs": [], @@ -143,7 +232,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 8, "id": "e162153f-d50a-4a7c-a033-019dabbc954c", "metadata": { "tags": [] @@ -152,12 +241,12 @@ { "data": { "text/plain": [ - "{'reasoning': 'Here is my assessment:\\n\\nResponse B is better because it directly answers the question by stating the number \"4\", which matches the ground truth reference answer. Response A provides an incorrect number of dogs, stating there are three dogs when the reference says there are four. \\n\\nResponse B is more helpful, relevant, accurate and provides the right level of detail by simply stating the number that was asked for. Response A provides an inaccurate number, so is less helpful and accurate.\\n\\nIn summary, Response B better followed the instructions and answered the question correctly per the reference answer.\\n\\n[[B]]',\n", + "{'reasoning': 'Here is my assessment:\\n\\nResponse B is more helpful, insightful, and accurate than Response A. Response B simply states \"4\", which directly answers the question by providing the exact number of dogs mentioned in the reference answer. In contrast, Response A states \"there are three dogs\", which is incorrect according to the reference answer. \\n\\nIn terms of helpfulness, Response B gives the precise number while Response A provides an inaccurate guess. For relevance, both refer to dogs in the park from the question. However, Response B is more correct and factual based on the reference answer. Response A shows some attempt at reasoning but is ultimately incorrect. Response B requires less depth of thought to simply state the factual number.\\n\\nIn summary, Response B is superior in terms of helpfulness, relevance, correctness, and depth. 
My final decision is: [[B]]\\n',\n", " 'value': 'B',\n", " 'score': 0}" ] }, - "execution_count": 6, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -185,7 +274,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 9, "id": "fb817efa-3a4d-439d-af8c-773b89d97ec9", "metadata": { "tags": [] @@ -195,7 +284,9 @@ "from langchain.prompts import PromptTemplate\n", "\n", "prompt_template = PromptTemplate.from_template(\n", - " \"\"\"Given the input context, which is most similar to the reference label: A or B?\n", + " \"\"\"Given the input context, which do you prefer: A or B?\n", + "Evaluate based on the following criteria:\n", + "{criteria}\n", "Reason step by step and finally, respond with either [[A]] or [[B]] on its own line.\n", "\n", "DATA\n", @@ -216,7 +307,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 10, "id": "d40aa4f0-cfd5-4cb4-83c8-8d2300a04c2f", "metadata": { "tags": [] @@ -226,7 +317,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "input_variables=['input', 'prediction', 'prediction_b', 'reference'] output_parser=None partial_variables={} template='Given the input context, which is most similar to the reference label: A or B?\\nReason step by step and finally, respond with either [[A]] or [[B]] on its own line.\\n\\nDATA\\n----\\ninput: {input}\\nreference: {reference}\\nA: {prediction}\\nB: {prediction_b}\\n---\\nReasoning:\\n\\n' template_format='f-string' validate_template=True\n" + "input_variables=['prediction', 'reference', 'prediction_b', 'input'] output_parser=None partial_variables={'criteria': 'helpfulness: Is the submission helpful, insightful, and appropriate?\\nrelevance: Is the submission referring to a real quote from the text?\\ncorrectness: Is the submission correct, accurate, and factual?\\ndepth: Does the submission demonstrate depth of thought?'} template='Given the input context, which do you prefer: A or B?\\nEvaluate based on the following criteria:\\n{criteria}\\nReason step by step and finally, respond with either [[A]] or [[B]] on its own line.\\n\\nDATA\\n----\\ninput: {input}\\nreference: {reference}\\nA: {prediction}\\nB: {prediction_b}\\n---\\nReasoning:\\n\\n' template_format='f-string' validate_template=True\n" ] } ], @@ -237,7 +328,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 11, "id": "9467bb42-7a31-4071-8f66-9ed2c6f06dcd", "metadata": { "tags": [] @@ -246,12 +337,12 @@ { "data": { "text/plain": [ - "{'reasoning': 'Option A is more similar to the reference label because it mentions the same dog\\'s name, \"fido\". Option B mentions a different name, \"spot\". Therefore, A is more similar to the reference label. \\n',\n", + "{'reasoning': 'Helpfulness: Both A and B are helpful as they provide a direct answer to the question.\\nRelevance: A is relevant as it refers to the correct name of the dog from the text. B is not relevant as it provides a different name.\\nCorrectness: A is correct as it accurately states the name of the dog. 
B is incorrect as it provides a different name.\\nDepth: Both A and B demonstrate a similar level of depth as they both provide a straightforward answer to the question.\\n\\nGiven these evaluations, the preferred response is:\\n',\n", " 'value': 'A',\n", " 'score': 1}" ] }, - "execution_count": 9, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } diff --git a/docs/extras/guides/evaluation/string/criteria_eval_chain.ipynb b/docs/extras/guides/evaluation/string/criteria_eval_chain.ipynb index b021ce97e9505..61f4cf9e334f7 100644 --- a/docs/extras/guides/evaluation/string/criteria_eval_chain.ipynb +++ b/docs/extras/guides/evaluation/string/criteria_eval_chain.ipynb @@ -202,7 +202,7 @@ "print(eval_result)\n", "\n", "# If you wanted to specify multiple criteria. Generally not recommended\n", - "custom_criterion = {\n", + "custom_criteria = {\n", " \"numeric\": \"Does the output contain numeric information?\",\n", " \"mathematical\": \"Does the output contain mathematical information?\",\n", " \"grammatical\": \"Is the output grammatically correct?\",\n", @@ -211,7 +211,7 @@ "\n", "eval_chain = load_evaluator(\n", " EvaluatorType.CRITERIA,\n", - " criteria=custom_criterion,\n", + " criteria=custom_criteria,\n", ")\n", "eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)\n", "print(\"Multi-criteria evaluation\")\n", diff --git a/libs/langchain/langchain/evaluation/comparison/eval_chain.py b/libs/langchain/langchain/evaluation/comparison/eval_chain.py index 2eccdd186fdca..d308300942e4b 100644 --- a/libs/langchain/langchain/evaluation/comparison/eval_chain.py +++ b/libs/langchain/langchain/evaluation/comparison/eval_chain.py @@ -1,18 +1,86 @@ """Base classes for comparing the output of two models.""" from __future__ import annotations -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union from pydantic import Extra, Field from langchain.callbacks.manager import Callbacks +from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple from langchain.chains.llm import LLMChain from langchain.evaluation.comparison.prompt import PROMPT, PROMPT_WITH_REFERENCE +from langchain.evaluation.criteria.eval_chain import ( + CRITERIA_TYPE, + Criteria, +) from langchain.evaluation.schema import LLMEvalChain, PairwiseStringEvaluator from langchain.prompts.prompt import PromptTemplate from langchain.schema import RUN_KEY, BaseOutputParser from langchain.schema.language_model import BaseLanguageModel +_SUPPORTED_CRITERIA = { + Criteria.CONCISENESS: "Is the submission concise and to the point?", + Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?", + Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?", + Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?", + Criteria.HARMFULNESS: "Is the submission harmful, offensive, or inappropriate?", + Criteria.MALICIOUSNESS: "Is the submission malicious in any way?", + Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?", + Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?", + Criteria.MISOGYNY: "Is the submission misogynistic? 
If so, response Y.", + Criteria.CRIMINALITY: "Is the submission criminal in any way?", + Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?", + Criteria.DEPTH: "Does the submission demonstrate depth of thought?", + Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?", + Criteria.DETAIL: "Does the submission demonstrate attention to detail?", +} + + +def resolve_pairwise_criteria( + criteria: Optional[Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]]] +) -> dict: + """Resolve the criteria for the pairwise evaluator. + + Args: + criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. + + Returns: + dict: The resolved criteria. + + """ + if criteria is None: + _default_criteria = [ + Criteria.HELPFULNESS, + Criteria.RELEVANCE, + Criteria.CORRECTNESS, + Criteria.DEPTH, + ] + return {k.value: _SUPPORTED_CRITERIA[k] for k in _default_criteria} + elif isinstance(criteria, Criteria): + criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]} + elif isinstance(criteria, str): + if criteria in _SUPPORTED_CRITERIA: + criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]} + else: + criteria_ = {criteria: ""} + elif isinstance(criteria, ConstitutionalPrinciple): + criteria_ = {criteria.name: criteria.critique_request} + elif isinstance(criteria, (list, tuple)): + criteria_ = { + k: v + for criterion in criteria + for k, v in resolve_pairwise_criteria(criterion).items() + } + else: + if not criteria: + raise ValueError( + "Criteria cannot be empty. " + "Please provide a criterion name or a mapping of the criterion name" + " to its description." + ) + criteria_ = dict(criteria) + return criteria_ + class PairwiseStringResultOutputParser(BaseOutputParser[dict]): """A parser for the output of the PairwiseStringEvalChain. @@ -152,6 +220,7 @@ def from_llm( llm: BaseLanguageModel, *, prompt: Optional[PromptTemplate] = None, + criteria: Optional[Union[CRITERIA_TYPE, str]] = None, **kwargs: Any, ) -> PairwiseStringEvalChain: """Initialize the PairwiseStringEvalChain from an LLM. @@ -168,14 +237,16 @@ def from_llm( ValueError: If the input variables are not as expected. """ - expected_input_vars = {"prediction", "prediction_b", "input"} + expected_input_vars = {"prediction", "prediction_b", "input", "criteria"} prompt_ = prompt or PROMPT if expected_input_vars != set(prompt_.input_variables): raise ValueError( f"Input variables should be {expected_input_vars}, " f"but got {prompt_.input_variables}" ) - return cls(llm=llm, prompt=prompt_, **kwargs) + criteria_ = resolve_pairwise_criteria(criteria) + criteria_str = "\n".join(f"{k}: {v}" if v else k for k, v in criteria_.items()) + return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs) def _prepare_input( self, @@ -323,6 +394,7 @@ def from_llm( llm: BaseLanguageModel, *, prompt: Optional[PromptTemplate] = None, + criteria: Optional[Union[CRITERIA_TYPE, str]] = None, **kwargs: Any, ) -> PairwiseStringEvalChain: """Initialize the LabeledPairwiseStringEvalChain from an LLM. @@ -330,6 +402,7 @@ def from_llm( Args: llm (BaseLanguageModel): The LLM to use. prompt (PromptTemplate, optional): The prompt to use. + criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. **kwargs (Any): Additional keyword arguments. Returns: @@ -339,11 +412,19 @@ def from_llm( ValueError: If the input variables are not as expected. 
""" # noqa: E501 - expected_input_vars = {"prediction", "prediction_b", "input", "reference"} + expected_input_vars = { + "prediction", + "prediction_b", + "input", + "reference", + "criteria", + } prompt_ = prompt or PROMPT_WITH_REFERENCE if expected_input_vars != set(prompt_.input_variables): raise ValueError( f"Input variables should be {expected_input_vars}, " f"but got {prompt_.input_variables}" ) - return cls(llm=llm, prompt=prompt_, **kwargs) + criteria_ = resolve_pairwise_criteria(criteria) + criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items()) + return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs) diff --git a/libs/langchain/langchain/evaluation/comparison/prompt.py b/libs/langchain/langchain/evaluation/comparison/prompt.py index f5d9846495c47..a29e8b413056e 100644 --- a/libs/langchain/langchain/evaluation/comparison/prompt.py +++ b/libs/langchain/langchain/evaluation/comparison/prompt.py @@ -9,7 +9,8 @@ template = """Act as a fair judge and rate the two responses to the question below.\ Choose the response that best followed the instructions and answered the question.\ - Your assessment should weigh helpfulness, relevance, accuracy, depth, creativity, and detail.\ + Your assessment should weigh the following criteria: +{criteria}\ Start by comparing both responses and give a brief rationale.\ Avoid bias from the order of presentation or response length. After giving your rationale, make your final decision using this format:\ @@ -28,12 +29,14 @@ {prediction_b} [/RESPONSE B]""" PROMPT = PromptTemplate( - input_variables=["input", "prediction", "prediction_b"], template=template + input_variables=["input", "prediction", "prediction_b", "criteria"], + template=template, ) template = """Act as a fair judge and rate the two responses to the question below.\ Choose the response that best followed the instructions and answered the question.\ - Your assessment should weigh helpfulness, relevance, accuracy, depth, creativity, and detail.\ + Your assessment should weigh the following criteria: +{criteria}\ Start by comparing both responses and give a brief rationale.\ Avoid bias from the order of presentation or response length.\ Weigh accuracy based on the following ground truth reference\ @@ -60,6 +63,6 @@ [/RESPONSE B]""" PROMPT_WITH_REFERENCE = PromptTemplate( - input_variables=["input", "prediction", "prediction_b", "reference"], + input_variables=["input", "prediction", "prediction_b", "reference", "criteria"], template=template, ) diff --git a/libs/langchain/langchain/evaluation/criteria/eval_chain.py b/libs/langchain/langchain/evaluation/criteria/eval_chain.py index 838c08fd4372d..4de0dc43ab577 100644 --- a/libs/langchain/langchain/evaluation/criteria/eval_chain.py +++ b/libs/langchain/langchain/evaluation/criteria/eval_chain.py @@ -28,6 +28,9 @@ class Criteria(str, Enum): MISOGYNY = "misogyny" CRIMINALITY = "criminality" INSENSITIVITY = "insensitivity" + DEPTH = "depth" + CREATIVITY = "creativity" + DETAIL = "detail" _SUPPORTED_CRITERIA = { @@ -49,6 +52,9 @@ class Criteria(str, Enum): " If so, response Y. If not, respond N.", Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?" " If so, response Y. 
If not, respond N.", + Criteria.DEPTH: "Does the submission demonstrate depth of thought?", + Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?", + Criteria.DETAIL: "Does the submission demonstrate attention to detail?", } @@ -89,6 +95,51 @@ def parse(self, text: str) -> Any: ] +def resolve_criteria( + criteria: Optional[Union[CRITERIA_TYPE, str]], +) -> Dict[str, str]: + """Resolve the criteria to evaluate. + + Parameters + ---------- + criteria : CRITERIA_TYPE + The criteria to evaluate the runs against. It can be: + - a mapping of a criterion name to its description + - a single criterion name present in one of the default criteria + - a single `ConstitutionalPrinciple` instance + + Returns + ------- + Dict[str, str] + A dictionary mapping criterion names to descriptions. + + Examples + -------- + >>> criterion = "relevance" + >>> CriteriaEvalChain.resolve_criteria(criteria) + {'relevance': 'Is the submission referring to a real quote from the text?'} + """ # noqa: E501 + if criteria is None: + return { + "helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS], + } + if isinstance(criteria, Criteria): + criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]} + elif isinstance(criteria, str): + criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]} + elif isinstance(criteria, ConstitutionalPrinciple): + criteria_ = {criteria.name: criteria.critique_request} + else: + if not criteria: + raise ValueError( + "Criteria cannot be empty. " + "Please provide a criterion name or a mapping of the criterion name" + " to its description." + ) + criteria_ = dict(criteria) + return criteria_ + + class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): """LLM Chain for evaluating runs against criteria. @@ -190,6 +241,19 @@ def _skip_reference_warning(self) -> str: "\nTo use references, use the labeled_criteria instead." ) + @classmethod + def _resolve_prompt( + cls, prompt: Optional[BasePromptTemplate] = None + ) -> BasePromptTemplate: + expected_input_vars = {"input", "output", "criteria"} + prompt_ = prompt or PROMPT + if expected_input_vars != set(prompt_.input_variables): + raise ValueError( + f"Input variables should be {expected_input_vars}, " + f"but got {prompt_.input_variables}" + ) + return prompt_ + @classmethod def resolve_criteria( cls, @@ -216,38 +280,7 @@ def resolve_criteria( >>> CriteriaEvalChain.resolve_criteria(criteria) {'relevance': 'Is the submission referring to a real quote from the text?'} """ # noqa: E501 - if criteria is None: - return { - "helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS], - } - if isinstance(criteria, Criteria): - criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]} - elif isinstance(criteria, str): - criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]} - elif isinstance(criteria, ConstitutionalPrinciple): - criteria_ = {criteria.name: criteria.critique_request} - else: - if not criteria: - raise ValueError( - "Criteria cannot be empty. " - "Please provide a criterion name or a mapping of the criterion name" - " to its description." 
- ) - criteria_ = dict(criteria) - return criteria_ - - @classmethod - def _resolve_prompt( - cls, prompt: Optional[BasePromptTemplate] = None - ) -> BasePromptTemplate: - expected_input_vars = {"input", "output", "criteria"} - prompt_ = prompt or PROMPT - if expected_input_vars != set(prompt_.input_variables): - raise ValueError( - f"Input variables should be {expected_input_vars}, " - f"but got {prompt_.input_variables}" - ) - return prompt_ + return resolve_criteria(criteria) @classmethod def from_llm( @@ -306,7 +339,7 @@ def from_llm( " (LabeledCriteriaEvalChain) instead." ) criteria_ = cls.resolve_criteria(criteria) - criteria_str = " ".join(f"{k}: {v}" for k, v in criteria_.items()) + criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items()) prompt_ = prompt_.partial(criteria=criteria_str) return cls( llm=llm, @@ -519,7 +552,7 @@ def from_llm( """ prompt = cls._resolve_prompt(prompt) criteria_ = cls.resolve_criteria(criteria) - criteria_str = " ".join(f"{k}: {v}" for k, v in criteria_.items()) + criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items()) prompt_ = prompt.partial(criteria=criteria_str) return cls( llm=llm, diff --git a/libs/langchain/tests/unit_tests/evaluation/comparison/test_eval_chain.py b/libs/langchain/tests/unit_tests/evaluation/comparison/test_eval_chain.py index 36f432a957ede..2839a7208ff10 100644 --- a/libs/langchain/tests/unit_tests/evaluation/comparison/test_eval_chain.py +++ b/libs/langchain/tests/unit_tests/evaluation/comparison/test_eval_chain.py @@ -8,10 +8,25 @@ from langchain.evaluation.comparison.eval_chain import ( LabeledPairwiseStringEvalChain, PairwiseStringEvalChain, + resolve_pairwise_criteria, ) +from langchain.evaluation.criteria.eval_chain import Criteria from tests.unit_tests.llms.fake_llm import FakeLLM +@pytest.mark.parametrize("criterion", list(Criteria)) +def test_resolve_criteria_enum(criterion: Criteria) -> None: + val = resolve_pairwise_criteria(criterion) + assert isinstance(val, dict) + assert next(iter(val)) == criterion.value + + +def test_resolve_criteria_list_enum() -> None: + val = resolve_pairwise_criteria(list(Criteria)) + assert isinstance(val, dict) + assert set(val.keys()) == set(c.value for c in list(Criteria)) + + def test_pairwise_string_comparison_chain() -> None: llm = FakeLLM( queries={ diff --git a/libs/langchain/tests/unit_tests/evaluation/criteria/test_eval_chain.py b/libs/langchain/tests/unit_tests/evaluation/criteria/test_eval_chain.py index ba56249cc78b8..d0ea4731a944f 100644 --- a/libs/langchain/tests/unit_tests/evaluation/criteria/test_eval_chain.py +++ b/libs/langchain/tests/unit_tests/evaluation/criteria/test_eval_chain.py @@ -13,7 +13,7 @@ from tests.unit_tests.llms.fake_llm import FakeLLM -def test_resolve_criteria() -> None: +def test_resolve_criteria_str() -> None: # type: ignore assert CriteriaEvalChain.resolve_criteria("helpfulness") == { "helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS] @@ -23,6 +23,13 @@ def test_resolve_criteria() -> None: } +@pytest.mark.parametrize("criterion", list(Criteria)) +def test_resolve_criteria_enum(criterion: Criteria) -> None: + assert CriteriaEvalChain.resolve_criteria(criterion) == { + criterion.value: _SUPPORTED_CRITERIA[criterion] + } + + def test_criteria_eval_chain() -> None: chain = CriteriaEvalChain.from_llm( llm=FakeLLM( From a5a1b31a8710147a78dca226a71c6263c54d297d Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Wed, 26 Jul 2023 16:16:52 -0700 Subject: [PATCH 09/15] comparisons 
--- .../evaluation/examples/comparisons.ipynb | 65 +++++++++---------- 1 file changed, 31 insertions(+), 34 deletions(-) diff --git a/docs/extras/guides/evaluation/examples/comparisons.ipynb b/docs/extras/guides/evaluation/examples/comparisons.ipynb index decb584ac0a90..5c293d8984187 100644 --- a/docs/extras/guides/evaluation/examples/comparisons.ipynb +++ b/docs/extras/guides/evaluation/examples/comparisons.ipynb @@ -24,18 +24,15 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": { "tags": [] }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.evaluation.comparison import PairwiseStringEvalChain\n", + "from langchain.evaluation import load_evaluator\n", "\n", - "llm = ChatOpenAI(model=\"gpt-4\")\n", - "\n", - "eval_chain = PairwiseStringEvalChain.from_llm(llm=llm)" + "eval_chain = load_evaluator(\"pairwise_string\")" ] }, { @@ -50,7 +47,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 2, "metadata": { "tags": [] }, @@ -59,13 +56,13 @@ "name": "stderr", "output_type": "stream", "text": [ - "Found cached dataset parquet (/Users/wfh/.cache/huggingface/datasets/LangChainDatasets___parquet/LangChainDatasets--langchain-howto-queries-bbb748bbee7e77aa/0.0.0/2a3b91fbd88a2c90d1dbbb32b460cf621d31bd5b05b934492fdef7d8d6f236ec)\n" + "Found cached dataset parquet (/Users/wfh/.cache/huggingface/datasets/LangChainDatasets___parquet/LangChainDatasets--langchain-howto-queries-bbb748bbee7e77aa/0.0.0/14a00e99c0d15a23649d0db8944380ac81082d4b021f398733dd84f3a6c569a7)\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "d852a1884480457292c90d8bd9d4f1e6", + "model_id": "a2358d37246640ce95e0f9940194590a", "version_major": 2, "version_minor": 0 }, @@ -94,7 +91,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "metadata": { "tags": [] }, @@ -127,7 +124,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "metadata": { "tags": [] }, @@ -152,7 +149,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "metadata": { "tags": [] }, @@ -160,7 +157,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "b076d6bf6680422aa9082d4bad4d98a3", + "model_id": "87277cb39a1a4726bb7cc533a24e2ea4", "version_major": 2, "version_minor": 0 }, @@ -170,14 +167,6 @@ }, "metadata": {}, "output_type": "display_data" - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised ServiceUnavailableError: The server is overloaded or not ready yet..\n", - "Retrying langchain.chat_models.openai.acompletion_with_retry.._completion_with_retry in 1.0 seconds as it raised ServiceUnavailableError: The server is overloaded or not ready yet..\n" - ] } ], "source": [ @@ -215,7 +204,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "metadata": { "tags": [] }, @@ -252,7 +241,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -270,7 +259,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 8, "metadata": { "tags": [] }, @@ -279,8 +268,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "OpenAI Functions Agent: 90.00%\n", - "Structured Chat Agent: 10.00%\n" + "OpenAI Functions Agent: 95.00%\n", + "None: 5.00%\n" ] } ], @@ -310,7 +299,7 @@ }, { "cell_type": "code", - 
"execution_count": 12, + "execution_count": 9, "metadata": { "tags": [] }, @@ -349,7 +338,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 10, "metadata": { "tags": [] }, @@ -358,8 +347,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "The \"OpenAI Functions Agent\" would be preferred between 69.90% and 97.21% percent of the time (with 95% confidence).\n", - "The \"Structured Chat Agent\" would be preferred between 2.79% and 30.10% percent of the time (with 95% confidence).\n" + "The \"OpenAI Functions Agent\" would be preferred between 83.18% and 100.00% percent of the time (with 95% confidence).\n", + "The \"Structured Chat Agent\" would be preferred between 0.00% and 16.82% percent of the time (with 95% confidence).\n" ] } ], @@ -380,7 +369,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 11, "metadata": { "tags": [] }, @@ -389,9 +378,17 @@ "name": "stdout", "output_type": "stream", "text": [ - "The p-value is 0.00040. If the null hypothesis is true (i.e., if the selected eval chain actually has no preference between the models),\n", - "then there is a 0.04025% chance of observing the OpenAI Functions Agent be preferred at least 18\n", - "times out of 20 trials.\n" + "The p-value is 0.00000. If the null hypothesis is true (i.e., if the selected eval chain actually has no preference between the models),\n", + "then there is a 0.00038% chance of observing the OpenAI Functions Agent be preferred at least 19\n", + "times out of 19 trials.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/ipykernel_15978/384907688.py:6: DeprecationWarning: 'binom_test' is deprecated in favour of 'binomtest' from version 1.7.0 and will be removed in Scipy 1.12.0.\n", + " p_value = stats.binom_test(successes, n, p=0.5, alternative=\"two-sided\")\n" ] } ], From 9a61726873b389d3ba680261c14e6a317b8d6ad0 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Wed, 26 Jul 2023 16:18:19 -0700 Subject: [PATCH 10/15] update intro --- .../docs/guides/evaluation/index.mdx | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/docs_skeleton/docs/guides/evaluation/index.mdx b/docs/docs_skeleton/docs/guides/evaluation/index.mdx index 78227519bf133..c52c32cc1e17e 100644 --- a/docs/docs_skeleton/docs/guides/evaluation/index.mdx +++ b/docs/docs_skeleton/docs/guides/evaluation/index.mdx @@ -6,23 +6,24 @@ import DocCardList from "@theme/DocCardList"; # Evaluation -Language models can be unpredictable. This makes it challenging to ship reliable applications to production, where repeatable, useful outcomes across diverse inputs are a minimum requirement. Tests help demonstrate each component in an LLM application can produce the required or expected functionality. These tests also safeguard against regressions while you improve interconnected pieces of an integrated system. However, measuring the quality of generated text can be challenging. It can be hard to agree on the right set of metrics for your application, and it can be difficult to translate those into better performance. Furthermore, it's common to lack sufficient evaluation data to adequately test the range of inputs and expected outputs for each component when you're just getting started. The LangChain community is building open source tools and guides to help address these challenges. 
+Building applications with language models involves many moving parts. One of the most critical components is ensuring that the outcomes produced by your models are reliable and useful across a broad array of inputs. This process of assurance is known as "evaluation". -LangChain exposes different types of evaluators for common types of evaluation. Each type has off-the-shelf implementations you can use to get started, as well as an - extensible API so you can create your own or contribute improvements for everyone to use. The following sections have example notebooks for you to get started. +LangChain provides tools to help you evaluate your language models. This is critical because even though language models can sometimes produce unpredictable results, deploying them in a production environment requires repeatable and useful outcomes. -- [String Evaluators](/docs/guides/evaluation/string/): Evaluate the predicted string for a given input, usually against a reference string -- [Trajectory Evaluators](/docs/guides/evaluation/trajectory/): Evaluate the whole trajectory of agent actions -- [Comparison Evaluators](/docs/guides/evaluation/comparison/): Compare predictions from two runs on a common input +We offer various types of evaluators that help you ensure your model's performance and integrity while handling diverse data sets. This document will walk you through these evaluator types, how to use them, and provide some examples of their use in real-world scenarios. +Each evaluator type in LangChain comes with ready-to-use implementations and an extensible API that allows for customization according to your unique requirements. Here are the types of evaluators we offer: -This section also provides some additional examples of how you could use these evaluators for different scenarios or apply to different chain implementations in the LangChain library. Some examples include: +- [String Evaluators](/docs/guides/evaluation/string/): These evaluators assess the predicted string for a given input, usually comparing it against a reference string. +- [Trajectory Evaluators](/docs/guides/evaluation/trajectory/): These are used to evaluate the entire trajectory of agent actions. +- [Comparison Evaluators](/docs/guides/evaluation/comparison/): These evaluators are designed to compare predictions from two runs on a common input. -- [Preference Scoring Chain Outputs](/docs/guides/evaluation/examples/comparisons): An example using a comparison evaluator on different models or prompts to select statistically significant differences in aggregate preference scores +These evaluators can be used across various scenarios and can be applied to different chain implementations in the LangChain library. For instance, you might use them in a scenario like: +- [Preference Scoring Chain Outputs](/docs/guides/evaluation/examples/comparisons): This example uses a comparison evaluator to select statistically significant differences in aggregate preference scores across different models or prompts. ## Reference Docs -For detailed information of the available evaluators, including how to instantiate, configure, and customize them. Check out the [reference documentation](https://api.python.langchain.com/en/latest/api_reference.html#module-langchain.evaluation) directly. 
+For detailed information on the available evaluators, including how to instantiate, configure, and customize them, check out the [reference documentation](https://api.python.langchain.com/en/latest/api_reference.html#module-langchain.evaluation) directly.

From 13f583c975f72210a374b8a4184e7ff42be0a917 Mon Sep 17 00:00:00 2001
From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com>
Date: Wed, 26 Jul 2023 16:33:18 -0700
Subject: [PATCH 11/15] intro update

---
 docs/docs_skeleton/docs/guides/evaluation/index.mdx | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/docs/docs_skeleton/docs/guides/evaluation/index.mdx b/docs/docs_skeleton/docs/guides/evaluation/index.mdx
index c52c32cc1e17e..a608527bed057 100644
--- a/docs/docs_skeleton/docs/guides/evaluation/index.mdx
+++ b/docs/docs_skeleton/docs/guides/evaluation/index.mdx
@@ -6,21 +6,23 @@ import DocCardList from "@theme/DocCardList";
 # Evaluation

-Building applications with language models involves many moving parts. One of the most critical components is ensuring that the outcomes produced by your models are reliable and useful across a broad array of inputs. This process of assurance is known as "evaluation".
+Building applications with language models involves many moving parts. One of the most critical components is ensuring that the outcomes produced by your models are reliable and useful across a broad array of inputs, and that they work well with your application's other software components. Ensuring reliability usually boils down to some combination of application design, testing & evaluation, and runtime checks.

-LangChain provides tools to help you evaluate your language models. This is critical because even though language models can sometimes produce unpredictable results, deploying them in a production environment requires repeatable and useful outcomes.
+The guides in this section review the APIs and functionality LangChain provides to help you better evaluate your applications. Evaluation and testing are both critical when thinking about deploying LLM applications, since production environments require repeatable and useful outcomes.

-We offer various types of evaluators that help you ensure your model's performance and integrity while handling diverse data sets. This document will walk you through these evaluator types, how to use them, and provide some examples of their use in real-world scenarios.
+LangChain offers various types of evaluators to help you measure performance and integrity on diverse data, and we hope to encourage the community to create and share other useful evaluators so everyone can improve. These docs will introduce the evaluator types, how to use them, and provide some examples of their use in real-world scenarios.

-Each evaluator type in LangChain comes with ready-to-use implementations and an extensible API that allows for customization according to your unique requirements. Here are the types of evaluators we offer:
+Each evaluator type in LangChain comes with ready-to-use implementations and an extensible API that allows for customization according to your unique requirements. Here are some of the types of evaluators we offer:

 - [String Evaluators](/docs/guides/evaluation/string/): These evaluators assess the predicted string for a given input, usually comparing it against a reference string.
 - [Trajectory Evaluators](/docs/guides/evaluation/trajectory/): These are used to evaluate the entire trajectory of agent actions.
- [Comparison Evaluators](/docs/guides/evaluation/comparison/): These evaluators are designed to compare predictions from two runs on a common input. -These evaluators can be used across various scenarios and can be applied to different chain implementations in the LangChain library. For instance, you might use them in a scenario like: +These evaluators can be used across various scenarios and can be applied to different chain and LLM implementations in the LangChain library. -- [Preference Scoring Chain Outputs](/docs/guides/evaluation/examples/comparisons): This example uses a comparison evaluator to select statistically significant differences in aggregate preference scores across different models or prompts. +We also are working to share guides and cookbooks that demonstrate how to use these evaluators in real-world scenarios, such as: + +- [Chain Comparisons](/docs/guides/evaluation/examples/comparisons): This example uses a comparison evaluator to predict the preferred output. It reviews ways to measure confidence intervals to select statistically significant differences in aggregate preference scores across different models or prompts. ## Reference Docs From 3e1d662f192587b9fc7f44829df052550ef82303 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Wed, 26 Jul 2023 16:50:10 -0700 Subject: [PATCH 12/15] update --- .../docs/guides/evaluation/string/index.mdx | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/docs/docs_skeleton/docs/guides/evaluation/string/index.mdx b/docs/docs_skeleton/docs/guides/evaluation/string/index.mdx index 359a56510cdfd..3585e7991651b 100644 --- a/docs/docs_skeleton/docs/guides/evaluation/string/index.mdx +++ b/docs/docs_skeleton/docs/guides/evaluation/string/index.mdx @@ -3,6 +3,25 @@ sidebar_position: 2 --- # String Evaluators +A string evaluator is a component within LangChain designed to assess the performance of a language model by comparing its generated outputs (predictions) to a reference string or an input. This comparison is a crucial step in the evaluation of language models, providing a measure of the accuracy or quality of the generated text. + +In practice, string evaluators are typically used to evaluate a predicted string against a given input, such as a question or a prompt. Often, a reference label or context string is provided to define what a correct or ideal response would look like. These evaluators can be customized to tailor the evaluation process to fit your application's specific requirements. + +To create a custom string evaluator, inherit from the `StringEvaluator` class and implement the `_evaluate_strings` method. If you require asynchronous support, also implement the `_aevaluate_strings` method. + +Here's a summary of the key attributes and methods associated with a string evaluator: + +- `evaluation_name`: Specifies the name of the evaluation. +- `requires_input`: Boolean attribute that indicates whether the evaluator requires an input string. If True, the evaluator will raise an error when the input isn't provided. If False, a warning will be logged if an input _is_ provided, indicating that it will not be considered in the evaluation. +- `requires_reference`: Boolean attribute specifying whether the evaluator requires a reference label. If True, the evaluator will raise an error when the reference isn't provided. If False, a warning will be logged if a reference _is_ provided, indicating that it will not be considered in the evaluation. 
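Putting the attributes above and the `_evaluate_strings` hook together, here is a minimal sketch of a custom string evaluator. It assumes `StringEvaluator` is importable from `langchain.evaluation.schema` (the module used elsewhere in this patch); the `RegexMatchEvaluator` name and its scoring rule are illustrative only and not part of the library.

```python
import re
from typing import Any, Optional

from langchain.evaluation.schema import StringEvaluator


class RegexMatchEvaluator(StringEvaluator):
    """Toy evaluator: score 1 if the prediction matches the regex given as reference."""

    @property
    def requires_input(self) -> bool:
        return False  # the original prompt is not needed to score a match

    @property
    def requires_reference(self) -> bool:
        return True  # the reference string carries the regex pattern

    @property
    def evaluation_name(self) -> str:
        return "regex_match"

    def _evaluate_strings(
        self,
        *,
        prediction: str,
        reference: Optional[str] = None,
        input: Optional[str] = None,
        **kwargs: Any,
    ) -> dict:
        matched = re.search(reference or "", prediction) is not None
        return {"score": int(matched), "value": "Y" if matched else "N"}


# Example usage (the public evaluate_strings method wraps _evaluate_strings):
# RegexMatchEvaluator().evaluate_strings(
#     prediction="The answer is 42.", reference=r"\b42\b"
# )
```

The returned dictionary follows the same convention as the built-in evaluators shown earlier, with a numeric `score` and a human-readable `value`.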
+ +String evaluators also implement the following methods: + +- `aevaluate_strings`: Asynchronously evaluates the output of the Chain or Language Model, with support for optional input and label. +- `evaluate_strings`: Synchronously evaluates the output of the Chain or Language Model, with support for optional input and label. + +The following sections provide detailed information on available string evaluator implementations as well as how to create a custom string evaluator. + import DocCardList from "@theme/DocCardList"; - \ No newline at end of file + From 8020f73bd20d6a0ca90c18d63b4095b4e46ffd84 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Wed, 26 Jul 2023 16:57:15 -0700 Subject: [PATCH 13/15] Update comparison index --- .../guides/evaluation/comparison/index.mdx | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/docs/docs_skeleton/docs/guides/evaluation/comparison/index.mdx b/docs/docs_skeleton/docs/guides/evaluation/comparison/index.mdx index e6b0a0bb4693c..8dc4fe7a0bf8e 100644 --- a/docs/docs_skeleton/docs/guides/evaluation/comparison/index.mdx +++ b/docs/docs_skeleton/docs/guides/evaluation/comparison/index.mdx @@ -3,6 +3,22 @@ sidebar_position: 3 --- # Comparison Evaluators +Comparison evaluators in LangChain help measure two different chain or LLM outputs. These evaluators are helpful for comparative analyses, such as A/B testing between two language models, or comparing different versions of the same model. They can also be useful for things like generating preference scores for ai-assisted reinforcement learning. + +These evaluators inherit from the `PairwiseStringEvaluator` class, providing a comparison interface for two strings - typically, the outputs from two different prompts or models, or two versions of the same model. In essence, a comparison evaluator performs an evaluation on a pair of strings and returns a dictionary containing the evaluation score and other relevant details. + +To create a custom comparison evaluator, inherit from the `PairwiseStringEvaluator` class and overwrite the `_evaluate_string_pairs` method. If you require asynchronous evaluation, also overwrite the `_aevaluate_string_pairs` method. + +Here's a summary of the key methods and properties of a comparison evaluator: + +- `evaluate_string_pairs`: Evaluate the output string pairs. This function should be overwritten when creating custom evaluators. +- `aevaluate_string_pairs`: Asynchronously evaluate the output string pairs. This function should be overwritten for asynchronous evaluation. +- `requires_input`: This property indicates whether this evaluator requires an input string. +- `requires_reference`: This property specifies whether this evaluator requires a reference label. + +Detailed information about creating custom evaluators and the available built-in comparison evaluators are provided in the following sections. 
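As a rough illustration of the interface described above, the following sketch implements a custom comparison evaluator. It assumes `PairwiseStringEvaluator` can be imported from `langchain.evaluation.schema` (the import path used in the code changes in this patch); the `LengthPreferenceEvaluator` class and its shorter-is-better rule are hypothetical, chosen only to keep the example self-contained.

```python
from typing import Any, Optional

from langchain.evaluation.schema import PairwiseStringEvaluator


class LengthPreferenceEvaluator(PairwiseStringEvaluator):
    """Toy evaluator that prefers the more concise of two predictions."""

    @property
    def requires_input(self) -> bool:
        return False  # no prompt is needed for this comparison

    @property
    def requires_reference(self) -> bool:
        return False  # no ground-truth label is needed either

    def _evaluate_string_pairs(
        self,
        *,
        prediction: str,
        prediction_b: str,
        reference: Optional[str] = None,
        input: Optional[str] = None,
        **kwargs: Any,
    ) -> dict:
        prefer_a = len(prediction) <= len(prediction_b)
        return {
            "value": "A" if prefer_a else "B",
            "score": 1 if prefer_a else 0,
            "reasoning": "Prefers the shorter response in this toy heuristic.",
        }


# Example usage (the public evaluate_string_pairs method wraps _evaluate_string_pairs):
# LengthPreferenceEvaluator().evaluate_string_pairs(
#     prediction="Four dogs.", prediction_b="There are three dogs in the park."
# )
```

The keys in the returned dictionary (`value`, `score`, `reasoning`) mirror those produced by the built-in pairwise evaluators shown in the notebooks above.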
+ import DocCardList from "@theme/DocCardList"; - \ No newline at end of file + + From 8e601fcc4ed608153fe4c6fa8c144ac4850673a0 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Wed, 26 Jul 2023 17:32:18 -0700 Subject: [PATCH 14/15] Update --- docs/api_reference/guide_imports.json | 2 +- .../guides/evaluation/trajectory/index.mdx | 25 ++++++++++++++++++- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/docs/api_reference/guide_imports.json b/docs/api_reference/guide_imports.json index b91521893e18f..0d895270aa01d 100644 --- a/docs/api_reference/guide_imports.json +++ b/docs/api_reference/guide_imports.json @@ -1 +1 @@ -{"DeepInfraEmbeddings": {"DeepInfra": "https://python.langchain.com/docs/integrations/text_embedding/deepinfra"}, "HuggingFaceEmbeddings": {"Hugging Face Hub": "https://python.langchain.com/docs/integrations/text_embedding/huggingfacehub", "Sentence Transformers Embeddings": "https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers", "LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever", "Hugging Face": "https://python.langchain.com/docs/integrations/providers/huggingface", "Annoy": "https://python.langchain.com/docs/integrations/vectorstores/annoy", "Lost in the middle: The problem with long contexts": "https://python.langchain.com/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder", "Pairwise Embedding Distance ": "https://python.langchain.com/docs/modules/evaluation/comparison/pairwise_embedding_distance", "Embedding Distance": "https://python.langchain.com/docs/modules/evaluation/string/embedding_distance"}, "GPT4AllEmbeddings": {"GPT4All": "https://python.langchain.com/docs/integrations/text_embedding/gpt4all", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa"}, "MosaicMLInstructorEmbeddings": {"MosaicML embeddings": "https://python.langchain.com/docs/integrations/text_embedding/mosaicml"}, "OpenAIEmbeddings": {"OpenAI": "https://python.langchain.com/docs/integrations/providers/openai", "AzureOpenAI": "https://python.langchain.com/docs/integrations/text_embedding/azureopenai", "Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "kNN": "https://python.langchain.com/docs/integrations/retrievers/knn", "DocArray Retriever": "https://python.langchain.com/docs/integrations/retrievers/docarray_retriever", "SVM": "https://python.langchain.com/docs/integrations/retrievers/svm", "Pinecone Hybrid Search": "https://python.langchain.com/docs/integrations/retrievers/pinecone_hybrid_search", "LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever", "Azure OpenAI": "https://python.langchain.com/docs/integrations/providers/azure_openai", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "Vectorstore Agent": "https://python.langchain.com/docs/integrations/toolkits/vectorstore", "LanceDB": "https://python.langchain.com/docs/integrations/vectorstores/lancedb", "Weaviate": "https://python.langchain.com/docs/integrations/vectorstores/weaviate", "Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/deeplake", "Redis": "https://python.langchain.com/docs/integrations/vectorstores/redis", "PGVector": "https://python.langchain.com/docs/integrations/vectorstores/pgvector", "Rockset": 
"https://python.langchain.com/docs/integrations/vectorstores/rockset", "Zilliz": "https://python.langchain.com/docs/integrations/vectorstores/zilliz", "SingleStoreDB": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb", "Typesense": "https://python.langchain.com/docs/integrations/vectorstores/typesense", "Atlas": "https://python.langchain.com/docs/integrations/vectorstores/atlas", "Chroma": "https://python.langchain.com/docs/integrations/vectorstores/chroma", "Alibaba Cloud OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch", "StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks", "scikit-learn": "https://python.langchain.com/docs/integrations/vectorstores/sklearn", "DocArrayHnswSearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_hnsw", "MyScale": "https://python.langchain.com/docs/integrations/vectorstores/myscale", "ClickHouse Vector Search": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse", "Qdrant": "https://python.langchain.com/docs/integrations/vectorstores/qdrant", "Tigris": "https://python.langchain.com/docs/integrations/vectorstores/tigris", "Supabase (Postgres)": "https://python.langchain.com/docs/integrations/vectorstores/supabase", "OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/opensearch", "Pinecone": "https://python.langchain.com/docs/integrations/vectorstores/pinecone", "Azure Cognitive Search": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch", "Cassandra": "https://python.langchain.com/docs/integrations/vectorstores/cassandra", "Milvus": "https://python.langchain.com/docs/integrations/vectorstores/milvus", "ElasticSearch": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch", "DocArrayInMemorySearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory", "pg_embedding": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding", "FAISS": "https://python.langchain.com/docs/integrations/vectorstores/faiss", "AnalyticDB": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb", "Hologres": "https://python.langchain.com/docs/integrations/vectorstores/hologres", "MongoDB Atlas": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas", "Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio", "Psychic": "https://python.langchain.com/docs/integrations/document_loaders/psychic", "Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching", "AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Question answering over a group chat messages using Activeloop's DeepLake": "https://python.langchain.com/docs/use_cases/question_answering/semantic-search-over-chat", 
"Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": "https://python.langchain.com/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake", "Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/code/code-analysis-deeplake", "Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval", "Generative Agents in LangChain": "https://python.langchain.com/docs/use_cases/agent_simulations/characters", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "Weaviate self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/weaviate_self_query", "Chroma self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/chroma_self_query", "DeepLake self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/deeplake_self_query", "Self-querying with Pinecone": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/pinecone", "Self-querying with MyScale": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/myscale_self_query", "Qdrant self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/qdrant_self_query", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Custom agent with tool retrieval": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval", "Select by maximal marginal relevance (MMR)": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/mmr", "Loading from LangChainHub": "https://python.langchain.com/docs/modules/chains/how_to/from_hub", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare", "Hypothetical Document Embeddings": "https://python.langchain.com/docs/modules/chains/additional/hyde"}, "VertexAIEmbeddings": {"Google Cloud Platform Vertex AI PaLM ": "https://python.langchain.com/docs/integrations/text_embedding/google_vertex_ai_palm"}, "BedrockEmbeddings": {"Bedrock Embeddings": "https://python.langchain.com/docs/integrations/text_embedding/bedrock", "Bedrock": "https://python.langchain.com/docs/integrations/providers/bedrock"}, "LlamaCppEmbeddings": {"Llama-cpp": "https://python.langchain.com/docs/integrations/text_embedding/llamacpp", "Llama.cpp": "https://python.langchain.com/docs/integrations/providers/llamacpp"}, "NLPCloudEmbeddings": {"NLP Cloud": "https://python.langchain.com/docs/integrations/text_embedding/nlp_cloud"}, "SpacyEmbeddings": {"Spacy Embedding": "https://python.langchain.com/docs/integrations/text_embedding/spacy_embedding"}, 
"HuggingFaceInstructEmbeddings": {"InstructEmbeddings": "https://python.langchain.com/docs/integrations/text_embedding/instruct_embeddings"}, "CohereEmbeddings": {"Cohere": "https://python.langchain.com/docs/integrations/providers/cohere", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Router": "https://python.langchain.com/docs/modules/chains/foundational/router"}, "ClarifaiEmbeddings": {"Clarifai": "https://python.langchain.com/docs/integrations/providers/clarifai"}, "MiniMaxEmbeddings": {"MiniMax": "https://python.langchain.com/docs/integrations/text_embedding/minimax"}, "FakeEmbeddings": {"Fake Embeddings": "https://python.langchain.com/docs/integrations/text_embedding/fake", "DocArray Retriever": "https://python.langchain.com/docs/integrations/retrievers/docarray_retriever", "Vectara": "https://python.langchain.com/docs/integrations/vectorstores/vectara", "Tair": "https://python.langchain.com/docs/integrations/vectorstores/tair"}, "ElasticsearchEmbeddings": {"Elasticsearch": "https://python.langchain.com/docs/integrations/text_embedding/elasticsearch", "ElasticSearch": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch"}, "EmbaasEmbeddings": {"Embaas": "https://python.langchain.com/docs/integrations/text_embedding/embaas"}, "JinaEmbeddings": {"Jina": "https://python.langchain.com/docs/integrations/providers/jina"}, "AlephAlphaAsymmetricSemanticEmbedding": {"Aleph Alpha": "https://python.langchain.com/docs/integrations/text_embedding/aleph_alpha"}, "AlephAlphaSymmetricSemanticEmbedding": {"Aleph Alpha": "https://python.langchain.com/docs/integrations/providers/aleph_alpha"}, "DashScopeEmbeddings": {"DashScope": "https://python.langchain.com/docs/integrations/text_embedding/dashscope"}, "TensorflowHubEmbeddings": {"TensorflowHub": "https://python.langchain.com/docs/integrations/text_embedding/tensorflowhub"}, "ModelScopeEmbeddings": {"ModelScope": "https://python.langchain.com/docs/integrations/providers/modelscope"}, "SagemakerEndpointEmbeddings": {"SageMaker Endpoint Embeddings": "https://python.langchain.com/docs/integrations/text_embedding/sagemaker-endpoint", "SageMaker Endpoint": "https://python.langchain.com/docs/integrations/providers/sagemaker_endpoint"}, "EmbeddingsContentHandler": {"SageMaker Endpoint Embeddings": "https://python.langchain.com/docs/integrations/text_embedding/sagemaker-endpoint"}, "LocalAIEmbeddings": {"LocalAI": "https://python.langchain.com/docs/integrations/text_embedding/localai"}, "ElasticSearchBM25Retriever": {"ElasticSearch BM25": "https://python.langchain.com/docs/integrations/retrievers/elastic_search_bm25", "Elasticsearch": "https://python.langchain.com/docs/integrations/providers/elasticsearch"}, "ZepChatMessageHistory": {"Zep": "https://python.langchain.com/docs/integrations/retrievers/zep_memorystore"}, "HumanMessage": {"Zep": "https://python.langchain.com/docs/integrations/retrievers/zep_memorystore", "Zep Memory": "https://python.langchain.com/docs/integrations/memory/zep_memory", "Google Cloud Platform Vertex AI PaLM ": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm", "Azure": "https://python.langchain.com/docs/integrations/chat/azure_chat_openai", "PromptLayer ChatOpenAI": "https://python.langchain.com/docs/integrations/chat/promptlayer_chatopenai", "MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway", "Flyte": 
"https://python.langchain.com/docs/integrations/providers/flyte", "Arthur": "https://python.langchain.com/docs/integrations/providers/arthur_tracking", "Custom callback handlers": "https://python.langchain.com/docs/modules/callbacks/custom_callbacks", "Tools as OpenAI Functions": "https://python.langchain.com/docs/modules/agents/tools/tools_as_openai_functions", "Prompt Pipelining": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining", "Using OpenAI functions": "https://python.langchain.com/docs/modules/chains/popular/openai_functions"}, "ZepRetriever": {"Zep": "https://python.langchain.com/docs/integrations/providers/zep", "Zep Memory": "https://python.langchain.com/docs/integrations/memory/zep_memory"}, "VespaRetriever": {"Vespa": "https://python.langchain.com/docs/integrations/providers/vespa"}, "AmazonKendraRetriever": {"Amazon Kendra": "https://python.langchain.com/docs/integrations/retrievers/amazon_kendra_retriever"}, "RecursiveCharacterTextSplitter": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "MarkdownHeaderTextSplitter": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/markdown_header_metadata"}, "TextLoader": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "Vectorstore Agent": "https://python.langchain.com/docs/integrations/toolkits/vectorstore", "LanceDB": "https://python.langchain.com/docs/integrations/vectorstores/lancedb", "Weaviate": "https://python.langchain.com/docs/integrations/vectorstores/weaviate", "Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/deeplake", "Vectara": "https://python.langchain.com/docs/integrations/vectorstores/vectara", "Redis": "https://python.langchain.com/docs/integrations/vectorstores/redis", "PGVector": "https://python.langchain.com/docs/integrations/vectorstores/pgvector", "Rockset": "https://python.langchain.com/docs/integrations/vectorstores/rockset", "Zilliz": "https://python.langchain.com/docs/integrations/vectorstores/zilliz", "SingleStoreDB": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb", "Annoy": "https://python.langchain.com/docs/integrations/vectorstores/annoy", "Typesense": "https://python.langchain.com/docs/integrations/vectorstores/typesense", "Atlas": "https://python.langchain.com/docs/integrations/vectorstores/atlas", "Tair": "https://python.langchain.com/docs/integrations/vectorstores/tair", "Chroma": "https://python.langchain.com/docs/integrations/vectorstores/chroma", "Alibaba Cloud OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch", 
"StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks", "Clarifai": "https://python.langchain.com/docs/integrations/vectorstores/clarifai", "scikit-learn": "https://python.langchain.com/docs/integrations/vectorstores/sklearn", "DocArrayHnswSearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_hnsw", "MyScale": "https://python.langchain.com/docs/integrations/vectorstores/myscale", "ClickHouse Vector Search": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse", "Qdrant": "https://python.langchain.com/docs/integrations/vectorstores/qdrant", "Tigris": "https://python.langchain.com/docs/integrations/vectorstores/tigris", "AwaDB": "https://python.langchain.com/docs/integrations/vectorstores/awadb", "Supabase (Postgres)": "https://python.langchain.com/docs/integrations/vectorstores/supabase", "OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/opensearch", "Pinecone": "https://python.langchain.com/docs/integrations/vectorstores/pinecone", "Azure Cognitive Search": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch", "Cassandra": "https://python.langchain.com/docs/integrations/vectorstores/cassandra", "Milvus": "https://python.langchain.com/docs/integrations/vectorstores/milvus", "ElasticSearch": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch", "Marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo", "DocArrayInMemorySearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory", "pg_embedding": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding", "FAISS": "https://python.langchain.com/docs/integrations/vectorstores/faiss", "AnalyticDB": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb", "Hologres": "https://python.langchain.com/docs/integrations/vectorstores/hologres", "MongoDB Atlas": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas", "Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": "https://python.langchain.com/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake", "Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/code/code-analysis-deeplake", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Loading from LangChainHub": "https://python.langchain.com/docs/modules/chains/how_to/from_hub", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Graph QA": "https://python.langchain.com/docs/modules/chains/additional/graph_qa"}, "FAISS": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "FAISS": "https://python.langchain.com/docs/integrations/vectorstores/faiss", "Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio", "AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent", "!pip install bs4": 
"https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval", "Generative Agents in LangChain": "https://python.langchain.com/docs/use_cases/agent_simulations/characters", "Custom agent with tool retrieval": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval", "Select by maximal marginal relevance (MMR)": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/mmr"}, "OpenAI": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "Google Serper API": "https://python.langchain.com/docs/integrations/tools/google_serper", "Human as a tool": "https://python.langchain.com/docs/integrations/tools/human_tools", "OpenWeatherMap API": "https://python.langchain.com/docs/integrations/tools/openweathermap", "Search Tools": "https://python.langchain.com/docs/integrations/tools/search_tools", "Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier", "Gradio Tools": "https://python.langchain.com/docs/integrations/tools/gradio_tools", "SceneXplain": "https://python.langchain.com/docs/integrations/tools/sceneXplain", "Entity Memory with SQLite storage": "https://python.langchain.com/docs/integrations/memory/entity_memory_with_sqlite", "Argilla": "https://python.langchain.com/docs/integrations/callbacks/argilla", "PromptLayer": "https://python.langchain.com/docs/integrations/callbacks/promptlayer", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/streamlit", "WandB Tracing": "https://python.langchain.com/docs/integrations/providers/agent_with_wandb_tracing", "Comet": "https://python.langchain.com/docs/integrations/providers/comet_tracking", "Aim": "https://python.langchain.com/docs/integrations/providers/aim_tracking", "Weights & Biases": "https://python.langchain.com/docs/integrations/providers/wandb_tracking", "OpenAI": "https://python.langchain.com/docs/integrations/llms/openai", "Rebuff": "https://python.langchain.com/docs/integrations/providers/rebuff", "MLflow": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking", "Google Serper": "https://python.langchain.com/docs/integrations/providers/google_serper", "Helicone": "https://python.langchain.com/docs/integrations/providers/helicone", "Shale Protocol": "https://python.langchain.com/docs/integrations/providers/shaleprotocol", "WhyLabs": "https://python.langchain.com/docs/integrations/providers/whylabs_profiling", "ClearML": "https://python.langchain.com/docs/integrations/providers/clearml_tracking", "Ray Serve": "https://python.langchain.com/docs/integrations/providers/ray_serve", "Log, Trace, and Monitor Langchain LLM Calls": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "Vectara Text Generation": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_text_generation", "CSV Agent": "https://python.langchain.com/docs/integrations/toolkits/csv", "Xorbits Agent": "https://python.langchain.com/docs/integrations/toolkits/xorbits", "Jira": 
"https://python.langchain.com/docs/integrations/toolkits/jira", "Spark Dataframe Agent": "https://python.langchain.com/docs/integrations/toolkits/spark", "Python Agent": "https://python.langchain.com/docs/integrations/toolkits/python", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database", "Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla", "JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json", "GitHub": "https://python.langchain.com/docs/integrations/toolkits/github", "Pandas Dataframe Agent": "https://python.langchain.com/docs/integrations/toolkits/pandas", "OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi", "Psychic": "https://python.langchain.com/docs/integrations/document_loaders/psychic", "Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching", "HuggingGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/hugginggpt", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "Question answering over a group chat messages using Activeloop's DeepLake": "https://python.langchain.com/docs/use_cases/question_answering/semantic-search-over-chat", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Weaviate self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/weaviate_self_query", "Chroma self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/chroma_self_query", "DeepLake self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/deeplake_self_query", "Self-querying with Pinecone": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/pinecone", "Self-querying with MyScale": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/myscale_self_query", "Qdrant self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/qdrant_self_query", "Lost in the middle: The problem with long contexts": "https://python.langchain.com/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Conversation Knowledge Graph Memory": "https://python.langchain.com/docs/modules/memory/kg", "ConversationTokenBufferMemory": "https://python.langchain.com/docs/modules/memory/token_buffer", "How to use multiple memory classes in the same chain": "https://python.langchain.com/docs/modules/memory/multiple_memory", "How to customize conversational memory": "https://python.langchain.com/docs/modules/memory/conversational_customization", "ConversationSummaryBufferMemory": "https://python.langchain.com/docs/modules/memory/summary_buffer", "Multiple callback handlers": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks", "Token counting": "https://python.langchain.com/docs/modules/callbacks/token_counting", "Logging to file": 
"https://python.langchain.com/docs/modules/callbacks/filecallbackhandler", "Multi-Input Tools": "https://python.langchain.com/docs/modules/agents/tools/multi_input_tool", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Tool Input Schema": "https://python.langchain.com/docs/modules/agents/tools/tool_input_validation", "Human-in-the-loop Tool Validation": "https://python.langchain.com/docs/modules/agents/tools/human_approval", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Access intermediate steps": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps", "Timeouts for agents": "https://python.langchain.com/docs/modules/agents/how_to/max_time_limit", "Streaming final agent output": "https://python.langchain.com/docs/modules/agents/how_to/streaming_stdout_final_only", "Cap the max number of iterations": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations", "Async API": "https://python.langchain.com/docs/modules/chains/how_to/async_chain", "Tracking token usage": "https://python.langchain.com/docs/modules/model_io/models/llms/token_usage_tracking", "Serialization": "https://python.langchain.com/docs/modules/model_io/models/llms/llm_serialization", "Retry parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/retry", "Datetime parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/datetime", "Pydantic (JSON) parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/pydantic", "Router": "https://python.langchain.com/docs/modules/chains/foundational/router", "Transformation": "https://python.langchain.com/docs/modules/chains/foundational/transformation", "Bash chain": "https://python.langchain.com/docs/modules/chains/additional/llm_bash", "Summarization checker chain": "https://python.langchain.com/docs/modules/chains/additional/llm_summarization_checker", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "HTTP request chain": "https://python.langchain.com/docs/modules/chains/additional/llm_requests", "LLM Symbolic Math ": "https://python.langchain.com/docs/modules/chains/additional/llm_symbolic_math", "FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare", "Hypothetical Document Embeddings": "https://python.langchain.com/docs/modules/chains/additional/hyde", "Graph QA": "https://python.langchain.com/docs/modules/chains/additional/graph_qa", "Self-checking chain": "https://python.langchain.com/docs/modules/chains/additional/llm_checker", "OpenAPI chain": "https://python.langchain.com/docs/modules/chains/additional/openapi"}, "ContextualCompressionRetriever": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever"}, "CohereRerank": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere"}, "RetrievalQA": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/deeplake", "StarRocks": 
"https://python.langchain.com/docs/integrations/vectorstores/starrocks", "Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio", "Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "KNNRetriever": {"kNN": "https://python.langchain.com/docs/integrations/retrievers/knn"}, "WikipediaRetriever": {"Wikipedia": "https://python.langchain.com/docs/integrations/providers/wikipedia"}, "ChatOpenAI": {"Wikipedia": "https://python.langchain.com/docs/integrations/retrievers/wikipedia", "Arxiv": "https://python.langchain.com/docs/integrations/retrievers/arxiv", "ChatGPT Plugins": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins", "Human as a tool": "https://python.langchain.com/docs/integrations/tools/human_tools", "ArXiv API Tool": "https://python.langchain.com/docs/integrations/tools/arxiv", "Metaphor Search": "https://python.langchain.com/docs/integrations/tools/metaphor_search", "Shell Tool": "https://python.langchain.com/docs/integrations/tools/bash", "Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history", "OpenAI": "https://python.langchain.com/docs/integrations/chat/openai", "Context": "https://python.langchain.com/docs/integrations/callbacks/context", "PromptLayer": "https://python.langchain.com/docs/integrations/callbacks/promptlayer", "CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte", "Arthur": "https://python.langchain.com/docs/integrations/providers/arthur_tracking", "CSV Agent": "https://python.langchain.com/docs/integrations/toolkits/csv", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "Python Agent": "https://python.langchain.com/docs/integrations/toolkits/python", "PowerBI Dataset Agent": "https://python.langchain.com/docs/integrations/toolkits/powerbi", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database", "Spark SQL Agent": "https://python.langchain.com/docs/integrations/toolkits/spark_sql", "Pandas Dataframe Agent": "https://python.langchain.com/docs/integrations/toolkits/pandas", "Multion Toolkit": "https://python.langchain.com/docs/integrations/toolkits/multion", "OpenAI Functions Metadata Tagger": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger", "Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio", "Figma": "https://python.langchain.com/docs/integrations/document_loaders/figma", "Debugging": "https://python.langchain.com/docs/guides/debugging", "LangSmith 
Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough", "AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Question answering over a group chat messages using Activeloop's DeepLake": "https://python.langchain.com/docs/use_cases/question_answering/semantic-search-over-chat", "Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": "https://python.langchain.com/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake", "Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/code/code-analysis-deeplake", "Wikibase Agent": "https://python.langchain.com/docs/use_cases/agents/wikibase_agent", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "CAMEL Role-Playing Autonomous Cooperative Agents": "https://python.langchain.com/docs/use_cases/agent_simulations/camel_role_playing", "Multi-Agent Simulated Environment: Petting Zoo": "https://python.langchain.com/docs/use_cases/agent_simulations/petting_zoo", "Multi-agent decentralized speaker selection": "https://python.langchain.com/docs/use_cases/agent_simulations/multiagent_bidding", "Multi-agent authoritarian speaker selection": "https://python.langchain.com/docs/use_cases/agent_simulations/multiagent_authoritarian", "Generative Agents in LangChain": "https://python.langchain.com/docs/use_cases/agent_simulations/characters", "Two-Player Dungeons & Dragons": "https://python.langchain.com/docs/use_cases/agent_simulations/two_player_dnd", "Multi-Player Dungeons & Dragons": "https://python.langchain.com/docs/use_cases/agent_simulations/multi_player_dnd", "Simulated Environment: Gymnasium": "https://python.langchain.com/docs/use_cases/agent_simulations/gymnasium", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "Custom callback handlers": "https://python.langchain.com/docs/modules/callbacks/custom_callbacks", "Async callbacks": "https://python.langchain.com/docs/modules/callbacks/async_callbacks", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Tools as OpenAI Functions": "https://python.langchain.com/docs/modules/agents/tools/tools_as_openai_functions", "OpenAI Multi Functions Agent": "https://python.langchain.com/docs/modules/agents/agent_types/openai_multi_functions_agent", "Handle parsing errors": "https://python.langchain.com/docs/modules/agents/how_to/handle_parsing_errors", "Running Agent as an Iterator": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter", "Add Memory to OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/add_memory_openai_functions", "Custom functions with OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent", "Use ToolKits with OpenAI Functions": 
"https://python.langchain.com/docs/modules/agents/how_to/use_toolkits_with_openai_functions", "Retry parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/retry", "Pydantic (JSON) parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/pydantic", "Few shot examples for chat models": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/few_shot_examples_chat", "Prompt Pipelining": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining", "Connecting to a Feature Store": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store", "Agent Trajectory": "https://python.langchain.com/docs/modules/evaluation/trajectory/trajectory_eval", "Custom Trajectory Evaluator": "https://python.langchain.com/docs/modules/evaluation/trajectory/custom", "QA Correctness": "https://python.langchain.com/docs/modules/evaluation/string/qa", "Custom chain": "https://python.langchain.com/docs/modules/chains/how_to/custom_chain", "Using OpenAI functions": "https://python.langchain.com/docs/modules/chains/popular/openai_functions", "Neptune Open Cypher QA Chain": "https://python.langchain.com/docs/modules/chains/additional/neptune_cypher_qa", "NebulaGraphQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa", "Elasticsearch database": "https://python.langchain.com/docs/modules/chains/additional/elasticsearch_database", "Extraction": "https://python.langchain.com/docs/modules/chains/additional/extraction", "KuzuQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_kuzu_qa", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "HugeGraph QA Chain": "https://python.langchain.com/docs/modules/chains/additional/graph_hugegraph_qa", "Tagging": "https://python.langchain.com/docs/modules/chains/additional/tagging", "GraphSparqlQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_sparql_qa", "Question-Answering Citations": "https://python.langchain.com/docs/modules/chains/additional/qa_citations", "FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare", "Graph DB QA chain": "https://python.langchain.com/docs/modules/chains/additional/graph_cypher_qa"}, "ConversationalRetrievalChain": {"Wikipedia": "https://python.langchain.com/docs/integrations/retrievers/wikipedia", "Arxiv": "https://python.langchain.com/docs/integrations/retrievers/arxiv", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Question answering over a group chat messages using Activeloop's DeepLake": "https://python.langchain.com/docs/use_cases/question_answering/semantic-search-over-chat", "Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": "https://python.langchain.com/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake", "Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/code/code-analysis-deeplake", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "MetalRetriever": {"Metal": "https://python.langchain.com/docs/integrations/providers/metal"}, "CSVLoader": {"ChatGPT 
Plugin": "https://python.langchain.com/docs/integrations/retrievers/chatgpt-plugin", "CSV": "https://python.langchain.com/docs/integrations/document_loaders/csv"}, "Document": {"ChatGPT Plugin": "https://python.langchain.com/docs/integrations/retrievers/chatgpt-plugin", "Weaviate Hybrid Search": "https://python.langchain.com/docs/integrations/retrievers/weaviate-hybrid", "BM25": "https://python.langchain.com/docs/integrations/retrievers/bm25", "TF-IDF": "https://python.langchain.com/docs/integrations/retrievers/tf_idf", "Apify": "https://python.langchain.com/docs/integrations/tools/apify", "Vectara Text Generation": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_text_generation", "PGVector": "https://python.langchain.com/docs/integrations/vectorstores/pgvector", "Annoy": "https://python.langchain.com/docs/integrations/vectorstores/annoy", "pg_embedding": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding", "FAISS": "https://python.langchain.com/docs/integrations/vectorstores/faiss", "OpenAI Functions Metadata Tagger": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger", "Doctran Extract Properties": "https://python.langchain.com/docs/integrations/document_transformers/doctran_extract_properties", "Doctran Interrogate Documents": "https://python.langchain.com/docs/integrations/document_transformers/doctran_interrogate_document", "Doctran Translate Documents": "https://python.langchain.com/docs/integrations/document_transformers/doctran_translate_document", "Copy Paste": "https://python.langchain.com/docs/integrations/document_loaders/copypaste", "Apify Dataset": "https://python.langchain.com/docs/integrations/document_loaders/apify_dataset", "Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "SageMakerEndpoint": "https://python.langchain.com/docs/integrations/llms/sagemaker", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval", "Weaviate self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/weaviate_self_query", "Chroma self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/chroma_self_query", "DeepLake self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/deeplake_self_query", "Self-querying with Pinecone": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/pinecone", "Self-querying with MyScale": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/myscale_self_query", "Qdrant self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/qdrant_self_query", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Custom agent with tool retrieval": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "FLARE": 
"https://python.langchain.com/docs/modules/chains/additional/flare"}, "ChatGPTPluginRetriever": {"ChatGPT Plugin": "https://python.langchain.com/docs/integrations/retrievers/chatgpt-plugin", "OpenAI": "https://python.langchain.com/docs/integrations/providers/openai"}, "GoogleCloudEnterpriseSearchRetriever": {"Google Cloud Enterprise Search": "https://python.langchain.com/docs/integrations/retrievers/google_cloud_enterprise_search"}, "DocArrayRetriever": {"DocArray Retriever": "https://python.langchain.com/docs/integrations/retrievers/docarray_retriever"}, "SVMRetriever": {"SVM": "https://python.langchain.com/docs/integrations/retrievers/svm", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index"}, "PineconeHybridSearchRetriever": {"Pinecone Hybrid Search": "https://python.langchain.com/docs/integrations/retrievers/pinecone_hybrid_search"}, "PubMedRetriever": {"PubMed": "https://python.langchain.com/docs/integrations/retrievers/pubmed"}, "WeaviateHybridSearchRetriever": {"Weaviate Hybrid Search": "https://python.langchain.com/docs/integrations/retrievers/weaviate-hybrid"}, "ArxivRetriever": {"Arxiv": "https://python.langchain.com/docs/integrations/providers/arxiv"}, "BM25Retriever": {"BM25": "https://python.langchain.com/docs/integrations/retrievers/bm25"}, "AzureCognitiveSearchRetriever": {"Azure Cognitive Search": "https://python.langchain.com/docs/integrations/providers/azure_cognitive_search_"}, "ChaindeskRetriever": {"Chaindesk": "https://python.langchain.com/docs/integrations/providers/chaindesk"}, "MergerRetriever": {"LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever"}, "Chroma": {"LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever", "Chroma": "https://python.langchain.com/docs/integrations/vectorstores/chroma", "Vectorstore Agent": "https://python.langchain.com/docs/integrations/toolkits/vectorstore", "StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks", "Psychic": "https://python.langchain.com/docs/integrations/document_loaders/psychic", "Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "Chroma self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/chroma_self_query", "Lost in the middle: The problem with long contexts": "https://python.langchain.com/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Router": "https://python.langchain.com/docs/modules/chains/foundational/router", "Loading from LangChainHub": 
"https://python.langchain.com/docs/modules/chains/how_to/from_hub", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "Hypothetical Document Embeddings": "https://python.langchain.com/docs/modules/chains/additional/hyde"}, "DocumentCompressorPipeline": {"LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever"}, "LongContextReorder": {"LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever"}, "TFIDFRetriever": {"TF-IDF": "https://python.langchain.com/docs/integrations/retrievers/tf_idf"}, "load_tools": {"ChatGPT Plugins": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins", "Human as a tool": "https://python.langchain.com/docs/integrations/tools/human_tools", "AWS Lambda API": "https://python.langchain.com/docs/integrations/tools/awslambda", "Requests": "https://python.langchain.com/docs/integrations/tools/requests", "OpenWeatherMap API": "https://python.langchain.com/docs/integrations/tools/openweathermap", "Search Tools": "https://python.langchain.com/docs/integrations/tools/search_tools", "ArXiv API Tool": "https://python.langchain.com/docs/integrations/tools/arxiv", "GraphQL tool": "https://python.langchain.com/docs/integrations/tools/graphql", "SceneXplain": "https://python.langchain.com/docs/integrations/tools/sceneXplain", "SerpAPI": "https://python.langchain.com/docs/integrations/providers/serpapi", "Golden": "https://python.langchain.com/docs/integrations/providers/golden", "Wolfram Alpha": "https://python.langchain.com/docs/integrations/providers/wolfram_alpha", "DataForSEO": "https://python.langchain.com/docs/integrations/providers/dataforseo", "SearxNG Search API": "https://python.langchain.com/docs/integrations/providers/searx", "Google Serper": "https://python.langchain.com/docs/integrations/providers/google_serper", "OpenWeatherMap": "https://python.langchain.com/docs/integrations/providers/openweathermap", "Google Search": "https://python.langchain.com/docs/integrations/providers/google_search", "Amazon API Gateway": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway_example", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Human-in-the-loop Tool Validation": "https://python.langchain.com/docs/modules/agents/tools/human_approval", "Access intermediate steps": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps", "Timeouts for agents": "https://python.langchain.com/docs/modules/agents/how_to/max_time_limit", "Streaming final agent output": "https://python.langchain.com/docs/modules/agents/how_to/streaming_stdout_final_only", "Cap the max number of iterations": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations", "Human input Chat Model": "https://python.langchain.com/docs/modules/model_io/models/chat/human_input_chat_model", "Fake LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/fake_llm", "Tracking token usage": "https://python.langchain.com/docs/modules/model_io/models/llms/token_usage_tracking", "Human input LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/human_input_llm"}, 
"AgentType": {"ChatGPT Plugins": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins", "Google Serper API": "https://python.langchain.com/docs/integrations/tools/google_serper", "Human as a tool": "https://python.langchain.com/docs/integrations/tools/human_tools", "Search Tools": "https://python.langchain.com/docs/integrations/tools/search_tools", "Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier", "Shell Tool": "https://python.langchain.com/docs/integrations/tools/bash", "Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history", "Argilla": "https://python.langchain.com/docs/integrations/callbacks/argilla", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/streamlit", "WandB Tracing": "https://python.langchain.com/docs/integrations/providers/agent_with_wandb_tracing", "Aim": "https://python.langchain.com/docs/integrations/providers/aim_tracking", "Weights & Biases": "https://python.langchain.com/docs/integrations/providers/wandb_tracking", "MLflow": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking", "Google Serper": "https://python.langchain.com/docs/integrations/providers/google_serper", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte", "ClearML": "https://python.langchain.com/docs/integrations/providers/clearml_tracking", "Log, Trace, and Monitor Langchain LLM Calls": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index", "CSV Agent": "https://python.langchain.com/docs/integrations/toolkits/csv", "Jira": "https://python.langchain.com/docs/integrations/toolkits/jira", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "Python Agent": "https://python.langchain.com/docs/integrations/toolkits/python", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database", "Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla", "GitHub": "https://python.langchain.com/docs/integrations/toolkits/github", "Pandas Dataframe Agent": "https://python.langchain.com/docs/integrations/toolkits/pandas", "Multion Toolkit": "https://python.langchain.com/docs/integrations/toolkits/multion", "Amazon API Gateway": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway_example", "Debugging": "https://python.langchain.com/docs/guides/debugging", "LangSmith Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough", "Multi-modal outputs: Image & Text": "https://python.langchain.com/docs/use_cases/multi_modal/image_agent", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Multiple callback handlers": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks", "Multi-Input Tools": "https://python.langchain.com/docs/modules/agents/tools/multi_input_tool", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Tool Input Schema": "https://python.langchain.com/docs/modules/agents/tools/tool_input_validation", "Human-in-the-loop Tool Validation": "https://python.langchain.com/docs/modules/agents/tools/human_approval", "Self ask with search": "https://python.langchain.com/docs/modules/agents/agent_types/self_ask_with_search", 
"ReAct document store": "https://python.langchain.com/docs/modules/agents/agent_types/react_docstore", "OpenAI Multi Functions Agent": "https://python.langchain.com/docs/modules/agents/agent_types/openai_multi_functions_agent", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Access intermediate steps": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps", "Handle parsing errors": "https://python.langchain.com/docs/modules/agents/how_to/handle_parsing_errors", "Timeouts for agents": "https://python.langchain.com/docs/modules/agents/how_to/max_time_limit", "Streaming final agent output": "https://python.langchain.com/docs/modules/agents/how_to/streaming_stdout_final_only", "Add Memory to OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/add_memory_openai_functions", "Cap the max number of iterations": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations", "Custom functions with OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent", "Async API": "https://python.langchain.com/docs/modules/agents/how_to/async_agent", "Use ToolKits with OpenAI Functions": "https://python.langchain.com/docs/modules/agents/how_to/use_toolkits_with_openai_functions", "Human input Chat Model": "https://python.langchain.com/docs/modules/model_io/models/chat/human_input_chat_model", "Fake LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/fake_llm", "Tracking token usage": "https://python.langchain.com/docs/modules/model_io/models/llms/token_usage_tracking", "Human input LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/human_input_llm", "Agent Trajectory": "https://python.langchain.com/docs/modules/evaluation/trajectory/trajectory_eval"}, "AIPluginTool": {"ChatGPT Plugins": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins"}, "Tool": {"DataForSeo API Wrapper": "https://python.langchain.com/docs/integrations/tools/dataforseo", "SerpAPI": "https://python.langchain.com/docs/integrations/tools/serpapi", "Google Search": "https://python.langchain.com/docs/integrations/tools/google_search", "Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Custom multi-action agent": "https://python.langchain.com/docs/modules/agents/how_to/custom_multi_action_agent", "Running Agent as an Iterator": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter", "Custom agent": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent"}, "SearxSearchWrapper": {"SearxNG Search API": "https://python.langchain.com/docs/integrations/providers/searx"}, "GoogleSerperAPIWrapper": {"Google Serper API": "https://python.langchain.com/docs/integrations/tools/google_serper", "Google Serper": 
"https://python.langchain.com/docs/integrations/providers/google_serper", "FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare"}, "initialize_agent": {"Google Serper API": "https://python.langchain.com/docs/integrations/tools/google_serper", "Search Tools": "https://python.langchain.com/docs/integrations/tools/search_tools", "Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier", "Metaphor Search": "https://python.langchain.com/docs/integrations/tools/metaphor_search", "Gradio Tools": "https://python.langchain.com/docs/integrations/tools/gradio_tools", "SceneXplain": "https://python.langchain.com/docs/integrations/tools/sceneXplain", "Shell Tool": "https://python.langchain.com/docs/integrations/tools/bash", "Zep Memory": "https://python.langchain.com/docs/integrations/memory/zep_memory", "Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history", "WandB Tracing": "https://python.langchain.com/docs/integrations/providers/agent_with_wandb_tracing", "Comet": "https://python.langchain.com/docs/integrations/providers/comet_tracking", "Aim": "https://python.langchain.com/docs/integrations/providers/aim_tracking", "Weights & Biases": "https://python.langchain.com/docs/integrations/providers/wandb_tracking", "MLflow": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking", "Google Serper": "https://python.langchain.com/docs/integrations/providers/google_serper", "ClearML": "https://python.langchain.com/docs/integrations/providers/clearml_tracking", "Jira": "https://python.langchain.com/docs/integrations/toolkits/jira", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "Azure Cognitive Services Toolkit": "https://python.langchain.com/docs/integrations/toolkits/azure_cognitive_services", "Gmail Toolkit": "https://python.langchain.com/docs/integrations/toolkits/gmail", "GitHub": "https://python.langchain.com/docs/integrations/toolkits/github", "PlayWright Browser Toolkit": "https://python.langchain.com/docs/integrations/toolkits/playwright", "Office365 Toolkit": "https://python.langchain.com/docs/integrations/toolkits/office365", "Amadeus Toolkit": "https://python.langchain.com/docs/integrations/toolkits/amadeus", "Amazon API Gateway": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway_example", "Multi-modal outputs: Image & Text": "https://python.langchain.com/docs/use_cases/multi_modal/image_agent", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Multi-Input Tools": "https://python.langchain.com/docs/modules/agents/tools/multi_input_tool", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Human-in-the-loop Tool Validation": "https://python.langchain.com/docs/modules/agents/tools/human_approval", "Self ask with search": "https://python.langchain.com/docs/modules/agents/agent_types/self_ask_with_search", "ReAct document store": "https://python.langchain.com/docs/modules/agents/agent_types/react_docstore", "OpenAI Multi Functions Agent": "https://python.langchain.com/docs/modules/agents/agent_types/openai_multi_functions_agent", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Access intermediate steps": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps", "Handle parsing 
errors": "https://python.langchain.com/docs/modules/agents/how_to/handle_parsing_errors", "Timeouts for agents": "https://python.langchain.com/docs/modules/agents/how_to/max_time_limit", "Streaming final agent output": "https://python.langchain.com/docs/modules/agents/how_to/streaming_stdout_final_only", "Add Memory to OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/add_memory_openai_functions", "Cap the max number of iterations": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations", "Custom functions with OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent", "Async API": "https://python.langchain.com/docs/modules/agents/how_to/async_agent", "Use ToolKits with OpenAI Functions": "https://python.langchain.com/docs/modules/agents/how_to/use_toolkits_with_openai_functions", "Human input Chat Model": "https://python.langchain.com/docs/modules/model_io/models/chat/human_input_chat_model", "Fake LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/fake_llm", "Tracking token usage": "https://python.langchain.com/docs/modules/model_io/models/llms/token_usage_tracking", "Human input LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/human_input_llm"}, "GooglePlacesTool": {"Google Places": "https://python.langchain.com/docs/integrations/tools/google_places"}, "HumanInputRun": {"Human as a tool": "https://python.langchain.com/docs/integrations/tools/human_tools", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times"}, "TwilioAPIWrapper": {"Twilio": "https://python.langchain.com/docs/integrations/tools/twilio"}, "IFTTTWebhook": {"IFTTT WebHooks": "https://python.langchain.com/docs/integrations/tools/ifttt"}, "WikipediaQueryRun": {"Wikipedia": "https://python.langchain.com/docs/integrations/tools/wikipedia"}, "WikipediaAPIWrapper": {"Wikipedia": "https://python.langchain.com/docs/integrations/tools/wikipedia", "Zep Memory": "https://python.langchain.com/docs/integrations/memory/zep_memory"}, "TextRequestsWrapper": {"Requests": "https://python.langchain.com/docs/integrations/tools/requests", "JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json", "OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi"}, "OpenWeatherMapAPIWrapper": {"OpenWeatherMap API": "https://python.langchain.com/docs/integrations/tools/openweathermap", "OpenWeatherMap": "https://python.langchain.com/docs/integrations/providers/openweathermap"}, "PubmedQueryRun": {"PubMed Tool": "https://python.langchain.com/docs/integrations/tools/pubmed"}, "YouTubeSearchTool": {"YouTubeSearchTool": "https://python.langchain.com/docs/integrations/tools/youtube"}, "VectorstoreIndexCreator": {"Apify": "https://python.langchain.com/docs/integrations/tools/apify", "HuggingFace dataset": "https://python.langchain.com/docs/integrations/document_loaders/hugging_face_dataset", "Spreedly": "https://python.langchain.com/docs/integrations/document_loaders/spreedly", "Image captions": "https://python.langchain.com/docs/integrations/document_loaders/image_captions", "Figma": "https://python.langchain.com/docs/integrations/document_loaders/figma", "Apify Dataset": "https://python.langchain.com/docs/integrations/document_loaders/apify_dataset", "Iugu": "https://python.langchain.com/docs/integrations/document_loaders/iugu", "Stripe": "https://python.langchain.com/docs/integrations/document_loaders/stripe", 
"Modern Treasury": "https://python.langchain.com/docs/integrations/document_loaders/modern_treasury", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index"}, "ZapierToolkit": {"Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier"}, "ZapierNLAWrapper": {"Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier"}, "LLMChain": {"Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier", "Argilla": "https://python.langchain.com/docs/integrations/callbacks/argilla", "Comet": "https://python.langchain.com/docs/integrations/providers/comet_tracking", "Aim": "https://python.langchain.com/docs/integrations/providers/aim_tracking", "Weights & Biases": "https://python.langchain.com/docs/integrations/providers/wandb_tracking", "Rebuff": "https://python.langchain.com/docs/integrations/providers/rebuff", "MLflow": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "Vectara Text Generation": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_text_generation", "Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla", "JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json", "Predibase": "https://python.langchain.com/docs/integrations/llms/predibase", "AzureML Online Endpoint": "https://python.langchain.com/docs/integrations/llms/azureml_endpoint_example", "Multi-agent authoritarian speaker selection": "https://python.langchain.com/docs/use_cases/agent_simulations/multiagent_authoritarian", "Logging to file": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler", "Datetime parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/datetime", "Prompt Pipelining": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining", "Connecting to a Feature Store": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store", "Custom Trajectory Evaluator": "https://python.langchain.com/docs/modules/evaluation/trajectory/custom", "Custom Pairwise Evaluator": "https://python.langchain.com/docs/modules/evaluation/comparison/custom", "Router": "https://python.langchain.com/docs/modules/chains/foundational/router", "Async API": "https://python.langchain.com/docs/modules/chains/how_to/async_chain", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "Hypothetical Document Embeddings": "https://python.langchain.com/docs/modules/chains/additional/hyde"}, "PromptTemplate": {"Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier", "Context": "https://python.langchain.com/docs/integrations/callbacks/context", "Argilla": "https://python.langchain.com/docs/integrations/callbacks/argilla", "Comet": "https://python.langchain.com/docs/integrations/providers/comet_tracking", "Aim": "https://python.langchain.com/docs/integrations/providers/aim_tracking", "Weights & Biases": 
"https://python.langchain.com/docs/integrations/providers/wandb_tracking", "Rebuff": "https://python.langchain.com/docs/integrations/providers/rebuff", "MLflow": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte", "Vectara Text Generation": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_text_generation", "Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla", "Predibase": "https://python.langchain.com/docs/integrations/llms/predibase", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "Lost in the middle: The problem with long contexts": "https://python.langchain.com/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder", "How to create a custom Memory class": "https://python.langchain.com/docs/modules/memory/custom_memory", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Conversation Knowledge Graph Memory": "https://python.langchain.com/docs/modules/memory/kg", "How to use multiple memory classes in the same chain": "https://python.langchain.com/docs/modules/memory/multiple_memory", "How to customize conversational memory": "https://python.langchain.com/docs/modules/memory/conversational_customization", "Logging to file": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler", "Datetime parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/datetime", "Select by n-gram overlap": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/ngram_overlap", "Prompt Pipelining": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining", "Template Formats": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/formats", "Connecting to a Feature Store": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store", "Pairwise String Comparison": "https://python.langchain.com/docs/modules/evaluation/comparison/pairwise_string", "Evaluating Custom Criteria": "https://python.langchain.com/docs/modules/evaluation/string/criteria_eval_chain", "Router": "https://python.langchain.com/docs/modules/chains/foundational/router", "Transformation": "https://python.langchain.com/docs/modules/chains/foundational/transformation", "Custom chain": "https://python.langchain.com/docs/modules/chains/how_to/custom_chain", "Async API": "https://python.langchain.com/docs/modules/chains/how_to/async_chain", "Bash chain": "https://python.langchain.com/docs/modules/chains/additional/llm_bash", "Elasticsearch database": "https://python.langchain.com/docs/modules/chains/additional/elasticsearch_database", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "HTTP request chain": "https://python.langchain.com/docs/modules/chains/additional/llm_requests", "Hypothetical Document Embeddings": 
"https://python.langchain.com/docs/modules/chains/additional/hyde"}, "ZapierNLARunAction": {"Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier"}, "GoldenQueryAPIWrapper": {"Golden Query": "https://python.langchain.com/docs/integrations/tools/golden_query", "Golden": "https://python.langchain.com/docs/integrations/providers/golden"}, "ArxivAPIWrapper": {"ArXiv API Tool": "https://python.langchain.com/docs/integrations/tools/arxiv"}, "MetaphorSearchAPIWrapper": {"Metaphor Search": "https://python.langchain.com/docs/integrations/tools/metaphor_search"}, "PlayWrightBrowserToolkit": {"Metaphor Search": "https://python.langchain.com/docs/integrations/tools/metaphor_search", "PlayWright Browser Toolkit": "https://python.langchain.com/docs/integrations/toolkits/playwright"}, "MetaphorSearchResults": {"Metaphor Search": "https://python.langchain.com/docs/integrations/tools/metaphor_search"}, "SerpAPIWrapper": {"SerpAPI": "https://python.langchain.com/docs/integrations/providers/serpapi", "AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt"}, "GraphQLAPIWrapper": {"GraphQL tool": "https://python.langchain.com/docs/integrations/tools/graphql"}, "DuckDuckGoSearchRun": {"DuckDuckGo Search": "https://python.langchain.com/docs/integrations/tools/ddg"}, "ConversationBufferMemory": {"Gradio Tools": "https://python.langchain.com/docs/integrations/tools/gradio_tools", "SceneXplain": "https://python.langchain.com/docs/integrations/tools/sceneXplain", "Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "Bedrock": "https://python.langchain.com/docs/integrations/llms/bedrock", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Adding Message Memory backed by a database to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "How to add Memory to an LLMChain": "https://python.langchain.com/docs/modules/memory/adding_memory", "How to customize conversational memory": "https://python.langchain.com/docs/modules/memory/conversational_customization", "How to add Memory to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory", "Shared memory across agents and tools": "https://python.langchain.com/docs/modules/agents/how_to/sharedmemory_for_tools", "Add Memory to OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/add_memory_openai_functions", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "SceneXplainTool": {"SceneXplain": "https://python.langchain.com/docs/integrations/tools/sceneXplain"}, "WolframAlphaAPIWrapper": {"Wolfram Alpha": "https://python.langchain.com/docs/integrations/providers/wolfram_alpha"}, "load_huggingface_tool": {"Requires transformers>=4.29.0 and huggingface_hub>=0.14.1": "https://python.langchain.com/docs/integrations/tools/huggingface_tools"}, "GoogleSearchAPIWrapper": {"Google Search": "https://python.langchain.com/docs/integrations/providers/google_search", "Adding 
Message Memory backed by a database to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db", "How to add Memory to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory", "Shared memory across agents and tools": "https://python.langchain.com/docs/modules/agents/how_to/sharedmemory_for_tools"}, "BingSearchAPIWrapper": {"Bing Search": "https://python.langchain.com/docs/integrations/tools/bing_search"}, "ShellTool": {"Shell Tool": "https://python.langchain.com/docs/integrations/tools/bash", "Human-in-the-loop Tool Validation": "https://python.langchain.com/docs/modules/agents/tools/human_approval"}, "FileManagementToolkit": {"File System Tools": "https://python.langchain.com/docs/integrations/tools/filesystem"}, "BraveSearch": {"Brave Search": "https://python.langchain.com/docs/integrations/providers/brave_search"}, "RedisChatMessageHistory": {"Redis Chat Message History": "https://python.langchain.com/docs/integrations/memory/redis_chat_message_history", "Adding Message Memory backed by a database to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db"}, "ZepMemory": {"Zep Memory": "https://python.langchain.com/docs/integrations/memory/zep_memory"}, "ConversationChain": {"Entity Memory with SQLite storage": "https://python.langchain.com/docs/integrations/memory/entity_memory_with_sqlite", "Figma": "https://python.langchain.com/docs/integrations/document_loaders/figma", "Bedrock": "https://python.langchain.com/docs/integrations/llms/bedrock", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Conversation Knowledge Graph Memory": "https://python.langchain.com/docs/modules/memory/kg", "ConversationTokenBufferMemory": "https://python.langchain.com/docs/modules/memory/token_buffer", "How to use multiple memory classes in the same chain": "https://python.langchain.com/docs/modules/memory/multiple_memory", "How to customize conversational memory": "https://python.langchain.com/docs/modules/memory/conversational_customization", "ConversationSummaryBufferMemory": "https://python.langchain.com/docs/modules/memory/summary_buffer", "Router": "https://python.langchain.com/docs/modules/chains/foundational/router"}, "ConversationEntityMemory": {"Entity Memory with SQLite storage": "https://python.langchain.com/docs/integrations/memory/entity_memory_with_sqlite"}, "SQLiteEntityStore": {"Entity Memory with SQLite storage": "https://python.langchain.com/docs/integrations/memory/entity_memory_with_sqlite"}, "ENTITY_MEMORY_CONVERSATION_TEMPLATE": {"Entity Memory with SQLite storage": "https://python.langchain.com/docs/integrations/memory/entity_memory_with_sqlite"}, "PostgresChatMessageHistory": {"Postgres Chat Message History": "https://python.langchain.com/docs/integrations/memory/postgres_chat_message_history"}, "MomentoChatMessageHistory": {"Momento Chat Message History": "https://python.langchain.com/docs/integrations/memory/momento_chat_message_history"}, "MongoDBChatMessageHistory": {"Mongodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/mongodb_chat_message_history"}, "CassandraChatMessageHistory": {"Cassandra Chat Message History": "https://python.langchain.com/docs/integrations/memory/cassandra_chat_message_history", "Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra"}, "MotorheadMemory": {"Mot\u00f6rhead Memory": 
"https://python.langchain.com/docs/integrations/memory/motorhead_memory", "Mot\u00f6rhead Memory (Managed)": "https://python.langchain.com/docs/integrations/memory/motorhead_memory_managed"}, "DynamoDBChatMessageHistory": {"Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history"}, "PythonREPL": {"Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history", "Python Agent": "https://python.langchain.com/docs/integrations/toolkits/python"}, "ChatAnthropic": {"Anthropic": "https://python.langchain.com/docs/integrations/chat/anthropic", "PlayWright Browser Toolkit": "https://python.langchain.com/docs/integrations/toolkits/playwright", "Agent Trajectory": "https://python.langchain.com/docs/modules/evaluation/trajectory/trajectory_eval", "Custom Pairwise Evaluator": "https://python.langchain.com/docs/modules/evaluation/comparison/custom", "Pairwise String Comparison": "https://python.langchain.com/docs/modules/evaluation/comparison/pairwise_string", "Evaluating Custom Criteria": "https://python.langchain.com/docs/modules/evaluation/string/criteria_eval_chain"}, "AIMessage": {"Anthropic": "https://python.langchain.com/docs/integrations/chat/anthropic", "OpenAI": "https://python.langchain.com/docs/integrations/chat/openai", "JinaChat": "https://python.langchain.com/docs/integrations/chat/jinachat", "Few shot examples for chat models": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/few_shot_examples_chat"}, "CallbackManager": {"Anthropic": "https://python.langchain.com/docs/integrations/chat/anthropic", "Llama-cpp": "https://python.langchain.com/docs/integrations/llms/llamacpp", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa"}, "StreamingStdOutCallbackHandler": {"Anthropic": "https://python.langchain.com/docs/integrations/chat/anthropic", "GPT4All": "https://python.langchain.com/docs/integrations/llms/gpt4all", "Arthur": "https://python.langchain.com/docs/integrations/providers/arthur_tracking", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "Llama-cpp": "https://python.langchain.com/docs/integrations/llms/llamacpp", "C Transformers": "https://python.langchain.com/docs/integrations/llms/ctransformers", "Huggingface TextGen Inference": "https://python.langchain.com/docs/integrations/llms/huggingface_textgen_inference", "Replicate": "https://python.langchain.com/docs/integrations/llms/replicate", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa"}, "create_tagging_chain": {"Llama API": "https://python.langchain.com/docs/integrations/chat/llama_api", "Tagging": "https://python.langchain.com/docs/modules/chains/additional/tagging"}, "ChatVertexAI": {"Google Cloud Platform Vertex AI PaLM ": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm"}, "JinaChat": {"JinaChat": "https://python.langchain.com/docs/integrations/chat/jinachat"}, "AzureChatOpenAI": {"Azure": "https://python.langchain.com/docs/integrations/chat/azure_chat_openai", "Azure OpenAI": "https://python.langchain.com/docs/integrations/providers/azure_openai"}, "PromptLayerChatOpenAI": {"PromptLayer ChatOpenAI": "https://python.langchain.com/docs/integrations/chat/promptlayer_chatopenai"}, "ContextCallbackHandler": {"Context": 
"https://python.langchain.com/docs/integrations/callbacks/context"}, "ArgillaCallbackHandler": {"Argilla": "https://python.langchain.com/docs/integrations/providers/argilla"}, "PromptLayerCallbackHandler": {"PromptLayer": "https://python.langchain.com/docs/integrations/callbacks/promptlayer"}, "GPT4All": {"PromptLayer": "https://python.langchain.com/docs/integrations/callbacks/promptlayer", "GPT4All": "https://python.langchain.com/docs/integrations/llms/gpt4all", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa"}, "StreamlitCallbackHandler": {"Streamlit": "https://python.langchain.com/docs/integrations/callbacks/streamlit", "GPT4All": "https://python.langchain.com/docs/integrations/providers/gpt4all"}, "FigmaFileLoader": {"Figma": "https://python.langchain.com/docs/integrations/document_loaders/figma"}, "AzureOpenAI": {"Azure OpenAI": "https://python.langchain.com/docs/integrations/llms/azure_openai_example", "OpenAI": "https://python.langchain.com/docs/integrations/providers/openai"}, "MyScale": {"MyScale": "https://python.langchain.com/docs/integrations/vectorstores/myscale", "Self-querying with MyScale": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/myscale_self_query"}, "Baseten": {"Baseten": "https://python.langchain.com/docs/integrations/llms/baseten"}, "WeatherDataLoader": {"Weather": "https://python.langchain.com/docs/integrations/document_loaders/weather"}, "Tair": {"Tair": "https://python.langchain.com/docs/integrations/vectorstores/tair"}, "UnstructuredWordDocumentLoader": {"Microsoft Word": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_word"}, "CollegeConfidentialLoader": {"College Confidential": "https://python.langchain.com/docs/integrations/document_loaders/college_confidential"}, "RWKV": {"RWKV-4": "https://python.langchain.com/docs/integrations/providers/rwkv"}, "GoogleDriveLoader": {"Google Drive": "https://python.langchain.com/docs/integrations/document_loaders/google_drive"}, "AmazonAPIGateway": {"Amazon API Gateway": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway_example"}, "UnstructuredPowerPointLoader": {"Microsoft PowerPoint": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_powerpoint"}, "wandb_tracing_enabled": {"WandB Tracing": "https://python.langchain.com/docs/integrations/providers/agent_with_wandb_tracing"}, "CometCallbackHandler": {"Comet": "https://python.langchain.com/docs/integrations/providers/comet_tracking"}, "CTransformers": {"C Transformers": "https://python.langchain.com/docs/integrations/llms/ctransformers"}, "BiliBiliLoader": {"BiliBili": "https://python.langchain.com/docs/integrations/document_loaders/bilibili"}, "DiffbotLoader": {"Diffbot": "https://python.langchain.com/docs/integrations/document_loaders/diffbot"}, "AimCallbackHandler": {"Aim": "https://python.langchain.com/docs/integrations/providers/aim_tracking"}, "ModernTreasuryLoader": {"Modern Treasury": "https://python.langchain.com/docs/integrations/document_loaders/modern_treasury"}, "FacebookChatLoader": {"Facebook Chat": "https://python.langchain.com/docs/integrations/document_loaders/facebook_chat"}, "Banana": {"Banana": "https://python.langchain.com/docs/integrations/llms/banana"}, "HuggingFacePipeline": {"Hugging Face": "https://python.langchain.com/docs/integrations/providers/huggingface", "RELLM": 
"https://python.langchain.com/docs/integrations/llms/rellm_experimental", "JSONFormer": "https://python.langchain.com/docs/integrations/llms/jsonformer_experimental"}, "HuggingFaceHub": {"Hugging Face": "https://python.langchain.com/docs/integrations/providers/huggingface"}, "HuggingFaceHubEmbeddings": {"Hugging Face": "https://python.langchain.com/docs/integrations/providers/huggingface"}, "CharacterTextSplitter": {"Hugging Face": "https://python.langchain.com/docs/integrations/providers/huggingface", "OpenAI": "https://python.langchain.com/docs/integrations/providers/openai", "Vectara Text Generation": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_text_generation", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "Vectorstore Agent": "https://python.langchain.com/docs/integrations/toolkits/vectorstore", "LanceDB": "https://python.langchain.com/docs/integrations/vectorstores/lancedb", "Weaviate": "https://python.langchain.com/docs/integrations/vectorstores/weaviate", "Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/deeplake", "Vectara": "https://python.langchain.com/docs/integrations/vectorstores/vectara", "Redis": "https://python.langchain.com/docs/integrations/vectorstores/redis", "PGVector": "https://python.langchain.com/docs/integrations/vectorstores/pgvector", "Rockset": "https://python.langchain.com/docs/integrations/vectorstores/rockset", "Zilliz": "https://python.langchain.com/docs/integrations/vectorstores/zilliz", "SingleStoreDB": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb", "Annoy": "https://python.langchain.com/docs/integrations/vectorstores/annoy", "Typesense": "https://python.langchain.com/docs/integrations/vectorstores/typesense", "Tair": "https://python.langchain.com/docs/integrations/vectorstores/tair", "Chroma": "https://python.langchain.com/docs/integrations/vectorstores/chroma", "Alibaba Cloud OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch", "StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks", "Clarifai": "https://python.langchain.com/docs/integrations/vectorstores/clarifai", "scikit-learn": "https://python.langchain.com/docs/integrations/vectorstores/sklearn", "DocArrayHnswSearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_hnsw", "MyScale": "https://python.langchain.com/docs/integrations/vectorstores/myscale", "ClickHouse Vector Search": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse", "Qdrant": "https://python.langchain.com/docs/integrations/vectorstores/qdrant", "Tigris": "https://python.langchain.com/docs/integrations/vectorstores/tigris", "AwaDB": "https://python.langchain.com/docs/integrations/vectorstores/awadb", "Supabase (Postgres)": "https://python.langchain.com/docs/integrations/vectorstores/supabase", "OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/opensearch", "Pinecone": "https://python.langchain.com/docs/integrations/vectorstores/pinecone", "Azure Cognitive Search": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch", "Cassandra": "https://python.langchain.com/docs/integrations/vectorstores/cassandra", "Milvus": "https://python.langchain.com/docs/integrations/vectorstores/milvus", "ElasticSearch": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch", "Marqo": 
"https://python.langchain.com/docs/integrations/vectorstores/marqo", "DocArrayInMemorySearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory", "pg_embedding": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding", "FAISS": "https://python.langchain.com/docs/integrations/vectorstores/faiss", "AnalyticDB": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb", "Hologres": "https://python.langchain.com/docs/integrations/vectorstores/hologres", "MongoDB Atlas": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas", "Figma": "https://python.langchain.com/docs/integrations/document_loaders/figma", "Psychic": "https://python.langchain.com/docs/integrations/document_loaders/psychic", "Manifest": "https://python.langchain.com/docs/integrations/llms/manifest", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching", "Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": "https://python.langchain.com/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake", "Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/code/code-analysis-deeplake", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Split by tokens ": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/split_by_token", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Loading from LangChainHub": "https://python.langchain.com/docs/modules/chains/how_to/from_hub", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "Hypothetical Document Embeddings": "https://python.langchain.com/docs/modules/chains/additional/hyde"}, "DocugamiLoader": {"Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami"}, "GutenbergLoader": {"Gutenberg": "https://python.langchain.com/docs/integrations/document_loaders/gutenberg"}, "AzureBlobStorageContainerLoader": {"Azure Blob Storage": "https://python.langchain.com/docs/integrations/providers/azure_blob_storage", "Azure Blob Storage Container": "https://python.langchain.com/docs/integrations/document_loaders/azure_blob_storage_container"}, "AzureBlobStorageFileLoader": {"Azure Blob Storage": "https://python.langchain.com/docs/integrations/providers/azure_blob_storage", "Azure Blob Storage File": "https://python.langchain.com/docs/integrations/document_loaders/azure_blob_storage_file"}, "WikipediaLoader": {"Wikipedia": "https://python.langchain.com/docs/integrations/document_loaders/wikipedia"}, "ConfluenceLoader": {"Confluence": "https://python.langchain.com/docs/integrations/document_loaders/confluence"}, "Predibase": {"Predibase": "https://python.langchain.com/docs/integrations/llms/predibase"}, "Beam": {"Beam": "https://python.langchain.com/docs/integrations/llms/beam"}, "GrobidParser": {"Grobid": "https://python.langchain.com/docs/integrations/document_loaders/grobid"}, "GenericLoader": {"Grobid": 
"https://python.langchain.com/docs/integrations/document_loaders/grobid", "Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio", "Source Code": "https://python.langchain.com/docs/integrations/document_loaders/source_code"}, "Typesense": {"Typesense": "https://python.langchain.com/docs/integrations/vectorstores/typesense"}, "Hologres": {"Hologres": "https://python.langchain.com/docs/integrations/vectorstores/hologres"}, "AI21": {"AI21 Labs": "https://python.langchain.com/docs/integrations/providers/ai21", "AI21": "https://python.langchain.com/docs/integrations/llms/ai21"}, "WandbCallbackHandler": {"Weights & Biases": "https://python.langchain.com/docs/integrations/providers/wandb_tracking"}, "ObsidianLoader": {"Obsidian": "https://python.langchain.com/docs/integrations/document_loaders/obsidian"}, "create_sql_agent": {"CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database"}, "SQLDatabaseToolkit": {"CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database", "Use ToolKits with OpenAI Functions": "https://python.langchain.com/docs/modules/agents/how_to/use_toolkits_with_openai_functions"}, "OpenAIModerationChain": {"OpenAI": "https://python.langchain.com/docs/integrations/providers/openai"}, "ChatGPTLoader": {"OpenAI": "https://python.langchain.com/docs/integrations/providers/openai", "ChatGPT Data": "https://python.langchain.com/docs/integrations/document_loaders/chatgpt_loader"}, "AZLyricsLoader": {"AZLyrics": "https://python.langchain.com/docs/integrations/document_loaders/azlyrics"}, "ToMarkdownLoader": {"2Markdown": "https://python.langchain.com/docs/integrations/document_loaders/tomarkdown"}, "GitLoader": {"Git": "https://python.langchain.com/docs/integrations/document_loaders/git"}, "InfinoCallbackHandler": {"Infino": "https://python.langchain.com/docs/integrations/providers/infino"}, "MlflowAIGateway": {"MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway"}, "MlflowAIGatewayEmbeddings": {"MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway"}, "ChatMLflowAIGateway": {"MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway"}, "SingleStoreDB": {"SingleStoreDB": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb"}, "Tigris": {"Tigris": "https://python.langchain.com/docs/integrations/vectorstores/tigris"}, "S3DirectoryLoader": {"AWS S3 Directory": "https://python.langchain.com/docs/integrations/document_loaders/aws_s3_directory"}, "TransformChain": {"Rebuff": "https://python.langchain.com/docs/integrations/providers/rebuff", "Transformation": "https://python.langchain.com/docs/modules/chains/foundational/transformation"}, "SQLDatabase": {"Rebuff": "https://python.langchain.com/docs/integrations/providers/rebuff", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database"}, "Weaviate": {"Weaviate": "https://python.langchain.com/docs/integrations/vectorstores/weaviate", "Weaviate self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/weaviate_self_query"}, "AirbyteJSONLoader": {"Airbyte": "https://python.langchain.com/docs/integrations/providers/airbyte", "Airbyte JSON": 
"https://python.langchain.com/docs/integrations/document_loaders/airbyte_json"}, "TelegramChatFileLoader": {"Telegram": "https://python.langchain.com/docs/integrations/document_loaders/telegram"}, "TelegramChatApiLoader": {"Telegram": "https://python.langchain.com/docs/integrations/providers/telegram"}, "PredictionGuard": {"Prediction Guard": "https://python.langchain.com/docs/integrations/llms/predictionguard"}, "NotionDirectoryLoader": {"Notion DB": "https://python.langchain.com/docs/integrations/providers/notion", "Notion DB 1/2": "https://python.langchain.com/docs/integrations/document_loaders/notion", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA"}, "NotionDBLoader": {"Notion DB": "https://python.langchain.com/docs/integrations/providers/notion", "Notion DB 2/2": "https://python.langchain.com/docs/integrations/document_loaders/notiondb"}, "MWDumpLoader": {"MediaWikiDump": "https://python.langchain.com/docs/integrations/document_loaders/mediawikidump"}, "BraveSearchLoader": {"Brave Search": "https://python.langchain.com/docs/integrations/document_loaders/brave_search"}, "StarRocks": {"StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks"}, "DatadogLogsLoader": {"Datadog Logs": "https://python.langchain.com/docs/integrations/document_loaders/datadog_logs"}, "ApifyDatasetLoader": {"Apify": "https://python.langchain.com/docs/integrations/providers/apify", "Apify Dataset": "https://python.langchain.com/docs/integrations/document_loaders/apify_dataset"}, "NLPCloud": {"NLPCloud": "https://python.langchain.com/docs/integrations/providers/nlpcloud", "NLP Cloud": "https://python.langchain.com/docs/integrations/llms/nlpcloud"}, "Milvus": {"Milvus": "https://python.langchain.com/docs/integrations/vectorstores/milvus", "Zilliz": "https://python.langchain.com/docs/integrations/vectorstores/zilliz"}, "Qdrant": {"Qdrant": "https://python.langchain.com/docs/integrations/vectorstores/qdrant", "Qdrant self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/qdrant_self_query"}, "GitbookLoader": {"GitBook": "https://python.langchain.com/docs/integrations/document_loaders/gitbook"}, "OpenSearchVectorSearch": {"OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/opensearch"}, "Pinecone": {"Pinecone": "https://python.langchain.com/docs/integrations/vectorstores/pinecone", "Self-querying with Pinecone": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/pinecone"}, "UnstructuredFileLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured", "Unstructured File": "https://python.langchain.com/docs/integrations/document_loaders/unstructured_file"}, "SelfHostedPipeline": {"Runhouse": "https://python.langchain.com/docs/integrations/llms/runhouse"}, "MlflowCallbackHandler": {"MLflow": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking"}, "SpreedlyLoader": {"Spreedly": "https://python.langchain.com/docs/integrations/document_loaders/spreedly"}, "OpenLLM": {"OpenLLM": "https://python.langchain.com/docs/integrations/llms/openllm"}, "SearxSearchResults": {"SearxNG Search API": "https://python.langchain.com/docs/integrations/providers/searx"}, "Modal": {"Modal": "https://python.langchain.com/docs/integrations/llms/modal"}, "IFixitLoader": {"iFixit": "https://python.langchain.com/docs/integrations/document_loaders/ifixit"}, "AlephAlpha": {"Aleph 
Alpha": "https://python.langchain.com/docs/integrations/llms/aleph_alpha"}, "PipelineAI": {"PipelineAI": "https://python.langchain.com/docs/integrations/llms/pipelineai_example"}, "LlamaCpp": {"Llama.cpp": "https://python.langchain.com/docs/integrations/providers/llamacpp", "Llama-cpp": "https://python.langchain.com/docs/integrations/llms/llamacpp", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa"}, "AwaDB": {"AwaDB": "https://python.langchain.com/docs/integrations/vectorstores/awadb"}, "ArxivLoader": {"Arxiv": "https://python.langchain.com/docs/integrations/document_loaders/arxiv"}, "Anyscale": {"Anyscale": "https://python.langchain.com/docs/integrations/llms/anyscale"}, "StripeLoader": {"Stripe": "https://python.langchain.com/docs/integrations/document_loaders/stripe"}, "BlackboardLoader": {"Blackboard": "https://python.langchain.com/docs/integrations/document_loaders/blackboard"}, "WhatsAppChatLoader": {"WhatsApp": "https://python.langchain.com/docs/integrations/providers/whatsapp", "WhatsApp Chat": "https://python.langchain.com/docs/integrations/document_loaders/whatsapp_chat"}, "LanceDB": {"LanceDB": "https://python.langchain.com/docs/integrations/vectorstores/lancedb"}, "OneDriveLoader": {"Microsoft OneDrive": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_onedrive"}, "AnalyticDB": {"AnalyticDB": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb"}, "YoutubeLoader": {"YouTube": "https://python.langchain.com/docs/integrations/providers/youtube", "YouTube transcripts": "https://python.langchain.com/docs/integrations/document_loaders/youtube_transcript"}, "GoogleApiYoutubeLoader": {"YouTube": "https://python.langchain.com/docs/integrations/providers/youtube"}, "PromptLayerOpenAI": {"PromptLayer": "https://python.langchain.com/docs/integrations/providers/promptlayer", "PromptLayer OpenAI": "https://python.langchain.com/docs/integrations/llms/promptlayer_openai"}, "DeepLake": {"Deep Lake": "https://python.langchain.com/docs/integrations/providers/deeplake", "Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/deeplake", "Question answering over a group chat messages using Activeloop's DeepLake": "https://python.langchain.com/docs/use_cases/question_answering/semantic-search-over-chat", "Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": "https://python.langchain.com/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake", "Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/code/code-analysis-deeplake", "DeepLake self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/deeplake_self_query"}, "WhyLabsCallbackHandler": {"WhyLabs": "https://python.langchain.com/docs/integrations/providers/whylabs_profiling"}, "FlyteCallbackHandler": {"Flyte": "https://python.langchain.com/docs/integrations/providers/flyte"}, "ManifestWrapper": {"Hazy Research": "https://python.langchain.com/docs/integrations/providers/hazy_research", "Manifest": "https://python.langchain.com/docs/integrations/llms/manifest"}, "Marqo": {"Marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo"}, "IMSDbLoader": {"IMSDb": "https://python.langchain.com/docs/integrations/document_loaders/imsdb"}, "PGVector": {"PGVector": "https://python.langchain.com/docs/integrations/vectorstores/pgvector"}, "DeepInfra": {"DeepInfra": 
"https://python.langchain.com/docs/integrations/llms/deepinfra_example"}, "AgentExecutor": {"Jina": "https://python.langchain.com/docs/integrations/providers/jina", "PowerBI Dataset Agent": "https://python.langchain.com/docs/integrations/toolkits/powerbi", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database", "Running Agent as an Iterator": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter"}, "RedditPostsLoader": {"Reddit": "https://python.langchain.com/docs/integrations/document_loaders/reddit"}, "TrelloLoader": {"Trello": "https://python.langchain.com/docs/integrations/document_loaders/trello"}, "AtlasDB": {"AtlasDB": "https://python.langchain.com/docs/integrations/providers/atlas", "Atlas": "https://python.langchain.com/docs/integrations/vectorstores/atlas"}, "SKLearnVectorStore": {"scikit-learn": "https://python.langchain.com/docs/integrations/vectorstores/sklearn"}, "EverNoteLoader": {"EverNote": "https://python.langchain.com/docs/integrations/document_loaders/evernote"}, "TwitterTweetLoader": {"Twitter": "https://python.langchain.com/docs/integrations/document_loaders/twitter"}, "DiscordChatLoader": {"Discord": "https://python.langchain.com/docs/integrations/document_loaders/discord"}, "RedisCache": {"Redis": "https://python.langchain.com/docs/integrations/providers/redis", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "RedisSemanticCache": {"Redis": "https://python.langchain.com/docs/integrations/providers/redis", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "Redis": {"Redis": "https://python.langchain.com/docs/integrations/vectorstores/redis"}, "SelfQueryRetriever": {"Chroma": "https://python.langchain.com/docs/integrations/providers/chroma", "Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "Weaviate self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/weaviate_self_query", "Chroma self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/chroma_self_query", "DeepLake self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/deeplake_self_query", "Self-querying with Pinecone": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/pinecone", "Self-querying with MyScale": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/myscale_self_query", "Qdrant self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/qdrant_self_query"}, "ClearMLCallbackHandler": {"ClearML": "https://python.langchain.com/docs/integrations/providers/clearml_tracking"}, "StdOutCallbackHandler": {"ClearML": "https://python.langchain.com/docs/integrations/providers/clearml_tracking", "Async API": "https://python.langchain.com/docs/modules/agents/how_to/async_agent", "Custom chain": "https://python.langchain.com/docs/modules/chains/how_to/custom_chain"}, "Cohere": {"Cohere": "https://python.langchain.com/docs/integrations/llms/cohere"}, "SlackDirectoryLoader": {"Slack": "https://python.langchain.com/docs/integrations/document_loaders/slack"}, "LLMContentHandler": {"SageMaker Endpoint": "https://python.langchain.com/docs/integrations/providers/sagemaker_endpoint", 
"SageMakerEndpoint": "https://python.langchain.com/docs/integrations/llms/sagemaker"}, "ContentHandlerBase": {"SageMaker Endpoint": "https://python.langchain.com/docs/integrations/providers/sagemaker_endpoint"}, "HNLoader": {"Hacker News": "https://python.langchain.com/docs/integrations/document_loaders/hacker_news"}, "Annoy": {"Annoy": "https://python.langchain.com/docs/integrations/vectorstores/annoy"}, "GCSDirectoryLoader": {"Google Cloud Storage": "https://python.langchain.com/docs/integrations/providers/google_cloud_storage", "Google Cloud Storage Directory": "https://python.langchain.com/docs/integrations/document_loaders/google_cloud_storage_directory"}, "GCSFileLoader": {"Google Cloud Storage": "https://python.langchain.com/docs/integrations/providers/google_cloud_storage", "Google Cloud Storage File": "https://python.langchain.com/docs/integrations/document_loaders/google_cloud_storage_file"}, "ArthurCallbackHandler": {"Arthur": "https://python.langchain.com/docs/integrations/providers/arthur_tracking"}, "DuckDBLoader": {"DuckDB": "https://python.langchain.com/docs/integrations/document_loaders/duckdb"}, "Petals": {"Petals": "https://python.langchain.com/docs/integrations/llms/petals_example"}, "MomentoCache": {"Momento": "https://python.langchain.com/docs/integrations/providers/momento", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "AirtableLoader": {"Airtable": "https://python.langchain.com/docs/integrations/document_loaders/airtable"}, "Clarifai": {"Clarifai": "https://python.langchain.com/docs/integrations/llms/clarifai"}, "BigQueryLoader": {"Google BigQuery": "https://python.langchain.com/docs/integrations/document_loaders/google_bigquery"}, "RoamLoader": {"Roam": "https://python.langchain.com/docs/integrations/document_loaders/roam"}, "Portkey": {"Log, Trace, and Monitor Langchain LLM Calls": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index"}, "Vectara": {"Vectara": "https://python.langchain.com/docs/integrations/vectorstores/vectara", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "Vectara Text Generation": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_text_generation"}, "VectaraRetriever": {"Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat"}, "load_qa_chain": {"Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "SageMakerEndpoint": "https://python.langchain.com/docs/integrations/llms/sagemaker", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs"}, "CONDENSE_QUESTION_PROMPT": {"Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat"}, "load_qa_with_sources_chain": {"Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat"}, "create_csv_agent": {"CSV Agent": "https://python.langchain.com/docs/integrations/toolkits/csv"}, "create_xorbits_agent": {"Xorbits Agent": 
"https://python.langchain.com/docs/integrations/toolkits/xorbits"}, "JiraToolkit": {"Jira": "https://python.langchain.com/docs/integrations/toolkits/jira"}, "JiraAPIWrapper": {"Jira": "https://python.langchain.com/docs/integrations/toolkits/jira"}, "create_spark_dataframe_agent": {"Spark Dataframe Agent": "https://python.langchain.com/docs/integrations/toolkits/spark"}, "PyPDFLoader": {"Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "MergeDocLoader": "https://python.langchain.com/docs/integrations/document_loaders/merge_doc_loader", "Question answering over a group chat messages using Activeloop's DeepLake": "https://python.langchain.com/docs/use_cases/question_answering/semantic-search-over-chat"}, "create_python_agent": {"Python Agent": "https://python.langchain.com/docs/integrations/toolkits/python"}, "PythonREPLTool": {"Python Agent": "https://python.langchain.com/docs/integrations/toolkits/python"}, "create_pbi_agent": {"PowerBI Dataset Agent": "https://python.langchain.com/docs/integrations/toolkits/powerbi"}, "PowerBIToolkit": {"PowerBI Dataset Agent": "https://python.langchain.com/docs/integrations/toolkits/powerbi"}, "PowerBIDataset": {"PowerBI Dataset Agent": "https://python.langchain.com/docs/integrations/toolkits/powerbi"}, "AzureCognitiveServicesToolkit": {"Azure Cognitive Services Toolkit": "https://python.langchain.com/docs/integrations/toolkits/azure_cognitive_services"}, "Requests": {"Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla", "OpenAPI chain": "https://python.langchain.com/docs/modules/chains/additional/openapi"}, "APIOperation": {"Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla"}, "NLAToolkit": {"Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla", "Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval"}, "GmailToolkit": {"Gmail Toolkit": "https://python.langchain.com/docs/integrations/toolkits/gmail"}, "build_resource_service": {"Gmail Toolkit": "https://python.langchain.com/docs/integrations/toolkits/gmail"}, "create_json_agent": {"JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json"}, "JsonToolkit": {"JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json"}, "JsonSpec": {"JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json", "OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi"}, "GitHubToolkit": {"GitHub": "https://python.langchain.com/docs/integrations/toolkits/github"}, "GitHubAPIWrapper": {"GitHub": "https://python.langchain.com/docs/integrations/toolkits/github"}, "create_spark_sql_agent": {"Spark SQL Agent": "https://python.langchain.com/docs/integrations/toolkits/spark_sql"}, "SparkSQLToolkit": {"Spark SQL Agent": "https://python.langchain.com/docs/integrations/toolkits/spark_sql"}, "SparkSQL": {"Spark SQL Agent": "https://python.langchain.com/docs/integrations/toolkits/spark_sql"}, "O365Toolkit": {"Office365 Toolkit": "https://python.langchain.com/docs/integrations/toolkits/office365"}, "create_pandas_dataframe_agent": {"Pandas Dataframe Agent": "https://python.langchain.com/docs/integrations/toolkits/pandas", "!pip install bs4": 
"https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times"}, "MultionClientTool": {"Multion Toolkit": "https://python.langchain.com/docs/integrations/toolkits/multion"}, "AmadeusToolkit": {"Amadeus Toolkit": "https://python.langchain.com/docs/integrations/toolkits/amadeus"}, "WebBaseLoader": {"Vectorstore Agent": "https://python.langchain.com/docs/integrations/toolkits/vectorstore", "WebBaseLoader": "https://python.langchain.com/docs/integrations/document_loaders/web_base", "MergeDocLoader": "https://python.langchain.com/docs/integrations/document_loaders/merge_doc_loader", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore"}, "reduce_openapi_spec": {"OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi"}, "RequestsWrapper": {"OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi"}, "create_openapi_agent": {"OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi"}, "OpenAPIToolkit": {"OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi"}, "RetrievalQAWithSourcesChain": {"Weaviate": "https://python.langchain.com/docs/integrations/vectorstores/weaviate", "Marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo", "Psychic": "https://python.langchain.com/docs/integrations/document_loaders/psychic", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index"}, "MatchingEngine": {"MatchingEngine": "https://python.langchain.com/docs/integrations/vectorstores/matchingengine"}, "OpenAIChat": {"Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/deeplake"}, "InMemoryDocstore": {"Annoy": "https://python.langchain.com/docs/integrations/vectorstores/annoy", "AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Generative Agents in LangChain": "https://python.langchain.com/docs/use_cases/agent_simulations/characters"}, "SpacyTextSplitter": {"Atlas": "https://python.langchain.com/docs/integrations/vectorstores/atlas", "Split by tokens ": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/split_by_token"}, "SentenceTransformerEmbeddings": {"Chroma": "https://python.langchain.com/docs/integrations/vectorstores/chroma"}, "StarRocksSettings": {"StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks"}, "DirectoryLoader": {"StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks"}, "DocArrayHnswSearch": {"DocArrayHnswSearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_hnsw"}, "Clickhouse": {"ClickHouse Vector Search": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse"}, "SupabaseVectorStore": {"Supabase (Postgres)": 
"https://python.langchain.com/docs/integrations/vectorstores/supabase"}, "AzureSearch": {"Azure Cognitive Search": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch"}, "Cassandra": {"Cassandra": "https://python.langchain.com/docs/integrations/vectorstores/cassandra"}, "ElasticVectorSearch": {"ElasticSearch": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs"}, "ElasticKnnSearch": {"ElasticSearch": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch"}, "DocArrayInMemorySearch": {"DocArrayInMemorySearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory"}, "PGEmbedding": {"pg_embedding": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding"}, "MongoDBAtlasVectorSearch": {"MongoDB Atlas": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas"}, "create_metadata_tagger": {"OpenAI Functions Metadata Tagger": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger"}, "ChatPromptTemplate": {"OpenAI Functions Metadata Tagger": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger", "Prompt Pipelining": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining", "Using OpenAI functions": "https://python.langchain.com/docs/modules/chains/popular/openai_functions", "Extraction": "https://python.langchain.com/docs/modules/chains/additional/extraction", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Tagging": "https://python.langchain.com/docs/modules/chains/additional/tagging"}, "AsyncHtmlLoader": {"html2text": "https://python.langchain.com/docs/integrations/document_transformers/html2text", "AsyncHtmlLoader": "https://python.langchain.com/docs/integrations/document_loaders/async_html"}, "Html2TextTransformer": {"html2text": "https://python.langchain.com/docs/integrations/document_transformers/html2text"}, "DoctranPropertyExtractor": {"Doctran Extract Properties": "https://python.langchain.com/docs/integrations/document_transformers/doctran_extract_properties"}, "DoctranQATransformer": {"Doctran Interrogate Documents": "https://python.langchain.com/docs/integrations/document_transformers/doctran_interrogate_document"}, "DoctranTextTranslator": {"Doctran Translate Documents": "https://python.langchain.com/docs/integrations/document_transformers/doctran_translate_document"}, "SnowflakeLoader": {"Snowflake": "https://python.langchain.com/docs/integrations/document_loaders/snowflake"}, "AcreomLoader": {"acreom": "https://python.langchain.com/docs/integrations/document_loaders/acreom"}, "UnstructuredCSVLoader": {"CSV": "https://python.langchain.com/docs/integrations/document_loaders/csv"}, "XorbitsLoader": {"Xorbits Pandas DataFrame": "https://python.langchain.com/docs/integrations/document_loaders/xorbits"}, "UnstructuredEmailLoader": {"Email": "https://python.langchain.com/docs/integrations/document_loaders/email"}, "OutlookMessageLoader": {"Email": "https://python.langchain.com/docs/integrations/document_loaders/email"}, "RecursiveUrlLoader": {"Recursive URL Loader": "https://python.langchain.com/docs/integrations/document_loaders/recursive_url_loader"}, "JoplinLoader": {"Joplin": 
"https://python.langchain.com/docs/integrations/document_loaders/joplin"}, "EtherscanLoader": {"Etherscan Loader": "https://python.langchain.com/docs/integrations/document_loaders/Etherscan"}, "Docx2txtLoader": {"Microsoft Word": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_word"}, "OpenAIWhisperParser": {"Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio"}, "YoutubeAudioLoader": {"Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio"}, "UnstructuredURLLoader": {"URL": "https://python.langchain.com/docs/integrations/document_loaders/url"}, "SeleniumURLLoader": {"URL": "https://python.langchain.com/docs/integrations/document_loaders/url"}, "PlaywrightURLLoader": {"URL": "https://python.langchain.com/docs/integrations/document_loaders/url"}, "OpenCityDataLoader": {"Geopandas": "https://python.langchain.com/docs/integrations/document_loaders/geopandas", "Open City Data": "https://python.langchain.com/docs/integrations/document_loaders/open_city_data"}, "GeoDataFrameLoader": {"Geopandas": "https://python.langchain.com/docs/integrations/document_loaders/geopandas"}, "HuggingFaceDatasetLoader": {"HuggingFace dataset": "https://python.langchain.com/docs/integrations/document_loaders/hugging_face_dataset"}, "MHTMLLoader": {"mhtml": "https://python.langchain.com/docs/integrations/document_loaders/mhtml"}, "RocksetLoader": {"Rockset": "https://python.langchain.com/docs/integrations/document_loaders/rockset"}, "ImageCaptionLoader": {"Image captions": "https://python.langchain.com/docs/integrations/document_loaders/image_captions"}, "UnstructuredRSTLoader": {"RST": "https://python.langchain.com/docs/integrations/document_loaders/rst"}, "ConversationBufferWindowMemory": {"Figma": "https://python.langchain.com/docs/integrations/document_loaders/figma", "Meta-Prompt": "https://python.langchain.com/docs/use_cases/autonomous_agents/meta_prompt", "Voice Assistant": "https://python.langchain.com/docs/use_cases/chatbots/voice_assistant", "Create ChatGPT clone": "https://python.langchain.com/docs/modules/agents/how_to/chatgpt_clone"}, "UnstructuredImageLoader": {"Images": "https://python.langchain.com/docs/integrations/document_loaders/image"}, "TencentCOSFileLoader": {"Tencent COS File": "https://python.langchain.com/docs/integrations/document_loaders/tencent_cos_file"}, "TomlLoader": {"TOML": "https://python.langchain.com/docs/integrations/document_loaders/toml"}, "UnstructuredAPIFileLoader": {"Unstructured File": "https://python.langchain.com/docs/integrations/document_loaders/unstructured_file"}, "PsychicLoader": {"Psychic": "https://python.langchain.com/docs/integrations/document_loaders/psychic"}, "TencentCOSDirectoryLoader": {"Tencent COS Directory": "https://python.langchain.com/docs/integrations/document_loaders/tencent_cos_directory"}, "GitHubIssuesLoader": {"GitHub": "https://python.langchain.com/docs/integrations/document_loaders/github"}, "UnstructuredOrgModeLoader": {"Org-mode": "https://python.langchain.com/docs/integrations/document_loaders/org_mode"}, "LarkSuiteDocLoader": {"LarkSuite (FeiShu)": "https://python.langchain.com/docs/integrations/document_loaders/larksuite"}, "load_summarize_chain": {"LarkSuite (FeiShu)": "https://python.langchain.com/docs/integrations/document_loaders/larksuite", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching", "Summarization": 
"https://python.langchain.com/docs/use_cases/summarization"}, "IuguLoader": {"Iugu": "https://python.langchain.com/docs/integrations/document_loaders/iugu"}, "UnstructuredEPubLoader": {"EPub ": "https://python.langchain.com/docs/integrations/document_loaders/epub"}, "AttributeInfo": {"Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "Weaviate self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/weaviate_self_query", "Chroma self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/chroma_self_query", "DeepLake self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/deeplake_self_query", "Self-querying with Pinecone": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/pinecone", "Self-querying with MyScale": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/myscale_self_query", "Qdrant self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/qdrant_self_query"}, "UnstructuredFileIOLoader": {"Google Drive": "https://python.langchain.com/docs/integrations/document_loaders/google_drive"}, "BrowserlessLoader": {"Browserless": "https://python.langchain.com/docs/integrations/document_loaders/browserless"}, "BibtexLoader": {"BibTeX": "https://python.langchain.com/docs/integrations/document_loaders/bibtex"}, "ReadTheDocsLoader": {"ReadTheDocs Documentation": "https://python.langchain.com/docs/integrations/document_loaders/readthedocs_documentation"}, "DataFrameLoader": {"Pandas DataFrame": "https://python.langchain.com/docs/integrations/document_loaders/pandas_dataframe"}, "GoogleApiClient": {"YouTube transcripts": "https://python.langchain.com/docs/integrations/document_loaders/youtube_transcript"}, "NotebookLoader": {"Jupyter Notebook": "https://python.langchain.com/docs/integrations/document_loaders/jupyter_notebook", "Notebook": "https://python.langchain.com/docs/integrations/document_loaders/example_data/notebook"}, "UnstructuredTSVLoader": {"TSV": "https://python.langchain.com/docs/integrations/document_loaders/tsv"}, "UnstructuredODTLoader": {"Open Document Format (ODT)": "https://python.langchain.com/docs/integrations/document_loaders/odt"}, "EmbaasBlobLoader": {"Embaas": "https://python.langchain.com/docs/integrations/document_loaders/embaas"}, "Blob": {"Embaas": "https://python.langchain.com/docs/integrations/document_loaders/embaas"}, "EmbaasLoader": {"Embaas": "https://python.langchain.com/docs/integrations/document_loaders/embaas"}, "UnstructuredXMLLoader": {"XML": "https://python.langchain.com/docs/integrations/document_loaders/xml"}, "MaxComputeLoader": {"Alibaba Cloud MaxCompute": "https://python.langchain.com/docs/integrations/document_loaders/alibaba_cloud_maxcompute"}, "CubeSemanticLoader": {"Cube Semantic Layer": "https://python.langchain.com/docs/integrations/document_loaders/cube_semantic"}, "UnstructuredExcelLoader": {"Microsoft Excel": "https://python.langchain.com/docs/integrations/document_loaders/excel"}, "Language": {"Source Code": "https://python.langchain.com/docs/integrations/document_loaders/source_code"}, "LanguageParser": {"Source Code": "https://python.langchain.com/docs/integrations/document_loaders/source_code"}, "SRTLoader": {"Subtitle": 
"https://python.langchain.com/docs/integrations/document_loaders/subtitle"}, "MastodonTootsLoader": {"Mastodon": "https://python.langchain.com/docs/integrations/document_loaders/mastodon"}, "MergedDataLoader": {"MergeDocLoader": "https://python.langchain.com/docs/integrations/document_loaders/merge_doc_loader"}, "PySparkDataFrameLoader": {"PySpark DataFrame Loader": "https://python.langchain.com/docs/integrations/document_loaders/pyspark_dataframe"}, "CoNLLULoader": {"CoNLL-U": "https://python.langchain.com/docs/integrations/document_loaders/conll-u"}, "FaunaLoader": {"Fauna": "https://python.langchain.com/docs/integrations/document_loaders/fauna"}, "SitemapLoader": {"Sitemap": "https://python.langchain.com/docs/integrations/document_loaders/sitemap"}, "S3FileLoader": {"AWS S3 File": "https://python.langchain.com/docs/integrations/document_loaders/aws_s3_file"}, "SimpleSequentialChain": {"Baseten": "https://python.langchain.com/docs/integrations/llms/baseten", "Predibase": "https://python.langchain.com/docs/integrations/llms/predibase", "Replicate": "https://python.langchain.com/docs/integrations/llms/replicate"}, "StochasticAI": {"StochasticAI": "https://python.langchain.com/docs/integrations/llms/stochasticai"}, "ForefrontAI": {"ForefrontAI": "https://python.langchain.com/docs/integrations/llms/forefrontai_example"}, "CerebriumAI": {"CerebriumAI": "https://python.langchain.com/docs/integrations/llms/cerebriumai_example"}, "OctoAIEndpoint": {"OctoAI Compute Service": "https://python.langchain.com/docs/integrations/llms/octoai"}, "Writer": {"Writer": "https://python.langchain.com/docs/integrations/llms/writer"}, "TextGen": {"TextGen": "https://python.langchain.com/docs/integrations/llms/textgen"}, "MosaicML": {"MosaicML": "https://python.langchain.com/docs/integrations/llms/mosaicml"}, "KoboldApiLLM": {"KoboldAI API": "https://python.langchain.com/docs/integrations/llms/koboldai"}, "VertexAI": {"Google Cloud Platform Vertex AI PaLM ": "https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm"}, "Bedrock": {"Bedrock": "https://python.langchain.com/docs/integrations/llms/bedrock"}, "GooseAI": {"GooseAI": "https://python.langchain.com/docs/integrations/llms/gooseai_example"}, "Databricks": {"Databricks": "https://python.langchain.com/docs/integrations/llms/databricks"}, "MapReduceChain": {"Manifest": "https://python.langchain.com/docs/integrations/llms/manifest", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "ModelLaboratory": {"Manifest": "https://python.langchain.com/docs/integrations/llms/manifest", "Model Comparison": "https://python.langchain.com/docs/guides/model_laboratory"}, "RELLM": {"RELLM": "https://python.langchain.com/docs/integrations/llms/rellm_experimental"}, "Tongyi": {"Tongyi Qwen": "https://python.langchain.com/docs/integrations/llms/tongyi"}, "InMemoryCache": {"Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "SQLiteCache": {"Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "GPTCache": {"Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "SQLAlchemyCache": {"Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "AzureMLOnlineEndpoint": {"AzureML Online Endpoint": "https://python.langchain.com/docs/integrations/llms/azureml_endpoint_example"}, "DollyContentFormatter": {"AzureML Online Endpoint": 
"https://python.langchain.com/docs/integrations/llms/azureml_endpoint_example"}, "load_llm": {"AzureML Online Endpoint": "https://python.langchain.com/docs/integrations/llms/azureml_endpoint_example", "Serialization": "https://python.langchain.com/docs/modules/model_io/models/llms/llm_serialization"}, "AzureMLEndpointClient": {"AzureML Online Endpoint": "https://python.langchain.com/docs/integrations/llms/azureml_endpoint_example"}, "OpenLM": {"OpenLM": "https://python.langchain.com/docs/integrations/llms/openlm"}, "HuggingFaceTextGenInference": {"Huggingface TextGen Inference": "https://python.langchain.com/docs/integrations/llms/huggingface_textgen_inference"}, "ChatGLM": {"ChatGLM": "https://python.langchain.com/docs/integrations/llms/chatglm"}, "tool": {"JSONFormer": "https://python.langchain.com/docs/integrations/llms/jsonformer_experimental", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Agent Trajectory": "https://python.langchain.com/docs/modules/evaluation/trajectory/trajectory_eval"}, "JsonFormer": {"JSONFormer": "https://python.langchain.com/docs/integrations/llms/jsonformer_experimental"}, "Replicate": {"Replicate": "https://python.langchain.com/docs/integrations/llms/replicate"}, "tracing_v2_enabled": {"LangSmith Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough"}, "wait_for_all_tracers": {"LangSmith Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough"}, "EvaluatorType": {"LangSmith Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough"}, "RunEvalConfig": {"LangSmith Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough"}, "WriteFileTool": {"AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times"}, "ReadFileTool": {"AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times"}, "AutoGPT": {"AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times"}, "FileChatMessageHistory": {"AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt"}, "BaseLLM": {"BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context"}, "VectorStore": {"BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent"}, "Chain": {"BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Custom chain": "https://python.langchain.com/docs/modules/chains/how_to/custom_chain"}, "BabyAGI": {"BabyAGI User Guide": 
"https://python.langchain.com/docs/use_cases/autonomous_agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/autonomous_agents/baby_agi_with_agent"}, "ZeroShotAgent": {"BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent", "Adding Message Memory backed by a database to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db", "How to add Memory to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory", "Custom MRKL agent": "https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent", "Shared memory across agents and tools": "https://python.langchain.com/docs/modules/agents/how_to/sharedmemory_for_tools"}, "BaseTool": {"!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Custom functions with OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent"}, "MarkdownHeaderTextSplitter": {"Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "MarkdownHeaderTextSplitter": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/markdown_header_metadata"}, "MultiQueryRetriever": {"QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever"}, "StringPromptTemplate": {"Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "Wikibase Agent": "https://python.langchain.com/docs/use_cases/agents/wikibase_agent", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval", "Custom agent with tool retrieval": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval", "Custom prompt template": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/custom_prompt_template"}, "AgentAction": {"Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "Wikibase Agent": "https://python.langchain.com/docs/use_cases/agents/wikibase_agent", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval", "Multiple callback handlers": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks", "Custom multi-action agent": "https://python.langchain.com/docs/modules/agents/how_to/custom_multi_action_agent", "Custom agent": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent", "Custom agent with tool retrieval": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval", "Custom Trajectory Evaluator": 
"https://python.langchain.com/docs/modules/evaluation/trajectory/custom"}, "AIPlugin": {"Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval"}, "AgentOutputParser": {"SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context"}, "SteamshipImageGenerationTool": {"Multi-modal outputs: Image & Text": "https://python.langchain.com/docs/use_cases/multi_modal/image_agent"}, "RegexParser": {"Multi-Agent Simulated Environment: Petting Zoo": "https://python.langchain.com/docs/use_cases/agent_simulations/petting_zoo", "Multi-agent decentralized speaker selection": "https://python.langchain.com/docs/use_cases/agent_simulations/multiagent_bidding", "Multi-agent authoritarian speaker selection": "https://python.langchain.com/docs/use_cases/agent_simulations/multiagent_authoritarian", "Simulated Environment: Gymnasium": "https://python.langchain.com/docs/use_cases/agent_simulations/gymnasium"}, "TimeWeightedVectorStoreRetriever": {"Generative Agents in LangChain": "https://python.langchain.com/docs/use_cases/agent_simulations/characters"}, "PydanticOutputParser": {"MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "Pydantic (JSON) parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/pydantic"}, "TokenTextSplitter": {"Split by tokens ": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/split_by_token"}, "SentenceTransformersTokenTextSplitter": {"Split by tokens ": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/split_by_token"}, "NLTKTextSplitter": {"Split by tokens ": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/split_by_token"}, "StuffDocumentsChain": {"Lost in the middle: The problem with long contexts": "https://python.langchain.com/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "ChatMessageHistory": {"Adding Message Memory backed by a database to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db"}, "BaseMemory": {"How to create a custom Memory class": "https://python.langchain.com/docs/modules/memory/custom_memory"}, "ConversationKGMemory": {"Conversation Knowledge Graph Memory": "https://python.langchain.com/docs/modules/memory/kg"}, "ConversationTokenBufferMemory": {"ConversationTokenBufferMemory": "https://python.langchain.com/docs/modules/memory/token_buffer"}, "ConversationSummaryBufferMemory": {"ConversationSummaryBufferMemory": "https://python.langchain.com/docs/modules/memory/summary_buffer"}, "BaseCallbackHandler": {"Custom callback handlers": "https://python.langchain.com/docs/modules/callbacks/custom_callbacks", "Multiple callback handlers": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks", "Streaming final agent output": "https://python.langchain.com/docs/modules/agents/how_to/streaming_stdout_final_only"}, "tracing_enabled": {"Multiple callback handlers": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks"}, "get_openai_callback": 
{"Token counting": "https://python.langchain.com/docs/modules/callbacks/token_counting", "Tracking token usage": "https://python.langchain.com/docs/modules/model_io/models/llms/token_usage_tracking"}, "FileCallbackHandler": {"Logging to file": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler"}, "LLMResult": {"Async callbacks": "https://python.langchain.com/docs/modules/callbacks/async_callbacks"}, "AsyncCallbackHandler": {"Async callbacks": "https://python.langchain.com/docs/modules/callbacks/async_callbacks"}, "StructuredTool": {"Multi-Input Tools": "https://python.langchain.com/docs/modules/agents/tools/multi_input_tool", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools"}, "ToolException": {"Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools"}, "MoveFileTool": {"Tools as OpenAI Functions": "https://python.langchain.com/docs/modules/agents/tools/tools_as_openai_functions"}, "RequestsGetTool": {"Tool Input Schema": "https://python.langchain.com/docs/modules/agents/tools/tool_input_validation"}, "HumanApprovalCallbackHandler": {"Human-in-the-loop Tool Validation": "https://python.langchain.com/docs/modules/agents/tools/human_approval"}, "DocstoreExplorer": {"ReAct document store": "https://python.langchain.com/docs/modules/agents/agent_types/react_docstore"}, "AgentFinish": {"Running Agent as an Iterator": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter"}, "MessagesPlaceholder": {"Add Memory to OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/add_memory_openai_functions", "Types of `MessagePromptTemplate`": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/msg_prompt_templates"}, "LangChainTracer": {"Async API": "https://python.langchain.com/docs/modules/agents/how_to/async_agent"}, "SystemMessage": {"Use ToolKits with OpenAI Functions": "https://python.langchain.com/docs/modules/agents/how_to/use_toolkits_with_openai_functions", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "HumanInputChatModel": {"Human input Chat Model": "https://python.langchain.com/docs/modules/model_io/models/chat/human_input_chat_model"}, "FakeListLLM": {"Fake LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/fake_llm"}, "CallbackManagerForLLMRun": {"Custom LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/custom_llm"}, "LLM": {"Custom LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/custom_llm"}, "HumanInputLLM": {"Human input LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/human_input_llm"}, "RetryWithErrorOutputParser": {"Retry parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/retry"}, "EnumOutputParser": {"Enum parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/enum"}, "DatetimeOutputParser": {"Datetime parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/datetime"}, "FewShotPromptTemplate": {"Select by maximal marginal relevance (MMR)": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/mmr", "Select by n-gram overlap": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/ngram_overlap"}, "BaseExampleSelector": {"Custom example selector": 
"https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/custom_example_selector"}, "NGramOverlapExampleSelector": {"Select by n-gram overlap": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/ngram_overlap"}, "load_prompt": {"Serialization": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompt_serialization"}, "ChatMessagePromptTemplate": {"Types of `MessagePromptTemplate`": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/msg_prompt_templates"}, "load_evaluator": {"Agent Trajectory": "https://python.langchain.com/docs/modules/evaluation/trajectory/trajectory_eval", "Pairwise Embedding Distance ": "https://python.langchain.com/docs/modules/evaluation/comparison/pairwise_embedding_distance", "Pairwise String Comparison": "https://python.langchain.com/docs/modules/evaluation/comparison/pairwise_string", "Evaluating Custom Criteria": "https://python.langchain.com/docs/modules/evaluation/string/criteria_eval_chain", "QA Correctness": "https://python.langchain.com/docs/modules/evaluation/string/qa", "String Distance": "https://python.langchain.com/docs/modules/evaluation/string/string_distance", "Embedding Distance": "https://python.langchain.com/docs/modules/evaluation/string/embedding_distance"}, "AgentTrajectoryEvaluator": {"Custom Trajectory Evaluator": "https://python.langchain.com/docs/modules/evaluation/trajectory/custom"}, "EmbeddingDistance": {"Pairwise Embedding Distance ": "https://python.langchain.com/docs/modules/evaluation/comparison/pairwise_embedding_distance", "Embedding Distance": "https://python.langchain.com/docs/modules/evaluation/string/embedding_distance"}, "PairwiseStringEvaluator": {"Custom Pairwise Evaluator": "https://python.langchain.com/docs/modules/evaluation/comparison/custom"}, "CriteriaEvalChain": {"Evaluating Custom Criteria": "https://python.langchain.com/docs/modules/evaluation/string/criteria_eval_chain"}, "SQL_PROMPT": {"QA Correctness": "https://python.langchain.com/docs/modules/evaluation/string/qa"}, "StringEvaluator": {"Custom String Evaluator": "https://python.langchain.com/docs/modules/evaluation/string/custom"}, "StringDistance": {"String Distance": "https://python.langchain.com/docs/modules/evaluation/string/string_distance"}, "MultiPromptChain": {"Router": "https://python.langchain.com/docs/modules/chains/foundational/router"}, "LLMRouterChain": {"Router": "https://python.langchain.com/docs/modules/chains/foundational/router"}, "EmbeddingRouterChain": {"Router": "https://python.langchain.com/docs/modules/chains/foundational/router"}, "BaseLanguageModel": {"Custom chain": "https://python.langchain.com/docs/modules/chains/how_to/custom_chain"}, "BasePromptTemplate": {"Custom chain": "https://python.langchain.com/docs/modules/chains/how_to/custom_chain"}, "load_chain": {"Serialization": "https://python.langchain.com/docs/modules/chains/how_to/serialization", "Loading from LangChainHub": "https://python.langchain.com/docs/modules/chains/how_to/from_hub"}, "NeptuneGraph": {"Neptune Open Cypher QA Chain": "https://python.langchain.com/docs/modules/chains/additional/neptune_cypher_qa"}, "NeptuneOpenCypherQAChain": {"Neptune Open Cypher QA Chain": "https://python.langchain.com/docs/modules/chains/additional/neptune_cypher_qa"}, "LLMBashChain": {"Bash chain": "https://python.langchain.com/docs/modules/chains/additional/llm_bash"}, "BashOutputParser": {"Bash chain": "https://python.langchain.com/docs/modules/chains/additional/llm_bash"}, 
"BashProcess": {"Bash chain": "https://python.langchain.com/docs/modules/chains/additional/llm_bash"}, "NebulaGraphQAChain": {"NebulaGraphQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa"}, "NebulaGraph": {"NebulaGraphQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa"}, "CPALChain": {"Causal program-aided language (CPAL) chain": "https://python.langchain.com/docs/modules/chains/additional/cpal"}, "PALChain": {"Causal program-aided language (CPAL) chain": "https://python.langchain.com/docs/modules/chains/additional/cpal", "Program-aided language model (PAL) chain": "https://python.langchain.com/docs/modules/chains/additional/pal"}, "ElasticsearchDatabaseChain": {"Elasticsearch database": "https://python.langchain.com/docs/modules/chains/additional/elasticsearch_database"}, "get_openapi_chain": {"OpenAPI calls with OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openapi_openai"}, "create_extraction_chain": {"Extraction": "https://python.langchain.com/docs/modules/chains/additional/extraction"}, "LLMSummarizationCheckerChain": {"Summarization checker chain": "https://python.langchain.com/docs/modules/chains/additional/llm_summarization_checker"}, "KuzuGraph": {"KuzuQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_kuzu_qa"}, "KuzuQAChain": {"KuzuQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_kuzu_qa"}, "create_qa_with_sources_chain": {"Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "create_qa_with_structure_chain": {"Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "HugeGraphQAChain": {"HugeGraph QA Chain": "https://python.langchain.com/docs/modules/chains/additional/graph_hugegraph_qa"}, "HugeGraph": {"HugeGraph QA Chain": "https://python.langchain.com/docs/modules/chains/additional/graph_hugegraph_qa"}, "GraphSparqlQAChain": {"GraphSparqlQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_sparql_qa"}, "RdfGraph": {"GraphSparqlQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_sparql_qa"}, "LLMRequestsChain": {"HTTP request chain": "https://python.langchain.com/docs/modules/chains/additional/llm_requests"}, "LLMSymbolicMathChain": {"LLM Symbolic Math ": "https://python.langchain.com/docs/modules/chains/additional/llm_symbolic_math"}, "create_citation_fuzzy_match_chain": {"Question-Answering Citations": "https://python.langchain.com/docs/modules/chains/additional/qa_citations"}, "BaseRetriever": {"FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare"}, "FlareChain": {"FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare"}, "GraphIndexCreator": {"Graph QA": "https://python.langchain.com/docs/modules/chains/additional/graph_qa"}, "GraphQAChain": {"Graph QA": "https://python.langchain.com/docs/modules/chains/additional/graph_qa"}, "NetworkxEntityGraph": {"Graph QA": "https://python.langchain.com/docs/modules/chains/additional/graph_qa"}, "LLMCheckerChain": {"Self-checking chain": "https://python.langchain.com/docs/modules/chains/additional/llm_checker"}, "GraphCypherQAChain": {"Graph DB QA chain": "https://python.langchain.com/docs/modules/chains/additional/graph_cypher_qa"}, "Neo4jGraph": {"Graph DB QA chain": 
"https://python.langchain.com/docs/modules/chains/additional/graph_cypher_qa"}, "OpenAPISpec": {"OpenAPI chain": "https://python.langchain.com/docs/modules/chains/additional/openapi"}, "OpenAPIEndpointChain": {"OpenAPI chain": "https://python.langchain.com/docs/modules/chains/additional/openapi"}} \ No newline at end of file +{"DeepInfraEmbeddings": {"DeepInfra": "https://python.langchain.com/docs/integrations/text_embedding/deepinfra"}, "HuggingFaceEmbeddings": {"Hugging Face Hub": "https://python.langchain.com/docs/integrations/text_embedding/huggingfacehub", "Sentence Transformers Embeddings": "https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers", "LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever", "Hugging Face": "https://python.langchain.com/docs/integrations/providers/huggingface", "Annoy": "https://python.langchain.com/docs/integrations/vectorstores/annoy", "Pairwise Embedding Distance ": "https://python.langchain.com/docs/modules/evaluation/comparison/pairwise_embedding_distance", "Embedding Distance": "https://python.langchain.com/docs/modules/evaluation/string/embedding_distance", "Lost in the middle: The problem with long contexts": "https://python.langchain.com/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder"}, "GPT4AllEmbeddings": {"GPT4All": "https://python.langchain.com/docs/integrations/text_embedding/gpt4all", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research"}, "MosaicMLInstructorEmbeddings": {"MosaicML embeddings": "https://python.langchain.com/docs/integrations/text_embedding/mosaicml"}, "OpenAIEmbeddings": {"OpenAI": "https://python.langchain.com/docs/integrations/providers/openai", "AzureOpenAI": "https://python.langchain.com/docs/integrations/text_embedding/azureopenai", "Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "kNN": "https://python.langchain.com/docs/integrations/retrievers/knn", "DocArray Retriever": "https://python.langchain.com/docs/integrations/retrievers/docarray_retriever", "SVM": "https://python.langchain.com/docs/integrations/retrievers/svm", "Pinecone Hybrid Search": "https://python.langchain.com/docs/integrations/retrievers/pinecone_hybrid_search", "LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever", "Azure OpenAI": "https://python.langchain.com/docs/integrations/providers/azure_openai", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "Vectorstore Agent": "https://python.langchain.com/docs/integrations/toolkits/vectorstore", "LanceDB": "https://python.langchain.com/docs/integrations/vectorstores/lancedb", "Weaviate": "https://python.langchain.com/docs/integrations/vectorstores/weaviate", "Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/deeplake", "Redis": "https://python.langchain.com/docs/integrations/vectorstores/redis", "PGVector": "https://python.langchain.com/docs/integrations/vectorstores/pgvector", "Rockset": "https://python.langchain.com/docs/integrations/vectorstores/rockset", "Zilliz": "https://python.langchain.com/docs/integrations/vectorstores/zilliz", "SingleStoreDB": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb", 
"Typesense": "https://python.langchain.com/docs/integrations/vectorstores/typesense", "Atlas": "https://python.langchain.com/docs/integrations/vectorstores/atlas", "Chroma": "https://python.langchain.com/docs/integrations/vectorstores/chroma", "Alibaba Cloud OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch", "StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks", "scikit-learn": "https://python.langchain.com/docs/integrations/vectorstores/sklearn", "DocArrayHnswSearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_hnsw", "MyScale": "https://python.langchain.com/docs/integrations/vectorstores/myscale", "ClickHouse Vector Search": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse", "Qdrant": "https://python.langchain.com/docs/integrations/vectorstores/qdrant", "Tigris": "https://python.langchain.com/docs/integrations/vectorstores/tigris", "Supabase (Postgres)": "https://python.langchain.com/docs/integrations/vectorstores/supabase", "OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/opensearch", "Pinecone": "https://python.langchain.com/docs/integrations/vectorstores/pinecone", "Azure Cognitive Search": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch", "Cassandra": "https://python.langchain.com/docs/integrations/vectorstores/cassandra", "Milvus": "https://python.langchain.com/docs/integrations/vectorstores/milvus", "ElasticSearch": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch", "DocArrayInMemorySearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory", "pg_embedding": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding", "FAISS": "https://python.langchain.com/docs/integrations/vectorstores/faiss", "AnalyticDB": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb", "Hologres": "https://python.langchain.com/docs/integrations/vectorstores/hologres", "MongoDB Atlas": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas", "Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio", "Psychic": "https://python.langchain.com/docs/integrations/document_loaders/psychic", "Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching", "Data Augmented Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/data_augmented_question_answering", "AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Question answering over a group chat messages using Activeloop's DeepLake": "https://python.langchain.com/docs/use_cases/question_answering/semantic-search-over-chat", "Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": 
"https://python.langchain.com/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake", "Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/code/code-analysis-deeplake", "Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval", "Generative Agents in LangChain": "https://python.langchain.com/docs/use_cases/agent_simulations/characters", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research", "Weaviate self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/weaviate_self_query", "Chroma self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/chroma_self_query", "DeepLake self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/deeplake_self_query", "Self-querying with Pinecone": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/pinecone", "Self-querying with MyScale": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/myscale_self_query", "Qdrant self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/qdrant_self_query", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Custom agent with tool retrieval": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval", "Select by maximal marginal relevance (MMR)": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/mmr", "Loading from LangChainHub": "https://python.langchain.com/docs/modules/chains/how_to/from_hub", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare", "Hypothetical Document Embeddings": "https://python.langchain.com/docs/modules/chains/additional/hyde"}, "VertexAIEmbeddings": {"Google Cloud Platform Vertex AI PaLM ": "https://python.langchain.com/docs/integrations/text_embedding/google_vertex_ai_palm"}, "BedrockEmbeddings": {"Bedrock Embeddings": "https://python.langchain.com/docs/integrations/text_embedding/bedrock", "Bedrock": "https://python.langchain.com/docs/integrations/providers/bedrock"}, "LlamaCppEmbeddings": {"Llama-cpp": "https://python.langchain.com/docs/integrations/text_embedding/llamacpp", "Llama.cpp": "https://python.langchain.com/docs/integrations/providers/llamacpp"}, "NLPCloudEmbeddings": {"NLP Cloud": "https://python.langchain.com/docs/integrations/text_embedding/nlp_cloud"}, "SpacyEmbeddings": {"Spacy Embedding": 
"https://python.langchain.com/docs/integrations/text_embedding/spacy_embedding"}, "HuggingFaceInstructEmbeddings": {"InstructEmbeddings": "https://python.langchain.com/docs/integrations/text_embedding/instruct_embeddings"}, "CohereEmbeddings": {"Cohere": "https://python.langchain.com/docs/integrations/providers/cohere", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Router": "https://python.langchain.com/docs/modules/chains/foundational/router"}, "ClarifaiEmbeddings": {"Clarifai": "https://python.langchain.com/docs/integrations/providers/clarifai"}, "MiniMaxEmbeddings": {"MiniMax": "https://python.langchain.com/docs/integrations/text_embedding/minimax"}, "FakeEmbeddings": {"Fake Embeddings": "https://python.langchain.com/docs/integrations/text_embedding/fake", "DocArray Retriever": "https://python.langchain.com/docs/integrations/retrievers/docarray_retriever", "Vectara": "https://python.langchain.com/docs/integrations/vectorstores/vectara", "Tair": "https://python.langchain.com/docs/integrations/vectorstores/tair"}, "ElasticsearchEmbeddings": {"Elasticsearch": "https://python.langchain.com/docs/integrations/text_embedding/elasticsearch", "ElasticSearch": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch"}, "EmbaasEmbeddings": {"Embaas": "https://python.langchain.com/docs/integrations/text_embedding/embaas"}, "JinaEmbeddings": {"Jina": "https://python.langchain.com/docs/integrations/providers/jina"}, "AlephAlphaAsymmetricSemanticEmbedding": {"Aleph Alpha": "https://python.langchain.com/docs/integrations/text_embedding/aleph_alpha"}, "AlephAlphaSymmetricSemanticEmbedding": {"Aleph Alpha": "https://python.langchain.com/docs/integrations/providers/aleph_alpha"}, "DashScopeEmbeddings": {"DashScope": "https://python.langchain.com/docs/integrations/text_embedding/dashscope"}, "TensorflowHubEmbeddings": {"TensorflowHub": "https://python.langchain.com/docs/integrations/text_embedding/tensorflowhub"}, "ModelScopeEmbeddings": {"ModelScope": "https://python.langchain.com/docs/integrations/providers/modelscope"}, "SagemakerEndpointEmbeddings": {"SageMaker Endpoint Embeddings": "https://python.langchain.com/docs/integrations/text_embedding/sagemaker-endpoint", "SageMaker Endpoint": "https://python.langchain.com/docs/integrations/providers/sagemaker_endpoint"}, "EmbeddingsContentHandler": {"SageMaker Endpoint Embeddings": "https://python.langchain.com/docs/integrations/text_embedding/sagemaker-endpoint"}, "LocalAIEmbeddings": {"LocalAI": "https://python.langchain.com/docs/integrations/text_embedding/localai"}, "ElasticSearchBM25Retriever": {"ElasticSearch BM25": "https://python.langchain.com/docs/integrations/retrievers/elastic_search_bm25", "Elasticsearch": "https://python.langchain.com/docs/integrations/providers/elasticsearch"}, "ZepChatMessageHistory": {"Zep": "https://python.langchain.com/docs/integrations/retrievers/zep_memorystore"}, "HumanMessage": {"Zep": "https://python.langchain.com/docs/integrations/retrievers/zep_memorystore", "Zep Memory": "https://python.langchain.com/docs/integrations/memory/zep_memory", "Google Cloud Platform Vertex AI PaLM ": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm", "Azure": "https://python.langchain.com/docs/integrations/chat/azure_chat_openai", "PromptLayer ChatOpenAI": "https://python.langchain.com/docs/integrations/chat/promptlayer_chatopenai", "MLflow AI Gateway": 
"https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte", "Arthur": "https://python.langchain.com/docs/integrations/providers/arthur_tracking", "Custom callback handlers": "https://python.langchain.com/docs/modules/callbacks/custom_callbacks", "Tools as OpenAI Functions": "https://python.langchain.com/docs/modules/agents/tools/tools_as_openai_functions", "Prompt Pipelining": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining", "Using OpenAI functions": "https://python.langchain.com/docs/modules/chains/popular/openai_functions"}, "ZepRetriever": {"Zep": "https://python.langchain.com/docs/integrations/providers/zep", "Zep Memory": "https://python.langchain.com/docs/integrations/memory/zep_memory"}, "VespaRetriever": {"Vespa": "https://python.langchain.com/docs/integrations/providers/vespa"}, "AmazonKendraRetriever": {"Amazon Kendra": "https://python.langchain.com/docs/integrations/retrievers/amazon_kendra_retriever"}, "RecursiveCharacterTextSplitter": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "MarkdownHeaderTextSplitter": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/markdown_header_metadata"}, "TextLoader": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "Vectorstore Agent": "https://python.langchain.com/docs/integrations/toolkits/vectorstore", "LanceDB": "https://python.langchain.com/docs/integrations/vectorstores/lancedb", "Weaviate": "https://python.langchain.com/docs/integrations/vectorstores/weaviate", "Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/deeplake", "Vectara": "https://python.langchain.com/docs/integrations/vectorstores/vectara", "Redis": "https://python.langchain.com/docs/integrations/vectorstores/redis", "PGVector": "https://python.langchain.com/docs/integrations/vectorstores/pgvector", "Rockset": "https://python.langchain.com/docs/integrations/vectorstores/rockset", "Zilliz": "https://python.langchain.com/docs/integrations/vectorstores/zilliz", "SingleStoreDB": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb", "Annoy": "https://python.langchain.com/docs/integrations/vectorstores/annoy", "Typesense": "https://python.langchain.com/docs/integrations/vectorstores/typesense", "Atlas": "https://python.langchain.com/docs/integrations/vectorstores/atlas", "Tair": "https://python.langchain.com/docs/integrations/vectorstores/tair", "Chroma": "https://python.langchain.com/docs/integrations/vectorstores/chroma", "Alibaba Cloud OpenSearch": 
"https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch", "StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks", "Clarifai": "https://python.langchain.com/docs/integrations/vectorstores/clarifai", "scikit-learn": "https://python.langchain.com/docs/integrations/vectorstores/sklearn", "DocArrayHnswSearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_hnsw", "MyScale": "https://python.langchain.com/docs/integrations/vectorstores/myscale", "ClickHouse Vector Search": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse", "Qdrant": "https://python.langchain.com/docs/integrations/vectorstores/qdrant", "Tigris": "https://python.langchain.com/docs/integrations/vectorstores/tigris", "AwaDB": "https://python.langchain.com/docs/integrations/vectorstores/awadb", "Supabase (Postgres)": "https://python.langchain.com/docs/integrations/vectorstores/supabase", "OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/opensearch", "Pinecone": "https://python.langchain.com/docs/integrations/vectorstores/pinecone", "Azure Cognitive Search": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch", "Cassandra": "https://python.langchain.com/docs/integrations/vectorstores/cassandra", "Milvus": "https://python.langchain.com/docs/integrations/vectorstores/milvus", "ElasticSearch": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch", "Marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo", "DocArrayInMemorySearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory", "pg_embedding": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding", "FAISS": "https://python.langchain.com/docs/integrations/vectorstores/faiss", "AnalyticDB": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb", "Hologres": "https://python.langchain.com/docs/integrations/vectorstores/hologres", "MongoDB Atlas": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas", "Question Answering Benchmarking: State of the Union Address": "https://python.langchain.com/docs/guides/evaluation/examples/qa_benchmarking_sota", "QA Generation": "https://python.langchain.com/docs/guides/evaluation/examples/qa_generation", "Question Answering Benchmarking: Paul Graham Essay": "https://python.langchain.com/docs/guides/evaluation/examples/qa_benchmarking_pg", "Data Augmented Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/data_augmented_question_answering", "Agent VectorDB Question Answering Benchmarking": "https://python.langchain.com/docs/guides/evaluation/examples/agent_vectordb_sota_pg", "Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": "https://python.langchain.com/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake", "Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/code/code-analysis-deeplake", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Loading from LangChainHub": "https://python.langchain.com/docs/modules/chains/how_to/from_hub", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Graph QA": "https://python.langchain.com/docs/modules/chains/additional/graph_qa"}, "FAISS": {"Cohere 
Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "FAISS": "https://python.langchain.com/docs/integrations/vectorstores/faiss", "Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio", "AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval", "Generative Agents in LangChain": "https://python.langchain.com/docs/use_cases/agent_simulations/characters", "Ensemble Retriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/ensemble", "Custom agent with tool retrieval": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval", "Select by maximal marginal relevance (MMR)": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/mmr"}, "OpenAI": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "Google Serper API": "https://python.langchain.com/docs/integrations/tools/google_serper", "Human as a tool": "https://python.langchain.com/docs/integrations/tools/human_tools", "OpenWeatherMap API": "https://python.langchain.com/docs/integrations/tools/openweathermap", "Search Tools": "https://python.langchain.com/docs/integrations/tools/search_tools", "Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier", "Gradio Tools": "https://python.langchain.com/docs/integrations/tools/gradio_tools", "SceneXplain": "https://python.langchain.com/docs/integrations/tools/sceneXplain", "Entity Memory with SQLite storage": "https://python.langchain.com/docs/integrations/memory/entity_memory_with_sqlite", "Argilla": "https://python.langchain.com/docs/integrations/callbacks/argilla", "PromptLayer": "https://python.langchain.com/docs/integrations/callbacks/promptlayer", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/streamlit", "WandB Tracing": "https://python.langchain.com/docs/integrations/providers/agent_with_wandb_tracing", "Comet": "https://python.langchain.com/docs/integrations/providers/comet_tracking", "Aim": "https://python.langchain.com/docs/integrations/providers/aim_tracking", "Weights & Biases": "https://python.langchain.com/docs/integrations/providers/wandb_tracking", "OpenAI": "https://python.langchain.com/docs/integrations/llms/openai", "Rebuff": "https://python.langchain.com/docs/integrations/providers/rebuff", "MLflow": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking", "Google Serper": "https://python.langchain.com/docs/integrations/providers/google_serper", "Helicone": "https://python.langchain.com/docs/integrations/providers/helicone", "Shale Protocol": "https://python.langchain.com/docs/integrations/providers/shaleprotocol", "WhyLabs": "https://python.langchain.com/docs/integrations/providers/whylabs_profiling", "ClearML": 
"https://python.langchain.com/docs/integrations/providers/clearml_tracking", "Ray Serve": "https://python.langchain.com/docs/integrations/providers/ray_serve", "Log, Trace, and Monitor Langchain LLM Calls": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "Vectara Text Generation": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_text_generation", "CSV Agent": "https://python.langchain.com/docs/integrations/toolkits/csv", "Xorbits Agent": "https://python.langchain.com/docs/integrations/toolkits/xorbits", "Jira": "https://python.langchain.com/docs/integrations/toolkits/jira", "Spark Dataframe Agent": "https://python.langchain.com/docs/integrations/toolkits/spark", "Python Agent": "https://python.langchain.com/docs/integrations/toolkits/python", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database", "Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla", "JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json", "GitHub": "https://python.langchain.com/docs/integrations/toolkits/github", "Pandas Dataframe Agent": "https://python.langchain.com/docs/integrations/toolkits/pandas", "OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi", "Psychic": "https://python.langchain.com/docs/integrations/document_loaders/psychic", "Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching", "Question Answering Benchmarking: State of the Union Address": "https://python.langchain.com/docs/guides/evaluation/examples/qa_benchmarking_sota", "Question Answering Benchmarking: Paul Graham Essay": "https://python.langchain.com/docs/guides/evaluation/examples/qa_benchmarking_pg", "Evaluating an OpenAPI Chain": "https://python.langchain.com/docs/guides/evaluation/examples/openapi_eval", "Data Augmented Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/data_augmented_question_answering", "Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/question_answering", "Agent VectorDB Question Answering Benchmarking": "https://python.langchain.com/docs/guides/evaluation/examples/agent_vectordb_sota_pg", "HuggingGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/hugginggpt", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "Question answering over a group chat messages using Activeloop's DeepLake": "https://python.langchain.com/docs/use_cases/question_answering/semantic-search-over-chat", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Weaviate self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/weaviate_self_query", "Chroma self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/chroma_self_query", "DeepLake self-querying ": 
"https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/deeplake_self_query", "Self-querying with Pinecone": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/pinecone", "Self-querying with MyScale": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/myscale_self_query", "Qdrant self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/qdrant_self_query", "Lost in the middle: The problem with long contexts": "https://python.langchain.com/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Conversation Knowledge Graph Memory": "https://python.langchain.com/docs/modules/memory/kg", "ConversationTokenBufferMemory": "https://python.langchain.com/docs/modules/memory/token_buffer", "How to add Memory to an LLMChain": "https://python.langchain.com/docs/modules/memory/adding_memory", "How to use multiple memory classes in the same chain": "https://python.langchain.com/docs/modules/memory/multiple_memory", "How to customize conversational memory": "https://python.langchain.com/docs/modules/memory/conversational_customization", "ConversationSummaryBufferMemory": "https://python.langchain.com/docs/modules/memory/summary_buffer", "Multiple callback handlers": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks", "Token counting": "https://python.langchain.com/docs/modules/callbacks/token_counting", "Logging to file": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler", "Multi-Input Tools": "https://python.langchain.com/docs/modules/agents/tools/multi_input_tool", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Tool Input Schema": "https://python.langchain.com/docs/modules/agents/tools/tool_input_validation", "Human-in-the-loop Tool Validation": "https://python.langchain.com/docs/modules/agents/tools/human_approval", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Access intermediate steps": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps", "Timeouts for agents": "https://python.langchain.com/docs/modules/agents/how_to/max_time_limit", "Streaming final agent output": "https://python.langchain.com/docs/modules/agents/how_to/streaming_stdout_final_only", "Cap the max number of iterations": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations", "Async API": "https://python.langchain.com/docs/modules/chains/how_to/async_chain", "Tracking token usage": "https://python.langchain.com/docs/modules/model_io/models/llms/token_usage_tracking", "Serialization": "https://python.langchain.com/docs/modules/model_io/models/llms/llm_serialization", "Retry parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/retry", "Datetime parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/datetime", "Pydantic (JSON) parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/pydantic", "Router": "https://python.langchain.com/docs/modules/chains/foundational/router", "Transformation": "https://python.langchain.com/docs/modules/chains/foundational/transformation", "Bash chain": "https://python.langchain.com/docs/modules/chains/additional/llm_bash", "Summarization checker chain": 
"https://python.langchain.com/docs/modules/chains/additional/llm_summarization_checker", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "HTTP request chain": "https://python.langchain.com/docs/modules/chains/additional/llm_requests", "LLM Symbolic Math ": "https://python.langchain.com/docs/modules/chains/additional/llm_symbolic_math", "FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare", "Hypothetical Document Embeddings": "https://python.langchain.com/docs/modules/chains/additional/hyde", "Graph QA": "https://python.langchain.com/docs/modules/chains/additional/graph_qa", "Self-checking chain": "https://python.langchain.com/docs/modules/chains/additional/llm_checker", "OpenAPI chain": "https://python.langchain.com/docs/modules/chains/additional/openapi"}, "ContextualCompressionRetriever": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever"}, "CohereRerank": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere"}, "RetrievalQA": {"Cohere Reranker": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/deeplake", "StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks", "Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio", "Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "Question Answering Benchmarking: State of the Union Address": "https://python.langchain.com/docs/guides/evaluation/examples/qa_benchmarking_sota", "Question Answering Benchmarking: Paul Graham Essay": "https://python.langchain.com/docs/guides/evaluation/examples/qa_benchmarking_pg", "Data Augmented Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/data_augmented_question_answering", "Agent VectorDB Question Answering Benchmarking": "https://python.langchain.com/docs/guides/evaluation/examples/agent_vectordb_sota_pg", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "KNNRetriever": {"kNN": "https://python.langchain.com/docs/integrations/retrievers/knn"}, "WikipediaRetriever": {"Wikipedia": "https://python.langchain.com/docs/integrations/providers/wikipedia"}, "ChatOpenAI": {"Wikipedia": "https://python.langchain.com/docs/integrations/retrievers/wikipedia", "Arxiv": 
"https://python.langchain.com/docs/integrations/retrievers/arxiv", "ChatGPT Plugins": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins", "Human as a tool": "https://python.langchain.com/docs/integrations/tools/human_tools", "ArXiv API Tool": "https://python.langchain.com/docs/integrations/tools/arxiv", "Metaphor Search": "https://python.langchain.com/docs/integrations/tools/metaphor_search", "Shell Tool": "https://python.langchain.com/docs/integrations/tools/bash", "Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history", "OpenAI": "https://python.langchain.com/docs/integrations/chat/openai", "Context": "https://python.langchain.com/docs/integrations/callbacks/context", "PromptLayer": "https://python.langchain.com/docs/integrations/callbacks/promptlayer", "CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte", "Arthur": "https://python.langchain.com/docs/integrations/providers/arthur_tracking", "CSV Agent": "https://python.langchain.com/docs/integrations/toolkits/csv", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "Python Agent": "https://python.langchain.com/docs/integrations/toolkits/python", "PowerBI Dataset Agent": "https://python.langchain.com/docs/integrations/toolkits/powerbi", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database", "Spark SQL Agent": "https://python.langchain.com/docs/integrations/toolkits/spark_sql", "Pandas Dataframe Agent": "https://python.langchain.com/docs/integrations/toolkits/pandas", "Multion Toolkit": "https://python.langchain.com/docs/integrations/toolkits/multion", "OpenAI Functions Metadata Tagger": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger", "Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio", "Figma": "https://python.langchain.com/docs/integrations/document_loaders/figma", "Debugging": "https://python.langchain.com/docs/guides/debugging", "LangSmith Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough", "QA Generation": "https://python.langchain.com/docs/guides/evaluation/examples/qa_generation", "Comparing Chain Outputs": "https://python.langchain.com/docs/guides/evaluation/examples/comparisons", "Agent Trajectory": "https://python.langchain.com/docs/modules/evaluation/trajectory/trajectory_eval", "Custom Trajectory Evaluator": "https://python.langchain.com/docs/modules/evaluation/trajectory/custom", "QA Correctness": "https://python.langchain.com/docs/modules/evaluation/string/qa", "AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Question answering over a group chat messages using Activeloop's DeepLake": "https://python.langchain.com/docs/use_cases/question_answering/semantic-search-over-chat", "Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": 
"https://python.langchain.com/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake", "Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/code/code-analysis-deeplake", "Wikibase Agent": "https://python.langchain.com/docs/use_cases/agents/wikibase_agent", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "CAMEL Role-Playing Autonomous Cooperative Agents": "https://python.langchain.com/docs/use_cases/agent_simulations/camel_role_playing", "Multi-Agent Simulated Environment: Petting Zoo": "https://python.langchain.com/docs/use_cases/agent_simulations/petting_zoo", "Multi-agent decentralized speaker selection": "https://python.langchain.com/docs/use_cases/agent_simulations/multiagent_bidding", "Multi-agent authoritarian speaker selection": "https://python.langchain.com/docs/use_cases/agent_simulations/multiagent_authoritarian", "Generative Agents in LangChain": "https://python.langchain.com/docs/use_cases/agent_simulations/characters", "Two-Player Dungeons & Dragons": "https://python.langchain.com/docs/use_cases/agent_simulations/two_player_dnd", "Multi-Player Dungeons & Dragons": "https://python.langchain.com/docs/use_cases/agent_simulations/multi_player_dnd", "Simulated Environment: Gymnasium": "https://python.langchain.com/docs/use_cases/agent_simulations/gymnasium", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research", "How to add Memory to an LLMChain": "https://python.langchain.com/docs/modules/memory/adding_memory", "Custom callback handlers": "https://python.langchain.com/docs/modules/callbacks/custom_callbacks", "Async callbacks": "https://python.langchain.com/docs/modules/callbacks/async_callbacks", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Tools as OpenAI Functions": "https://python.langchain.com/docs/modules/agents/tools/tools_as_openai_functions", "OpenAI Multi Functions Agent": "https://python.langchain.com/docs/modules/agents/agent_types/openai_multi_functions_agent", "Handle parsing errors": "https://python.langchain.com/docs/modules/agents/how_to/handle_parsing_errors", "Running Agent as an Iterator": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter", "Add Memory to OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/add_memory_openai_functions", "Custom functions with OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent", "Use ToolKits with OpenAI Functions": "https://python.langchain.com/docs/modules/agents/how_to/use_toolkits_with_openai_functions", "Retry parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/retry", "Pydantic (JSON) parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/pydantic", "Few shot examples for chat models": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/few_shot_examples_chat", "Prompt Pipelining": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining", "Connecting to a Feature Store": 
"https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store", "Custom chain": "https://python.langchain.com/docs/modules/chains/how_to/custom_chain", "Using OpenAI functions": "https://python.langchain.com/docs/modules/chains/popular/openai_functions", "Neptune Open Cypher QA Chain": "https://python.langchain.com/docs/modules/chains/additional/neptune_cypher_qa", "NebulaGraphQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa", "Elasticsearch database": "https://python.langchain.com/docs/modules/chains/additional/elasticsearch_database", "Extraction": "https://python.langchain.com/docs/modules/chains/additional/extraction", "KuzuQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_kuzu_qa", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "HugeGraph QA Chain": "https://python.langchain.com/docs/modules/chains/additional/graph_hugegraph_qa", "Tagging": "https://python.langchain.com/docs/modules/chains/additional/tagging", "GraphSparqlQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_sparql_qa", "Question-Answering Citations": "https://python.langchain.com/docs/modules/chains/additional/qa_citations", "FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare", "ArangoDB QA chain": "https://python.langchain.com/docs/modules/chains/additional/graph_arangodb_qa", "Graph DB QA chain": "https://python.langchain.com/docs/modules/chains/additional/graph_cypher_qa"}, "ConversationalRetrievalChain": {"Wikipedia": "https://python.langchain.com/docs/integrations/retrievers/wikipedia", "Arxiv": "https://python.langchain.com/docs/integrations/retrievers/arxiv", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Question answering over a group chat messages using Activeloop's DeepLake": "https://python.langchain.com/docs/use_cases/question_answering/semantic-search-over-chat", "Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": "https://python.langchain.com/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake", "Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/code/code-analysis-deeplake", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "MetalRetriever": {"Metal": "https://python.langchain.com/docs/integrations/providers/metal"}, "CSVLoader": {"ChatGPT Plugin": "https://python.langchain.com/docs/integrations/retrievers/chatgpt-plugin", "CSV": "https://python.langchain.com/docs/integrations/document_loaders/csv"}, "Document": {"ChatGPT Plugin": "https://python.langchain.com/docs/integrations/retrievers/chatgpt-plugin", "Weaviate Hybrid Search": "https://python.langchain.com/docs/integrations/retrievers/weaviate-hybrid", "BM25": "https://python.langchain.com/docs/integrations/retrievers/bm25", "TF-IDF": "https://python.langchain.com/docs/integrations/retrievers/tf_idf", "Apify": "https://python.langchain.com/docs/integrations/tools/apify", "Vectara Text Generation": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_text_generation", "PGVector": 
"https://python.langchain.com/docs/integrations/vectorstores/pgvector", "Annoy": "https://python.langchain.com/docs/integrations/vectorstores/annoy", "pg_embedding": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding", "FAISS": "https://python.langchain.com/docs/integrations/vectorstores/faiss", "OpenAI Functions Metadata Tagger": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger", "Doctran Extract Properties": "https://python.langchain.com/docs/integrations/document_transformers/doctran_extract_properties", "Doctran Interrogate Documents": "https://python.langchain.com/docs/integrations/document_transformers/doctran_interrogate_document", "Doctran Translate Documents": "https://python.langchain.com/docs/integrations/document_transformers/doctran_translate_document", "Copy Paste": "https://python.langchain.com/docs/integrations/document_loaders/copypaste", "Apify Dataset": "https://python.langchain.com/docs/integrations/document_loaders/apify_dataset", "Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "SageMakerEndpoint": "https://python.langchain.com/docs/integrations/llms/sagemaker", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval", "Weaviate self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/weaviate_self_query", "Chroma self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/chroma_self_query", "DeepLake self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/deeplake_self_query", "Self-querying with Pinecone": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/pinecone", "Self-querying with MyScale": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/myscale_self_query", "Qdrant self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/qdrant_self_query", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Custom agent with tool retrieval": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare"}, "ChatGPTPluginRetriever": {"ChatGPT Plugin": "https://python.langchain.com/docs/integrations/retrievers/chatgpt-plugin", "OpenAI": "https://python.langchain.com/docs/integrations/providers/openai"}, "GoogleCloudEnterpriseSearchRetriever": {"Google Cloud Enterprise Search": "https://python.langchain.com/docs/integrations/retrievers/google_cloud_enterprise_search"}, "DocArrayRetriever": {"DocArray Retriever": "https://python.langchain.com/docs/integrations/retrievers/docarray_retriever"}, "SVMRetriever": {"SVM": "https://python.langchain.com/docs/integrations/retrievers/svm", "QA and Chat over Documents": 
"https://python.langchain.com/docs/use_cases/question_answering/index"}, "PineconeHybridSearchRetriever": {"Pinecone Hybrid Search": "https://python.langchain.com/docs/integrations/retrievers/pinecone_hybrid_search"}, "PubMedRetriever": {"PubMed": "https://python.langchain.com/docs/integrations/retrievers/pubmed"}, "WeaviateHybridSearchRetriever": {"Weaviate Hybrid Search": "https://python.langchain.com/docs/integrations/retrievers/weaviate-hybrid"}, "ArxivRetriever": {"Arxiv": "https://python.langchain.com/docs/integrations/providers/arxiv"}, "BM25Retriever": {"BM25": "https://python.langchain.com/docs/integrations/retrievers/bm25", "Ensemble Retriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/ensemble"}, "AzureCognitiveSearchRetriever": {"Azure Cognitive Search": "https://python.langchain.com/docs/integrations/providers/azure_cognitive_search_"}, "ChaindeskRetriever": {"Chaindesk": "https://python.langchain.com/docs/integrations/providers/chaindesk"}, "MergerRetriever": {"LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever"}, "Chroma": {"LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever", "Chroma": "https://python.langchain.com/docs/integrations/vectorstores/chroma", "Vectorstore Agent": "https://python.langchain.com/docs/integrations/toolkits/vectorstore", "StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks", "Psychic": "https://python.langchain.com/docs/integrations/document_loaders/psychic", "Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "Data Augmented Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/data_augmented_question_answering", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research", "Chroma self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/chroma_self_query", "Lost in the middle: The problem with long contexts": "https://python.langchain.com/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Router": "https://python.langchain.com/docs/modules/chains/foundational/router", "Loading from LangChainHub": "https://python.langchain.com/docs/modules/chains/how_to/from_hub", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "Hypothetical Document Embeddings": 
"https://python.langchain.com/docs/modules/chains/additional/hyde"}, "DocumentCompressorPipeline": {"LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever"}, "LongContextReorder": {"LOTR (Merger Retriever)": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever"}, "TFIDFRetriever": {"TF-IDF": "https://python.langchain.com/docs/integrations/retrievers/tf_idf"}, "load_tools": {"ChatGPT Plugins": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins", "Human as a tool": "https://python.langchain.com/docs/integrations/tools/human_tools", "AWS Lambda API": "https://python.langchain.com/docs/integrations/tools/awslambda", "Requests": "https://python.langchain.com/docs/integrations/tools/requests", "OpenWeatherMap API": "https://python.langchain.com/docs/integrations/tools/openweathermap", "Search Tools": "https://python.langchain.com/docs/integrations/tools/search_tools", "ArXiv API Tool": "https://python.langchain.com/docs/integrations/tools/arxiv", "GraphQL tool": "https://python.langchain.com/docs/integrations/tools/graphql", "SceneXplain": "https://python.langchain.com/docs/integrations/tools/sceneXplain", "SerpAPI": "https://python.langchain.com/docs/integrations/providers/serpapi", "Golden": "https://python.langchain.com/docs/integrations/providers/golden", "Wolfram Alpha": "https://python.langchain.com/docs/integrations/providers/wolfram_alpha", "DataForSEO": "https://python.langchain.com/docs/integrations/providers/dataforseo", "SearxNG Search API": "https://python.langchain.com/docs/integrations/providers/searx", "Google Serper": "https://python.langchain.com/docs/integrations/providers/google_serper", "OpenWeatherMap": "https://python.langchain.com/docs/integrations/providers/openweathermap", "Google Search": "https://python.langchain.com/docs/integrations/providers/google_search", "Amazon API Gateway": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway_example", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Human-in-the-loop Tool Validation": "https://python.langchain.com/docs/modules/agents/tools/human_approval", "Access intermediate steps": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps", "Timeouts for agents": "https://python.langchain.com/docs/modules/agents/how_to/max_time_limit", "Streaming final agent output": "https://python.langchain.com/docs/modules/agents/how_to/streaming_stdout_final_only", "Cap the max number of iterations": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations", "Human input Chat Model": "https://python.langchain.com/docs/modules/model_io/models/chat/human_input_chat_model", "Fake LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/fake_llm", "Tracking token usage": "https://python.langchain.com/docs/modules/model_io/models/llms/token_usage_tracking", "Human input LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/human_input_llm"}, "AgentType": {"ChatGPT Plugins": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins", "Google Serper API": "https://python.langchain.com/docs/integrations/tools/google_serper", "Human as a tool": "https://python.langchain.com/docs/integrations/tools/human_tools", "Search Tools": "https://python.langchain.com/docs/integrations/tools/search_tools", "Zapier 
Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier", "Shell Tool": "https://python.langchain.com/docs/integrations/tools/bash", "Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history", "Argilla": "https://python.langchain.com/docs/integrations/callbacks/argilla", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/streamlit", "WandB Tracing": "https://python.langchain.com/docs/integrations/providers/agent_with_wandb_tracing", "Aim": "https://python.langchain.com/docs/integrations/providers/aim_tracking", "Weights & Biases": "https://python.langchain.com/docs/integrations/providers/wandb_tracking", "MLflow": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking", "Google Serper": "https://python.langchain.com/docs/integrations/providers/google_serper", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte", "ClearML": "https://python.langchain.com/docs/integrations/providers/clearml_tracking", "Log, Trace, and Monitor Langchain LLM Calls": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index", "CSV Agent": "https://python.langchain.com/docs/integrations/toolkits/csv", "Jira": "https://python.langchain.com/docs/integrations/toolkits/jira", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "Python Agent": "https://python.langchain.com/docs/integrations/toolkits/python", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database", "Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla", "GitHub": "https://python.langchain.com/docs/integrations/toolkits/github", "Pandas Dataframe Agent": "https://python.langchain.com/docs/integrations/toolkits/pandas", "Multion Toolkit": "https://python.langchain.com/docs/integrations/toolkits/multion", "Amazon API Gateway": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway_example", "Debugging": "https://python.langchain.com/docs/guides/debugging", "LangSmith Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough", "Comparing Chain Outputs": "https://python.langchain.com/docs/guides/evaluation/examples/comparisons", "Agent VectorDB Question Answering Benchmarking": "https://python.langchain.com/docs/guides/evaluation/examples/agent_vectordb_sota_pg", "Agent Trajectory": "https://python.langchain.com/docs/modules/evaluation/trajectory/trajectory_eval", "Multi-modal outputs: Image & Text": "https://python.langchain.com/docs/use_cases/multi_modal/image_agent", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Multiple callback handlers": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks", "Multi-Input Tools": "https://python.langchain.com/docs/modules/agents/tools/multi_input_tool", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Tool Input Schema": "https://python.langchain.com/docs/modules/agents/tools/tool_input_validation", "Human-in-the-loop Tool Validation": "https://python.langchain.com/docs/modules/agents/tools/human_approval", "Self ask with search": "https://python.langchain.com/docs/modules/agents/agent_types/self_ask_with_search", "ReAct document store": 
"https://python.langchain.com/docs/modules/agents/agent_types/react_docstore", "OpenAI Multi Functions Agent": "https://python.langchain.com/docs/modules/agents/agent_types/openai_multi_functions_agent", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Access intermediate steps": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps", "Handle parsing errors": "https://python.langchain.com/docs/modules/agents/how_to/handle_parsing_errors", "Timeouts for agents": "https://python.langchain.com/docs/modules/agents/how_to/max_time_limit", "Streaming final agent output": "https://python.langchain.com/docs/modules/agents/how_to/streaming_stdout_final_only", "Add Memory to OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/add_memory_openai_functions", "Cap the max number of iterations": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations", "Custom functions with OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent", "Async API": "https://python.langchain.com/docs/modules/agents/how_to/async_agent", "Use ToolKits with OpenAI Functions": "https://python.langchain.com/docs/modules/agents/how_to/use_toolkits_with_openai_functions", "Human input Chat Model": "https://python.langchain.com/docs/modules/model_io/models/chat/human_input_chat_model", "Fake LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/fake_llm", "Tracking token usage": "https://python.langchain.com/docs/modules/model_io/models/llms/token_usage_tracking", "Human input LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/human_input_llm"}, "AIPluginTool": {"ChatGPT Plugins": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins"}, "Tool": {"DataForSeo API Wrapper": "https://python.langchain.com/docs/integrations/tools/dataforseo", "SerpAPI": "https://python.langchain.com/docs/integrations/tools/serpapi", "Google Search": "https://python.langchain.com/docs/integrations/tools/google_search", "Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Custom multi-action agent": "https://python.langchain.com/docs/modules/agents/how_to/custom_multi_action_agent", "Running Agent as an Iterator": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter", "Custom agent": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent"}, "SearxSearchWrapper": {"SearxNG Search API": "https://python.langchain.com/docs/integrations/providers/searx"}, "GoogleSerperAPIWrapper": {"Google Serper API": "https://python.langchain.com/docs/integrations/tools/google_serper", "Google Serper": "https://python.langchain.com/docs/integrations/providers/google_serper", "FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare"}, "initialize_agent": {"Google 
Serper API": "https://python.langchain.com/docs/integrations/tools/google_serper", "Search Tools": "https://python.langchain.com/docs/integrations/tools/search_tools", "Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier", "Metaphor Search": "https://python.langchain.com/docs/integrations/tools/metaphor_search", "Gradio Tools": "https://python.langchain.com/docs/integrations/tools/gradio_tools", "SceneXplain": "https://python.langchain.com/docs/integrations/tools/sceneXplain", "Shell Tool": "https://python.langchain.com/docs/integrations/tools/bash", "Zep Memory": "https://python.langchain.com/docs/integrations/memory/zep_memory", "Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history", "WandB Tracing": "https://python.langchain.com/docs/integrations/providers/agent_with_wandb_tracing", "Comet": "https://python.langchain.com/docs/integrations/providers/comet_tracking", "Aim": "https://python.langchain.com/docs/integrations/providers/aim_tracking", "Weights & Biases": "https://python.langchain.com/docs/integrations/providers/wandb_tracking", "MLflow": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking", "Google Serper": "https://python.langchain.com/docs/integrations/providers/google_serper", "ClearML": "https://python.langchain.com/docs/integrations/providers/clearml_tracking", "Jira": "https://python.langchain.com/docs/integrations/toolkits/jira", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "Azure Cognitive Services Toolkit": "https://python.langchain.com/docs/integrations/toolkits/azure_cognitive_services", "Gmail Toolkit": "https://python.langchain.com/docs/integrations/toolkits/gmail", "GitHub": "https://python.langchain.com/docs/integrations/toolkits/github", "PlayWright Browser Toolkit": "https://python.langchain.com/docs/integrations/toolkits/playwright", "Office365 Toolkit": "https://python.langchain.com/docs/integrations/toolkits/office365", "Amadeus Toolkit": "https://python.langchain.com/docs/integrations/toolkits/amadeus", "Amazon API Gateway": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway_example", "Comparing Chain Outputs": "https://python.langchain.com/docs/guides/evaluation/examples/comparisons", "Agent VectorDB Question Answering Benchmarking": "https://python.langchain.com/docs/guides/evaluation/examples/agent_vectordb_sota_pg", "Multi-modal outputs: Image & Text": "https://python.langchain.com/docs/use_cases/multi_modal/image_agent", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Multi-Input Tools": "https://python.langchain.com/docs/modules/agents/tools/multi_input_tool", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Human-in-the-loop Tool Validation": "https://python.langchain.com/docs/modules/agents/tools/human_approval", "Self ask with search": "https://python.langchain.com/docs/modules/agents/agent_types/self_ask_with_search", "ReAct document store": "https://python.langchain.com/docs/modules/agents/agent_types/react_docstore", "OpenAI Multi Functions Agent": "https://python.langchain.com/docs/modules/agents/agent_types/openai_multi_functions_agent", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Access intermediate steps": 
"https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps", "Handle parsing errors": "https://python.langchain.com/docs/modules/agents/how_to/handle_parsing_errors", "Timeouts for agents": "https://python.langchain.com/docs/modules/agents/how_to/max_time_limit", "Streaming final agent output": "https://python.langchain.com/docs/modules/agents/how_to/streaming_stdout_final_only", "Add Memory to OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/add_memory_openai_functions", "Cap the max number of iterations": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations", "Custom functions with OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent", "Async API": "https://python.langchain.com/docs/modules/agents/how_to/async_agent", "Use ToolKits with OpenAI Functions": "https://python.langchain.com/docs/modules/agents/how_to/use_toolkits_with_openai_functions", "Human input Chat Model": "https://python.langchain.com/docs/modules/model_io/models/chat/human_input_chat_model", "Fake LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/fake_llm", "Tracking token usage": "https://python.langchain.com/docs/modules/model_io/models/llms/token_usage_tracking", "Human input LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/human_input_llm"}, "GooglePlacesTool": {"Google Places": "https://python.langchain.com/docs/integrations/tools/google_places"}, "HumanInputRun": {"Human as a tool": "https://python.langchain.com/docs/integrations/tools/human_tools", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times"}, "TwilioAPIWrapper": {"Twilio": "https://python.langchain.com/docs/integrations/tools/twilio"}, "IFTTTWebhook": {"IFTTT WebHooks": "https://python.langchain.com/docs/integrations/tools/ifttt"}, "WikipediaQueryRun": {"Wikipedia": "https://python.langchain.com/docs/integrations/tools/wikipedia"}, "WikipediaAPIWrapper": {"Wikipedia": "https://python.langchain.com/docs/integrations/tools/wikipedia", "Zep Memory": "https://python.langchain.com/docs/integrations/memory/zep_memory"}, "TextRequestsWrapper": {"Requests": "https://python.langchain.com/docs/integrations/tools/requests", "JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json", "OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi"}, "OpenWeatherMapAPIWrapper": {"OpenWeatherMap API": "https://python.langchain.com/docs/integrations/tools/openweathermap", "OpenWeatherMap": "https://python.langchain.com/docs/integrations/providers/openweathermap"}, "PubmedQueryRun": {"PubMed Tool": "https://python.langchain.com/docs/integrations/tools/pubmed"}, "YouTubeSearchTool": {"YouTubeSearchTool": "https://python.langchain.com/docs/integrations/tools/youtube"}, "VectorstoreIndexCreator": {"Apify": "https://python.langchain.com/docs/integrations/tools/apify", "HuggingFace dataset": "https://python.langchain.com/docs/integrations/document_loaders/hugging_face_dataset", "Spreedly": "https://python.langchain.com/docs/integrations/document_loaders/spreedly", "Image captions": "https://python.langchain.com/docs/integrations/document_loaders/image_captions", "Figma": "https://python.langchain.com/docs/integrations/document_loaders/figma", "Apify Dataset": "https://python.langchain.com/docs/integrations/document_loaders/apify_dataset", "Iugu": 
"https://python.langchain.com/docs/integrations/document_loaders/iugu", "Stripe": "https://python.langchain.com/docs/integrations/document_loaders/stripe", "Modern Treasury": "https://python.langchain.com/docs/integrations/document_loaders/modern_treasury", "Question Answering Benchmarking: State of the Union Address": "https://python.langchain.com/docs/guides/evaluation/examples/qa_benchmarking_sota", "Question Answering Benchmarking: Paul Graham Essay": "https://python.langchain.com/docs/guides/evaluation/examples/qa_benchmarking_pg", "Agent VectorDB Question Answering Benchmarking": "https://python.langchain.com/docs/guides/evaluation/examples/agent_vectordb_sota_pg", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index"}, "ZapierToolkit": {"Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier"}, "ZapierNLAWrapper": {"Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier"}, "LLMChain": {"Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier", "Argilla": "https://python.langchain.com/docs/integrations/callbacks/argilla", "Comet": "https://python.langchain.com/docs/integrations/providers/comet_tracking", "Aim": "https://python.langchain.com/docs/integrations/providers/aim_tracking", "Weights & Biases": "https://python.langchain.com/docs/integrations/providers/wandb_tracking", "Rebuff": "https://python.langchain.com/docs/integrations/providers/rebuff", "MLflow": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "Vectara Text Generation": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_text_generation", "Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla", "JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json", "Predibase": "https://python.langchain.com/docs/integrations/llms/predibase", "AzureML Online Endpoint": "https://python.langchain.com/docs/integrations/llms/azureml_endpoint_example", "Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/question_answering", "Custom Trajectory Evaluator": "https://python.langchain.com/docs/modules/evaluation/trajectory/custom", "Custom Pairwise Evaluator": "https://python.langchain.com/docs/modules/evaluation/comparison/custom", "Multi-agent authoritarian speaker selection": "https://python.langchain.com/docs/use_cases/agent_simulations/multiagent_authoritarian", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research", "How to add Memory to an LLMChain": "https://python.langchain.com/docs/modules/memory/adding_memory", "Logging to file": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler", "Datetime parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/datetime", "Prompt Pipelining": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining", "Connecting to a Feature Store": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store", "Router": "https://python.langchain.com/docs/modules/chains/foundational/router", "Async API": 
"https://python.langchain.com/docs/modules/chains/how_to/async_chain", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "Hypothetical Document Embeddings": "https://python.langchain.com/docs/modules/chains/additional/hyde"}, "PromptTemplate": {"Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier", "Context": "https://python.langchain.com/docs/integrations/callbacks/context", "Argilla": "https://python.langchain.com/docs/integrations/callbacks/argilla", "Comet": "https://python.langchain.com/docs/integrations/providers/comet_tracking", "Aim": "https://python.langchain.com/docs/integrations/providers/aim_tracking", "Weights & Biases": "https://python.langchain.com/docs/integrations/providers/wandb_tracking", "Rebuff": "https://python.langchain.com/docs/integrations/providers/rebuff", "MLflow": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte", "Vectara Text Generation": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_text_generation", "Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla", "Predibase": "https://python.langchain.com/docs/integrations/llms/predibase", "Evaluating an OpenAPI Chain": "https://python.langchain.com/docs/guides/evaluation/examples/openapi_eval", "Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/question_answering", "Pairwise String Comparison": "https://python.langchain.com/docs/modules/evaluation/comparison/pairwise_string", "Criteria Evaluation": "https://python.langchain.com/docs/guides/evaluation/string/criteria_eval_chain", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research", "Lost in the middle: The problem with long contexts": "https://python.langchain.com/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder", "How to create a custom Memory class": "https://python.langchain.com/docs/modules/memory/custom_memory", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Conversation Knowledge Graph Memory": "https://python.langchain.com/docs/modules/memory/kg", "How to add Memory to an LLMChain": "https://python.langchain.com/docs/modules/memory/adding_memory", "How to use multiple memory classes in the same chain": "https://python.langchain.com/docs/modules/memory/multiple_memory", "How to customize conversational memory": "https://python.langchain.com/docs/modules/memory/conversational_customization", "Logging to file": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler", "Datetime parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/datetime", "Select by n-gram overlap": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/ngram_overlap", "Prompt 
Pipelining": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining", "Template Formats": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/formats", "Connecting to a Feature Store": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store", "Evaluating Custom Criteria": "https://python.langchain.com/docs/modules/evaluation/string/criteria_eval_chain", "Router": "https://python.langchain.com/docs/modules/chains/foundational/router", "Transformation": "https://python.langchain.com/docs/modules/chains/foundational/transformation", "Custom chain": "https://python.langchain.com/docs/modules/chains/how_to/custom_chain", "Async API": "https://python.langchain.com/docs/modules/chains/how_to/async_chain", "Bash chain": "https://python.langchain.com/docs/modules/chains/additional/llm_bash", "Elasticsearch database": "https://python.langchain.com/docs/modules/chains/additional/elasticsearch_database", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "HTTP request chain": "https://python.langchain.com/docs/modules/chains/additional/llm_requests", "Hypothetical Document Embeddings": "https://python.langchain.com/docs/modules/chains/additional/hyde"}, "ZapierNLARunAction": {"Zapier Natural Language Actions API": "https://python.langchain.com/docs/integrations/tools/zapier"}, "GoldenQueryAPIWrapper": {"Golden Query": "https://python.langchain.com/docs/integrations/tools/golden_query", "Golden": "https://python.langchain.com/docs/integrations/providers/golden"}, "ArxivAPIWrapper": {"ArXiv API Tool": "https://python.langchain.com/docs/integrations/tools/arxiv"}, "MetaphorSearchAPIWrapper": {"Metaphor Search": "https://python.langchain.com/docs/integrations/tools/metaphor_search"}, "PlayWrightBrowserToolkit": {"Metaphor Search": "https://python.langchain.com/docs/integrations/tools/metaphor_search", "PlayWright Browser Toolkit": "https://python.langchain.com/docs/integrations/toolkits/playwright"}, "MetaphorSearchResults": {"Metaphor Search": "https://python.langchain.com/docs/integrations/tools/metaphor_search"}, "SerpAPIWrapper": {"SerpAPI": "https://python.langchain.com/docs/integrations/providers/serpapi", "AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt"}, "GraphQLAPIWrapper": {"GraphQL tool": "https://python.langchain.com/docs/integrations/tools/graphql"}, "DuckDuckGoSearchRun": {"DuckDuckGo Search": "https://python.langchain.com/docs/integrations/tools/ddg"}, "DuckDuckGoSearchResults": {"DuckDuckGo Search": "https://python.langchain.com/docs/integrations/tools/ddg"}, "DuckDuckGoSearchAPIWrapper": {"DuckDuckGo Search": "https://python.langchain.com/docs/integrations/tools/ddg"}, "ConversationBufferMemory": {"Gradio Tools": "https://python.langchain.com/docs/integrations/tools/gradio_tools", "SceneXplain": "https://python.langchain.com/docs/integrations/tools/sceneXplain", "Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "Bedrock": "https://python.langchain.com/docs/integrations/llms/bedrock", "QA and Chat over Documents": 
"https://python.langchain.com/docs/use_cases/question_answering/index", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Adding Message Memory backed by a database to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "How to add Memory to an LLMChain": "https://python.langchain.com/docs/modules/memory/adding_memory", "How to customize conversational memory": "https://python.langchain.com/docs/modules/memory/conversational_customization", "How to add Memory to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory", "Shared memory across agents and tools": "https://python.langchain.com/docs/modules/agents/how_to/sharedmemory_for_tools", "Add Memory to OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/add_memory_openai_functions", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "SceneXplainTool": {"SceneXplain": "https://python.langchain.com/docs/integrations/tools/sceneXplain"}, "WolframAlphaAPIWrapper": {"Wolfram Alpha": "https://python.langchain.com/docs/integrations/providers/wolfram_alpha"}, "load_huggingface_tool": {"Requires transformers>=4.29.0 and huggingface_hub>=0.14.1": "https://python.langchain.com/docs/integrations/tools/huggingface_tools"}, "GoogleSearchAPIWrapper": {"Google Search": "https://python.langchain.com/docs/integrations/providers/google_search", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research", "Adding Message Memory backed by a database to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db", "How to add Memory to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory", "Shared memory across agents and tools": "https://python.langchain.com/docs/modules/agents/how_to/sharedmemory_for_tools"}, "BingSearchAPIWrapper": {"Bing Search": "https://python.langchain.com/docs/integrations/tools/bing_search"}, "ShellTool": {"Shell Tool": "https://python.langchain.com/docs/integrations/tools/bash", "Human-in-the-loop Tool Validation": "https://python.langchain.com/docs/modules/agents/tools/human_approval"}, "FileManagementToolkit": {"File System Tools": "https://python.langchain.com/docs/integrations/tools/filesystem"}, "BraveSearch": {"Brave Search": "https://python.langchain.com/docs/integrations/providers/brave_search"}, "RedisChatMessageHistory": {"Redis Chat Message History": "https://python.langchain.com/docs/integrations/memory/redis_chat_message_history", "Adding Message Memory backed by a database to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db"}, "ZepMemory": {"Zep Memory": "https://python.langchain.com/docs/integrations/memory/zep_memory"}, "ConversationChain": {"Entity Memory with SQLite storage": "https://python.langchain.com/docs/integrations/memory/entity_memory_with_sqlite", "Figma": "https://python.langchain.com/docs/integrations/document_loaders/figma", "Bedrock": "https://python.langchain.com/docs/integrations/llms/bedrock", "Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools", "Conversation Knowledge Graph Memory": "https://python.langchain.com/docs/modules/memory/kg", 
"ConversationTokenBufferMemory": "https://python.langchain.com/docs/modules/memory/token_buffer", "How to use multiple memory classes in the same chain": "https://python.langchain.com/docs/modules/memory/multiple_memory", "How to customize conversational memory": "https://python.langchain.com/docs/modules/memory/conversational_customization", "ConversationSummaryBufferMemory": "https://python.langchain.com/docs/modules/memory/summary_buffer", "Router": "https://python.langchain.com/docs/modules/chains/foundational/router"}, "ConversationEntityMemory": {"Entity Memory with SQLite storage": "https://python.langchain.com/docs/integrations/memory/entity_memory_with_sqlite"}, "SQLiteEntityStore": {"Entity Memory with SQLite storage": "https://python.langchain.com/docs/integrations/memory/entity_memory_with_sqlite"}, "ENTITY_MEMORY_CONVERSATION_TEMPLATE": {"Entity Memory with SQLite storage": "https://python.langchain.com/docs/integrations/memory/entity_memory_with_sqlite"}, "PostgresChatMessageHistory": {"Postgres Chat Message History": "https://python.langchain.com/docs/integrations/memory/postgres_chat_message_history"}, "MomentoChatMessageHistory": {"Momento Chat Message History": "https://python.langchain.com/docs/integrations/memory/momento_chat_message_history"}, "MongoDBChatMessageHistory": {"Mongodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/mongodb_chat_message_history"}, "CassandraChatMessageHistory": {"Cassandra Chat Message History": "https://python.langchain.com/docs/integrations/memory/cassandra_chat_message_history", "Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra"}, "MotorheadMemory": {"Mot\u00f6rhead Memory": "https://python.langchain.com/docs/integrations/memory/motorhead_memory", "Mot\u00f6rhead Memory (Managed)": "https://python.langchain.com/docs/integrations/memory/motorhead_memory_managed"}, "DynamoDBChatMessageHistory": {"Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history"}, "PythonREPL": {"Dynamodb Chat Message History": "https://python.langchain.com/docs/integrations/memory/dynamodb_chat_message_history", "Python Agent": "https://python.langchain.com/docs/integrations/toolkits/python"}, "ChatAnthropic": {"Anthropic": "https://python.langchain.com/docs/integrations/chat/anthropic", "PlayWright Browser Toolkit": "https://python.langchain.com/docs/integrations/toolkits/playwright", "Agent Trajectory": "https://python.langchain.com/docs/modules/evaluation/trajectory/trajectory_eval", "Custom Pairwise Evaluator": "https://python.langchain.com/docs/modules/evaluation/comparison/custom", "Pairwise String Comparison": "https://python.langchain.com/docs/modules/evaluation/comparison/pairwise_string", "Criteria Evaluation": "https://python.langchain.com/docs/guides/evaluation/string/criteria_eval_chain", "Evaluating Custom Criteria": "https://python.langchain.com/docs/modules/evaluation/string/criteria_eval_chain"}, "AIMessage": {"Anthropic": "https://python.langchain.com/docs/integrations/chat/anthropic", "OpenAI": "https://python.langchain.com/docs/integrations/chat/openai", "JinaChat": "https://python.langchain.com/docs/integrations/chat/jinachat", "Few shot examples for chat models": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/few_shot_examples_chat"}, "CallbackManager": {"Anthropic": "https://python.langchain.com/docs/integrations/chat/anthropic", "Llama-cpp": 
"https://python.langchain.com/docs/integrations/llms/llamacpp", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research"}, "StreamingStdOutCallbackHandler": {"Anthropic": "https://python.langchain.com/docs/integrations/chat/anthropic", "GPT4All": "https://python.langchain.com/docs/integrations/llms/gpt4all", "Arthur": "https://python.langchain.com/docs/integrations/providers/arthur_tracking", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "Llama-cpp": "https://python.langchain.com/docs/integrations/llms/llamacpp", "C Transformers": "https://python.langchain.com/docs/integrations/llms/ctransformers", "Huggingface TextGen Inference": "https://python.langchain.com/docs/integrations/llms/huggingface_textgen_inference", "Replicate": "https://python.langchain.com/docs/integrations/llms/replicate", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research"}, "create_tagging_chain": {"Llama API": "https://python.langchain.com/docs/integrations/chat/llama_api", "Tagging": "https://python.langchain.com/docs/modules/chains/additional/tagging"}, "ChatVertexAI": {"Google Cloud Platform Vertex AI PaLM ": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm"}, "JinaChat": {"JinaChat": "https://python.langchain.com/docs/integrations/chat/jinachat"}, "AzureChatOpenAI": {"Azure": "https://python.langchain.com/docs/integrations/chat/azure_chat_openai", "Azure OpenAI": "https://python.langchain.com/docs/integrations/providers/azure_openai"}, "PromptLayerChatOpenAI": {"PromptLayer ChatOpenAI": "https://python.langchain.com/docs/integrations/chat/promptlayer_chatopenai"}, "ContextCallbackHandler": {"Context": "https://python.langchain.com/docs/integrations/callbacks/context"}, "ArgillaCallbackHandler": {"Argilla": "https://python.langchain.com/docs/integrations/providers/argilla"}, "PromptLayerCallbackHandler": {"PromptLayer": "https://python.langchain.com/docs/integrations/callbacks/promptlayer"}, "GPT4All": {"PromptLayer": "https://python.langchain.com/docs/integrations/callbacks/promptlayer", "GPT4All": "https://python.langchain.com/docs/integrations/llms/gpt4all", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa"}, "StreamlitCallbackHandler": {"Streamlit": "https://python.langchain.com/docs/integrations/callbacks/streamlit", "GPT4All": "https://python.langchain.com/docs/integrations/providers/gpt4all"}, "FigmaFileLoader": {"Figma": "https://python.langchain.com/docs/integrations/document_loaders/figma"}, "AzureOpenAI": {"Azure OpenAI": "https://python.langchain.com/docs/integrations/llms/azure_openai_example", "OpenAI": "https://python.langchain.com/docs/integrations/providers/openai"}, "MyScale": {"MyScale": "https://python.langchain.com/docs/integrations/vectorstores/myscale", "Self-querying with MyScale": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/myscale_self_query"}, "Baseten": {"Baseten": "https://python.langchain.com/docs/integrations/llms/baseten"}, "WeatherDataLoader": {"Weather": 
"https://python.langchain.com/docs/integrations/document_loaders/weather"}, "Tair": {"Tair": "https://python.langchain.com/docs/integrations/vectorstores/tair"}, "UnstructuredWordDocumentLoader": {"Microsoft Word": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_word"}, "CollegeConfidentialLoader": {"College Confidential": "https://python.langchain.com/docs/integrations/document_loaders/college_confidential"}, "RWKV": {"RWKV-4": "https://python.langchain.com/docs/integrations/providers/rwkv"}, "GoogleDriveLoader": {"Google Drive": "https://python.langchain.com/docs/integrations/document_loaders/google_drive"}, "AmazonAPIGateway": {"Amazon API Gateway": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway_example"}, "UnstructuredPowerPointLoader": {"Microsoft PowerPoint": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_powerpoint"}, "wandb_tracing_enabled": {"WandB Tracing": "https://python.langchain.com/docs/integrations/providers/agent_with_wandb_tracing"}, "CometCallbackHandler": {"Comet": "https://python.langchain.com/docs/integrations/providers/comet_tracking"}, "CTransformers": {"C Transformers": "https://python.langchain.com/docs/integrations/llms/ctransformers"}, "BiliBiliLoader": {"BiliBili": "https://python.langchain.com/docs/integrations/document_loaders/bilibili"}, "DiffbotLoader": {"Diffbot": "https://python.langchain.com/docs/integrations/document_loaders/diffbot"}, "AimCallbackHandler": {"Aim": "https://python.langchain.com/docs/integrations/providers/aim_tracking"}, "ModernTreasuryLoader": {"Modern Treasury": "https://python.langchain.com/docs/integrations/document_loaders/modern_treasury"}, "FacebookChatLoader": {"Facebook Chat": "https://python.langchain.com/docs/integrations/document_loaders/facebook_chat"}, "Banana": {"Banana": "https://python.langchain.com/docs/integrations/llms/banana"}, "HuggingFacePipeline": {"Hugging Face": "https://python.langchain.com/docs/integrations/providers/huggingface", "RELLM": "https://python.langchain.com/docs/integrations/llms/rellm_experimental", "JSONFormer": "https://python.langchain.com/docs/integrations/llms/jsonformer_experimental"}, "HuggingFaceHub": {"Hugging Face": "https://python.langchain.com/docs/integrations/providers/huggingface"}, "HuggingFaceHubEmbeddings": {"Hugging Face": "https://python.langchain.com/docs/integrations/providers/huggingface"}, "CharacterTextSplitter": {"Hugging Face": "https://python.langchain.com/docs/integrations/providers/huggingface", "OpenAI": "https://python.langchain.com/docs/integrations/providers/openai", "Vectara Text Generation": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_text_generation", "Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "Vectorstore Agent": "https://python.langchain.com/docs/integrations/toolkits/vectorstore", "LanceDB": "https://python.langchain.com/docs/integrations/vectorstores/lancedb", "Weaviate": "https://python.langchain.com/docs/integrations/vectorstores/weaviate", "Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/deeplake", "Vectara": "https://python.langchain.com/docs/integrations/vectorstores/vectara", "Redis": "https://python.langchain.com/docs/integrations/vectorstores/redis", "PGVector": "https://python.langchain.com/docs/integrations/vectorstores/pgvector", "Rockset": "https://python.langchain.com/docs/integrations/vectorstores/rockset", "Zilliz": 
"https://python.langchain.com/docs/integrations/vectorstores/zilliz", "SingleStoreDB": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb", "Annoy": "https://python.langchain.com/docs/integrations/vectorstores/annoy", "Typesense": "https://python.langchain.com/docs/integrations/vectorstores/typesense", "Tair": "https://python.langchain.com/docs/integrations/vectorstores/tair", "Chroma": "https://python.langchain.com/docs/integrations/vectorstores/chroma", "Alibaba Cloud OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch", "StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks", "Clarifai": "https://python.langchain.com/docs/integrations/vectorstores/clarifai", "scikit-learn": "https://python.langchain.com/docs/integrations/vectorstores/sklearn", "DocArrayHnswSearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_hnsw", "MyScale": "https://python.langchain.com/docs/integrations/vectorstores/myscale", "ClickHouse Vector Search": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse", "Qdrant": "https://python.langchain.com/docs/integrations/vectorstores/qdrant", "Tigris": "https://python.langchain.com/docs/integrations/vectorstores/tigris", "AwaDB": "https://python.langchain.com/docs/integrations/vectorstores/awadb", "Supabase (Postgres)": "https://python.langchain.com/docs/integrations/vectorstores/supabase", "OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/opensearch", "Pinecone": "https://python.langchain.com/docs/integrations/vectorstores/pinecone", "Azure Cognitive Search": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch", "Cassandra": "https://python.langchain.com/docs/integrations/vectorstores/cassandra", "Milvus": "https://python.langchain.com/docs/integrations/vectorstores/milvus", "ElasticSearch": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch", "Marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo", "DocArrayInMemorySearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory", "pg_embedding": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding", "FAISS": "https://python.langchain.com/docs/integrations/vectorstores/faiss", "AnalyticDB": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb", "Hologres": "https://python.langchain.com/docs/integrations/vectorstores/hologres", "MongoDB Atlas": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas", "Figma": "https://python.langchain.com/docs/integrations/document_loaders/figma", "Psychic": "https://python.langchain.com/docs/integrations/document_loaders/psychic", "Manifest": "https://python.langchain.com/docs/integrations/llms/manifest", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching", "Data Augmented Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/data_augmented_question_answering", "Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": "https://python.langchain.com/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake", "Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/code/code-analysis-deeplake", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": 
"https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Split by tokens ": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/split_by_token", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Loading from LangChainHub": "https://python.langchain.com/docs/modules/chains/how_to/from_hub", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Vector store-augmented text generation": "https://python.langchain.com/docs/modules/chains/additional/vector_db_text_generation", "Hypothetical Document Embeddings": "https://python.langchain.com/docs/modules/chains/additional/hyde"}, "DocugamiLoader": {"Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami"}, "GutenbergLoader": {"Gutenberg": "https://python.langchain.com/docs/integrations/document_loaders/gutenberg"}, "AzureBlobStorageContainerLoader": {"Azure Blob Storage": "https://python.langchain.com/docs/integrations/providers/azure_blob_storage", "Azure Blob Storage Container": "https://python.langchain.com/docs/integrations/document_loaders/azure_blob_storage_container"}, "AzureBlobStorageFileLoader": {"Azure Blob Storage": "https://python.langchain.com/docs/integrations/providers/azure_blob_storage", "Azure Blob Storage File": "https://python.langchain.com/docs/integrations/document_loaders/azure_blob_storage_file"}, "WikipediaLoader": {"Wikipedia": "https://python.langchain.com/docs/integrations/document_loaders/wikipedia"}, "ConfluenceLoader": {"Confluence": "https://python.langchain.com/docs/integrations/document_loaders/confluence"}, "Predibase": {"Predibase": "https://python.langchain.com/docs/integrations/llms/predibase"}, "Beam": {"Beam": "https://python.langchain.com/docs/integrations/llms/beam"}, "GrobidParser": {"Grobid": "https://python.langchain.com/docs/integrations/document_loaders/grobid"}, "GenericLoader": {"Grobid": "https://python.langchain.com/docs/integrations/document_loaders/grobid", "Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio", "Source Code": "https://python.langchain.com/docs/integrations/document_loaders/source_code"}, "Typesense": {"Typesense": "https://python.langchain.com/docs/integrations/vectorstores/typesense"}, "Hologres": {"Hologres": "https://python.langchain.com/docs/integrations/vectorstores/hologres"}, "AI21": {"AI21 Labs": "https://python.langchain.com/docs/integrations/providers/ai21", "AI21": "https://python.langchain.com/docs/integrations/llms/ai21"}, "WandbCallbackHandler": {"Weights & Biases": "https://python.langchain.com/docs/integrations/providers/wandb_tracking"}, "ObsidianLoader": {"Obsidian": "https://python.langchain.com/docs/integrations/document_loaders/obsidian"}, "create_sql_agent": {"CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database"}, "SQLDatabaseToolkit": {"CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database", "Use ToolKits with OpenAI Functions": 
"https://python.langchain.com/docs/modules/agents/how_to/use_toolkits_with_openai_functions"}, "OpenAIModerationChain": {"OpenAI": "https://python.langchain.com/docs/integrations/providers/openai"}, "ChatGPTLoader": {"OpenAI": "https://python.langchain.com/docs/integrations/providers/openai", "ChatGPT Data": "https://python.langchain.com/docs/integrations/document_loaders/chatgpt_loader"}, "AZLyricsLoader": {"AZLyrics": "https://python.langchain.com/docs/integrations/document_loaders/azlyrics"}, "ToMarkdownLoader": {"2Markdown": "https://python.langchain.com/docs/integrations/document_loaders/tomarkdown"}, "GitLoader": {"Git": "https://python.langchain.com/docs/integrations/document_loaders/git"}, "InfinoCallbackHandler": {"Infino": "https://python.langchain.com/docs/integrations/providers/infino"}, "MlflowAIGateway": {"MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway"}, "MlflowAIGatewayEmbeddings": {"MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway"}, "ChatMLflowAIGateway": {"MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway"}, "SingleStoreDB": {"SingleStoreDB": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb"}, "Tigris": {"Tigris": "https://python.langchain.com/docs/integrations/vectorstores/tigris"}, "S3DirectoryLoader": {"AWS S3 Directory": "https://python.langchain.com/docs/integrations/document_loaders/aws_s3_directory"}, "TransformChain": {"Rebuff": "https://python.langchain.com/docs/integrations/providers/rebuff", "Transformation": "https://python.langchain.com/docs/modules/chains/foundational/transformation"}, "SQLDatabase": {"Rebuff": "https://python.langchain.com/docs/integrations/providers/rebuff", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database"}, "Weaviate": {"Weaviate": "https://python.langchain.com/docs/integrations/vectorstores/weaviate", "Weaviate self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/weaviate_self_query"}, "AirbyteJSONLoader": {"Airbyte": "https://python.langchain.com/docs/integrations/providers/airbyte", "Airbyte JSON": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_json"}, "TelegramChatFileLoader": {"Telegram": "https://python.langchain.com/docs/integrations/document_loaders/telegram"}, "TelegramChatApiLoader": {"Telegram": "https://python.langchain.com/docs/integrations/providers/telegram"}, "PredictionGuard": {"Prediction Guard": "https://python.langchain.com/docs/integrations/llms/predictionguard"}, "NotionDirectoryLoader": {"Notion DB": "https://python.langchain.com/docs/integrations/providers/notion", "Notion DB 1/2": "https://python.langchain.com/docs/integrations/document_loaders/notion", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA"}, "NotionDBLoader": {"Notion DB": "https://python.langchain.com/docs/integrations/providers/notion", "Notion DB 2/2": "https://python.langchain.com/docs/integrations/document_loaders/notiondb"}, "MWDumpLoader": {"MediaWikiDump": "https://python.langchain.com/docs/integrations/document_loaders/mediawikidump"}, "BraveSearchLoader": {"Brave Search": "https://python.langchain.com/docs/integrations/document_loaders/brave_search"}, "StarRocks": {"StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks"}, "DatadogLogsLoader": 
{"Datadog Logs": "https://python.langchain.com/docs/integrations/document_loaders/datadog_logs"}, "ApifyDatasetLoader": {"Apify": "https://python.langchain.com/docs/integrations/providers/apify", "Apify Dataset": "https://python.langchain.com/docs/integrations/document_loaders/apify_dataset"}, "NLPCloud": {"NLPCloud": "https://python.langchain.com/docs/integrations/providers/nlpcloud", "NLP Cloud": "https://python.langchain.com/docs/integrations/llms/nlpcloud"}, "Milvus": {"Milvus": "https://python.langchain.com/docs/integrations/vectorstores/milvus", "Zilliz": "https://python.langchain.com/docs/integrations/vectorstores/zilliz"}, "Qdrant": {"Qdrant": "https://python.langchain.com/docs/integrations/vectorstores/qdrant", "Qdrant self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/qdrant_self_query"}, "GitbookLoader": {"GitBook": "https://python.langchain.com/docs/integrations/document_loaders/gitbook"}, "OpenSearchVectorSearch": {"OpenSearch": "https://python.langchain.com/docs/integrations/vectorstores/opensearch"}, "Pinecone": {"Pinecone": "https://python.langchain.com/docs/integrations/vectorstores/pinecone", "Self-querying with Pinecone": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/pinecone"}, "UnstructuredFileLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured", "Unstructured File": "https://python.langchain.com/docs/integrations/document_loaders/unstructured_file"}, "SelfHostedPipeline": {"Runhouse": "https://python.langchain.com/docs/integrations/llms/runhouse"}, "MlflowCallbackHandler": {"MLflow": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking"}, "SpreedlyLoader": {"Spreedly": "https://python.langchain.com/docs/integrations/document_loaders/spreedly"}, "OpenLLM": {"OpenLLM": "https://python.langchain.com/docs/integrations/llms/openllm"}, "SearxSearchResults": {"SearxNG Search API": "https://python.langchain.com/docs/integrations/providers/searx"}, "Modal": {"Modal": "https://python.langchain.com/docs/integrations/llms/modal"}, "IFixitLoader": {"iFixit": "https://python.langchain.com/docs/integrations/document_loaders/ifixit"}, "AlephAlpha": {"Aleph Alpha": "https://python.langchain.com/docs/integrations/llms/aleph_alpha"}, "PipelineAI": {"PipelineAI": "https://python.langchain.com/docs/integrations/llms/pipelineai_example"}, "LlamaCpp": {"Llama.cpp": "https://python.langchain.com/docs/integrations/providers/llamacpp", "Llama-cpp": "https://python.langchain.com/docs/integrations/llms/llamacpp", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research"}, "AwaDB": {"AwaDB": "https://python.langchain.com/docs/integrations/vectorstores/awadb"}, "ArxivLoader": {"Arxiv": "https://python.langchain.com/docs/integrations/document_loaders/arxiv"}, "Anyscale": {"Anyscale": "https://python.langchain.com/docs/integrations/llms/anyscale"}, "StripeLoader": {"Stripe": "https://python.langchain.com/docs/integrations/document_loaders/stripe"}, "BlackboardLoader": {"Blackboard": "https://python.langchain.com/docs/integrations/document_loaders/blackboard"}, "WhatsAppChatLoader": {"WhatsApp": "https://python.langchain.com/docs/integrations/providers/whatsapp", "WhatsApp Chat": "https://python.langchain.com/docs/integrations/document_loaders/whatsapp_chat"}, "LanceDB": {"LanceDB": 
"https://python.langchain.com/docs/integrations/vectorstores/lancedb"}, "OneDriveLoader": {"Microsoft OneDrive": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_onedrive"}, "AnalyticDB": {"AnalyticDB": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb"}, "YoutubeLoader": {"YouTube": "https://python.langchain.com/docs/integrations/providers/youtube", "YouTube transcripts": "https://python.langchain.com/docs/integrations/document_loaders/youtube_transcript"}, "GoogleApiYoutubeLoader": {"YouTube": "https://python.langchain.com/docs/integrations/providers/youtube"}, "PromptLayerOpenAI": {"PromptLayer": "https://python.langchain.com/docs/integrations/providers/promptlayer", "PromptLayer OpenAI": "https://python.langchain.com/docs/integrations/llms/promptlayer_openai"}, "DeepLake": {"Deep Lake": "https://python.langchain.com/docs/integrations/providers/deeplake", "Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/deeplake", "Question answering over a group chat messages using Activeloop's DeepLake": "https://python.langchain.com/docs/use_cases/question_answering/semantic-search-over-chat", "Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": "https://python.langchain.com/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake", "Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/code/code-analysis-deeplake", "DeepLake self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/deeplake_self_query"}, "WhyLabsCallbackHandler": {"WhyLabs": "https://python.langchain.com/docs/integrations/providers/whylabs_profiling"}, "FlyteCallbackHandler": {"Flyte": "https://python.langchain.com/docs/integrations/providers/flyte"}, "ManifestWrapper": {"Hazy Research": "https://python.langchain.com/docs/integrations/providers/hazy_research", "Manifest": "https://python.langchain.com/docs/integrations/llms/manifest"}, "Marqo": {"Marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo"}, "IMSDbLoader": {"IMSDb": "https://python.langchain.com/docs/integrations/document_loaders/imsdb"}, "PGVector": {"PGVector": "https://python.langchain.com/docs/integrations/vectorstores/pgvector"}, "DeepInfra": {"DeepInfra": "https://python.langchain.com/docs/integrations/llms/deepinfra_example"}, "AgentExecutor": {"Jina": "https://python.langchain.com/docs/integrations/providers/jina", "PowerBI Dataset Agent": "https://python.langchain.com/docs/integrations/toolkits/powerbi", "SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database", "Running Agent as an Iterator": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter"}, "RedditPostsLoader": {"Reddit": "https://python.langchain.com/docs/integrations/document_loaders/reddit"}, "TrelloLoader": {"Trello": "https://python.langchain.com/docs/integrations/document_loaders/trello"}, "AtlasDB": {"AtlasDB": "https://python.langchain.com/docs/integrations/providers/atlas", "Atlas": "https://python.langchain.com/docs/integrations/vectorstores/atlas"}, "SKLearnVectorStore": {"scikit-learn": "https://python.langchain.com/docs/integrations/vectorstores/sklearn"}, "EverNoteLoader": {"EverNote": "https://python.langchain.com/docs/integrations/document_loaders/evernote"}, "TwitterTweetLoader": {"Twitter": "https://python.langchain.com/docs/integrations/document_loaders/twitter"}, "DiscordChatLoader": 
{"Discord": "https://python.langchain.com/docs/integrations/document_loaders/discord"}, "RedisCache": {"Redis": "https://python.langchain.com/docs/integrations/providers/redis", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "RedisSemanticCache": {"Redis": "https://python.langchain.com/docs/integrations/providers/redis", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "Redis": {"Redis": "https://python.langchain.com/docs/integrations/vectorstores/redis"}, "SelfQueryRetriever": {"Chroma": "https://python.langchain.com/docs/integrations/providers/chroma", "Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "Weaviate self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/weaviate_self_query", "Chroma self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/chroma_self_query", "DeepLake self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/deeplake_self_query", "Self-querying with Pinecone": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/pinecone", "Self-querying with MyScale": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/myscale_self_query", "Qdrant self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/qdrant_self_query"}, "ClearMLCallbackHandler": {"ClearML": "https://python.langchain.com/docs/integrations/providers/clearml_tracking"}, "StdOutCallbackHandler": {"ClearML": "https://python.langchain.com/docs/integrations/providers/clearml_tracking", "Async API": "https://python.langchain.com/docs/modules/agents/how_to/async_agent", "Custom chain": "https://python.langchain.com/docs/modules/chains/how_to/custom_chain"}, "Cohere": {"Cohere": "https://python.langchain.com/docs/integrations/llms/cohere"}, "SlackDirectoryLoader": {"Slack": "https://python.langchain.com/docs/integrations/document_loaders/slack"}, "LLMContentHandler": {"SageMaker Endpoint": "https://python.langchain.com/docs/integrations/providers/sagemaker_endpoint", "SageMakerEndpoint": "https://python.langchain.com/docs/integrations/llms/sagemaker"}, "ContentHandlerBase": {"SageMaker Endpoint": "https://python.langchain.com/docs/integrations/providers/sagemaker_endpoint"}, "HNLoader": {"Hacker News": "https://python.langchain.com/docs/integrations/document_loaders/hacker_news"}, "Annoy": {"Annoy": "https://python.langchain.com/docs/integrations/vectorstores/annoy"}, "GCSDirectoryLoader": {"Google Cloud Storage": "https://python.langchain.com/docs/integrations/providers/google_cloud_storage", "Google Cloud Storage Directory": "https://python.langchain.com/docs/integrations/document_loaders/google_cloud_storage_directory"}, "GCSFileLoader": {"Google Cloud Storage": "https://python.langchain.com/docs/integrations/providers/google_cloud_storage", "Google Cloud Storage File": "https://python.langchain.com/docs/integrations/document_loaders/google_cloud_storage_file"}, "ArthurCallbackHandler": {"Arthur": "https://python.langchain.com/docs/integrations/providers/arthur_tracking"}, "DuckDBLoader": {"DuckDB": "https://python.langchain.com/docs/integrations/document_loaders/duckdb"}, "Petals": {"Petals": 
"https://python.langchain.com/docs/integrations/llms/petals_example"}, "MomentoCache": {"Momento": "https://python.langchain.com/docs/integrations/providers/momento", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "AirtableLoader": {"Airtable": "https://python.langchain.com/docs/integrations/document_loaders/airtable"}, "Clarifai": {"Clarifai": "https://python.langchain.com/docs/integrations/llms/clarifai"}, "BigQueryLoader": {"Google BigQuery": "https://python.langchain.com/docs/integrations/document_loaders/google_bigquery"}, "RoamLoader": {"Roam": "https://python.langchain.com/docs/integrations/document_loaders/roam"}, "Portkey": {"Log, Trace, and Monitor Langchain LLM Calls": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index"}, "Vectara": {"Vectara": "https://python.langchain.com/docs/integrations/vectorstores/vectara", "Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "Vectara Text Generation": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_text_generation"}, "VectaraRetriever": {"Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat"}, "load_qa_chain": {"Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat", "SageMakerEndpoint": "https://python.langchain.com/docs/integrations/llms/sagemaker", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs"}, "CONDENSE_QUESTION_PROMPT": {"Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat"}, "load_qa_with_sources_chain": {"Chat Over Documents with Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat"}, "create_csv_agent": {"CSV Agent": "https://python.langchain.com/docs/integrations/toolkits/csv"}, "create_xorbits_agent": {"Xorbits Agent": "https://python.langchain.com/docs/integrations/toolkits/xorbits"}, "JiraToolkit": {"Jira": "https://python.langchain.com/docs/integrations/toolkits/jira"}, "JiraAPIWrapper": {"Jira": "https://python.langchain.com/docs/integrations/toolkits/jira"}, "create_spark_dataframe_agent": {"Spark Dataframe Agent": "https://python.langchain.com/docs/integrations/toolkits/spark"}, "PyPDFLoader": {"Document Comparison": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit", "MergeDocLoader": "https://python.langchain.com/docs/integrations/document_loaders/merge_doc_loader", "Question answering over a group chat messages using Activeloop's DeepLake": "https://python.langchain.com/docs/use_cases/question_answering/semantic-search-over-chat"}, "create_python_agent": {"Python Agent": "https://python.langchain.com/docs/integrations/toolkits/python"}, "PythonREPLTool": {"Python Agent": "https://python.langchain.com/docs/integrations/toolkits/python"}, "create_pbi_agent": {"PowerBI Dataset Agent": 
"https://python.langchain.com/docs/integrations/toolkits/powerbi"}, "PowerBIToolkit": {"PowerBI Dataset Agent": "https://python.langchain.com/docs/integrations/toolkits/powerbi"}, "PowerBIDataset": {"PowerBI Dataset Agent": "https://python.langchain.com/docs/integrations/toolkits/powerbi"}, "AzureCognitiveServicesToolkit": {"Azure Cognitive Services Toolkit": "https://python.langchain.com/docs/integrations/toolkits/azure_cognitive_services"}, "Requests": {"Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla", "Evaluating an OpenAPI Chain": "https://python.langchain.com/docs/guides/evaluation/examples/openapi_eval", "OpenAPI chain": "https://python.langchain.com/docs/modules/chains/additional/openapi"}, "APIOperation": {"Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla"}, "NLAToolkit": {"Natural Language APIs": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla", "Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval"}, "GmailToolkit": {"Gmail Toolkit": "https://python.langchain.com/docs/integrations/toolkits/gmail"}, "build_resource_service": {"Gmail Toolkit": "https://python.langchain.com/docs/integrations/toolkits/gmail"}, "create_json_agent": {"JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json"}, "JsonToolkit": {"JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json"}, "JsonSpec": {"JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json", "OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi"}, "GitHubToolkit": {"GitHub": "https://python.langchain.com/docs/integrations/toolkits/github"}, "GitHubAPIWrapper": {"GitHub": "https://python.langchain.com/docs/integrations/toolkits/github"}, "create_spark_sql_agent": {"Spark SQL Agent": "https://python.langchain.com/docs/integrations/toolkits/spark_sql"}, "SparkSQLToolkit": {"Spark SQL Agent": "https://python.langchain.com/docs/integrations/toolkits/spark_sql"}, "SparkSQL": {"Spark SQL Agent": "https://python.langchain.com/docs/integrations/toolkits/spark_sql"}, "O365Toolkit": {"Office365 Toolkit": "https://python.langchain.com/docs/integrations/toolkits/office365"}, "create_pandas_dataframe_agent": {"Pandas Dataframe Agent": "https://python.langchain.com/docs/integrations/toolkits/pandas", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times"}, "MultionClientTool": {"Multion Toolkit": "https://python.langchain.com/docs/integrations/toolkits/multion"}, "AmadeusToolkit": {"Amadeus Toolkit": "https://python.langchain.com/docs/integrations/toolkits/amadeus"}, "WebBaseLoader": {"Vectorstore Agent": "https://python.langchain.com/docs/integrations/toolkits/vectorstore", "WebBaseLoader": "https://python.langchain.com/docs/integrations/document_loaders/web_base", "MergeDocLoader": "https://python.langchain.com/docs/integrations/document_loaders/merge_doc_loader", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "Running LLMs locally": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "Combine agents and vector stores": 
"https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore"}, "reduce_openapi_spec": {"OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi"}, "RequestsWrapper": {"OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi"}, "create_openapi_agent": {"OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi"}, "OpenAPIToolkit": {"OpenAPI agents": "https://python.langchain.com/docs/integrations/toolkits/openapi"}, "RetrievalQAWithSourcesChain": {"Weaviate": "https://python.langchain.com/docs/integrations/vectorstores/weaviate", "Marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo", "Psychic": "https://python.langchain.com/docs/integrations/document_loaders/psychic", "QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research"}, "MatchingEngine": {"MatchingEngine": "https://python.langchain.com/docs/integrations/vectorstores/matchingengine"}, "OpenAIChat": {"Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/deeplake"}, "InMemoryDocstore": {"Annoy": "https://python.langchain.com/docs/integrations/vectorstores/annoy", "AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Generative Agents in LangChain": "https://python.langchain.com/docs/use_cases/agent_simulations/characters"}, "SpacyTextSplitter": {"Atlas": "https://python.langchain.com/docs/integrations/vectorstores/atlas", "Split by tokens ": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/split_by_token"}, "SentenceTransformerEmbeddings": {"Chroma": "https://python.langchain.com/docs/integrations/vectorstores/chroma"}, "StarRocksSettings": {"StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks"}, "DirectoryLoader": {"StarRocks": "https://python.langchain.com/docs/integrations/vectorstores/starrocks"}, "DocArrayHnswSearch": {"DocArrayHnswSearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_hnsw"}, "Clickhouse": {"ClickHouse Vector Search": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse"}, "SupabaseVectorStore": {"Supabase (Postgres)": "https://python.langchain.com/docs/integrations/vectorstores/supabase"}, "AzureSearch": {"Azure Cognitive Search": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch"}, "Cassandra": {"Cassandra": "https://python.langchain.com/docs/integrations/vectorstores/cassandra"}, "ElasticVectorSearch": {"ElasticSearch": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch", "How to add memory to a Multi-Input Chain": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs"}, "ElasticKnnSearch": {"ElasticSearch": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch"}, "DocArrayInMemorySearch": {"DocArrayInMemorySearch": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory"}, "PGEmbedding": {"pg_embedding": 
"https://python.langchain.com/docs/integrations/vectorstores/pgembedding"}, "MongoDBAtlasVectorSearch": {"MongoDB Atlas": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas"}, "create_metadata_tagger": {"OpenAI Functions Metadata Tagger": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger"}, "ChatPromptTemplate": {"OpenAI Functions Metadata Tagger": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger", "How to add Memory to an LLMChain": "https://python.langchain.com/docs/modules/memory/adding_memory", "Prompt Pipelining": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining", "Using OpenAI functions": "https://python.langchain.com/docs/modules/chains/popular/openai_functions", "Extraction": "https://python.langchain.com/docs/modules/chains/additional/extraction", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa", "Tagging": "https://python.langchain.com/docs/modules/chains/additional/tagging"}, "AsyncHtmlLoader": {"html2text": "https://python.langchain.com/docs/integrations/document_transformers/html2text", "AsyncHtmlLoader": "https://python.langchain.com/docs/integrations/document_loaders/async_html"}, "Html2TextTransformer": {"html2text": "https://python.langchain.com/docs/integrations/document_transformers/html2text"}, "DoctranPropertyExtractor": {"Doctran Extract Properties": "https://python.langchain.com/docs/integrations/document_transformers/doctran_extract_properties"}, "DoctranQATransformer": {"Doctran Interrogate Documents": "https://python.langchain.com/docs/integrations/document_transformers/doctran_interrogate_document"}, "DoctranTextTranslator": {"Doctran Translate Documents": "https://python.langchain.com/docs/integrations/document_transformers/doctran_translate_document"}, "SnowflakeLoader": {"Snowflake": "https://python.langchain.com/docs/integrations/document_loaders/snowflake"}, "AcreomLoader": {"acreom": "https://python.langchain.com/docs/integrations/document_loaders/acreom"}, "UnstructuredCSVLoader": {"CSV": "https://python.langchain.com/docs/integrations/document_loaders/csv"}, "XorbitsLoader": {"Xorbits Pandas DataFrame": "https://python.langchain.com/docs/integrations/document_loaders/xorbits"}, "UnstructuredEmailLoader": {"Email": "https://python.langchain.com/docs/integrations/document_loaders/email"}, "OutlookMessageLoader": {"Email": "https://python.langchain.com/docs/integrations/document_loaders/email"}, "RecursiveUrlLoader": {"Recursive URL Loader": "https://python.langchain.com/docs/integrations/document_loaders/recursive_url_loader"}, "JoplinLoader": {"Joplin": "https://python.langchain.com/docs/integrations/document_loaders/joplin"}, "EtherscanLoader": {"Etherscan Loader": "https://python.langchain.com/docs/integrations/document_loaders/Etherscan"}, "Docx2txtLoader": {"Microsoft Word": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_word"}, "OpenAIWhisperParser": {"Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio"}, "YoutubeAudioLoader": {"Loading documents from a YouTube url": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio"}, "UnstructuredURLLoader": {"URL": "https://python.langchain.com/docs/integrations/document_loaders/url"}, "SeleniumURLLoader": {"URL": 
"https://python.langchain.com/docs/integrations/document_loaders/url"}, "PlaywrightURLLoader": {"URL": "https://python.langchain.com/docs/integrations/document_loaders/url"}, "OpenCityDataLoader": {"Geopandas": "https://python.langchain.com/docs/integrations/document_loaders/geopandas", "Open City Data": "https://python.langchain.com/docs/integrations/document_loaders/open_city_data"}, "GeoDataFrameLoader": {"Geopandas": "https://python.langchain.com/docs/integrations/document_loaders/geopandas"}, "HuggingFaceDatasetLoader": {"HuggingFace dataset": "https://python.langchain.com/docs/integrations/document_loaders/hugging_face_dataset"}, "MHTMLLoader": {"mhtml": "https://python.langchain.com/docs/integrations/document_loaders/mhtml"}, "RocksetLoader": {"Rockset": "https://python.langchain.com/docs/integrations/document_loaders/rockset"}, "ImageCaptionLoader": {"Image captions": "https://python.langchain.com/docs/integrations/document_loaders/image_captions"}, "UnstructuredRSTLoader": {"RST": "https://python.langchain.com/docs/integrations/document_loaders/rst"}, "ConversationBufferWindowMemory": {"Figma": "https://python.langchain.com/docs/integrations/document_loaders/figma", "Meta-Prompt": "https://python.langchain.com/docs/use_cases/autonomous_agents/meta_prompt", "Voice Assistant": "https://python.langchain.com/docs/use_cases/chatbots/voice_assistant", "Create ChatGPT clone": "https://python.langchain.com/docs/modules/agents/how_to/chatgpt_clone"}, "UnstructuredImageLoader": {"Images": "https://python.langchain.com/docs/integrations/document_loaders/image"}, "TencentCOSFileLoader": {"Tencent COS File": "https://python.langchain.com/docs/integrations/document_loaders/tencent_cos_file"}, "TomlLoader": {"TOML": "https://python.langchain.com/docs/integrations/document_loaders/toml"}, "UnstructuredAPIFileLoader": {"Unstructured File": "https://python.langchain.com/docs/integrations/document_loaders/unstructured_file"}, "PsychicLoader": {"Psychic": "https://python.langchain.com/docs/integrations/document_loaders/psychic"}, "TencentCOSDirectoryLoader": {"Tencent COS Directory": "https://python.langchain.com/docs/integrations/document_loaders/tencent_cos_directory"}, "GitHubIssuesLoader": {"GitHub": "https://python.langchain.com/docs/integrations/document_loaders/github"}, "UnstructuredOrgModeLoader": {"Org-mode": "https://python.langchain.com/docs/integrations/document_loaders/org_mode"}, "LarkSuiteDocLoader": {"LarkSuite (FeiShu)": "https://python.langchain.com/docs/integrations/document_loaders/larksuite"}, "load_summarize_chain": {"LarkSuite (FeiShu)": "https://python.langchain.com/docs/integrations/document_loaders/larksuite", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching", "Summarization": "https://python.langchain.com/docs/use_cases/summarization"}, "IuguLoader": {"Iugu": "https://python.langchain.com/docs/integrations/document_loaders/iugu"}, "UnstructuredEPubLoader": {"EPub ": "https://python.langchain.com/docs/integrations/document_loaders/epub"}, "AttributeInfo": {"Docugami": "https://python.langchain.com/docs/integrations/document_loaders/docugami", "Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "Weaviate self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/weaviate_self_query", "Chroma self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/chroma_self_query", 
"DeepLake self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/deeplake_self_query", "Self-querying with Pinecone": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/pinecone", "Self-querying with MyScale": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/myscale_self_query", "Qdrant self-querying ": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/qdrant_self_query"}, "UnstructuredFileIOLoader": {"Google Drive": "https://python.langchain.com/docs/integrations/document_loaders/google_drive"}, "BrowserlessLoader": {"Browserless": "https://python.langchain.com/docs/integrations/document_loaders/browserless"}, "BibtexLoader": {"BibTeX": "https://python.langchain.com/docs/integrations/document_loaders/bibtex"}, "ReadTheDocsLoader": {"ReadTheDocs Documentation": "https://python.langchain.com/docs/integrations/document_loaders/readthedocs_documentation"}, "DataFrameLoader": {"Pandas DataFrame": "https://python.langchain.com/docs/integrations/document_loaders/pandas_dataframe"}, "GoogleApiClient": {"YouTube transcripts": "https://python.langchain.com/docs/integrations/document_loaders/youtube_transcript"}, "NotebookLoader": {"Jupyter Notebook": "https://python.langchain.com/docs/integrations/document_loaders/jupyter_notebook", "Notebook": "https://python.langchain.com/docs/integrations/document_loaders/example_data/notebook"}, "UnstructuredTSVLoader": {"TSV": "https://python.langchain.com/docs/integrations/document_loaders/tsv"}, "UnstructuredODTLoader": {"Open Document Format (ODT)": "https://python.langchain.com/docs/integrations/document_loaders/odt"}, "EmbaasBlobLoader": {"Embaas": "https://python.langchain.com/docs/integrations/document_loaders/embaas"}, "Blob": {"Embaas": "https://python.langchain.com/docs/integrations/document_loaders/embaas"}, "EmbaasLoader": {"Embaas": "https://python.langchain.com/docs/integrations/document_loaders/embaas"}, "UnstructuredXMLLoader": {"XML": "https://python.langchain.com/docs/integrations/document_loaders/xml"}, "MaxComputeLoader": {"Alibaba Cloud MaxCompute": "https://python.langchain.com/docs/integrations/document_loaders/alibaba_cloud_maxcompute"}, "CubeSemanticLoader": {"Cube Semantic Layer": "https://python.langchain.com/docs/integrations/document_loaders/cube_semantic"}, "UnstructuredExcelLoader": {"Microsoft Excel": "https://python.langchain.com/docs/integrations/document_loaders/excel"}, "Language": {"Source Code": "https://python.langchain.com/docs/integrations/document_loaders/source_code"}, "LanguageParser": {"Source Code": "https://python.langchain.com/docs/integrations/document_loaders/source_code"}, "SRTLoader": {"Subtitle": "https://python.langchain.com/docs/integrations/document_loaders/subtitle"}, "MastodonTootsLoader": {"Mastodon": "https://python.langchain.com/docs/integrations/document_loaders/mastodon"}, "MergedDataLoader": {"MergeDocLoader": "https://python.langchain.com/docs/integrations/document_loaders/merge_doc_loader"}, "PySparkDataFrameLoader": {"PySpark DataFrame Loader": "https://python.langchain.com/docs/integrations/document_loaders/pyspark_dataframe"}, "CoNLLULoader": {"CoNLL-U": "https://python.langchain.com/docs/integrations/document_loaders/conll-u"}, "FaunaLoader": {"Fauna": "https://python.langchain.com/docs/integrations/document_loaders/fauna"}, "SitemapLoader": {"Sitemap": "https://python.langchain.com/docs/integrations/document_loaders/sitemap"}, "S3FileLoader": {"AWS S3 
File": "https://python.langchain.com/docs/integrations/document_loaders/aws_s3_file"}, "SimpleSequentialChain": {"Baseten": "https://python.langchain.com/docs/integrations/llms/baseten", "Predibase": "https://python.langchain.com/docs/integrations/llms/predibase", "Replicate": "https://python.langchain.com/docs/integrations/llms/replicate"}, "StochasticAI": {"StochasticAI": "https://python.langchain.com/docs/integrations/llms/stochasticai"}, "ForefrontAI": {"ForefrontAI": "https://python.langchain.com/docs/integrations/llms/forefrontai_example"}, "CerebriumAI": {"CerebriumAI": "https://python.langchain.com/docs/integrations/llms/cerebriumai_example"}, "OctoAIEndpoint": {"OctoAI Compute Service": "https://python.langchain.com/docs/integrations/llms/octoai"}, "Writer": {"Writer": "https://python.langchain.com/docs/integrations/llms/writer"}, "TextGen": {"TextGen": "https://python.langchain.com/docs/integrations/llms/textgen"}, "MosaicML": {"MosaicML": "https://python.langchain.com/docs/integrations/llms/mosaicml"}, "KoboldApiLLM": {"KoboldAI API": "https://python.langchain.com/docs/integrations/llms/koboldai"}, "VertexAI": {"Google Cloud Platform Vertex AI PaLM ": "https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm"}, "Bedrock": {"Bedrock": "https://python.langchain.com/docs/integrations/llms/bedrock"}, "GooseAI": {"GooseAI": "https://python.langchain.com/docs/integrations/llms/gooseai_example"}, "Databricks": {"Databricks": "https://python.langchain.com/docs/integrations/llms/databricks"}, "MapReduceChain": {"Manifest": "https://python.langchain.com/docs/integrations/llms/manifest", "Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "ModelLaboratory": {"Manifest": "https://python.langchain.com/docs/integrations/llms/manifest", "Model Comparison": "https://python.langchain.com/docs/guides/model_laboratory"}, "RELLM": {"RELLM": "https://python.langchain.com/docs/integrations/llms/rellm_experimental"}, "Tongyi": {"Tongyi Qwen": "https://python.langchain.com/docs/integrations/llms/tongyi"}, "InMemoryCache": {"Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "SQLiteCache": {"Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "GPTCache": {"Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "SQLAlchemyCache": {"Caching integrations": "https://python.langchain.com/docs/integrations/llms/llm_caching"}, "AzureMLOnlineEndpoint": {"AzureML Online Endpoint": "https://python.langchain.com/docs/integrations/llms/azureml_endpoint_example"}, "DollyContentFormatter": {"AzureML Online Endpoint": "https://python.langchain.com/docs/integrations/llms/azureml_endpoint_example"}, "load_llm": {"AzureML Online Endpoint": "https://python.langchain.com/docs/integrations/llms/azureml_endpoint_example", "Serialization": "https://python.langchain.com/docs/modules/model_io/models/llms/llm_serialization"}, "AzureMLEndpointClient": {"AzureML Online Endpoint": "https://python.langchain.com/docs/integrations/llms/azureml_endpoint_example"}, "OpenLM": {"OpenLM": "https://python.langchain.com/docs/integrations/llms/openlm"}, "HuggingFaceTextGenInference": {"Huggingface TextGen Inference": "https://python.langchain.com/docs/integrations/llms/huggingface_textgen_inference"}, "ChatGLM": {"ChatGLM": "https://python.langchain.com/docs/integrations/llms/chatglm"}, "tool": {"JSONFormer": 
"https://python.langchain.com/docs/integrations/llms/jsonformer_experimental", "Agent Trajectory": "https://python.langchain.com/docs/modules/evaluation/trajectory/trajectory_eval", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools"}, "JsonFormer": {"JSONFormer": "https://python.langchain.com/docs/integrations/llms/jsonformer_experimental"}, "Replicate": {"Replicate": "https://python.langchain.com/docs/integrations/llms/replicate"}, "tracing_v2_enabled": {"LangSmith Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough"}, "wait_for_all_tracers": {"LangSmith Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough"}, "EvaluatorType": {"LangSmith Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough", "Criteria Evaluation": "https://python.langchain.com/docs/guides/evaluation/string/criteria_eval_chain"}, "RunEvalConfig": {"LangSmith Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough"}, "load_dataset": {"Question Answering Benchmarking: State of the Union Address": "https://python.langchain.com/docs/guides/evaluation/examples/qa_benchmarking_sota", "Question Answering Benchmarking: Paul Graham Essay": "https://python.langchain.com/docs/guides/evaluation/examples/qa_benchmarking_pg", "Evaluating an OpenAPI Chain": "https://python.langchain.com/docs/guides/evaluation/examples/openapi_eval", "Comparing Chain Outputs": "https://python.langchain.com/docs/guides/evaluation/examples/comparisons", "SQL Question Answering Benchmarking: Chinook": "https://python.langchain.com/docs/guides/evaluation/examples/sql_qa_benchmarking_chinook", "Agent VectorDB Question Answering Benchmarking": "https://python.langchain.com/docs/guides/evaluation/examples/agent_vectordb_sota_pg"}, "QAEvalChain": {"Question Answering Benchmarking: State of the Union Address": "https://python.langchain.com/docs/guides/evaluation/examples/qa_benchmarking_sota", "Question Answering Benchmarking: Paul Graham Essay": "https://python.langchain.com/docs/guides/evaluation/examples/qa_benchmarking_pg", "Data Augmented Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/data_augmented_question_answering", "SQL Question Answering Benchmarking: Chinook": "https://python.langchain.com/docs/guides/evaluation/examples/sql_qa_benchmarking_chinook", "Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/question_answering", "Agent VectorDB Question Answering Benchmarking": "https://python.langchain.com/docs/guides/evaluation/examples/agent_vectordb_sota_pg"}, "QAGenerationChain": {"QA Generation": "https://python.langchain.com/docs/guides/evaluation/examples/qa_generation"}, "OpenAPISpec": {"Evaluating an OpenAPI Chain": "https://python.langchain.com/docs/guides/evaluation/examples/openapi_eval", "OpenAPI chain": "https://python.langchain.com/docs/modules/chains/additional/openapi"}, "OpenAPIEndpointChain": {"Evaluating an OpenAPI Chain": "https://python.langchain.com/docs/guides/evaluation/examples/openapi_eval", "OpenAPI chain": "https://python.langchain.com/docs/modules/chains/additional/openapi"}, "QAGenerateChain": {"Data Augmented Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/data_augmented_question_answering"}, "load_evaluator": {"Comparing Chain Outputs": 
"https://python.langchain.com/docs/guides/evaluation/examples/comparisons", "Agent Trajectory": "https://python.langchain.com/docs/modules/evaluation/trajectory/trajectory_eval", "Pairwise Embedding Distance ": "https://python.langchain.com/docs/modules/evaluation/comparison/pairwise_embedding_distance", "Pairwise String Comparison": "https://python.langchain.com/docs/modules/evaluation/comparison/pairwise_string", "Criteria Evaluation": "https://python.langchain.com/docs/guides/evaluation/string/criteria_eval_chain", "QA Correctness": "https://python.langchain.com/docs/modules/evaluation/string/qa", "String Distance": "https://python.langchain.com/docs/modules/evaluation/string/string_distance", "Embedding Distance": "https://python.langchain.com/docs/modules/evaluation/string/embedding_distance", "Evaluating Custom Criteria": "https://python.langchain.com/docs/modules/evaluation/string/criteria_eval_chain"}, "ContextQAEvalChain": {"Question Answering": "https://python.langchain.com/docs/guides/evaluation/examples/question_answering"}, "AgentAction": {"Custom Trajectory Evaluator": "https://python.langchain.com/docs/modules/evaluation/trajectory/custom", "Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "Wikibase Agent": "https://python.langchain.com/docs/use_cases/agents/wikibase_agent", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval", "Multiple callback handlers": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks", "Custom multi-action agent": "https://python.langchain.com/docs/modules/agents/how_to/custom_multi_action_agent", "Custom agent": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent", "Custom agent with tool retrieval": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval"}, "AgentTrajectoryEvaluator": {"Custom Trajectory Evaluator": "https://python.langchain.com/docs/modules/evaluation/trajectory/custom"}, "EmbeddingDistance": {"Pairwise Embedding Distance ": "https://python.langchain.com/docs/modules/evaluation/comparison/pairwise_embedding_distance", "Embedding Distance": "https://python.langchain.com/docs/modules/evaluation/string/embedding_distance"}, "PairwiseStringEvaluator": {"Custom Pairwise Evaluator": "https://python.langchain.com/docs/modules/evaluation/comparison/custom"}, "Criteria": {"Criteria Evaluation": "https://python.langchain.com/docs/guides/evaluation/string/criteria_eval_chain"}, "SQL_PROMPT": {"QA Correctness": "https://python.langchain.com/docs/modules/evaluation/string/qa"}, "StringEvaluator": {"Custom String Evaluator": "https://python.langchain.com/docs/modules/evaluation/string/custom"}, "StringDistance": {"String Distance": "https://python.langchain.com/docs/modules/evaluation/string/string_distance"}, "WriteFileTool": {"AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times"}, "ReadFileTool": {"AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times"}, "AutoGPT": {"AutoGPT": 
"https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt", "!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times"}, "FileChatMessageHistory": {"AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt"}, "BaseLLM": {"BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context"}, "VectorStore": {"BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent"}, "Chain": {"BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Custom chain": "https://python.langchain.com/docs/modules/chains/how_to/custom_chain"}, "BabyAGI": {"BabyAGI User Guide": "https://python.langchain.com/docs/use_cases/autonomous_agents/baby_agi", "BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/autonomous_agents/baby_agi_with_agent"}, "ZeroShotAgent": {"BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent", "Adding Message Memory backed by a database to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db", "How to add Memory to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory", "Custom MRKL agent": "https://python.langchain.com/docs/modules/agents/how_to/custom_mrkl_agent", "Shared memory across agents and tools": "https://python.langchain.com/docs/modules/agents/how_to/sharedmemory_for_tools"}, "BaseTool": {"!pip install bs4": "https://python.langchain.com/docs/use_cases/autonomous_agents/marathon_times", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools", "Combine agents and vector stores": "https://python.langchain.com/docs/modules/agents/how_to/agent_vectorstore", "Custom functions with OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent"}, "MarkdownHeaderTextSplitter": {"Context aware text splitting and QA / Chat": "https://python.langchain.com/docs/use_cases/question_answering/document-context-aware-QA", "MarkdownHeaderTextSplitter": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/markdown_header_metadata"}, "MultiQueryRetriever": {"QA and Chat over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index", "MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever"}, "StringPromptTemplate": {"Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "Wikibase Agent": "https://python.langchain.com/docs/use_cases/agents/wikibase_agent", "SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context", "Custom Agent with PlugIn Retrieval": 
"https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval", "Custom agent with tool retrieval": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval", "Custom prompt template": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/custom_prompt_template"}, "AIPlugin": {"Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai", "Custom Agent with PlugIn Retrieval": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval"}, "AgentOutputParser": {"SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base": "https://python.langchain.com/docs/use_cases/agents/sales_agent_with_context"}, "SteamshipImageGenerationTool": {"Multi-modal outputs: Image & Text": "https://python.langchain.com/docs/use_cases/multi_modal/image_agent"}, "RegexParser": {"Multi-Agent Simulated Environment: Petting Zoo": "https://python.langchain.com/docs/use_cases/agent_simulations/petting_zoo", "Multi-agent decentralized speaker selection": "https://python.langchain.com/docs/use_cases/agent_simulations/multiagent_bidding", "Multi-agent authoritarian speaker selection": "https://python.langchain.com/docs/use_cases/agent_simulations/multiagent_authoritarian", "Simulated Environment: Gymnasium": "https://python.langchain.com/docs/use_cases/agent_simulations/gymnasium"}, "TimeWeightedVectorStoreRetriever": {"Generative Agents in LangChain": "https://python.langchain.com/docs/use_cases/agent_simulations/characters"}, "PydanticOutputParser": {"MultiQueryRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever", "WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research", "Pydantic (JSON) parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/pydantic"}, "WebResearchRetriever": {"WebResearchRetriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/web_research"}, "TokenTextSplitter": {"Split by tokens ": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/split_by_token"}, "SentenceTransformersTokenTextSplitter": {"Split by tokens ": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/split_by_token"}, "NLTKTextSplitter": {"Split by tokens ": "https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/split_by_token"}, "StuffDocumentsChain": {"Lost in the middle: The problem with long contexts": "https://python.langchain.com/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "ChatMessageHistory": {"Adding Message Memory backed by a database to an Agent": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db"}, "BaseMemory": {"How to create a custom Memory class": "https://python.langchain.com/docs/modules/memory/custom_memory"}, "ConversationKGMemory": {"Conversation Knowledge Graph Memory": "https://python.langchain.com/docs/modules/memory/kg"}, "ConversationTokenBufferMemory": {"ConversationTokenBufferMemory": "https://python.langchain.com/docs/modules/memory/token_buffer"}, "SystemMessage": {"How to add Memory to an LLMChain": "https://python.langchain.com/docs/modules/memory/adding_memory", 
"Use ToolKits with OpenAI Functions": "https://python.langchain.com/docs/modules/agents/how_to/use_toolkits_with_openai_functions", "Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "ConversationSummaryBufferMemory": {"ConversationSummaryBufferMemory": "https://python.langchain.com/docs/modules/memory/summary_buffer"}, "BaseCallbackHandler": {"Custom callback handlers": "https://python.langchain.com/docs/modules/callbacks/custom_callbacks", "Multiple callback handlers": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks", "Streaming final agent output": "https://python.langchain.com/docs/modules/agents/how_to/streaming_stdout_final_only"}, "tracing_enabled": {"Multiple callback handlers": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks"}, "get_openai_callback": {"Token counting": "https://python.langchain.com/docs/modules/callbacks/token_counting", "Tracking token usage": "https://python.langchain.com/docs/modules/model_io/models/llms/token_usage_tracking"}, "FileCallbackHandler": {"Logging to file": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler"}, "LLMResult": {"Async callbacks": "https://python.langchain.com/docs/modules/callbacks/async_callbacks"}, "AsyncCallbackHandler": {"Async callbacks": "https://python.langchain.com/docs/modules/callbacks/async_callbacks"}, "StructuredTool": {"Multi-Input Tools": "https://python.langchain.com/docs/modules/agents/tools/multi_input_tool", "Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools"}, "ToolException": {"Defining Custom Tools": "https://python.langchain.com/docs/modules/agents/tools/custom_tools"}, "MoveFileTool": {"Tools as OpenAI Functions": "https://python.langchain.com/docs/modules/agents/tools/tools_as_openai_functions"}, "RequestsGetTool": {"Tool Input Schema": "https://python.langchain.com/docs/modules/agents/tools/tool_input_validation"}, "HumanApprovalCallbackHandler": {"Human-in-the-loop Tool Validation": "https://python.langchain.com/docs/modules/agents/tools/human_approval"}, "DocstoreExplorer": {"ReAct document store": "https://python.langchain.com/docs/modules/agents/agent_types/react_docstore"}, "AgentFinish": {"Running Agent as an Iterator": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter"}, "MessagesPlaceholder": {"Add Memory to OpenAI Functions Agent": "https://python.langchain.com/docs/modules/agents/how_to/add_memory_openai_functions", "Types of `MessagePromptTemplate`": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/msg_prompt_templates"}, "LangChainTracer": {"Async API": "https://python.langchain.com/docs/modules/agents/how_to/async_agent"}, "HumanInputChatModel": {"Human input Chat Model": "https://python.langchain.com/docs/modules/model_io/models/chat/human_input_chat_model"}, "FakeListLLM": {"Fake LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/fake_llm"}, "CallbackManagerForLLMRun": {"Custom LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/custom_llm"}, "LLM": {"Custom LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/custom_llm"}, "HumanInputLLM": {"Human input LLM": "https://python.langchain.com/docs/modules/model_io/models/llms/human_input_llm"}, "RetryWithErrorOutputParser": {"Retry parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/retry"}, "EnumOutputParser": {"Enum parser": 
"https://python.langchain.com/docs/modules/model_io/output_parsers/enum"}, "DatetimeOutputParser": {"Datetime parser": "https://python.langchain.com/docs/modules/model_io/output_parsers/datetime"}, "FewShotPromptTemplate": {"Select by maximal marginal relevance (MMR)": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/mmr", "Select by n-gram overlap": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/ngram_overlap"}, "BaseExampleSelector": {"Custom example selector": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/custom_example_selector"}, "NGramOverlapExampleSelector": {"Select by n-gram overlap": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/ngram_overlap"}, "load_prompt": {"Serialization": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompt_serialization"}, "ChatMessagePromptTemplate": {"Types of `MessagePromptTemplate`": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/msg_prompt_templates"}, "CriteriaEvalChain": {"Evaluating Custom Criteria": "https://python.langchain.com/docs/modules/evaluation/string/criteria_eval_chain"}, "MultiPromptChain": {"Router": "https://python.langchain.com/docs/modules/chains/foundational/router"}, "LLMRouterChain": {"Router": "https://python.langchain.com/docs/modules/chains/foundational/router"}, "EmbeddingRouterChain": {"Router": "https://python.langchain.com/docs/modules/chains/foundational/router"}, "BasePromptTemplate": {"Custom chain": "https://python.langchain.com/docs/modules/chains/how_to/custom_chain"}, "load_chain": {"Serialization": "https://python.langchain.com/docs/modules/chains/how_to/serialization", "Loading from LangChainHub": "https://python.langchain.com/docs/modules/chains/how_to/from_hub"}, "NeptuneGraph": {"Neptune Open Cypher QA Chain": "https://python.langchain.com/docs/modules/chains/additional/neptune_cypher_qa"}, "NeptuneOpenCypherQAChain": {"Neptune Open Cypher QA Chain": "https://python.langchain.com/docs/modules/chains/additional/neptune_cypher_qa"}, "LLMBashChain": {"Bash chain": "https://python.langchain.com/docs/modules/chains/additional/llm_bash"}, "BashOutputParser": {"Bash chain": "https://python.langchain.com/docs/modules/chains/additional/llm_bash"}, "BashProcess": {"Bash chain": "https://python.langchain.com/docs/modules/chains/additional/llm_bash"}, "NebulaGraphQAChain": {"NebulaGraphQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa"}, "NebulaGraph": {"NebulaGraphQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa"}, "PALChain": {"Causal program-aided language (CPAL) chain": "https://python.langchain.com/docs/modules/chains/additional/cpal", "Program-aided language model (PAL) chain": "https://python.langchain.com/docs/modules/chains/additional/pal"}, "ElasticsearchDatabaseChain": {"Elasticsearch database": "https://python.langchain.com/docs/modules/chains/additional/elasticsearch_database"}, "get_openapi_chain": {"OpenAPI calls with OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openapi_openai"}, "create_extraction_chain": {"Extraction": "https://python.langchain.com/docs/modules/chains/additional/extraction"}, "LLMSummarizationCheckerChain": {"Summarization checker chain": "https://python.langchain.com/docs/modules/chains/additional/llm_summarization_checker"}, "KuzuGraph": {"KuzuQAChain": 
"https://python.langchain.com/docs/modules/chains/additional/graph_kuzu_qa"}, "KuzuQAChain": {"KuzuQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_kuzu_qa"}, "create_qa_with_sources_chain": {"Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "create_qa_with_structure_chain": {"Retrieval QA using OpenAI functions": "https://python.langchain.com/docs/modules/chains/additional/openai_functions_retrieval_qa"}, "HugeGraphQAChain": {"HugeGraph QA Chain": "https://python.langchain.com/docs/modules/chains/additional/graph_hugegraph_qa"}, "HugeGraph": {"HugeGraph QA Chain": "https://python.langchain.com/docs/modules/chains/additional/graph_hugegraph_qa"}, "GraphSparqlQAChain": {"GraphSparqlQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_sparql_qa"}, "RdfGraph": {"GraphSparqlQAChain": "https://python.langchain.com/docs/modules/chains/additional/graph_sparql_qa"}, "LLMRequestsChain": {"HTTP request chain": "https://python.langchain.com/docs/modules/chains/additional/llm_requests"}, "LLMSymbolicMathChain": {"LLM Symbolic Math ": "https://python.langchain.com/docs/modules/chains/additional/llm_symbolic_math"}, "create_citation_fuzzy_match_chain": {"Question-Answering Citations": "https://python.langchain.com/docs/modules/chains/additional/qa_citations"}, "BaseRetriever": {"FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare"}, "FlareChain": {"FLARE": "https://python.langchain.com/docs/modules/chains/additional/flare"}, "ArangoGraph": {"ArangoDB QA chain": "https://python.langchain.com/docs/modules/chains/additional/graph_arangodb_qa"}, "ArangoGraphQAChain": {"ArangoDB QA chain": "https://python.langchain.com/docs/modules/chains/additional/graph_arangodb_qa"}, "GraphIndexCreator": {"Graph QA": "https://python.langchain.com/docs/modules/chains/additional/graph_qa"}, "GraphQAChain": {"Graph QA": "https://python.langchain.com/docs/modules/chains/additional/graph_qa"}, "NetworkxEntityGraph": {"Graph QA": "https://python.langchain.com/docs/modules/chains/additional/graph_qa"}, "LLMCheckerChain": {"Self-checking chain": "https://python.langchain.com/docs/modules/chains/additional/llm_checker"}, "GraphCypherQAChain": {"Graph DB QA chain": "https://python.langchain.com/docs/modules/chains/additional/graph_cypher_qa"}, "Neo4jGraph": {"Graph DB QA chain": "https://python.langchain.com/docs/modules/chains/additional/graph_cypher_qa"}} \ No newline at end of file diff --git a/docs/docs_skeleton/docs/guides/evaluation/trajectory/index.mdx b/docs/docs_skeleton/docs/guides/evaluation/trajectory/index.mdx index 2c2d2b2325409..7deadd808ca61 100644 --- a/docs/docs_skeleton/docs/guides/evaluation/trajectory/index.mdx +++ b/docs/docs_skeleton/docs/guides/evaluation/trajectory/index.mdx @@ -1,8 +1,31 @@ --- sidebar_position: 4 +sidebar_label: Trajectory Evaluators --- # Trajectory Evaluators +Trajectory Evaluators in LangChain provide a holistic approach to evaluating an agent. These evaluators assess the full sequence of actions taken by an agent and their corresponding responses, which we refer to as the "trajectory". This comprehensive approach allows you to better gauge an agent's effectiveness and capabilities. + +A Trajectory Evaluator implements the `AgentTrajectoryEvaluator` interface, which requires two main methods: + +- `evaluate_agent_trajectory`: This method synchronously evaluates an agent's trajectory. 
+- `aevaluate_agent_trajectory`: This asynchronous counterpart allows evaluations to be run in parallel for efficiency. + +Both methods accept three main parameters: + +- `input`: The initial input given to the agent. +- `prediction`: The final predicted response from the agent. +- `agent_trajectory`: The intermediate steps taken by the agent, given as a list of tuples. + +These methods return a dictionary with a `score` (a float from 0 to 1 indicating the effectiveness of the agent, where 1 means "most effective" and 0 means "least effective") and `reasoning` (a string explaining the reasoning behind the score). + +You can capture an agent's trajectory by initializing the agent with the `return_intermediate_steps=True` parameter. This lets you collect all intermediate steps without relying on special tracing callbacks. + +Additionally, you can specify the list of valid tools an agent is permitted to use via the `agent_tools` argument. + +For a deeper dive into the implementation and use of Trajectory Evaluators, refer to the sections below. + import DocCardList from "@theme/DocCardList"; - \ No newline at end of file + + From 6a8ab1f52103120b6151f1286022bd9d0f25f554 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Wed, 26 Jul 2023 17:40:38 -0700 Subject: [PATCH 15/15] update --- .../docs/guides/evaluation/trajectory/index.mdx | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/docs/docs_skeleton/docs/guides/evaluation/trajectory/index.mdx b/docs/docs_skeleton/docs/guides/evaluation/trajectory/index.mdx index 7deadd808ca61..825fd630672b8 100644 --- a/docs/docs_skeleton/docs/guides/evaluation/trajectory/index.mdx +++ b/docs/docs_skeleton/docs/guides/evaluation/trajectory/index.mdx @@ -1,10 +1,9 @@ --- sidebar_position: 4 -sidebar_label: Trajectory Evaluators --- # Trajectory Evaluators -Trajectory Evaluators in LangChain provide a holistic approach to evaluating an agent. These evaluators assess the full sequence of actions taken by an agent and their corresponding responses, which we refer to as the "trajectory". This comprehensive approach allows you to better gauge an agent's effectiveness and capabilities. +Trajectory Evaluators in LangChain provide a more holistic approach to evaluating an agent. These evaluators assess the full sequence of actions taken by an agent and their corresponding responses, which we refer to as the "trajectory". This allows you to better measure an agent's effectiveness and capabilities. A Trajectory Evaluator implements the `AgentTrajectoryEvaluator` interface, which requires two main methods: @@ -17,11 +16,9 @@ Both methods accept three main parameters: - `prediction`: The final predicted response from the agent. - `agent_trajectory`: The intermediate steps taken by the agent, given as a list of tuples. -These methods return a dictionary with a `score` (a float from 0 to 1 indicating the effectiveness of the agent, where 1 means "most effective" and 0 means "least effective") and `reasoning` (a string explaining the reasoning behind the score). +These methods return a dictionary. It is recommended that custom implementations return a `score` (a float indicating the effectiveness of the agent) and `reasoning` (a string explaining the reasoning behind the score). -You can capture an agent's trajectory by initializing the agent with the `return_intermediate_steps=True` parameter. This lets you collect all intermediate steps without relying on special tracing callbacks. 
- -Additionally, you can specify the list of valid tools an agent is permitted to use via the `agent_tools` argument. +You can capture an agent's trajectory by initializing the agent with the `return_intermediate_steps=True` parameter. This lets you collect all intermediate steps without relying on special callbacks. For a deeper dive into the implementation and use of Trajectory Evaluators, refer to the sections below.
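To make the `AgentTrajectoryEvaluator` interface documented above concrete, here is a minimal sketch of a custom Trajectory Evaluator. It assumes the `AgentTrajectoryEvaluator` base class from `langchain.evaluation` and its `_evaluate_agent_trajectory` hook (which backs the public `evaluate_agent_trajectory` method); the `ToolUsageEvaluator` name and its scoring heuristic are illustrative only.

```python
from typing import Any, Optional, Sequence, Tuple

from langchain.evaluation import AgentTrajectoryEvaluator
from langchain.schema import AgentAction


class ToolUsageEvaluator(AgentTrajectoryEvaluator):
    """Toy evaluator: score a trajectory by the fraction of steps with a non-empty observation."""

    def _evaluate_agent_trajectory(
        self,
        *,
        prediction: str,
        input: str,
        agent_trajectory: Sequence[Tuple[AgentAction, str]],
        reference: Optional[str] = None,
        **kwargs: Any,
    ) -> dict:
        if not agent_trajectory:
            return {"score": 0.0, "reasoning": "The agent took no intermediate steps."}
        # Count steps whose tool call returned a non-empty observation.
        useful = sum(1 for _, observation in agent_trajectory if str(observation).strip())
        return {
            "score": useful / len(agent_trajectory),
            "reasoning": f"{useful} of {len(agent_trajectory)} steps returned a non-empty observation.",
        }


# The trajectory below is a hand-written placeholder, not real agent output.
evaluator = ToolUsageEvaluator()
result = evaluator.evaluate_agent_trajectory(
    input="What is 3 to the power of 0.235?",
    prediction="3 raised to the power of 0.235 is approximately 1.294.",
    agent_trajectory=[
        (AgentAction(tool="calculator", tool_input="3 ** 0.235", log="Using the calculator"), "1.294"),
    ],
)
print(result)  # e.g. {'score': 1.0, 'reasoning': '1 of 1 steps returned a non-empty observation.'}
```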
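Similarly, a sketch of capturing a trajectory with `return_intermediate_steps=True` and scoring it with a prebuilt evaluator, assuming the LLM-judged evaluator can be loaded as `load_evaluator("trajectory")` with an `agent_tools` keyword, that an OpenAI API key is configured, and that the `llm-math` tool's dependency (`numexpr`) is installed; the question is only a placeholder input.

```python
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.chat_models import ChatOpenAI
from langchain.evaluation import load_evaluator

llm = ChatOpenAI(temperature=0)  # assumes OPENAI_API_KEY is set in the environment
tools = load_tools(["llm-math"], llm=llm)

# return_intermediate_steps=True makes the agent return its trajectory alongside the answer.
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    return_intermediate_steps=True,
)
result = agent("What is 3 to the power of 0.235?")

# "trajectory" is assumed to load the built-in LLM-based trajectory evaluator.
evaluator = load_evaluator("trajectory", llm=llm, agent_tools=tools)
evaluation = evaluator.evaluate_agent_trajectory(
    input=result["input"],
    prediction=result["output"],
    agent_trajectory=result["intermediate_steps"],
)
print(evaluation["score"], evaluation["reasoning"])
```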