makechain.ts
import "dotenv/config";
import { ChatOpenAI } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { RunnableSequence } from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { VectorStoreRetriever } from "@langchain/core/vectorstores";
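
// Prompt that asks the model to rewrite a follow-up question, in light of the chat history, as a standalone question.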
const CONDENSE_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
<chat_history>
{chat_history}
</chat_history>
Follow Up Input: {question}
Standalone question:`;
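
// Prompt that answers the question from the retrieved context, declining questions outside its scope.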
const QA_TEMPLATE = `You are an expert researcher. Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say you don't know. DO NOT try to make up an answer.
If the question is not related to the context or chat history, politely respond that you are tuned to only answer questions that are related to the context.
<context>
{context}
</context>
<chat_history>
{chat_history}
</chat_history>
Question: {question}
Helpful answer in markdown:`;

// Join the page content of the retrieved documents into a single context string.
const combineDocumentsFn = (docs: Document[], separator = "\n\n") => {
  const serializedDocs = docs.map((doc) => doc.pageContent);
  return serializedDocs.join(separator);
};

// Typed chain input avoids implicit `any` on the lambdas below.
type ChainInput = { question: string; chat_history: string };

export const makechain = (retriever: VectorStoreRetriever) => {
  const condenseQuestionPrompt = ChatPromptTemplate.fromTemplate(CONDENSE_TEMPLATE);
  const answerPrompt = ChatPromptTemplate.fromTemplate(QA_TEMPLATE);

  const model = new ChatOpenAI({
    temperature: 0.5, // increase temperature to get more creative answers
    modelName: "gpt-3.5-turbo", // change this to gpt-4 if you have access
  });

  // Condense the chat history and follow-up question into a standalone question.
  const standaloneQuestionChain = RunnableSequence.from([condenseQuestionPrompt, model, new StringOutputParser()]);

  // Retrieve documents for a query and merge them into one context string.
  // The single-argument wrapper removes the need for an unchecked `as any` cast.
  const retrievalChain = retriever.pipe((docs: Document[]) => combineDocumentsFn(docs));
  // Answer the standalone question using the retrieved context and the chat history.
  const answerChain = RunnableSequence.from([
    {
      question: (input: ChainInput) => input.question,
      chat_history: (input: ChainInput) => input.chat_history,
      // Retrieval is keyed off the question text alone.
      context: RunnableSequence.from([(input: ChainInput) => input.question, retrievalChain]),
    },
    answerPrompt,
    model,
    new StringOutputParser(),
  ]);

  // Full pipeline: condense the question first, then answer it against the retrieved context.
  const conversationalRetrievalQAChain = RunnableSequence.from([
    {
      question: standaloneQuestionChain,
      chat_history: (input: ChainInput) => input.chat_history,
    },
    answerChain,
  ]);
return conversationalRetrievalQAChain;
};
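
For context, here is a minimal sketch of how makechain could be wired up and invoked. The MemoryVectorStore, the sample text, and the question below are illustrative assumptions, not part of this file; any LangChain vector store's asRetriever() would work in their place.

// example.ts -- hypothetical usage sketch, not part of this repo
import "dotenv/config";
import { OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { makechain } from "./makechain";

const main = async () => {
  // Assumed sample corpus; replace with your own documents and vector store.
  const vectorStore = await MemoryVectorStore.fromTexts(
    ["LangChain is a framework for building applications with LLMs."],
    [{ id: 1 }],
    new OpenAIEmbeddings()
  );

  const chain = makechain(vectorStore.asRetriever());

  // First turn: no chat history yet, so pass an empty string.
  const answer = await chain.invoke({
    question: "What is LangChain?",
    chat_history: "",
  });
  console.log(answer);
};

main().catch(console.error);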