diff --git a/langchain/src/agents/tests/agent.int.test.ts b/langchain/src/agents/tests/agent.int.test.ts index 063877c197f8..a7be55be7606 100644 --- a/langchain/src/agents/tests/agent.int.test.ts +++ b/langchain/src/agents/tests/agent.int.test.ts @@ -46,12 +46,12 @@ test("Pass runnable to agent executor", async () => { input: "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?", }); - console.log( - { - res, - }, - "Pass runnable to agent executor" - ); + // console.log( + // { + // res, + // }, + // "Pass runnable to agent executor" + // ); expect(res.output).not.toEqual(""); expect(res.output).not.toEqual("Agent stopped due to max iterations."); }); @@ -115,12 +115,12 @@ test("Custom output parser", async () => { input: "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?", }); - console.log( - { - res, - }, - "Custom output parser" - ); + // console.log( + // { + // res, + // }, + // "Custom output parser" + // ); expect(res.output).toEqual("We did it!"); }); @@ -160,12 +160,12 @@ test("Add a fallback method", async () => { const res = await executor.invoke({ input: "Is the sky blue? 
Response with a concise answer", }); - console.log( - { - res, - }, - "Pass runnable to agent executor" - ); + // console.log( + // { + // res, + // }, + // "Pass runnable to agent executor" + // ); expect(res.output).not.toEqual(""); expect(res.output).not.toEqual("Agent stopped due to max iterations."); }); @@ -177,10 +177,10 @@ test("Run agent with an abort signal", async () => { const executor = await initializeAgentExecutorWithOptions(tools, model, { agentType: "zero-shot-react-description", }); - console.log("Loaded agent."); + // console.log("Loaded agent."); const input = `What is 3 to the fourth power?`; - console.log(`Executing with input "${input}"...`); + // console.log(`Executing with input "${input}"...`); const controller = new AbortController(); await expect(() => { @@ -208,7 +208,7 @@ test("Run agent with incorrect api key should throw error", async () => { const executor = await initializeAgentExecutorWithOptions(tools, model, { agentType: "zero-shot-react-description", }); - console.log("Loaded agent."); + // console.log("Loaded agent."); const input = `Who is Olivia Wilde's boyfriend? 
What is his current age raised to the 0.23 power?`; @@ -246,18 +246,18 @@ test("Run tool web-browser", async () => { agentType: "zero-shot-react-description", returnIntermediateSteps: true, }); - console.log("Loaded agent."); + // console.log("Loaded agent."); const input = `What is the word of the day on merriam webster`; - console.log(`Executing with input "${input}"...`); + // console.log(`Executing with input "${input}"...`); const result = await executor.call({ input }); - console.log( - { - result, - }, - "Run tool web-browser" - ); + // console.log( + // { + // result, + // }, + // "Run tool web-browser" + // ); expect(result.intermediateSteps.length).toBeGreaterThanOrEqual(1); expect(result.intermediateSteps[0].action.tool).toEqual("search"); expect(result.intermediateSteps[1].action.tool).toEqual("web-browser"); @@ -280,10 +280,10 @@ test("Agent can stream", async () => { agentType: "zero-shot-react-description", returnIntermediateSteps: false, }); - console.log("Loaded agent."); + // console.log("Loaded agent."); const input = `What is the word of the day on merriam webster`; - console.log(`Executing with input "${input}"...`); + // console.log(`Executing with input "${input}"...`); const result = await executor.stream({ input }); let streamIters = 0; @@ -291,7 +291,7 @@ test("Agent can stream", async () => { const finalResponse: any = []; for await (const item of result) { streamIters += 1; - console.log("Stream item:", item); + // console.log("Stream item:", item); // each stream does NOT contain the previous steps, // because returnIntermediateSteps is false so we // push each new stream item to the array. 
@@ -340,10 +340,10 @@ test("Agent can stream with chat messages", async () => { returnIntermediateSteps: true, memory, }); - console.log("Loaded agent."); + // console.log("Loaded agent."); const input = `What is the word of the day on merriam webster, and what is the sum of all letter indices (relative to the english alphabet) in the word?`; - console.log(`Executing with input "${input}"...`); + // console.log(`Executing with input "${input}"...`); const result = await executor.stream({ input, chat_history: [] }); let streamIters = 0; @@ -351,14 +351,14 @@ test("Agent can stream with chat messages", async () => { let finalResponse: any; for await (const item of result) { streamIters += 1; - console.log("Stream item:", item); + // console.log("Stream item:", item); // each stream contains the previous steps // because returnIntermediateSteps is true), // so we can overwrite on each stream. finalResponse = item; } - console.log("__finalResponse__", finalResponse); + // console.log("__finalResponse__", finalResponse); expect("intermediateSteps" in finalResponse).toBeTruthy(); expect("output" in finalResponse).toBeTruthy(); diff --git a/langchain/src/agents/tests/create_openai_functions_agent.int.test.ts b/langchain/src/agents/tests/create_openai_functions_agent.int.test.ts index e3def88fbd87..471419f1b8d8 100644 --- a/langchain/src/agents/tests/create_openai_functions_agent.int.test.ts +++ b/langchain/src/agents/tests/create_openai_functions_agent.int.test.ts @@ -31,7 +31,7 @@ test("createOpenAIFunctionsAgent works", async () => { input, }); - console.log(result); + // console.log(result); expect(result.input).toBe(input); expect(typeof result.output).toBe("string"); @@ -69,7 +69,7 @@ test("createOpenAIFunctionsAgent can stream log", async () => { if (!firstChunkTime) { firstChunkTime = new Date().getTime(); } - console.log(chunk); + // console.log(chunk); chunks.push(chunk); } @@ -77,13 +77,13 @@ test("createOpenAIFunctionsAgent can stream log", async () => { throw 
new Error("firstChunkTime was not set."); } - console.log(chunks.length); - console.log(); - console.log( - "Time to complete after first chunk:", - new Date().getTime() - firstChunkTime - ); + // console.log(chunks.length); + // console.log(); + // console.log( + // "Time to complete after first chunk:", + // new Date().getTime() - firstChunkTime + // ); - console.log(chunks.length); + // console.log(chunks.length); expect(chunks.length).toBeGreaterThan(1); }); diff --git a/langchain/src/agents/tests/create_openai_tools_agent.int.test.ts b/langchain/src/agents/tests/create_openai_tools_agent.int.test.ts index f1e134a41040..472e60a28d17 100644 --- a/langchain/src/agents/tests/create_openai_tools_agent.int.test.ts +++ b/langchain/src/agents/tests/create_openai_tools_agent.int.test.ts @@ -31,7 +31,7 @@ test("createOpenAIToolsAgent works", async () => { input, }); - console.log(result); + // console.log(result); expect(result.input).toBe(input); expect(typeof result.output).toBe("string"); @@ -63,8 +63,10 @@ test.skip("createOpenAIToolsAgent tracing works when it is nested in a lambda", const noop = RunnableLambda.from(() => "hi").withConfig({ runName: "nested_testing", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const noopRes = await noop.invoke({ nested: "nested" }); - console.log(noopRes); + // console.log(noopRes); const res = await agentExecutor.invoke({ input, }); @@ -76,7 +78,7 @@ test.skip("createOpenAIToolsAgent tracing works when it is nested in a lambda", callbacks: [new LangChainTracer({ projectName: "langchainjs-tracing-2" })], }); - console.log(result); + // console.log(result); expect(result.input).toBe(input); expect(typeof result.output).toBe("string"); diff --git a/langchain/src/agents/tests/create_react_agent.int.test.ts b/langchain/src/agents/tests/create_react_agent.int.test.ts index bdc2fbe1bfae..6621c4a4f042 100644 --- a/langchain/src/agents/tests/create_react_agent.int.test.ts +++ 
b/langchain/src/agents/tests/create_react_agent.int.test.ts @@ -27,7 +27,7 @@ test("createReactAgent works", async () => { input, }); - console.log(result); + // console.log(result); expect(result.input).toBe(input); expect(typeof result.output).toBe("string"); diff --git a/langchain/src/agents/tests/create_structured_chat_agent.int.test.ts b/langchain/src/agents/tests/create_structured_chat_agent.int.test.ts index f10c3b3023ae..d1fd1ae895e5 100644 --- a/langchain/src/agents/tests/create_structured_chat_agent.int.test.ts +++ b/langchain/src/agents/tests/create_structured_chat_agent.int.test.ts @@ -29,7 +29,7 @@ test("createStructuredChatAgent works", async () => { input, }); - console.log(result); + // console.log(result); expect(result.input).toBe(input); expect(typeof result.output).toBe("string"); diff --git a/langchain/src/agents/tests/create_tool_calling_agent.int.test.ts b/langchain/src/agents/tests/create_tool_calling_agent.int.test.ts index fabca7b77f31..08a4397ead4b 100644 --- a/langchain/src/agents/tests/create_tool_calling_agent.int.test.ts +++ b/langchain/src/agents/tests/create_tool_calling_agent.int.test.ts @@ -50,7 +50,7 @@ test("createToolCallingAgent works", async () => { input, }); - console.log(result); + // console.log(result); expect(result.input).toBe(input); expect(typeof result.output).toBe("string"); @@ -91,9 +91,9 @@ test("createToolCallingAgent stream events works", async () => { for await (const event of eventStream) { const eventType = event.event; - console.log("Event type: ", eventType); + // console.log("Event type: ", eventType); if (eventType === "on_chat_model_stream") { - console.log("Content: ", event.data); + // console.log("Content: ", event.data); } } }); @@ -132,9 +132,9 @@ test("createToolCallingAgent stream events works for multiple turns", async () = for await (const event of eventStream) { const eventType = event.event; - console.log("Event type: ", eventType); + // console.log("Event type: ", eventType); if (eventType 
=== "on_chat_model_stream") { - console.log("Content: ", event.data); + // console.log("Content: ", event.data); } } }); diff --git a/langchain/src/agents/tests/create_xml_agent.int.test.ts b/langchain/src/agents/tests/create_xml_agent.int.test.ts index a336207ccab5..3f54a09b3b29 100644 --- a/langchain/src/agents/tests/create_xml_agent.int.test.ts +++ b/langchain/src/agents/tests/create_xml_agent.int.test.ts @@ -27,7 +27,7 @@ test("createXmlAgent works", async () => { input, }); - console.log(result); + // console.log(result); expect(result.input).toBe(input); expect(typeof result.output).toBe("string"); diff --git a/langchain/src/agents/tests/react.test.ts b/langchain/src/agents/tests/react.test.ts index b26eb6ed6d27..e5ed69e79cd1 100644 --- a/langchain/src/agents/tests/react.test.ts +++ b/langchain/src/agents/tests/react.test.ts @@ -9,7 +9,7 @@ test("ReActSingleInputOutputParser identifies final answer", async () => { }); const parsedOutput = await outputParser.parse(finalAnswerText); - console.log(parsedOutput); + // console.log(parsedOutput); expect(parsedOutput).toHaveProperty("returnValues"); expect( "returnValues" in parsedOutput && parsedOutput.returnValues.output @@ -28,7 +28,7 @@ test("ReActSingleInputOutputParser identifies agent actions", async () => { }); const parsedOutput = await outputParser.parse(finalAnswerText); - console.log(parsedOutput); + // console.log(parsedOutput); expect(parsedOutput).toHaveProperty("toolInput"); expect(parsedOutput).toHaveProperty("tool"); }); diff --git a/langchain/src/agents/tests/runnable.int.test.ts b/langchain/src/agents/tests/runnable.int.test.ts index 2c3cfba5fa1e..aceb973c59b1 100644 --- a/langchain/src/agents/tests/runnable.int.test.ts +++ b/langchain/src/agents/tests/runnable.int.test.ts @@ -58,14 +58,16 @@ test("Runnable variant", async () => { tools, }); - console.log("Loaded agent executor"); + // console.log("Loaded agent executor"); const query = "What is the weather in New York?"; - console.log(`Calling 
agent executor with query: ${query}`); + // console.log(`Calling agent executor with query: ${query}`); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await executor.invoke({ input: query, }); - console.log(result); + // console.log(result); }); test("Runnable variant executor astream log", async () => { @@ -112,16 +114,16 @@ test("Runnable variant executor astream log", async () => { tools, }); - console.log("Loaded agent executor"); + // console.log("Loaded agent executor"); const query = "What is the weather in New York?"; - console.log(`Calling agent executor with query: ${query}`); + // console.log(`Calling agent executor with query: ${query}`); const stream = await executor.streamLog({ input: query, }); let hasSeenLLMLogPatch = false; for await (const chunk of stream) { - console.log(JSON.stringify(chunk)); + // console.log(JSON.stringify(chunk)); if (chunk.ops[0].path.includes("ChatOpenAI")) { hasSeenLLMLogPatch = true; } diff --git a/langchain/src/agents/tests/structured_output_runnables.int.test.ts b/langchain/src/agents/tests/structured_output_runnables.int.test.ts index 819601a555b5..30c18b59c4df 100644 --- a/langchain/src/agents/tests/structured_output_runnables.int.test.ts +++ b/langchain/src/agents/tests/structured_output_runnables.int.test.ts @@ -30,7 +30,7 @@ const structuredOutputParser = ( const functionCall = output.additional_kwargs.function_call; const name = functionCall?.name as string; const inputs = functionCall?.arguments as string; - console.log(functionCall); + // console.log(functionCall); const jsonInput = JSON.parse(inputs); @@ -113,12 +113,14 @@ test("Pass custom structured output parsers", async () => { tools: [retrieverTool], }); /** Call invoke on the agent */ + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await executor.invoke({ input: "what did the president say about kentaji brown jackson", }); - 
console.log({ - res, - }); + // console.log({ + // res, + // }); /** { res: { diff --git a/langchain/src/agents/toolkits/tests/conversational_retrieval.int.test.ts b/langchain/src/agents/toolkits/tests/conversational_retrieval.int.test.ts index 972580874271..c904798e041c 100644 --- a/langchain/src/agents/toolkits/tests/conversational_retrieval.int.test.ts +++ b/langchain/src/agents/toolkits/tests/conversational_retrieval.int.test.ts @@ -27,20 +27,28 @@ test("Test ConversationalRetrievalAgent", async () => { const executor = await createConversationalRetrievalAgent(llm, tools, { verbose: true, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await executor.invoke({ input: "Hi, I'm Bob!", }); - console.log(result); + // console.log(result); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result2 = await executor.invoke({ input: "What's my name?", }); - console.log(result2); + // console.log(result2); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result3 = await executor.invoke({ input: "How much money did LangCo make in July?", }); - console.log(result3); + // console.log(result3); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result4 = await executor.invoke({ input: "How about in August?", }); - console.log(result4); + // console.log(result4); }); diff --git a/langchain/src/cache/tests/file_system.int.test.ts b/langchain/src/cache/tests/file_system.int.test.ts index cba25513c272..40a176dcdae5 100644 --- a/langchain/src/cache/tests/file_system.int.test.ts +++ b/langchain/src/cache/tests/file_system.int.test.ts @@ -35,7 +35,7 @@ describe("Test LocalFileCache", () => { const response1 = await model.invoke("What is something random?"); const response2 = await model.invoke("What is something random?"); 
expect(response1).not.toBeUndefined(); - console.log(response1, response2); + // console.log(response1, response2); expect(response1).toEqual(response2); }); }); diff --git a/langchain/src/chains/openai_functions/tests/create_runnable_chains.int.test.ts b/langchain/src/chains/openai_functions/tests/create_runnable_chains.int.test.ts index 2cd4a26bc292..d1ec025a75e9 100644 --- a/langchain/src/chains/openai_functions/tests/create_runnable_chains.int.test.ts +++ b/langchain/src/chains/openai_functions/tests/create_runnable_chains.int.test.ts @@ -90,7 +90,7 @@ test("createStructuredOutputRunnable works with Zod", async () => { description: "My name's John Doe and I'm 30 years old. My favorite kind of food are chocolate chip cookies.", }); - console.log(response); + // console.log(response); expect("person" in response).toBe(true); expect("name" in response.person).toBe(true); expect("age" in response.person).toBe(true); @@ -118,7 +118,7 @@ test("createStructuredOutputRunnable works with JSON schema", async () => { description: "My name's John Doe and I'm 30 years old. My favorite kind of food are chocolate chip cookies.", }); - console.log(response); + // console.log(response); expect("name" in response).toBe(true); expect("age" in response).toBe(true); }); @@ -145,7 +145,7 @@ test("createOpenAIFnRunnable works", async () => { description: "My name's John Doe and I'm 30 years old. 
My favorite kind of food are chocolate chip cookies.", }); - console.log(response); + // console.log(response); expect("name" in response).toBe(true); expect("age" in response).toBe(true); }); @@ -171,7 +171,7 @@ test("createOpenAIFnRunnable works with multiple functions", async () => { const response = await runnable.invoke({ question: "What's the weather like in Berkeley CA?", }); - console.log(response); + // console.log(response); expect("state" in response).toBe(true); expect("city" in response).toBe(true); }); diff --git a/langchain/src/chains/openai_functions/tests/openapi.int.test.ts b/langchain/src/chains/openai_functions/tests/openapi.int.test.ts index a8f3bccc9b39..47b9c1257c9b 100644 --- a/langchain/src/chains/openai_functions/tests/openapi.int.test.ts +++ b/langchain/src/chains/openai_functions/tests/openapi.int.test.ts @@ -105,10 +105,12 @@ test("OpenAPI chain with a provided full spec", async () => { { llm: new ChatOpenAI({ modelName: "gpt-4-0613", temperature: 0 }) } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await chain.run( `What are some options for a men's large blue button down shirt` ); - console.log(result); + // console.log(result); }); test("OpenAPI chain with yml spec from a URL", async () => { @@ -118,8 +120,10 @@ test("OpenAPI chain with yml spec from a URL", async () => { llm: new ChatOpenAI({ modelName: "gpt-4-0613", temperature: 0 }), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await chain.run(`What's today's comic?`); - console.log(result); + // console.log(result); }); test("OpenAPI chain with yml spec from a URL with a path parameter", async () => { @@ -129,16 +133,20 @@ test("OpenAPI chain with yml spec from a URL with a path parameter", async () => llm: new ChatOpenAI({ modelName: "gpt-4-0613", temperature: 0 }), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + 
// @ts-expect-error unused var const result = await chain.run(`What comic has id 2184?`); - console.log(result); + // console.log(result); }); test("OpenAPI chain with yml spec from a URL requiring a POST request", async () => { const chain = await createOpenAPIChain("https://api.speak.com/openapi.yaml", { llm: new ChatOpenAI({ modelName: "gpt-4-0613", temperature: 0 }), }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await chain.run(`How would you say no thanks in Russian?`); - console.log(result); + // console.log(result); }); test("OpenAPI chain with a longer spec and tricky query required params", async () => { @@ -150,8 +158,10 @@ test("OpenAPI chain with a longer spec and tricky query required params", async }, } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await chain.run( "Can you find and explain some articles about the intersection of AI and VR?" ); - console.log(result); + // console.log(result); }); diff --git a/langchain/src/chains/openai_functions/tests/structured_output.int.test.ts b/langchain/src/chains/openai_functions/tests/structured_output.int.test.ts index 73e2860b0d70..7574d3959a86 100644 --- a/langchain/src/chains/openai_functions/tests/structured_output.int.test.ts +++ b/langchain/src/chains/openai_functions/tests/structured_output.int.test.ts @@ -41,7 +41,7 @@ test("structured output chain", async () => { ); const response = await chain.call({ inputText: "A man, living in Poland." 
}); - console.log("response", response); + // console.log("response", response); expect(response.person).toHaveProperty("name"); expect(response.person).toHaveProperty("surname"); diff --git a/langchain/src/chains/openai_functions/tests/structured_output.test.ts b/langchain/src/chains/openai_functions/tests/structured_output.test.ts index c5bb69555062..340869b16f70 100644 --- a/langchain/src/chains/openai_functions/tests/structured_output.test.ts +++ b/langchain/src/chains/openai_functions/tests/structured_output.test.ts @@ -52,7 +52,7 @@ test("structured output parser", async () => { }, ]); - console.log("result", result); + // console.log("result", result); expect(result.name).toEqual("Anna"); expect(result.surname).toEqual("Kowalska"); @@ -106,7 +106,7 @@ test("structured output parser with Zod input", async () => { }, ]); - console.log("result", result); + // console.log("result", result); expect(result.name).toEqual("Anna"); expect(result.surname).toEqual("Kowalska"); diff --git a/langchain/src/chains/question_answering/tests/load.int.test.ts b/langchain/src/chains/question_answering/tests/load.int.test.ts index 86d71935ffd2..c33902aa3cdc 100644 --- a/langchain/src/chains/question_answering/tests/load.int.test.ts +++ b/langchain/src/chains/question_answering/tests/load.int.test.ts @@ -15,8 +15,10 @@ test("Test loadQAStuffChain", async () => { new Document({ pageContent: "bar" }), new Document({ pageContent: "baz" }), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ input_documents: docs, question: "Whats up" }); - console.log({ res }); + // console.log({ res }); }); test("Test loadQAMapReduceChain", async () => { @@ -27,8 +29,10 @@ test("Test loadQAMapReduceChain", async () => { new Document({ pageContent: "bar" }), new Document({ pageContent: "baz" }), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await 
chain.call({ input_documents: docs, question: "Whats up" }); - console.log({ res }); + // console.log({ res }); }); test("Test loadQARefineChain", async () => { @@ -38,9 +42,11 @@ test("Test loadQARefineChain", async () => { new Document({ pageContent: "Harrison went to Harvard." }), new Document({ pageContent: "Ankush went to Princeton." }), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ input_documents: docs, question: "Where did Harrison go to college?", }); - console.log({ res }); + // console.log({ res }); }); diff --git a/langchain/src/chains/router/tests/multi_prompt.int.test.ts b/langchain/src/chains/router/tests/multi_prompt.int.test.ts index 37a0ba2f0816..5f66009a71a1 100644 --- a/langchain/src/chains/router/tests/multi_prompt.int.test.ts +++ b/langchain/src/chains/router/tests/multi_prompt.int.test.ts @@ -48,8 +48,10 @@ Here is a question: input: "Who was the first president of the United States?", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const [{ text: result1 }, { text: result2 }, { text: result3 }] = await Promise.all([testPromise1, testPromise2, testPromise3]); - console.log(result1, result2, result3); + // console.log(result1, result2, result3); }); diff --git a/langchain/src/chains/router/tests/multi_retrieval_qa.int.test.ts b/langchain/src/chains/router/tests/multi_retrieval_qa.int.test.ts index 7fea39cb2440..8e813704f6a2 100644 --- a/langchain/src/chains/router/tests/multi_retrieval_qa.int.test.ts +++ b/langchain/src/chains/router/tests/multi_retrieval_qa.int.test.ts @@ -87,11 +87,17 @@ test("Test MultiPromptChain", async () => { }); const [ + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var { text: result1, sourceDocuments: sourceDocuments1 }, + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var { text: result2, 
sourceDocuments: sourceDocuments2 }, + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var { text: result3, sourceDocuments: sourceDocuments3 }, ] = await Promise.all([testPromise1, testPromise2, testPromise3]); - console.log({ sourceDocuments1, sourceDocuments2, sourceDocuments3 }); - console.log({ result1, result2, result3 }); + // console.log({ sourceDocuments1, sourceDocuments2, sourceDocuments3 }); + // console.log({ result1, result2, result3 }); }); diff --git a/langchain/src/chains/summarization/tests/load.int.test.ts b/langchain/src/chains/summarization/tests/load.int.test.ts index 1a5dcf8e5cc0..fc59e67bf44e 100644 --- a/langchain/src/chains/summarization/tests/load.int.test.ts +++ b/langchain/src/chains/summarization/tests/load.int.test.ts @@ -11,8 +11,10 @@ test("Test loadSummzationChain stuff", async () => { new Document({ pageContent: "bar" }), new Document({ pageContent: "baz" }), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ input_documents: docs, question: "Whats up" }); - console.log({ res }); + // console.log({ res }); }); test("Test loadSummarizationChain map_reduce", async () => { @@ -23,8 +25,10 @@ test("Test loadSummarizationChain map_reduce", async () => { new Document({ pageContent: "bar" }), new Document({ pageContent: "baz" }), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ input_documents: docs, question: "Whats up" }); - console.log({ res }); + // console.log({ res }); }); test("Test loadSummarizationChain refine", async () => { @@ -35,6 +39,8 @@ test("Test loadSummarizationChain refine", async () => { new Document({ pageContent: "bar" }), new Document({ pageContent: "baz" }), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ input_documents: 
docs, question: "Whats up" }); - console.log({ res }); + // console.log({ res }); }); diff --git a/langchain/src/chains/tests/api_chain.int.test.ts b/langchain/src/chains/tests/api_chain.int.test.ts index 8c420b5e71d5..ce71eaf51b9b 100644 --- a/langchain/src/chains/tests/api_chain.int.test.ts +++ b/langchain/src/chains/tests/api_chain.int.test.ts @@ -47,19 +47,23 @@ test("Test APIChain", async () => { }; const chain = new APIChain(apiChainInput); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ question: "Search for notes containing langchain", }); - console.log({ res }); + // console.log({ res }); }); test("Test APIChain fromLLMAndApiDocs", async () => { // This test doesn't work as well with earlier models const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const chain = APIChain.fromLLMAndAPIDocs(model, OPEN_METEO_DOCS); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ question: "What is the weather like right now in Munich, Germany in degrees Farenheit?", }); - console.log({ res }); + // console.log({ res }); }); diff --git a/langchain/src/chains/tests/combine_docs_chain.int.test.ts b/langchain/src/chains/tests/combine_docs_chain.int.test.ts index e9fffb206f31..56dbfbcc7049 100644 --- a/langchain/src/chains/tests/combine_docs_chain.int.test.ts +++ b/langchain/src/chains/tests/combine_docs_chain.int.test.ts @@ -17,8 +17,10 @@ test("Test StuffDocumentsChain", async () => { new Document({ pageContent: "bar" }), new Document({ pageContent: "baz" }), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.invoke({ context: docs }); - console.log({ res }); + // console.log({ res }); }); test("Test MapReduceDocumentsChain with QA chain", async () => { @@ -31,11 +33,13 @@ test("Test MapReduceDocumentsChain with QA chain", async 
() => { new Document({ pageContent: "harrison went to harvard" }), new Document({ pageContent: "ankush went to princeton" }), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ input_documents: docs, question: "Where did harrison go to college", }); - console.log({ res }); + // console.log({ res }); }); test("Test RefineDocumentsChain with QA chain", async () => { @@ -48,9 +52,11 @@ test("Test RefineDocumentsChain with QA chain", async () => { new Document({ pageContent: "harrison went to harvard" }), new Document({ pageContent: "ankush went to princeton" }), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.invoke({ input_documents: docs, question: "Where did harrison go to college", }); - console.log({ res }); + // console.log({ res }); }); diff --git a/langchain/src/chains/tests/combine_docs_chain.test.ts b/langchain/src/chains/tests/combine_docs_chain.test.ts index 12956080839f..b27722fafe1b 100644 --- a/langchain/src/chains/tests/combine_docs_chain.test.ts +++ b/langchain/src/chains/tests/combine_docs_chain.test.ts @@ -48,7 +48,7 @@ test("Test MapReduceDocumentsChain", async () => { input_documents: docs, question: "Where did harrison go to college", }); - console.log({ res }); + // console.log({ res }); expect(res).toEqual({ text: "a final answer", @@ -73,7 +73,7 @@ test("Test MapReduceDocumentsChain with content above maxTokens and intermediate input_documents: docs, question: "Is the letter c present in the document", }); - console.log({ res }); + // console.log({ res }); expect(res).toEqual({ text: "a final answer", @@ -93,6 +93,8 @@ test("Test RefineDocumentsChain", async () => { expect(chain.inputKeys).toEqual(["input_documents"]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.run(docs); - console.log({ res }); + // 
console.log({ res }); }); diff --git a/langchain/src/chains/tests/constitutional_chain.int.test.ts b/langchain/src/chains/tests/constitutional_chain.int.test.ts index f27fc9483d33..6a50b98b3427 100644 --- a/langchain/src/chains/tests/constitutional_chain.int.test.ts +++ b/langchain/src/chains/tests/constitutional_chain.int.test.ts @@ -27,8 +27,10 @@ test("Test ConstitutionalChain", async () => { ], }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await constitutionalChain.invoke({ question: "What is the meaning of life?", }); - console.log({ res }); + // console.log({ res }); }); diff --git a/langchain/src/chains/tests/conversation_chain.int.test.ts b/langchain/src/chains/tests/conversation_chain.int.test.ts index 2fa64ed25498..1319b67282c9 100644 --- a/langchain/src/chains/tests/conversation_chain.int.test.ts +++ b/langchain/src/chains/tests/conversation_chain.int.test.ts @@ -5,6 +5,8 @@ import { ConversationChain } from "../conversation.js"; test("Test ConversationChain", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); const chain = new ConversationChain({ llm: model }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ input: "my favorite color" }); - console.log({ res }); + // console.log({ res }); }); diff --git a/langchain/src/chains/tests/conversational_retrieval_chain.int.test.ts b/langchain/src/chains/tests/conversational_retrieval_chain.int.test.ts index 5bf2cdb1182e..85f750965e72 100644 --- a/langchain/src/chains/tests/conversational_retrieval_chain.int.test.ts +++ b/langchain/src/chains/tests/conversational_retrieval_chain.int.test.ts @@ -16,8 +16,10 @@ test("Test ConversationalRetrievalQAChain from LLM", async () => { model, vectorStore.asRetriever() ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await 
chain.call({ question: "foo", chat_history: "bar" }); - console.log({ res }); + // console.log({ res }); }); test("Test ConversationalRetrievalQAChain from LLM with flag option to return source", async () => { @@ -116,7 +118,7 @@ test("Test ConversationalRetrievalQAChain from LLM with override default prompts chat_history: "bar", }); expect(res.text).toContain("I am learning from Aliens"); - console.log({ res }); + // console.log({ res }); }); test("Test ConversationalRetrievalQAChain from LLM with a chat model", async () => { @@ -150,7 +152,7 @@ test("Test ConversationalRetrievalQAChain from LLM with a chat model", async () chat_history: "bar", }); expect(res.text).toContain("I am learning from Aliens"); - console.log({ res }); + // console.log({ res }); }); test("Test ConversationalRetrievalQAChain from LLM with a map reduce chain", async () => { @@ -173,12 +175,14 @@ test("Test ConversationalRetrievalQAChain from LLM with a map reduce chain", asy }, } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ question: "What is better programming Language Python or Javascript ", chat_history: "bar", }); - console.log({ res }); + // console.log({ res }); }); test("Test ConversationalRetrievalQAChain from LLM without memory", async () => { @@ -207,14 +211,16 @@ test("Test ConversationalRetrievalQAChain from LLM without memory", async () => chat_history: "", }); - console.log({ res }); + // console.log({ res }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res2 = await chain.call({ question: "What are they made out of?", chat_history: question + res.text, }); - console.log({ res2 }); + // console.log({ res2 }); }); test("Test ConversationalRetrievalQAChain from LLM with a chat model without memory", async () => { @@ -244,14 +250,16 @@ test("Test ConversationalRetrievalQAChain from LLM with a chat model without mem chat_history: "", }); 
- console.log({ res }); + // console.log({ res }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res2 = await chain.call({ question: "What are they made out of?", chat_history: question + res.text, }); - console.log({ res2 }); + // console.log({ res2 }); }); test("Test ConversationalRetrievalQAChain from LLM with memory", async () => { @@ -279,17 +287,21 @@ test("Test ConversationalRetrievalQAChain from LLM with memory", async () => { }), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ question: "What is the powerhouse of the cell?", }); - console.log({ res }); + // console.log({ res }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res2 = await chain.call({ question: "What are they made out of?", }); - console.log({ res2 }); + // console.log({ res2 }); }); test("Test ConversationalRetrievalQAChain from LLM with a chat model and memory", async () => { @@ -319,17 +331,21 @@ test("Test ConversationalRetrievalQAChain from LLM with a chat model and memory" }), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ question: "What is the powerhouse of the cell?", }); - console.log({ res }); + // console.log({ res }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res2 = await chain.call({ question: "What are they made out of?", }); - console.log({ res2 }); + // console.log({ res2 }); }); test("Test ConversationalRetrievalQAChain from LLM with deprecated history syntax", async () => { @@ -358,12 +374,14 @@ test("Test ConversationalRetrievalQAChain from LLM with deprecated history synta chat_history: [], }); - console.log({ res }); + // console.log({ res }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // 
@ts-expect-error unused var const res2 = await chain.call({ question: "What are they made out of?", chat_history: [[question, res.text]], }); - console.log({ res2 }); + // console.log({ res2 }); }); diff --git a/langchain/src/chains/tests/llm_chain.int.test.ts b/langchain/src/chains/tests/llm_chain.int.test.ts index 77089d0de66c..75de16ca7925 100644 --- a/langchain/src/chains/tests/llm_chain.int.test.ts +++ b/langchain/src/chains/tests/llm_chain.int.test.ts @@ -15,8 +15,10 @@ test("Test OpenAI", async () => { inputVariables: ["foo"], }); const chain = new LLMChain({ prompt, llm: model }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ foo: "my favorite color" }); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with timeout", async () => { @@ -41,8 +43,10 @@ test("Test run method", async () => { inputVariables: ["foo"], }); const chain = new LLMChain({ prompt, llm: model }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.run("my favorite color"); - console.log({ res }); + // console.log({ res }); }); test("Test run method", async () => { @@ -56,8 +60,10 @@ test("Test run method", async () => { llm: model, memory: new BufferMemory(), }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.run("my favorite color"); - console.log({ res }); + // console.log({ res }); }); test("Test memory + cancellation", async () => { @@ -105,8 +111,10 @@ test("Test apply", async () => { inputVariables: ["foo"], }); const chain = new LLMChain({ prompt, llm: model }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.apply([{ foo: "my favorite color" }]); - console.log({ res }); + // console.log({ res }); }); test("Test LLMChain with ChatOpenAI", async () => { @@ 
-118,8 +126,10 @@ test("Test LLMChain with ChatOpenAI", async () => { humanMessagePrompt, ]); const chatChain = new LLMChain({ llm: model, prompt: chatPromptTemplate }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chatChain.call({ product: "colorful socks" }); - console.log({ res }); + // console.log({ res }); }); test("Test passing a runnable to an LLMChain", async () => { diff --git a/langchain/src/chains/tests/openai_moderation.int.test.ts b/langchain/src/chains/tests/openai_moderation.int.test.ts index b48c6718f6c7..8afe045d1698 100644 --- a/langchain/src/chains/tests/openai_moderation.int.test.ts +++ b/langchain/src/chains/tests/openai_moderation.int.test.ts @@ -45,7 +45,7 @@ test("OpenAI Moderation Test in non-english language", async () => { "La kato ( Felis catus ) estas hejma specio de malgranda karnovora mamulo." ); - console.log(results[0].category_scores); + // console.log(results[0].category_scores); expect(results[0].category_scores["self-harm"]).toBeGreaterThan(0.01); // We can have a more granular control over moderation this way. It's not conclusive, but it's better than nothing if the language is not english. 
}); diff --git a/langchain/src/chains/tests/retrieval_chain.int.test.ts b/langchain/src/chains/tests/retrieval_chain.int.test.ts index 022d16d34c48..8088aeeede9c 100644 --- a/langchain/src/chains/tests/retrieval_chain.int.test.ts +++ b/langchain/src/chains/tests/retrieval_chain.int.test.ts @@ -70,7 +70,7 @@ test("Retrieval chain with a history aware retriever and a followup", async () = chat_history: "", }); - console.log(results); + // console.log(results); expect(results.answer.toLowerCase()).toContain("mitochondria"); const results2 = await chain.invoke({ @@ -81,6 +81,6 @@ test("Retrieval chain with a history aware retriever and a followup", async () = "Assistant: Mitochondria is the powerhouse of the cell", ].join("\n"), }); - console.log(results2); + // console.log(results2); expect(results2.answer.toLowerCase()).toContain("lipids"); }); diff --git a/langchain/src/chains/tests/sql_db_chain.int.test.ts b/langchain/src/chains/tests/sql_db_chain.int.test.ts index 5123f16f7803..0eb415a88b51 100644 --- a/langchain/src/chains/tests/sql_db_chain.int.test.ts +++ b/langchain/src/chains/tests/sql_db_chain.int.test.ts @@ -37,8 +37,10 @@ test("Test SqlDatabaseChain", async () => { expect(chain.prompt).toBe(SQL_SQLITE_PROMPT); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const run = await chain.run("How many users are there?"); - console.log(run); + // console.log(run); await datasource.destroy(); }); @@ -78,7 +80,7 @@ test("Test SqlDatabaseChain with sqlOutputKey", async () => { expect(chain.prompt).toBe(SQL_SQLITE_PROMPT); const run = await chain.call({ query: "How many users are there?" 
}); - console.log(run); + // console.log(run); expect(run).toHaveProperty("sql"); await datasource.destroy(); diff --git a/langchain/src/chains/tests/vector_db_qa_chain.int.test.ts b/langchain/src/chains/tests/vector_db_qa_chain.int.test.ts index c89a6794be90..c80e980d1de8 100644 --- a/langchain/src/chains/tests/vector_db_qa_chain.int.test.ts +++ b/langchain/src/chains/tests/vector_db_qa_chain.int.test.ts @@ -27,8 +27,10 @@ test("Test VectorDBQAChain", async () => { combineDocumentsChain: combineDocsChain, vectorstore: vectorStore, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ query: "What up" }); - console.log({ res }); + // console.log({ res }); }); test("Test VectorDBQAChain from LLM", async () => { @@ -39,8 +41,10 @@ test("Test VectorDBQAChain from LLM", async () => { new OpenAIEmbeddings() ); const chain = VectorDBQAChain.fromLLM(model, vectorStore); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ query: "What up" }); - console.log({ res }); + // console.log({ res }); }); test("Test VectorDBQAChain from LLM with a filter function", async () => { @@ -53,9 +57,11 @@ test("Test VectorDBQAChain from LLM with a filter function", async () => { const chain = VectorDBQAChain.fromLLM(model, vectorStore, { returnSourceDocuments: true, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ query: "What up", filter: (document: Document) => document.metadata.id === 3, }); - console.log({ res, sourceDocuments: res.sourceDocuments }); + // console.log({ res, sourceDocuments: res.sourceDocuments }); }); diff --git a/langchain/src/document_loaders/tests/figma.int.test.ts b/langchain/src/document_loaders/tests/figma.int.test.ts index bb7da95e6887..4926f07c7b66 100644 --- a/langchain/src/document_loaders/tests/figma.int.test.ts +++ 
b/langchain/src/document_loaders/tests/figma.int.test.ts @@ -9,6 +9,8 @@ test.skip("Test FigmaFileLoader", async () => { nodeIds: (process.env.FIGMA_NODE_IDS ?? "").split(","), fileKey: process.env.FIGMA_FILE_KEY!, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const documents = await loader.load(); - console.log(documents[0].pageContent); + // console.log(documents[0].pageContent); }); diff --git a/langchain/src/document_loaders/tests/gitbook.int.test.ts b/langchain/src/document_loaders/tests/gitbook.int.test.ts index 04e8b3f26f2f..60c312dc69b2 100644 --- a/langchain/src/document_loaders/tests/gitbook.int.test.ts +++ b/langchain/src/document_loaders/tests/gitbook.int.test.ts @@ -6,14 +6,18 @@ test("Test GitbookLoader", async () => { "https://docs.gitbook.com/product-tour/navigation" ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const docs = await loader.load(); - console.log("Loaded", docs.length, "Gitbook documents"); + // console.log("Loaded", docs.length, "Gitbook documents"); }); test("Test GitbookLoader with shouldLoadAllPaths", async () => { const loader = new GitbookLoader("https://docs.maildrop.cc", { shouldLoadAllPaths: true, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const docs = await loader.load(); - console.log("Loaded", docs.length, "Gitbook documents"); + // console.log("Loaded", docs.length, "Gitbook documents"); }); diff --git a/langchain/src/document_loaders/tests/github.int.test.ts b/langchain/src/document_loaders/tests/github.int.test.ts index 33a75fe08b45..245b4c668d5c 100644 --- a/langchain/src/document_loaders/tests/github.int.test.ts +++ b/langchain/src/document_loaders/tests/github.int.test.ts @@ -15,7 +15,7 @@ test("Test GithubRepoLoader", async () => { documents.filter((document) => document.metadata.source === "README.md") .length ).toBe(1); - 
console.log(documents[0].pageContent); + // console.log(documents[0].pageContent); }); test("Test ignoreFiles with GithubRepoLoader", async () => { @@ -37,7 +37,7 @@ test("Test ignoreFiles with GithubRepoLoader", async () => { documents.filter((document) => document.metadata.source === "README.md") .length ).toBe(0); - console.log(documents[0].pageContent); + // console.log(documents[0].pageContent); }); test("Test ignorePaths with GithubRepoLoader", async () => { @@ -59,7 +59,7 @@ test("Test ignorePaths with GithubRepoLoader", async () => { documents.filter((document) => document.metadata.source.endsWith(".md")) .length ).toBe(0); - console.log(documents[0].pageContent); + // console.log(documents[0].pageContent); }); test("Test streaming documents from GithubRepoLoader", async () => { diff --git a/langchain/src/document_loaders/tests/notionapi.int.test.ts b/langchain/src/document_loaders/tests/notionapi.int.test.ts index 759e5e556d3a..04bd667d7661 100644 --- a/langchain/src/document_loaders/tests/notionapi.int.test.ts +++ b/langchain/src/document_loaders/tests/notionapi.int.test.ts @@ -9,17 +9,21 @@ test.skip("Test Notion API Loader Page", async () => { auth: process.env.NOTION_INTEGRATION_TOKEN, }, id: process.env.NOTION_PAGE_ID ?? 
"", + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var onDocumentLoaded: (current, total, currentTitle, rootTitle) => { - console.log( - `Loaded ${currentTitle} in ${rootTitle}: (${current}/${total})` - ); + // console.log( + // `Loaded ${currentTitle} in ${rootTitle}: (${current}/${total})` + // ); }, }); const docs = await loader.load(); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const titles = docs.map((doc) => doc.metadata.properties._title); - console.log("Titles:", titles); - console.log(`Loaded ${docs.length} pages`); + // console.log("Titles:", titles); + // console.log(`Loaded ${docs.length} pages`); }); test.skip("Test Notion API Loader Database", async () => { @@ -28,17 +32,21 @@ test.skip("Test Notion API Loader Database", async () => { auth: process.env.NOTION_INTEGRATION_TOKEN, }, id: process.env.NOTION_DATABASE_ID ?? "", + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var onDocumentLoaded: (current, total, currentTitle, rootTitle) => { - console.log( - `Loaded ${currentTitle} in ${rootTitle}: (${current}/${total})` - ); + // console.log( + // `Loaded ${currentTitle} in ${rootTitle}: (${current}/${total})` + // ); }, }); const docs = await loader.load(); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const titles = docs.map((doc) => doc.metadata.properties._title); - console.log("Titles:", titles); - console.log(`Loaded ${docs.length} pages from the database`); + // console.log("Titles:", titles); + // console.log(`Loaded ${docs.length} pages from the database`); }); test.skip("Test Notion API Loader onDocumentLoad", async () => { @@ -60,7 +68,7 @@ test.skip("Test Notion API Loader onDocumentLoad", async () => { expect(onDocumentLoadedCheck.length).toBe(3); - console.log(onDocumentLoadedCheck); + // console.log(onDocumentLoadedCheck); }); test.skip("Test 
docs with empty database page content", async () => { @@ -101,5 +109,5 @@ test.skip("Test docs with empty database page content and propertiesAsHeader ena expect(docs.length).toBe(3); - console.log(docs); + // console.log(docs); }); diff --git a/langchain/src/document_loaders/tests/notionapi.test.ts b/langchain/src/document_loaders/tests/notionapi.test.ts index d3b4ac563d0c..4c2e593dd3f0 100644 --- a/langchain/src/document_loaders/tests/notionapi.test.ts +++ b/langchain/src/document_loaders/tests/notionapi.test.ts @@ -16,8 +16,10 @@ test("Properties Parser", async () => { auth: process.env.NOTION_INTEGRATION_TOKEN, }, id: process.env.NOTION_PAGE_ID ?? "", + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var onDocumentLoaded: (current, total, currentTitle) => { - console.log(`Loaded Page: ${currentTitle} (${current}/${total})`); + // console.log(`Loaded Page: ${currentTitle} (${current}/${total})`); }, }); @@ -81,8 +83,10 @@ test("Get Title (page)", async () => { auth: process.env.NOTION_INTEGRATION_TOKEN, }, id: process.env.NOTION_PAGE_ID ?? "", + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var onDocumentLoaded: (current, total, currentTitle) => { - console.log(`Loaded Page: ${currentTitle} (${current}/${total})`); + // console.log(`Loaded Page: ${currentTitle} (${current}/${total})`); }, }); @@ -108,8 +112,10 @@ test("Get Title (database)", async () => { auth: process.env.NOTION_INTEGRATION_TOKEN, }, id: process.env.NOTION_PAGE_ID ?? 
"", + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var onDocumentLoaded: (current, total, currentTitle) => { - console.log(`Loaded Page: ${currentTitle} (${current}/${total})`); + // console.log(`Loaded Page: ${currentTitle} (${current}/${total})`); }, }); diff --git a/langchain/src/document_loaders/tests/notiondb.int.test.ts b/langchain/src/document_loaders/tests/notiondb.int.test.ts index 98cd03a051cf..18c5cb59247e 100644 --- a/langchain/src/document_loaders/tests/notiondb.int.test.ts +++ b/langchain/src/document_loaders/tests/notiondb.int.test.ts @@ -10,6 +10,8 @@ test.skip("Test NotionDBLoader", async () => { notionApiVersion: "2022-06-28", databaseId: process.env.NOTION_DATABASE_ID!, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const documents = await loader.load(); - console.log({ documents }); + // console.log({ documents }); }); diff --git a/langchain/src/document_loaders/tests/s3.int.test.ts b/langchain/src/document_loaders/tests/s3.int.test.ts index 80ba00e18971..d02db80b92c7 100644 --- a/langchain/src/document_loaders/tests/s3.int.test.ts +++ b/langchain/src/document_loaders/tests/s3.int.test.ts @@ -11,8 +11,10 @@ const fsMock = { ...fs, mkdtempSync: jest.fn().mockReturnValue("tmp/s3fileloader-12345"), mkdirSync: jest.fn().mockImplementation(() => {}), + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var writeFileSync: jest.fn().mockImplementation((path, data) => { - console.log(`Writing "${(data as object).toString()}" to ${path}`); + // console.log(`Writing "${(data as object).toString()}" to ${path}`); }), }; diff --git a/langchain/src/document_loaders/tests/sort_xyz_blockchain.int.test.ts b/langchain/src/document_loaders/tests/sort_xyz_blockchain.int.test.ts index dd0d9b9d8030..3b42e6468c9a 100644 --- a/langchain/src/document_loaders/tests/sort_xyz_blockchain.int.test.ts +++ 
b/langchain/src/document_loaders/tests/sort_xyz_blockchain.int.test.ts @@ -16,8 +16,10 @@ test.skip("Test Blockchain NFT Metadata Loader", async () => { }, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const response = await nftMetadataLoader.load(); - console.log(response); + // console.log(response); }); test.skip("Test Blockchain Latest Transactions Loader", async () => { @@ -31,8 +33,10 @@ test.skip("Test Blockchain Latest Transactions Loader", async () => { }, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const response = await latestTransactionsLoader.load(); - console.log(response); + // console.log(response); }); test.skip("Test Blockchain SQL Query Loader", async () => { @@ -41,6 +45,8 @@ test.skip("Test Blockchain SQL Query Loader", async () => { query: `SELECT * FROM ethereum.nft_metadata WHERE contract_address = '${contractAddress}' AND token_id = 1 LIMIT 1`, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const response = await sqlQueryLoader.load(); - console.log(response); + // console.log(response); }); diff --git a/langchain/src/document_transformers/tests/openai_functions.int.test.ts b/langchain/src/document_transformers/tests/openai_functions.int.test.ts index bffdc9075fa5..b0fb3dbcdcd1 100644 --- a/langchain/src/document_transformers/tests/openai_functions.int.test.ts +++ b/langchain/src/document_transformers/tests/openai_functions.int.test.ts @@ -40,7 +40,7 @@ test("Test OpenAIFunctions MetadataTagger", async () => { }), ]; const newDocuments = await metadataTagger.transformDocuments(documents); - console.log(newDocuments); + // console.log(newDocuments); expect(newDocuments.length).toBe(2); expect(newDocuments[0].metadata.movie_title).toBe("The Bee Movie"); diff --git a/langchain/src/embeddings/tests/fake.test.ts b/langchain/src/embeddings/tests/fake.test.ts index 
b5bb71e7945c..42e16f081cdf 100644 --- a/langchain/src/embeddings/tests/fake.test.ts +++ b/langchain/src/embeddings/tests/fake.test.ts @@ -30,7 +30,7 @@ test("Synthetic similarity", async () => { const embed = new SyntheticEmbeddings({ vectorSize: 2 }); const v1 = await embed.embedQuery("this"); const v2 = await embed.embedQuery("that"); - console.log(v1, v2); + // console.log(v1, v2); expect(v1).toHaveLength(2); expect(v2).toHaveLength(2); expect(v1[0]).toEqual(v2[0]); diff --git a/langchain/src/evaluation/agents/tests/trajectory_eval_chain.int.test.ts b/langchain/src/evaluation/agents/tests/trajectory_eval_chain.int.test.ts index 10949280a1a5..793a017a70ab 100644 --- a/langchain/src/evaluation/agents/tests/trajectory_eval_chain.int.test.ts +++ b/langchain/src/evaluation/agents/tests/trajectory_eval_chain.int.test.ts @@ -37,5 +37,5 @@ test("Test TrajectoryEvalChain", async () => { }); expect(res.score).toBeDefined(); - console.log({ res }); + // console.log({ res }); }); diff --git a/langchain/src/evaluation/comparison/tests/pairwise_eval_chain.int.test.ts b/langchain/src/evaluation/comparison/tests/pairwise_eval_chain.int.test.ts index 18fa36cf213d..c79c9526cf71 100644 --- a/langchain/src/evaluation/comparison/tests/pairwise_eval_chain.int.test.ts +++ b/langchain/src/evaluation/comparison/tests/pairwise_eval_chain.int.test.ts @@ -13,7 +13,7 @@ test("Test PairwiseStringEvalChain", async () => { input: "What is addition?", }); expect(res.score).toBe(0); - console.log({ res }); + // console.log({ res }); }); test("Test LabeledPairwiseStringEvalChain", async () => { @@ -29,7 +29,7 @@ test("Test LabeledPairwiseStringEvalChain", async () => { }); expect(res.score).toBe(0); - console.log(res); + // console.log(res); }); test("Test Custom Criteria", async () => { @@ -54,5 +54,5 @@ test("Test Custom Criteria", async () => { }); expect(res.score).toBe(1); - console.log(res); + // console.log(res); }); diff --git 
a/langchain/src/evaluation/criteria/tests/criteria_eval_chain.int.test.ts b/langchain/src/evaluation/criteria/tests/criteria_eval_chain.int.test.ts index e96442f9f157..41213e6880c9 100644 --- a/langchain/src/evaluation/criteria/tests/criteria_eval_chain.int.test.ts +++ b/langchain/src/evaluation/criteria/tests/criteria_eval_chain.int.test.ts @@ -16,7 +16,7 @@ test("Test CriteriaEvalChain", async () => { }); expect(res.score).toBe(0); - console.log({ res }); + // console.log({ res }); }); test("Test LabeledCriteriaEvalChain with reference", async () => { @@ -32,7 +32,7 @@ test("Test LabeledCriteriaEvalChain with reference", async () => { }); expect(res.score).toBe(1); - console.log(res); + // console.log(res); }); test("Test Custom Criteria", async () => { @@ -50,7 +50,7 @@ test("Test Custom Criteria", async () => { }); expect(res.score).toBe(1); - console.log(res); + // console.log(res); }); test("Test Custom Multiple Criteria", async () => { @@ -69,7 +69,7 @@ test("Test Custom Multiple Criteria", async () => { }); expect(res.score).toBe(0); - console.log(res); + // console.log(res); }); test("Test Using Constitutional Principles ", async () => { @@ -77,14 +77,14 @@ test("Test Using Constitutional Principles ", async () => { criteria: PRINCIPLES.harmful1, }); - console.log("beginning evaluation"); + // console.log("beginning evaluation"); const res = await chain.evaluateStrings({ input: "What do you think of Will?", prediction: "I say that man is a lilly-livered nincompoop. 
I'm going to hurt him!", }); expect(res.score).toBe(1); - console.log(res); + // console.log(res); }); test("Test Configuring the LLM", async () => { @@ -102,7 +102,7 @@ test("Test Configuring the LLM", async () => { }); expect(res.score).toBe(0); - console.log(res); + // console.log(res); }); test("Test Configuring the Prompt", async () => { @@ -133,5 +133,5 @@ Write out your explanation for each criterion, then respond with Y or N on a new }); expect(res.score).toBe(0); - console.log(res); + // console.log(res); }); diff --git a/langchain/src/evaluation/embedding_distance/tests/embedding_distance_eval_chain.int.test.ts b/langchain/src/evaluation/embedding_distance/tests/embedding_distance_eval_chain.int.test.ts index 654c3592e29a..0c7e63c9c265 100644 --- a/langchain/src/evaluation/embedding_distance/tests/embedding_distance_eval_chain.int.test.ts +++ b/langchain/src/evaluation/embedding_distance/tests/embedding_distance_eval_chain.int.test.ts @@ -9,7 +9,7 @@ test("Test Embedding Distance", async () => { reference: "I shan't go", }); - console.log({ res }); + // console.log({ res }); expect(res.score).toBeGreaterThan(0.09); const res1 = await chain.evaluateStrings({ @@ -18,7 +18,7 @@ test("Test Embedding Distance", async () => { }); expect(res1.score).toBeLessThan(0.04); - console.log({ res1 }); + // console.log({ res1 }); }); test("Test Pairwise Embedding Distance", async () => { @@ -30,5 +30,5 @@ test("Test Pairwise Embedding Distance", async () => { }); expect(res.score).toBeGreaterThan(0.09); - console.log({ res }); + // console.log({ res }); }); diff --git a/langchain/src/evaluation/qa/tests/eval_chain.int.test.ts b/langchain/src/evaluation/qa/tests/eval_chain.int.test.ts index 7e04e202e0b1..12741ea15c56 100644 --- a/langchain/src/evaluation/qa/tests/eval_chain.int.test.ts +++ b/langchain/src/evaluation/qa/tests/eval_chain.int.test.ts @@ -17,8 +17,10 @@ test("Test QAEvalChain", async () => { ]; const predictions = [{ result: "ChatGPT" }, { result: "GPT-4" }]; 
+ // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.evaluate(examples, predictions); - console.log({ res }); + // console.log({ res }); }); test("Test QAEvalChain with incorrect input variables", async () => { diff --git a/langchain/src/experimental/chains/tests/violation_of_expectations_chain.int.test.ts b/langchain/src/experimental/chains/tests/violation_of_expectations_chain.int.test.ts index 5f6df0c48e92..5aed5ed7f655 100644 --- a/langchain/src/experimental/chains/tests/violation_of_expectations_chain.int.test.ts +++ b/langchain/src/experimental/chains/tests/violation_of_expectations_chain.int.test.ts @@ -31,11 +31,13 @@ test.skip("should respond with the proper schema", async () => { retriever, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chain.call({ chat_history: dummyMessages, }); - console.log({ - res, - }); + // console.log({ + // res, + // }); }); diff --git a/langchain/src/experimental/generative_agents/tests/generative_agent.int.test.ts b/langchain/src/experimental/generative_agents/tests/generative_agent.int.test.ts index 2f525f7d0d4b..180b22c14a6a 100644 --- a/langchain/src/experimental/generative_agents/tests/generative_agent.int.test.ts +++ b/langchain/src/experimental/generative_agents/tests/generative_agent.int.test.ts @@ -42,7 +42,7 @@ test.skip( status: "looking for a job", }); - console.log("Tommie's first summary:\n", await tommie.getSummary()); + // console.log("Tommie's first summary:\n", await tommie.getSummary()); /* Tommie's first summary: @@ -63,10 +63,10 @@ test.skip( for (const observation of tommieObservations) { await tommie.addMemory(observation, new Date()); } - console.log( - "Tommie's second summary:\n", - await tommie.getSummary({ forceRefresh: true }) - ); + // console.log( + // "Tommie's second summary:\n", + // await tommie.getSummary({ forceRefresh: true }) + // ); /* Tommie's 
second summary: @@ -120,20 +120,22 @@ test.skip( // Let's send Tommie on his way. We'll check in on his summary every few observations to watch him evolve for (let i = 0; i < observations.length; i += 1) { const observation = observations[i]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const [, reaction] = await tommie.generateReaction(observation); - console.log("\x1b[32m", observation, "\x1b[0m", reaction); + // console.log("\x1b[32m", observation, "\x1b[0m", reaction); if ((i + 1) % 20 === 0) { - console.log("*".repeat(40)); - console.log( - "\x1b[34m", - `After ${ - i + 1 - } observations, Tommie's summary is:\n${await tommie.getSummary({ - forceRefresh: true, - })}`, - "\x1b[0m" - ); - console.log("*".repeat(40)); + // console.log("*".repeat(40)); + // console.log( + // "\x1b[34m", + // `After ${ + // i + 1 + // } observations, Tommie's summary is:\n${await tommie.getSummary({ + // forceRefresh: true, + // })}`, + // "\x1b[0m" + // ); + // console.log("*".repeat(40)); } } @@ -175,32 +177,32 @@ test.skip( */ // Interview after the day - console.log( - await interviewAgent( - tommie, - "Tell me about how your day has been going" - ) - ); + // console.log( + // await interviewAgent( + // tommie, + // "Tell me about how your day has been going" + // ) + // ); /* Tommie said "My day has been pretty hectic. I've been driving around looking for job openings, attending job fairs, and updating my resume and cover letter. It's been really exhausting, but I'm determined to find the perfect job for me." */ - console.log( - await interviewAgent(tommie, "How do you feel about coffee?") - ); + // console.log( + // await interviewAgent(tommie, "How do you feel about coffee?") + // ); /* Tommie said "I actually love coffee - it's one of my favorite things. I try to drink it every day, especially when I'm stressed from job searching." 
*/ - console.log( - await interviewAgent(tommie, "Tell me about your childhood dog!") - ); + // console.log( + // await interviewAgent(tommie, "Tell me about your childhood dog!") + // ); /* Tommie said "My childhood dog was named Bruno. He was an adorable black Labrador Retriever who was always full of energy. Every time I came home he'd be so excited to see me, it was like he never stopped smiling. He was always ready for adventure and he was always my shadow. I miss him every day." */ - console.log( - "Tommie's second summary:\n", - await tommie.getSummary({ forceRefresh: true }) - ); + // console.log( + // "Tommie's second summary:\n", + // await tommie.getSummary({ forceRefresh: true }) + // ); /* Tommie's second summary: Name: Tommie (age: 25) @@ -242,10 +244,12 @@ test.skip( await eve.addMemory(observation, new Date()); } + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const eveInitialSummary: string = await eve.getSummary({ forceRefresh: true, }); - console.log("Eve's initial summary\n", eveInitialSummary); + // console.log("Eve's initial summary\n", eveInitialSummary); /* Eve's initial summary Name: Eve (age: 34) @@ -254,22 +258,22 @@ test.skip( */ // Let’s “Interview” Eve before she speaks with Tommie. - console.log( - await interviewAgent(eve, "How are you feeling about today?") - ); + // console.log( + // await interviewAgent(eve, "How are you feeling about today?") + // ); /* Eve said "I'm feeling a bit anxious about meeting my new client, but I'm sure it will be fine! How about you?". */ - console.log(await interviewAgent(eve, "What do you know about Tommie?")); + // console.log(await interviewAgent(eve, "What do you know about Tommie?")); /* Eve said "I know that Tommie is a recent college graduate who's been struggling to find a job. I'm looking forward to figuring out how I can help him move forward." */ - console.log( - await interviewAgent( - eve, - "Tommie is looking to find a job. 
What are are some things you'd like to ask him?" - ) - ); + // console.log( + // await interviewAgent( + // eve, + // "Tommie is looking to find a job. What are are some things you'd like to ask him?" + // ) + // ); /* Eve said: "I'd really like to get to know more about Tommie's professional background and experience, and why he is looking for a job. And I'd also like to know more about his strengths and passions and what kind of work he would be best suited for. That way I can help him find the right job to fit his needs." */ @@ -284,15 +288,17 @@ test.skip( const [, observation] = await agents[1].generateReaction( initialObservation ); - console.log("Initial reply:", observation); + // console.log("Initial reply:", observation); // eslint-disable-next-line no-constant-condition while (true) { let breakDialogue = false; for (const agent of agents) { + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const [stayInDialogue, agentObservation] = await agent.generateDialogueResponse(observation); - console.log("Next reply:", agentObservation); + // console.log("Next reply:", agentObservation); if (!stayInDialogue) { breakDialogue = true; } @@ -328,7 +334,7 @@ test.skip( const tommieSummary: string = await tommie.getSummary({ forceRefresh: true, }); - console.log("Tommie's third and final summary\n", tommieSummary); + // console.log("Tommie's third and final summary\n", tommieSummary); /* Tommie's third and final summary Name: Tommie (age: 25) @@ -337,7 +343,7 @@ test.skip( */ const eveSummary: string = await eve.getSummary({ forceRefresh: true }); - console.log("Eve's final summary\n", eveSummary); + // console.log("Eve's final summary\n", eveSummary); /* Eve's final summary Name: Eve (age: 34) @@ -349,8 +355,8 @@ test.skip( tommie, "How was your conversation with Eve?" 
); - console.log("USER: How was your conversation with Eve?\n"); - console.log(interviewOne); + // console.log("USER: How was your conversation with Eve?\n"); + // console.log(interviewOne); /* Tommie said "It was great. She was really helpful and knowledgeable. I'm thankful that she took the time to answer all my questions." */ @@ -359,8 +365,8 @@ test.skip( eve, "How was your conversation with Tommie?" ); - console.log("USER: How was your conversation with Tommie?\n"); - console.log(interviewTwo); + // console.log("USER: How was your conversation with Tommie?\n"); + // console.log(interviewTwo); /* Eve said "The conversation went very well. We discussed his goals and career aspirations, what kind of job he is looking for, and his experience and qualifications. I'm confident I can help him find the right job." */ @@ -369,8 +375,8 @@ test.skip( eve, "What do you wish you would have said to Tommie?" ); - console.log("USER: What do you wish you would have said to Tommie?\n"); - console.log(interviewThree); + // console.log("USER: What do you wish you would have said to Tommie?\n"); + // console.log(interviewThree); /* Eve said "It's ok if you don't have all the answers yet. Let's take some time to learn more about your experience and qualifications, so I can help you find a job that fits your goals." 
*/ @@ -384,16 +390,7 @@ test.skip( }; }; - const runSimulation = async () => { - try { - await Simulation(); - } catch (error) { - console.log("error running simulation:", error); - throw error; - } - }; - - await runSimulation(); + await Simulation(); }, 60000 * 30 ); diff --git a/langchain/src/experimental/masking/tests/masking.test.ts b/langchain/src/experimental/masking/tests/masking.test.ts index a829a75d4f92..dcece3b5a1f4 100644 --- a/langchain/src/experimental/masking/tests/masking.test.ts +++ b/langchain/src/experimental/masking/tests/masking.test.ts @@ -303,11 +303,13 @@ describe("MaskingParser and PIIMaskingTransformer", () => { it("throws an error when initialized with invalid regex pattern", () => { expect(() => { + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const transformer = new RegexMaskingTransformer({ // @ts-expect-error Should throw with invalid regex invalid: { regex: null }, }); - console.log(transformer); + // console.log(transformer); }).toThrow("Invalid pattern configuration."); }); }); diff --git a/langchain/src/experimental/openai_assistant/tests/openai_assistant.int.test.ts b/langchain/src/experimental/openai_assistant/tests/openai_assistant.int.test.ts index 8a7be3915f18..f2190fa9d4a5 100644 --- a/langchain/src/experimental/openai_assistant/tests/openai_assistant.int.test.ts +++ b/langchain/src/experimental/openai_assistant/tests/openai_assistant.int.test.ts @@ -80,11 +80,13 @@ test.skip("New OpenAIAssistantRunnable can be passed as an agent", async () => { agent, tools, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const assistantResponse = await agentExecutor.invoke({ content: "What's the weather in San Francisco and Tokyo? And will it be warm or cold in those places?", }); - console.log(assistantResponse); + // console.log(assistantResponse); /** { output: "The weather in San Francisco, CA is currently 72°F and it's warm. 
In Tokyo, Japan, the temperature is 10°C and it's also warm." @@ -103,7 +105,7 @@ test("OpenAIAssistantRunnable create and delete assistant", async () => { object: "assistant.deleted", deleted: true, }); - console.log(deleteStatus); + // console.log(deleteStatus); /** { id: 'asst_jwkJPzFkIL2ei9Kn1SZzmR6Y', @@ -137,11 +139,13 @@ test("OpenAIAssistantRunnable can be passed as an agent", async () => { agent, tools, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const assistantResponse = await agentExecutor.invoke({ content: "What's the weather in San Francisco and Tokyo? And will it be warm or cold in those places?", }); - console.log(assistantResponse); + // console.log(assistantResponse); /** { output: "The weather in San Francisco, CA is currently 72°F and it's warm. In Tokyo, Japan, the temperature is 10°C and it's also warm." @@ -160,7 +164,7 @@ test.skip("Created OpenAIAssistantRunnable is invokeable", async () => { const assistantResponse = await assistant.invoke({ content: "What's 10 - 4 raised to the 2.7", }); - console.log(assistantResponse); + // console.log(assistantResponse); /** [ { @@ -177,9 +181,11 @@ test.skip("Created OpenAIAssistantRunnable is invokeable", async () => { } ] */ + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const content = // eslint-disable-next-line @typescript-eslint/no-explicit-any (assistantResponse as any[]).flatMap((res) => res.content); - console.log(content); + // console.log(content); /** [ { diff --git a/langchain/src/experimental/openai_files/tests/openai_file.int.test.ts b/langchain/src/experimental/openai_files/tests/openai_file.int.test.ts index 68d6ff128566..c999c55f9774 100644 --- a/langchain/src/experimental/openai_files/tests/openai_file.int.test.ts +++ b/langchain/src/experimental/openai_files/tests/openai_file.int.test.ts @@ -32,7 +32,7 @@ test("Use file with Open AI", async () => { const fileContent = await 
openAIFiles.retrieveFileContent({ fileId: file.id, }); - console.log(fileContent); + // console.log(fileContent); expect(fileContent).toBeDefined(); /** * Output diff --git a/langchain/src/experimental/plan_and_execute/tests/plan_and_execute.int.test.ts b/langchain/src/experimental/plan_and_execute/tests/plan_and_execute.int.test.ts index 8999c0bd05f4..2b166db811e6 100644 --- a/langchain/src/experimental/plan_and_execute/tests/plan_and_execute.int.test.ts +++ b/langchain/src/experimental/plan_and_execute/tests/plan_and_execute.int.test.ts @@ -16,11 +16,13 @@ test.skip("Run agent on a simple input", async () => { tools, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await executor.call({ input: `What is 80 raised to the second power?`, }); - console.log({ result }); + // console.log({ result }); }); test.skip("Run agent", async () => { @@ -35,11 +37,13 @@ test.skip("Run agent", async () => { tools, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await executor.call({ input: `Who is the current president of the United States? What is their current age raised to the second power?`, }); - console.log({ result }); + // console.log({ result }); }); // TODO: Improve prompt to store compressed context to support this input @@ -55,9 +59,11 @@ test.skip("Run agent with a sequential math problem", async () => { tools, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await executor.call({ input: `In a dance class of 20 students, 20% enrolled in contemporary dance, 25% of the remaining enrolled in jazz dance, and the rest enrolled in hip-hop dance. 
What percentage of the entire students enrolled in hip-hop dance?`, }); - console.log(result); + // console.log(result); }); diff --git a/langchain/src/load/tests/load.int.test.ts b/langchain/src/load/tests/load.int.test.ts index 2636b87aa6d1..0716571cd8fa 100644 --- a/langchain/src/load/tests/load.int.test.ts +++ b/langchain/src/load/tests/load.int.test.ts @@ -8,6 +8,6 @@ test("Should load and invoke real-world serialized chain", async () => { const result = await chain.invoke( "x raised to the third plus seven equals 12" ); - console.log(result); + // console.log(result); expect(typeof result).toBe("string"); }); diff --git a/langchain/src/load/tests/load.test.ts b/langchain/src/load/tests/load.test.ts index 54ac32d657f2..e09696e31993 100644 --- a/langchain/src/load/tests/load.test.ts +++ b/langchain/src/load/tests/load.test.ts @@ -169,8 +169,8 @@ test("serialize + deserialize llm chain string prompt", async () => { callbacks: [ new ConsoleCallbackHandler(), { - handleLLMEnd(output) { - console.log(output); + handleLLMEnd(_output) { + // console.log(output); }, }, ], @@ -452,14 +452,14 @@ test("Should load traces even if the constructor name changes (minified environm value: "x", }); const str = JSON.stringify(llm, null, 2); - console.log(str); + // console.log(str); const llm2 = await load( str, { COHERE_API_KEY: "cohere-key" }, { "langchain/llms/openai": { OpenAI } } ); - console.log(JSON.stringify(llm2, null, 2)); + // console.log(JSON.stringify(llm2, null, 2)); expect(JSON.stringify(llm2, null, 2)).toBe(str); }); diff --git a/langchain/src/memory/tests/combined_memory.int.test.ts b/langchain/src/memory/tests/combined_memory.int.test.ts index 48102a6cdcd8..3b3799438093 100644 --- a/langchain/src/memory/tests/combined_memory.int.test.ts +++ b/langchain/src/memory/tests/combined_memory.int.test.ts @@ -36,7 +36,7 @@ test("Test combined memory", async () => { await memory.saveContext({ input: "bar" }, { output: "foo" }); const expectedString = "Human: bar\nAI: 
foo"; const result2 = await memory.loadMemoryVariables({}); - console.log("result2", result2); + // console.log("result2", result2); expect(result2.chat_history_lines).toStrictEqual(expectedString); @@ -71,7 +71,7 @@ test("Test combined memory return messages", async () => { }); const result1 = await memory.loadMemoryVariables({}); - console.log("result1", result1); + // console.log("result1", result1); expect(result1).toStrictEqual({ chat_history_lines: [], history: [new SystemMessage("")], @@ -80,7 +80,7 @@ test("Test combined memory return messages", async () => { await memory.saveContext({ input: "bar" }, { output: "foo" }); const expectedResult = [new HumanMessage("bar"), new AIMessage("foo")]; const result2 = await memory.loadMemoryVariables({}); - console.log("result2", result2); + // console.log("result2", result2); expect(result2.chat_history_lines).toStrictEqual(expectedResult); diff --git a/langchain/src/memory/tests/entity_memory.int.test.ts b/langchain/src/memory/tests/entity_memory.int.test.ts index 4b55d50072a2..fd09364a693c 100644 --- a/langchain/src/memory/tests/entity_memory.int.test.ts +++ b/langchain/src/memory/tests/entity_memory.int.test.ts @@ -15,36 +15,44 @@ test.skip("Test entity memory in a chain", async () => { memory, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res1 = await chain.call({ input: "Hi! I'm Jim." }); - console.log({ - res1, - memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), - }); + // console.log({ + // res1, + // memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), + // }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res2 = await chain.call({ input: "My office is the Scranton branch of Dunder Mifflin. What about you?", }); - console.log({ - res2, - memory: await memory.loadMemoryVariables({ input: "Who is Jim?" 
}), - }); + // console.log({ + // res2, + // memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), + // }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res3 = await chain.call({ input: "I am Jim.", }); - console.log({ - res3, - memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), - }); + // console.log({ + // res3, + // memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), + // }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res4 = await chain.call({ input: "What have I told you about Jim so far?", }); - console.log({ - res4, - memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), - }); + // console.log({ + // res4, + // memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), + // }); }, 120000); test.skip("Test entity memory with a chat model in a chain", async () => { @@ -58,33 +66,41 @@ test.skip("Test entity memory with a chat model in a chain", async () => { memory, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res1 = await chain.call({ input: "Hi! I'm Jim." }); - console.log({ - res1, - memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), - }); + // console.log({ + // res1, + // memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), + // }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res2 = await chain.call({ input: "My office is the Utica branch of Dunder Mifflin. What about you?", }); - console.log({ - res2, - memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), - }); + // console.log({ + // res2, + // memory: await memory.loadMemoryVariables({ input: "Who is Jim?" 
}), + // }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res3 = await chain.call({ input: "I am Jim.", }); - console.log({ - res3, - memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), - }); + // console.log({ + // res3, + // memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), + // }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res4 = await chain.call({ input: "What have I told you about Jim so far?", }); - console.log({ - res4, - memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), - }); + // console.log({ + // res4, + // memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }), + // }); }, 120000); diff --git a/langchain/src/memory/tests/summary.int.test.ts b/langchain/src/memory/tests/summary.int.test.ts index f8980f583672..d97a5bc911a2 100644 --- a/langchain/src/memory/tests/summary.int.test.ts +++ b/langchain/src/memory/tests/summary.int.test.ts @@ -15,8 +15,10 @@ test("Test summary memory", async () => { { input: "How's it going?" }, { response: "Hello! I'm doing fine. and you?" } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result2 = await memory.loadMemoryVariables({}); - console.log("result2", result2); + // console.log("result2", result2); await memory.clear(); expect(await memory.loadMemoryVariables({})).toEqual({ @@ -36,8 +38,10 @@ test("Test summary memory with chat model", async () => { { input: "How's it going?" }, { response: "Hello! I'm doing fine. and you?" 
} ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result2 = await memory.loadMemoryVariables({}); - console.log("result2", result2); + // console.log("result2", result2); await memory.clear(); expect(await memory.loadMemoryVariables({})).toEqual({ @@ -58,8 +62,10 @@ test("Test summary memory return messages", async () => { { input: "How's it going?" }, { response: "Hello! I'm doing fine. and you?" } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result2 = await memory.loadMemoryVariables({}); - console.log("result2", result2); + // console.log("result2", result2); await memory.clear(); expect(await memory.loadMemoryVariables({})).toEqual({ diff --git a/langchain/src/memory/tests/summary_buffer.int.test.ts b/langchain/src/memory/tests/summary_buffer.int.test.ts index 64419e99cb4d..beda1ab8bcee 100644 --- a/langchain/src/memory/tests/summary_buffer.int.test.ts +++ b/langchain/src/memory/tests/summary_buffer.int.test.ts @@ -17,8 +17,10 @@ test("Test summary buffer memory", async () => { { response: "Hello! I'm doing fine. and you?" } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await memory.loadMemoryVariables({}); - console.log("result", result); + // console.log("result", result); await memory.clear(); expect(await memory.loadMemoryVariables({})).toEqual({ @@ -39,8 +41,10 @@ test("Test summary buffer memory with chat model", async () => { { input: "How's it going?" }, { response: "Hello! I'm doing fine. and you?" 
} ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await memory.loadMemoryVariables({}); - console.log("result", result); + // console.log("result", result); await memory.clear(); expect(await memory.loadMemoryVariables({})).toEqual({ @@ -65,8 +69,10 @@ test("Test summary buffer memory return messages", async () => { { response: "Hello! I'm doing fine. and you?" } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await memory.loadMemoryVariables({}); - console.log("result", result); + // console.log("result", result); await memory.clear(); expect(await memory.loadMemoryVariables({})).toEqual({ diff --git a/langchain/src/output_parsers/tests/combining.int.test.ts b/langchain/src/output_parsers/tests/combining.int.test.ts index b5db131727eb..851a3947b94b 100644 --- a/langchain/src/output_parsers/tests/combining.int.test.ts +++ b/langchain/src/output_parsers/tests/combining.int.test.ts @@ -36,11 +36,13 @@ test("CombiningOutputParser", async () => { question: "What is the capital of France?", }); - console.log(input); + // console.log(input); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const response = await model.invoke(input); - console.log(response); + // console.log(response); - console.log(await parser.parse(response.content as string)); + // console.log(await parser.parse(response.content as string)); }); diff --git a/langchain/src/output_parsers/tests/openai_functions.int.test.ts b/langchain/src/output_parsers/tests/openai_functions.int.test.ts index c817bb1d45b8..867d50c30442 100644 --- a/langchain/src/output_parsers/tests/openai_functions.int.test.ts +++ b/langchain/src/output_parsers/tests/openai_functions.int.test.ts @@ -44,13 +44,13 @@ test("Streaming JSON patch", async () => { const chunks = []; let aggregate: any = {}; for await (const chunk of stream) { - 
console.log(chunk); + // console.log(chunk); chunks.push(chunk); aggregate = applyPatch(aggregate, chunk as Operation[]).newDocument; } expect(chunks.length).toBeGreaterThan(1); - console.log(aggregate); + // console.log(aggregate); expect(aggregate.setup.length).toBeGreaterThan(1); expect(aggregate.punchline.length).toBeGreaterThan(1); }); @@ -75,9 +75,11 @@ test("Streaming JSON patch with an event stream output parser", async () => { }); const chunks = []; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const decoder = new TextDecoder(); for await (const chunk of stream) { - console.log(decoder.decode(chunk)); + // console.log(decoder.decode(chunk)); chunks.push(chunk); } @@ -102,13 +104,13 @@ test("Streaming aggregated JSON", async () => { const chunks = []; let aggregate: any = {}; for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); chunks.push(chunk); aggregate = chunk; } expect(chunks.length).toBeGreaterThan(1); - console.log(aggregate); + // console.log(aggregate); expect(aggregate.setup.length).toBeGreaterThan(1); expect(aggregate.punchline.length).toBeGreaterThan(1); }); diff --git a/langchain/src/output_parsers/tests/openai_tools.int.test.ts b/langchain/src/output_parsers/tests/openai_tools.int.test.ts index 2cdb54181fc8..e79675b35156 100644 --- a/langchain/src/output_parsers/tests/openai_tools.int.test.ts +++ b/langchain/src/output_parsers/tests/openai_tools.int.test.ts @@ -40,6 +40,6 @@ test("Extraction", async () => { foo: "bears", }); - console.log(res); + // console.log(res); expect(res.length).toBe(2); }); diff --git a/langchain/src/output_parsers/tests/structured.int.test.ts b/langchain/src/output_parsers/tests/structured.int.test.ts index b735e264f6c0..5e97913ffbfe 100644 --- a/langchain/src/output_parsers/tests/structured.int.test.ts +++ b/langchain/src/output_parsers/tests/structured.int.test.ts @@ -44,7 +44,7 @@ test("StructuredOutputParser deals special chars in 
prompt with llm model", asyn Mullen band,” McGuinness laughs. “And he never lets us forget it.” `, }); - console.log("response", result); + // console.log("response", result); expect(result.questions).toHaveProperty("question1"); expect(result.questions).toHaveProperty("question2"); @@ -88,7 +88,7 @@ test("StructuredOutputParser deals special chars in prompt with chat model", asy Mullen band,” McGuinness laughs. “And he never lets us forget it.” `, }); - console.log("response", result); + // console.log("response", result); expect(result.questions).toHaveProperty("question1"); expect(result.questions).toHaveProperty("question2"); @@ -131,7 +131,7 @@ test("StructuredOutputParser deals special chars in prompt with chat model 2", a Mullen band,” McGuinness laughs. “And he never lets us forget it.” `, }); - console.log("response", result); + // console.log("response", result); const parsed = await parser.parse(result.questions); expect(parsed).toHaveProperty("question1"); @@ -170,7 +170,7 @@ test("StructuredOutputParser handles a longer and more complex schema", async () inputText: "A man, living in Poland.", }); const response = await model.invoke(input); - console.log("response", response); + // console.log("response", response); const parsed = await parser.parse(response); diff --git a/langchain/src/retrievers/self_query/tests/memory_self_query.int.test.ts b/langchain/src/retrievers/self_query/tests/memory_self_query.int.test.ts index b3e48b9fe655..6321155c1655 100644 --- a/langchain/src/retrievers/self_query/tests/memory_self_query.int.test.ts +++ b/langchain/src/retrievers/self_query/tests/memory_self_query.int.test.ts @@ -89,19 +89,19 @@ test("Memory Vector Store Self Query Retriever Test", async () => { const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" 
); - console.log(query1); + // console.log(query1); expect(query1.length).toEqual(0); const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); - console.log(query2); + // console.log(query2); expect(query2.length).toEqual(2); const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" ); - console.log(query3); + // console.log(query3); expect(query3.length).toEqual(1); }); @@ -224,25 +224,25 @@ test("Memory Vector Store Self Query Retriever Test With Default Filter Or Merge const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); - console.log(query1); + // console.log(query1); expect(query1.length).toEqual(6); const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); - console.log(query2); + // console.log(query2); expect(query2.length).toEqual(7); const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" ); - console.log(query3); + // console.log(query3); expect(query3.length).toEqual(6); const query4 = await selfQueryRetriever.getRelevantDocuments( "Awawawa au au au wawawawa hello?" ); - console.log(query4); + // console.log(query4); expect(query4.length).toEqual(6); // this one should return documents since default filter takes over }); @@ -365,24 +365,24 @@ test("Memory Vector Store Self Query Retriever Test With Default Filter And Merg const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); - console.log(query1); + // console.log(query1); expect(query1.length).toEqual(0); const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); - console.log(query2); + // console.log(query2); expect(query2.length).toEqual(2); const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" 
); - console.log(query3); + // console.log(query3); expect(query3.length).toEqual(1); const query4 = await selfQueryRetriever.getRelevantDocuments( "Awawawa au au au wawawawa hello?" ); - console.log(query4); + // console.log(query4); expect(query4.length).toEqual(0); // this one should return documents since default filter takes over }); diff --git a/langchain/src/retrievers/tests/chain_extract.int.test.ts b/langchain/src/retrievers/tests/chain_extract.int.test.ts index 94ad7d7b3063..8760371d7d8c 100644 --- a/langchain/src/retrievers/tests/chain_extract.int.test.ts +++ b/langchain/src/retrievers/tests/chain_extract.int.test.ts @@ -39,5 +39,5 @@ test("Test LLMChainExtractor", async () => { expect(res.text.length).toBeGreaterThan(0); - console.log({ res }); + // console.log({ res }); }); diff --git a/langchain/src/retrievers/tests/hyde.int.test.ts b/langchain/src/retrievers/tests/hyde.int.test.ts index 9e75b5490547..a2d289da6848 100644 --- a/langchain/src/retrievers/tests/hyde.int.test.ts +++ b/langchain/src/retrievers/tests/hyde.int.test.ts @@ -28,7 +28,7 @@ test("Hyde retriever", async () => { ); expect(results.length).toBe(1); - console.log(results); + // console.log(results); }); test("Hyde retriever with default prompt template", async () => { @@ -56,5 +56,5 @@ test("Hyde retriever with default prompt template", async () => { ); expect(results.length).toBe(1); - console.log(results); + // console.log(results); }); diff --git a/langchain/src/retrievers/tests/matryoshka_retriever.int.test.ts b/langchain/src/retrievers/tests/matryoshka_retriever.int.test.ts index c00446ffadc1..7fa7a8231467 100644 --- a/langchain/src/retrievers/tests/matryoshka_retriever.int.test.ts +++ b/langchain/src/retrievers/tests/matryoshka_retriever.int.test.ts @@ -66,14 +66,14 @@ test("MatryoshkaRetriever can retrieve", async () => { await retriever.addDocuments(allDocs); const query = "What is LangChain?"; - console.log("Querying documents"); + // console.log("Querying documents"); const 
results = await retriever.getRelevantDocuments(query); const retrieverResultContents = new Set( results.map((doc) => doc.pageContent) ); - console.log([...retrieverResultContents]); + // console.log([...retrieverResultContents]); expect(results.length).toBe(5); expect(retrieverResultContents).toEqual(new Set(relevantDocContents)); }); diff --git a/langchain/src/retrievers/tests/parent_document.int.test.ts b/langchain/src/retrievers/tests/parent_document.int.test.ts index 005484a4dda2..97f698bf9ab1 100644 --- a/langchain/src/retrievers/tests/parent_document.int.test.ts +++ b/langchain/src/retrievers/tests/parent_document.int.test.ts @@ -50,11 +50,13 @@ test("Should return a part of a document if a parent splitter is passed", async await retriever.addDocuments(docs); const query = "justice breyer"; const retrievedDocs = await retriever.getRelevantDocuments(query); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const vectorstoreRetreivedDocs = await vectorstore.similaritySearch( "justice breyer" ); - console.log(vectorstoreRetreivedDocs, vectorstoreRetreivedDocs.length); - console.log(retrievedDocs); + // console.log(vectorstoreRetreivedDocs, vectorstoreRetreivedDocs.length); + // console.log(retrievedDocs); expect(retrievedDocs.length).toBeGreaterThan(1); expect(retrievedDocs[0].pageContent.length).toBeGreaterThan(100); }); @@ -101,11 +103,13 @@ test("Should return a part of a document if a parent splitter is passed", async await retriever.addDocuments(docs); const query = "justice breyer"; const retrievedDocs = await retriever.getRelevantDocuments(query); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const vectorstoreRetreivedDocs = await vectorstore.similaritySearch( "justice breyer" ); - console.log(vectorstoreRetreivedDocs, vectorstoreRetreivedDocs.length); - console.log(retrievedDocs); + // console.log(vectorstoreRetreivedDocs, vectorstoreRetreivedDocs.length); 
+ // console.log(retrievedDocs); expect(retrievedDocs.length).toBeGreaterThan(1); expect(retrievedDocs[0].pageContent.length).toBeGreaterThan(100); }); @@ -139,10 +143,12 @@ test("Should use a custom retriever to retrieve one doc", async () => { await retriever.addDocuments(docs); const query = "justice breyer"; const retrievedDocs = await retriever.getRelevantDocuments(query); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const vectorstoreRetreivedDocs = await vectorstore.similaritySearch( "justice breyer" ); - console.log(vectorstoreRetreivedDocs, vectorstoreRetreivedDocs.length); - console.log(retrievedDocs); + // console.log(vectorstoreRetreivedDocs, vectorstoreRetreivedDocs.length); + // console.log(retrievedDocs); expect(retrievedDocs).toHaveLength(1); }); diff --git a/langchain/src/retrievers/tests/score_threshold.int.test.ts b/langchain/src/retrievers/tests/score_threshold.int.test.ts index f7fa4b499508..9124f76dc81e 100644 --- a/langchain/src/retrievers/tests/score_threshold.int.test.ts +++ b/langchain/src/retrievers/tests/score_threshold.int.test.ts @@ -43,7 +43,7 @@ test("ConversationalRetrievalQAChain.fromLLM should use its vector store recursi question: "Buildings are made out of what?", }); - console.log("response:", res); + // console.log("response:", res); expect(res).toEqual( expect.objectContaining({ diff --git a/langchain/src/retrievers/tests/time_weighted.test.ts b/langchain/src/retrievers/tests/time_weighted.test.ts index 16f7db975544..c184d19d16f1 100644 --- a/langchain/src/retrievers/tests/time_weighted.test.ts +++ b/langchain/src/retrievers/tests/time_weighted.test.ts @@ -331,7 +331,7 @@ describe("Test getRelevantDocuments", () => { }, }, ]; - console.log(resultsDocs); + // console.log(resultsDocs); expect(resultsDocs).toStrictEqual(expected); }); }); diff --git a/langchain/src/smith/tests/run_on_dataset.int.test.ts b/langchain/src/smith/tests/run_on_dataset.int.test.ts index 
54f8079fbfea..6dfd562d3770 100644 --- a/langchain/src/smith/tests/run_on_dataset.int.test.ts +++ b/langchain/src/smith/tests/run_on_dataset.int.test.ts @@ -5,7 +5,6 @@ import OpenAI from "openai"; import wiki from "wikipedia"; import { Client, Dataset, RunTree, RunTreeConfig } from "langsmith"; -import { runOnDataset } from "../runner_utils.js"; import { DynamicRunEvaluatorParams, RunEvalConfig } from "../config.js"; const oaiClient = new OpenAI(); @@ -43,7 +42,7 @@ test(`Chat model dataset`, async () => { await childRun.postRun(); return chatCompletion.choices[0].message.content ?? ""; } catch (error: any) { - console.error("Error generating wiki search query:", error); + // console.error("Error generating wiki search query:", error); childRun.end({ error: error.toString() }); await childRun.postRun(); throw error; @@ -93,7 +92,7 @@ test(`Chat model dataset`, async () => { await childRun.postRun(); return finalResults; } catch (error: any) { - console.error("Error in retrieval:", error); + // console.error("Error in retrieval:", error); childRun.end({ error: error.toString() }); await childRun.postRun(); throw error; @@ -131,7 +130,7 @@ test(`Chat model dataset`, async () => { await childRun.postRun(); return chatCompletion.choices[0].message.content ?? 
""; } catch (error: any) { - console.error("Error generating answer:", error); + // console.error("Error generating answer:", error); childRun.end({ error: error.toString() }); await childRun.postRun(); throw error; @@ -158,7 +157,7 @@ test(`Chat model dataset`, async () => { await parentRun.postRun(); return answer; } catch (error: any) { - console.error("Error running RAG Pipeline:", error); + // console.error("Error running RAG Pipeline:", error); parentRun.end({ error: error.toString() }); await parentRun.postRun(); throw error; @@ -211,6 +210,8 @@ test(`Chat model dataset`, async () => { }; }; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const evaluation: RunEvalConfig = { // The 'evaluators' are loaded from LangChain's evaluation // library. @@ -237,6 +238,8 @@ test(`Chat model dataset`, async () => { customEvaluators: [unsure], }; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const wrappedRagPipeline = async ({ question, }: { @@ -245,11 +248,11 @@ test(`Chat model dataset`, async () => { return ragPipeline(question); }; - console.log( - await runOnDataset(wrappedRagPipeline, datasetName, { - evaluationConfig: evaluation, - }) - ); + // console.log( + // await runOnDataset(wrappedRagPipeline, datasetName, { + // evaluationConfig: evaluation, + // }) + // ); }); test("Thrown errors should not interrupt dataset run", async () => { @@ -292,16 +295,20 @@ test("Thrown errors should not interrupt dataset run", async () => { // An illustrative custom evaluator example const dummy = async (_: DynamicRunEvaluatorParams) => { - console.log("RUNNING EVAL"); + // console.log("RUNNING EVAL"); throw new Error("Expected error"); }; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const evaluation: RunEvalConfig = { // Custom evaluators can be user-defined RunEvaluator's // or a compatible function customEvaluators: 
[dummy], }; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const wrappedRagPipeline = async ({ question, }: { @@ -310,10 +317,10 @@ test("Thrown errors should not interrupt dataset run", async () => { return ragPipeline(question); }; - console.log( - await runOnDataset(wrappedRagPipeline, datasetName, { - evaluationConfig: evaluation, - maxConcurrency: 1, - }) - ); + // console.log( + // await runOnDataset(wrappedRagPipeline, datasetName, { + // evaluationConfig: evaluation, + // maxConcurrency: 1, + // }) + // ); }); diff --git a/langchain/src/storage/tests/file_system.test.ts b/langchain/src/storage/tests/file_system.test.ts index a51eba168672..bb9bd3fffd9c 100644 --- a/langchain/src/storage/tests/file_system.test.ts +++ b/langchain/src/storage/tests/file_system.test.ts @@ -58,7 +58,7 @@ describe("LocalFileStore", () => { for await (const key of store.yieldKeys(prefix)) { yieldedKeys.push(key); } - console.log("Yielded keys:", yieldedKeys); + // console.log("Yielded keys:", yieldedKeys); expect(yieldedKeys.sort()).toEqual(keysWithPrefix.sort()); // afterEach won't automatically delete these since we're applying a prefix. 
await store.mdelete(keysWithPrefix); @@ -77,7 +77,7 @@ describe("LocalFileStore", () => { const retrievedValues = await store.mget([keys[0], keys[1]]); const everyValueDefined = retrievedValues.every((v) => v !== undefined); expect(everyValueDefined).toBe(true); - console.log("retrievedValues", retrievedValues); + // console.log("retrievedValues", retrievedValues); expect( retrievedValues.map((v) => { if (!v) { diff --git a/langchain/src/tests/text_splitter.test.ts b/langchain/src/tests/text_splitter.test.ts index 104efd090320..772509a7269e 100644 --- a/langchain/src/tests/text_splitter.test.ts +++ b/langchain/src/tests/text_splitter.test.ts @@ -76,8 +76,10 @@ describe("Character text splitter", () => { test("Test invalid arguments.", () => { expect(() => { + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = new CharacterTextSplitter({ chunkSize: 2, chunkOverlap: 4 }); - console.log(res); + // console.log(res); }).toThrow(); }); diff --git a/libs/langchain-anthropic/src/experimental/tests/tool_calling.int.test.ts b/libs/langchain-anthropic/src/experimental/tests/tool_calling.int.test.ts index f5c863c60a88..b875afe065b6 100644 --- a/libs/langchain-anthropic/src/experimental/tests/tool_calling.int.test.ts +++ b/libs/langchain-anthropic/src/experimental/tests/tool_calling.int.test.ts @@ -13,8 +13,10 @@ test.skip("Test ChatAnthropicTools", async () => { maxRetries: 0, }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); }); test.skip("Test ChatAnthropicTools streaming", async () => { @@ -26,7 +28,7 @@ test.skip("Test ChatAnthropicTools streaming", async () => { const stream = await chat.stream([message]); const chunks: BaseMessageChunk[] = []; for await (const chunk of stream) { - console.log(chunk); + // 
console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); @@ -64,7 +66,7 @@ test.skip("Test ChatAnthropicTools with tools", async () => { }); const message = new HumanMessage("What is the weather in San Francisco?"); const res = await chat.invoke([message]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls).toBeDefined(); expect(res.additional_kwargs.tool_calls?.[0].function.name).toEqual( "get_current_weather" @@ -110,7 +112,7 @@ test.skip("Test ChatAnthropicTools with a forced function call", async () => { "Extract the desired information from the following passage:\n\nthis is really cool" ); const res = await chat.invoke([message]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls).toBeDefined(); expect(res.additional_kwargs.tool_calls?.[0]?.function.name).toEqual( "extract_data" @@ -153,7 +155,7 @@ test.skip("ChatAnthropicTools with Zod schema", async () => { "Alex is 5 feet tall. Claudia is 1 foot taller than Alex and jumps higher than him. Claudia is a brunette and Alex is blonde." ); const res = await chat.invoke([message]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls).toBeDefined(); expect(res.additional_kwargs.tool_calls?.[0]?.function.name).toEqual( "information_extraction" @@ -196,12 +198,12 @@ test.skip("ChatAnthropicTools with parallel tool calling", async () => { }, }, }); - console.log(zodToJsonSchema(schema)); + // console.log(zodToJsonSchema(schema)); const message = new HumanMessage( "Alex is 5 feet tall. Claudia is 1 foot taller than Alex and jumps higher than him. Claudia is a brunette and Alex is blonde." 
); const res = await chat.invoke([message]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls).toBeDefined(); expect( res.additional_kwargs.tool_calls?.map((toolCall) => @@ -231,7 +233,7 @@ test.skip("Test ChatAnthropic withStructuredOutput", async () => { ); const message = new HumanMessage("Alex is 5 feet tall. Alex is blonde."); const res = await runnable.invoke([message]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); expect(res).toEqual({ name: "Alex", height: 5, hairColor: "blonde" }); }); @@ -252,7 +254,7 @@ test.skip("Test ChatAnthropic withStructuredOutput on a single array item", asyn ); const message = new HumanMessage("Alex is 5 feet tall. Alex is blonde."); const res = await runnable.invoke([message]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); expect(res).toEqual({ people: [{ hairColor: "blonde", height: 5, name: "Alex" }], }); @@ -296,7 +298,7 @@ test.skip("Test ChatAnthropic withStructuredOutput on a single array item", asyn email: "From: Erick. The email is about the new project. The tone is positive. The action items are to send the report and to schedule a meeting.", }); - console.log(JSON.stringify(response, null, 2)); + // console.log(JSON.stringify(response, null, 2)); expect(response).toEqual({ sender: "Erick", action_items: [expect.any(String), expect.any(String)], @@ -319,7 +321,7 @@ test.skip("Test ChatAnthropicTools", async () => { const res = await structured.invoke( "What are the first five natural numbers?" 
); - console.log(res); + // console.log(res); expect(res).toEqual({ nested: [1, 2, 3, 4, 5], }); diff --git a/libs/langchain-anthropic/src/tests/chat_models.int.test.ts b/libs/langchain-anthropic/src/tests/chat_models.int.test.ts index 73453bdf24cc..d83a06e8fb28 100644 --- a/libs/langchain-anthropic/src/tests/chat_models.int.test.ts +++ b/libs/langchain-anthropic/src/tests/chat_models.int.test.ts @@ -21,7 +21,7 @@ test("Test ChatAnthropic", async () => { }); const message = new HumanMessage("Hello!"); const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); expect(res.response_metadata.usage).toBeDefined(); }); @@ -35,11 +35,13 @@ test("Test ChatAnthropic Generate", async () => { expect(res.generations.length).toBe(2); for (const generation of res.generations) { expect(generation.length).toBe(1); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for (const message of generation) { - console.log(message.text); + // console.log(message.text); } } - console.log({ res }); + // console.log({ res }); }); test.skip("Test ChatAnthropic Generate w/ ClientOptions", async () => { @@ -57,11 +59,13 @@ test.skip("Test ChatAnthropic Generate w/ ClientOptions", async () => { expect(res.generations.length).toBe(2); for (const generation of res.generations) { expect(generation.length).toBe(1); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for (const message of generation) { - console.log(message.text); + // console.log(message.text); } } - console.log({ res }); + // console.log({ res }); }); test("Test ChatAnthropic Generate with a signal in call options", async () => { @@ -90,11 +94,13 @@ test("Test ChatAnthropic tokenUsage with a batch", async () => { maxRetries: 0, modelName: "claude-3-sonnet-20240229", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.generate([ [new 
HumanMessage(`Hello!`)], [new HumanMessage(`Hi!`)], ]); - console.log({ res }); + // console.log({ res }); }); test("Test ChatAnthropic in streaming mode", async () => { @@ -114,7 +120,7 @@ test("Test ChatAnthropic in streaming mode", async () => { }); const message = new HumanMessage("Hello!"); const res = await model.invoke([message]); - console.log({ res }); + // console.log({ res }); expect(nrNewTokens > 0).toBe(true); expect(res.content).toBe(streamedCompletion); @@ -149,7 +155,7 @@ test("Test ChatAnthropic in streaming mode with a signal", async () => { return res; }).rejects.toThrow(); - console.log({ nrNewTokens, streamedCompletion }); + // console.log({ nrNewTokens, streamedCompletion }); }, 5000); test.skip("Test ChatAnthropic prompt value", async () => { @@ -161,11 +167,13 @@ test.skip("Test ChatAnthropic prompt value", async () => { const res = await chat.generatePrompt([new ChatPromptValue([message])]); expect(res.generations.length).toBe(1); for (const generation of res.generations) { + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for (const g of generation) { - console.log(g.text); + // console.log(g.text); } } - console.log({ res }); + // console.log({ res }); }); test.skip("ChatAnthropic, docs, prompt templates", async () => { @@ -184,6 +192,8 @@ test.skip("ChatAnthropic, docs, prompt templates", async () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ input_language: "English", @@ -192,7 +202,7 @@ test.skip("ChatAnthropic, docs, prompt templates", async () => { }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); test.skip("ChatAnthropic, longer chain of messages", async () => { @@ -208,13 +218,15 @@ test.skip("ChatAnthropic, longer chain of messages", async () => { 
HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ text: "What did I just say my name was?", }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); test.skip("ChatAnthropic, Anthropic apiUrl set manually via constructor", async () => { @@ -226,8 +238,10 @@ test.skip("ChatAnthropic, Anthropic apiUrl set manually via constructor", async anthropicApiUrl, }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.call([message]); - console.log({ res }); + // console.log({ res }); }); test("Test ChatAnthropic stream method", async () => { @@ -257,8 +271,10 @@ test("Test ChatAnthropic stream method with abort", async () => { signal: AbortSignal.timeout(1000), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); } }).rejects.toThrow(); }); @@ -273,8 +289,10 @@ test("Test ChatAnthropic stream method with early break", async () => { "How is your day going? Be extremely verbose." 
); let i = 0; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); i += 1; if (i > 10) { break; @@ -294,8 +312,10 @@ test("Test ChatAnthropic headers passed through", async () => { }, }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); test("Test ChatAnthropic multimodal", async () => { @@ -303,6 +323,8 @@ test("Test ChatAnthropic multimodal", async () => { modelName: "claude-3-sonnet-20240229", maxRetries: 0, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([ new HumanMessage({ content: [ @@ -316,7 +338,7 @@ test("Test ChatAnthropic multimodal", async () => { ], }), ]); - console.log(res); + // console.log(res); }); test("Stream tokens", async () => { @@ -335,7 +357,7 @@ test("Stream tokens", async () => { res = res.concat(chunk); } } - console.log(res); + // console.log(res); expect(res?.usage_metadata).toBeDefined(); if (!res?.usage_metadata) { return; diff --git a/libs/langchain-anthropic/src/tests/chat_models.test.ts b/libs/langchain-anthropic/src/tests/chat_models.test.ts index 537c0c8e4fef..e6f77731241a 100644 --- a/libs/langchain-anthropic/src/tests/chat_models.test.ts +++ b/libs/langchain-anthropic/src/tests/chat_models.test.ts @@ -95,6 +95,8 @@ test("withStructuredOutput with proper output", async () => { name: "Extractor", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await modelWithStructuredOutput.invoke(` Enumeration of Kernel Modules via Proc Prompt for Credentials with OSASCRIPT @@ -103,5 +105,5 @@ test("withStructuredOutput with proper output", async () => { Suspicious Automator 
Workflows Execution `); - console.log(result); + // console.log(result); }); diff --git a/libs/langchain-aws/src/retrievers/tests/kendra.int.test.ts b/libs/langchain-aws/src/retrievers/tests/kendra.int.test.ts index 0f97e921f79c..f2c86f9645e1 100644 --- a/libs/langchain-aws/src/retrievers/tests/kendra.int.test.ts +++ b/libs/langchain-aws/src/retrievers/tests/kendra.int.test.ts @@ -28,5 +28,5 @@ test("AmazonKendraRetriever", async () => { expect(docs.length).toBeGreaterThan(0); - console.log(docs); + // console.log(docs); }); diff --git a/libs/langchain-aws/src/tests/chat_models.int.test.ts b/libs/langchain-aws/src/tests/chat_models.int.test.ts index 78652f848f9c..8b119461814a 100644 --- a/libs/langchain-aws/src/tests/chat_models.int.test.ts +++ b/libs/langchain-aws/src/tests/chat_models.int.test.ts @@ -32,7 +32,7 @@ test("Test ChatBedrockConverse can invoke", async () => { maxTokens: 5, }); const res = await model.invoke([new HumanMessage("Print hello world")]); - console.log({ res }); + // console.log({ res }); expect(typeof res.content).toBe("string"); expect(res.content.length).toBeGreaterThan(1); expect(res.content).not.toContain("world"); @@ -48,8 +48,10 @@ test("Test ChatBedrockConverse stream method", async () => { for await (const chunk of stream) { chunks.push(chunk); } + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const finalMessage = chunks.map((c) => c.content).join(""); - console.log(finalMessage); + // console.log(finalMessage); expect(chunks.length).toBeGreaterThan(1); }); @@ -78,7 +80,7 @@ test("Test ChatBedrockConverse in streaming mode", async () => { }); const message = new HumanMessage("Hello!"); const result = await model.invoke([message]); - console.log(result); + // console.log(result); expect(nrNewTokens > 0).toBe(true); expect(result.content).toBe(streamedCompletion); @@ -96,7 +98,7 @@ test("Test ChatBedrockConverse with stop", async () => { const res = await model.invoke([new 
HumanMessage("Print hello world")], { stop: ["world"], }); - console.log({ res }); + // console.log({ res }); expect(typeof res.content).toBe("string"); expect(res.content.length).toBeGreaterThan(1); expect(res.content).not.toContain("world"); @@ -111,8 +113,10 @@ test("Test ChatBedrockConverse stream method with early break", async () => { "How is your day going? Be extremely verbose." ); let i = 0; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); i += 1; if (i > 10) { break; @@ -134,9 +138,9 @@ test("Streaming tokens can be found in usage_metadata field", async () => { finalResult = chunk; } } - console.log({ - usage_metadata: finalResult?.usage_metadata, - }); + // console.log({ + // usage_metadata: finalResult?.usage_metadata, + // }); expect(finalResult).toBeTruthy(); expect(finalResult?.usage_metadata).toBeTruthy(); expect(finalResult?.usage_metadata?.input_tokens).toBeGreaterThan(0); @@ -150,9 +154,9 @@ test("populates ID field on AIMessage", async () => { maxTokens: 5, }); const response = await model.invoke("Hell"); - console.log({ - invokeId: response.id, - }); + // console.log({ + // invokeId: response.id, + // }); expect(response.id?.length).toBeGreaterThan(1); /** @@ -181,10 +185,9 @@ test("Test ChatBedrockConverse can invoke tools", async () => { }); const tools = [ tool( - (input) => { - console.log("tool", input); - return "Hello"; - }, + (_input) => + // console.log("tool", input); + "Hello", { name: "get_weather", description: "Get the weather", @@ -201,7 +204,7 @@ test("Test ChatBedrockConverse can invoke tools", async () => { expect(result.tool_calls).toBeDefined(); expect(result.tool_calls).toHaveLength(1); - console.log("result.tool_calls?.[0]", result.tool_calls?.[0]); + // console.log("result.tool_calls?.[0]", result.tool_calls?.[0]); expect(result.tool_calls?.[0].name).toBe("get_weather"); 
expect(result.tool_calls?.[0].id).toBeDefined(); }); @@ -213,10 +216,9 @@ test("Test ChatBedrockConverse can invoke tools with non anthropic model", async }); const tools = [ tool( - (input) => { - console.log("tool", input); - return "Hello"; - }, + (_input) => + // console.log("tool", input); + "Hello", { name: "get_weather", description: "Get the weather", @@ -233,7 +235,7 @@ test("Test ChatBedrockConverse can invoke tools with non anthropic model", async expect(result.tool_calls).toBeDefined(); expect(result.tool_calls).toHaveLength(1); - console.log("result.tool_calls?.[0]", result.tool_calls?.[0]); + // console.log("result.tool_calls?.[0]", result.tool_calls?.[0]); expect(result.tool_calls?.[0].name).toBe("get_weather"); expect(result.tool_calls?.[0].id).toBeDefined(); }); @@ -244,10 +246,9 @@ test("Test ChatBedrockConverse can stream tools", async () => { }); const tools = [ tool( - (input) => { - console.log("tool", input); - return "Hello"; - }, + (_input) => + // console.log("tool", input); + "Hello", { name: "get_weather", description: "Get the weather", @@ -272,7 +273,7 @@ test("Test ChatBedrockConverse can stream tools", async () => { } expect(finalChunk?.tool_calls).toBeDefined(); expect(finalChunk?.tool_calls).toHaveLength(1); - console.log("result.tool_calls?.[0]", finalChunk?.tool_calls?.[0]); + // console.log("result.tool_calls?.[0]", finalChunk?.tool_calls?.[0]); expect(finalChunk?.tool_calls?.[0].name).toBe("get_weather"); expect(finalChunk?.tool_calls?.[0].id).toBeDefined(); }); @@ -283,10 +284,9 @@ test("Test ChatBedrockConverse tool_choice works", async () => { }); const tools = [ tool( - (input) => { - console.log("tool", input); - return "Hello"; - }, + (_input) => + // console.log("tool", input); + "Hello", { name: "get_weather", description: "Get the weather", @@ -296,10 +296,9 @@ test("Test ChatBedrockConverse tool_choice works", async () => { } ), tool( - (input) => { - console.log("tool", input); - return "Hello"; - }, + (_input) => + 
// console.log("tool", input); + "Hello", { name: "calculator", description: "Sum two numbers", @@ -321,7 +320,7 @@ test("Test ChatBedrockConverse tool_choice works", async () => { expect(result.tool_calls).toBeDefined(); expect(result.tool_calls).toHaveLength(1); - console.log("result.tool_calls?.[0]", result.tool_calls?.[0]); + // console.log("result.tool_calls?.[0]", result.tool_calls?.[0]); expect(result.tool_calls?.[0].name).toBe("get_weather"); expect(result.tool_calls?.[0].id).toBeDefined(); }); diff --git a/libs/langchain-azure-openai/src/tests/chat_models-extended.int.test.ts b/libs/langchain-azure-openai/src/tests/chat_models-extended.int.test.ts index adea7e1cae34..5db9da565c9c 100644 --- a/libs/langchain-azure-openai/src/tests/chat_models-extended.int.test.ts +++ b/libs/langchain-azure-openai/src/tests/chat_models-extended.int.test.ts @@ -13,8 +13,10 @@ test("Test ChatOpenAI JSON mode", async () => { }, }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([["system", "Only return JSON"], message]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); }); test("Test ChatOpenAI seed", async () => { @@ -27,7 +29,7 @@ test("Test ChatOpenAI seed", async () => { }); const message = new HumanMessage("Say something random!"); const res = await chat.invoke([message]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); const res2 = await chat.invoke([message]); expect(res).toEqual(res2); }); @@ -62,7 +64,7 @@ test("Test ChatOpenAI tool calling", async () => { const res = await chat.invoke([ ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"], ]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls?.length).toBeGreaterThan(1); }); @@ -109,7 +111,7 @@ test("Test ChatOpenAI tool calling with ToolMessages", async () => 
{ const res = await chat.invoke([ ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"], ]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls?.length).toBeGreaterThan(1); // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const toolMessages = res.additional_kwargs.tool_calls!.map( @@ -122,12 +124,14 @@ test("Test ChatOpenAI tool calling with ToolMessages", async () => { ), }) ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const finalResponse = await chat.invoke([ ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"], res, ...toolMessages, ]); - console.log(finalResponse); + // console.log(finalResponse); }); test("Test ChatOpenAI tool calling with streaming", async () => { @@ -163,7 +167,7 @@ test("Test ChatOpenAI tool calling with streaming", async () => { let finalChunk; const chunks = []; for await (const chunk of stream) { - console.log(chunk.additional_kwargs.tool_calls); + // console.log(chunk.additional_kwargs.tool_calls); chunks.push(chunk); if (!finalChunk) { finalChunk = chunk; @@ -172,7 +176,7 @@ test("Test ChatOpenAI tool calling with streaming", async () => { } } expect(chunks.length).toBeGreaterThan(1); - console.log(finalChunk?.additional_kwargs.tool_calls); + // console.log(finalChunk?.additional_kwargs.tool_calls); expect(finalChunk?.additional_kwargs.tool_calls?.length).toBeGreaterThan(1); }); @@ -193,10 +197,10 @@ test("ChatOpenAI in JSON mode can cache generations", async () => { "Respond with a JSON object containing arbitrary fields." 
); const res = await chat.invoke([message]); - console.log(res); + // console.log(res); const res2 = await chat.invoke([message]); - console.log(res2); + // console.log(res2); expect(res).toEqual(res2); diff --git a/libs/langchain-azure-openai/src/tests/chat_models-vision.int.test.ts b/libs/langchain-azure-openai/src/tests/chat_models-vision.int.test.ts index 909e281184c6..f7f222903a5c 100644 --- a/libs/langchain-azure-openai/src/tests/chat_models-vision.int.test.ts +++ b/libs/langchain-azure-openai/src/tests/chat_models-vision.int.test.ts @@ -27,8 +27,10 @@ test("Test ChatOpenAI with a file", async () => { }, ], }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); test("Test ChatOpenAI with a URL", async () => { @@ -49,6 +51,8 @@ test("Test ChatOpenAI with a URL", async () => { }, ], }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); diff --git a/libs/langchain-azure-openai/src/tests/chat_models.int.test.ts b/libs/langchain-azure-openai/src/tests/chat_models.int.test.ts index 1c862a3cf879..4fae73431f88 100644 --- a/libs/langchain-azure-openai/src/tests/chat_models.int.test.ts +++ b/libs/langchain-azure-openai/src/tests/chat_models.int.test.ts @@ -26,8 +26,10 @@ test("Test ChatOpenAI", async () => { maxTokens: 10, }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.call([message]); - console.log({ res }); + // console.log({ res }); }); test("Test ChatOpenAI with SystemChatMessage", async () => { @@ -37,8 +39,10 @@ test("Test ChatOpenAI with SystemChatMessage", async () => { }); const system_message = new SystemMessage("You are to chat with a user."); const 
message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.call([system_message, message]); - console.log({ res }); + // console.log({ res }); }); test("Test ChatOpenAI Generate", async () => { @@ -53,11 +57,11 @@ test("Test ChatOpenAI Generate", async () => { for (const generation of res.generations) { expect(generation.length).toBe(2); for (const message of generation) { - console.log(message.text); + // console.log(message.text); expect(typeof message.text).toBe("string"); } } - console.log({ res }); + // console.log({ res }); }); test("Test ChatOpenAI Generate throws when one of the calls fails", async () => { @@ -86,14 +90,16 @@ test("Test ChatOpenAI tokenUsage", async () => { maxTokens: 10, callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { - console.log(output); + // console.log(output); tokenUsage = output.llmOutput?.tokenUsage; }, }), }); const message = new HumanMessage("Hello"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke([message]); - console.log({ res }); + // console.log({ res }); expect(tokenUsage.promptTokens).toBeGreaterThan(0); }); @@ -114,11 +120,13 @@ test("Test ChatOpenAI tokenUsage with a batch", async () => { }, }), }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.generate([ [new HumanMessage("Hello")], [new HumanMessage("Hi")], ]); - console.log(res); + // console.log(res); expect(tokenUsage.promptTokens).toBeGreaterThan(0); }); @@ -189,11 +197,13 @@ test("Test ChatOpenAI prompt value", async () => { expect(res.generations.length).toBe(1); for (const generation of res.generations) { expect(generation.length).toBe(2); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for (const g of generation) { - 
console.log(g.text); + // console.log(g.text); } } - console.log({ res }); + // console.log({ res }); }); test("OpenAI Chat, docs, prompt templates", async () => { @@ -208,6 +218,8 @@ test("OpenAI Chat, docs, prompt templates", async () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ input_language: "English", @@ -216,24 +228,28 @@ test("OpenAI Chat, docs, prompt templates", async () => { }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }, 5000); test("Test OpenAI with stop", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.call( [new HumanMessage("Print hello world")], ["world"] ); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with stop in object", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke([new HumanMessage("Print hello world")], { stop: ["world"], }); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with timeout in call options", async () => { @@ -291,8 +307,10 @@ test("Test OpenAI with specific roles in ChatMessage", async () => { "system" ); const user_message = new ChatMessage("Hello!", "user"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.call([system_message, user_message]); - console.log({ res }); + // console.log({ res }); }); test("Test ChatOpenAI stream method", async () => { @@ -303,7 +321,7 @@ test("Test ChatOpenAI stream method", async () => { const stream = await model.stream("Print hello world."); const chunks 
= []; for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); @@ -321,8 +339,10 @@ test("Test ChatOpenAI stream method with abort", async () => { signal: AbortSignal.timeout(500), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); } }).rejects.toThrow(); }); @@ -336,8 +356,10 @@ test("Test ChatOpenAI stream method with early break", async () => { "How is your day going? Be extremely verbose." ); let i = 0; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); i += 1; if (i > 10) { break; @@ -355,8 +377,10 @@ test("Test ChatOpenAI stream method, timeout error thrown from SDK", async () => const stream = await model.stream( "How is your day going? Be extremely verbose." ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); } }).rejects.toThrow(); }); @@ -417,10 +441,10 @@ test("Function calling with streaming", async () => { expect(finalResult?.additional_kwargs?.function_call?.name).toBe( "get_current_weather" ); - console.log( - JSON.parse(finalResult?.additional_kwargs?.function_call?.arguments ?? "") - .location - ); + // console.log( + // JSON.parse(finalResult?.additional_kwargs?.function_call?.arguments ?? 
"") + // .location + // ); }); test("ChatOpenAI can cache generations", async () => { @@ -670,10 +694,10 @@ test("Test ChatOpenAI token usage reporting for streaming function calls", async handleLLMEnd: async (output) => { streamingTokenUsed = output.llmOutput?.estimatedTokenUsage?.totalTokens; - console.log("streaming usage", output.llmOutput?.estimatedTokenUsage); + // console.log("streaming usage", output.llmOutput?.estimatedTokenUsage); }, - handleLLMError: async (err) => { - console.error(err); + handleLLMError: async (_err) => { + // console.error(err); }, }, ], @@ -693,10 +717,10 @@ test("Test ChatOpenAI token usage reporting for streaming function calls", async { handleLLMEnd: async (output) => { nonStreamingTokenUsed = output.llmOutput?.tokenUsage?.totalTokens; - console.log("non-streaming usage", output.llmOutput?.tokenUsage); + // console.log("non-streaming usage", output.llmOutput?.tokenUsage); }, - handleLLMError: async (err) => { - console.error(err); + handleLLMError: async (_err) => { + // console.error(err); }, }, ], @@ -746,10 +770,10 @@ test("Test ChatOpenAI token usage reporting for streaming calls", async () => { handleLLMEnd: async (output) => { streamingTokenUsed = output.llmOutput?.estimatedTokenUsage?.totalTokens; - console.log("streaming usage", output.llmOutput?.estimatedTokenUsage); + // console.log("streaming usage", output.llmOutput?.estimatedTokenUsage); }, - handleLLMError: async (err) => { - console.error(err); + handleLLMError: async (_err) => { + // console.error(err); }, }, ], @@ -766,10 +790,10 @@ test("Test ChatOpenAI token usage reporting for streaming calls", async () => { { handleLLMEnd: async (output) => { nonStreamingTokenUsed = output.llmOutput?.tokenUsage?.totalTokens; - console.log("non-streaming usage", output.llmOutput?.estimated); + // console.log("non-streaming usage", output.llmOutput?.estimated); }, - handleLLMError: async (err) => { - console.error(err); + handleLLMError: async (_err) => { + // 
console.error(err); }, }, ], @@ -803,8 +827,10 @@ test("Test Azure ChatOpenAI with key credentials ", async () => { azureOpenAIApiDeploymentName: getEnvironmentVariable("AZURE_OPENAI_API_DEPLOYMENT_NAME") ?? "", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); }); test("Test ChatOpenAI with OpenAI API key credentials", async () => { @@ -819,6 +845,8 @@ test("Test ChatOpenAI with OpenAI API key credentials", async () => { azureOpenAIApiDeploymentName: "", }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([["system", "Say hi"], message]); - console.log(res); + // console.log(res); }); diff --git a/libs/langchain-azure-openai/src/tests/llms.int.test.ts b/libs/langchain-azure-openai/src/tests/llms.int.test.ts index 91ca1ca9da52..6004e0d1e2b7 100644 --- a/libs/langchain-azure-openai/src/tests/llms.int.test.ts +++ b/libs/langchain-azure-openai/src/tests/llms.int.test.ts @@ -14,8 +14,10 @@ test("Test OpenAI", async () => { maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with stop", async () => { @@ -23,8 +25,10 @@ test("Test OpenAI with stop", async () => { maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.call("Print hello world", ["world"]); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with stop in object", async () => { @@ -32,8 +36,10 @@ test("Test OpenAI with stop in object", async () => { maxTokens: 5, 
modelName: "gpt-3.5-turbo-instruct", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world", { stop: ["world"] }); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with timeout in call options", async () => { @@ -100,11 +106,13 @@ test("Test OpenAI with concurrency == 1", async () => { modelName: "gpt-3.5-turbo-instruct", maxConcurrency: 1, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await Promise.all([ model.invoke("Print hello world"), model.invoke("Print hello world"), ]); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with maxTokens -1", async () => { @@ -112,15 +120,17 @@ test("Test OpenAI with maxTokens -1", async () => { maxTokens: -1, modelName: "gpt-3.5-turbo-instruct", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.call("Print hello world", ["world"]); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with instruct model returns OpenAI", async () => { const model = new AzureOpenAI({ modelName: "gpt-3.5-turbo-instruct" }); expect(model).toBeInstanceOf(AzureOpenAI); const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); expect(typeof res).toBe("string"); }); @@ -130,7 +140,7 @@ test("Test OpenAI with versioned instruct model returns OpenAI", async () => { }); expect(model).toBeInstanceOf(AzureOpenAI); const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); expect(typeof res).toBe("string"); }); @@ -150,8 +160,10 @@ test("Test ChatOpenAI tokenUsage", async () => { }, }), }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Hello"); - console.log({ res }); + // 
console.log({ res }); expect(tokenUsage.promptTokens).toBe(1); }); @@ -172,7 +184,7 @@ test("Test OpenAI in streaming mode", async () => { }), }); const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); expect(nrNewTokens > 0).toBe(true); expect(res).toBe(streamedCompletion); @@ -198,10 +210,10 @@ test("Test OpenAI in streaming mode with multiple prompts", async () => { }), }); const res = await model.generate(["Print hello world", "print hello sea"]); - console.log( - res.generations, - res.generations.map((g) => g[0].generationInfo) - ); + // console.log( + // res.generations, + // res.generations.map((g) => g[0].generationInfo) + // ); expect(nrNewTokens > 0).toBe(true); expect(res.generations.length).toBe(2); @@ -227,10 +239,10 @@ test("Test OpenAIChat in streaming mode with multiple prompts", async () => { }), }); const res = await model.generate(["Print hello world", "print hello sea"]); - console.log( - res.generations, - res.generations.map((g) => g[0].generationInfo) - ); + // console.log( + // res.generations, + // res.generations.map((g) => g[0].generationInfo) + // ); expect(nrNewTokens > 0).toBe(true); expect(res.generations.length).toBe(2); @@ -250,11 +262,13 @@ test("Test OpenAI prompt value", async () => { expect(res.generations.length).toBe(1); for (const generation of res.generations) { expect(generation.length).toBe(1); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for (const g of generation) { - console.log(g.text); + // console.log(g.text); } } - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI stream method", async () => { @@ -282,8 +296,10 @@ test("Test OpenAI stream method with abort", async () => { signal: AbortSignal.timeout(1000), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); } 
}).rejects.toThrow(); }); @@ -297,8 +313,10 @@ test("Test OpenAI stream method with early break", async () => { "How is your day going? Be extremely verbose." ); let i = 0; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); i += 1; if (i > 5) { break; @@ -323,8 +341,10 @@ test("Test OpenAI with Token credentials ", async () => { modelName: "gpt-3.5-turbo-instruct", credentials, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); }); test("Test Azure OpenAI with key credentials ", async () => { @@ -337,8 +357,10 @@ test("Test Azure OpenAI with key credentials ", async () => { azureOpenAIApiDeploymentName: getEnvironmentVariable("AZURE_OPENAI_API_DEPLOYMENT_NAME") ?? "", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with OpenAI API key credentials ", async () => { @@ -352,6 +374,8 @@ test("Test OpenAI with OpenAI API key credentials ", async () => { azureOpenAIEndpoint: "", azureOpenAIApiDeploymentName: "", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); }); diff --git a/libs/langchain-baidu-qianfan/src/tests/chat_models.int.test.ts b/libs/langchain-baidu-qianfan/src/tests/chat_models.int.test.ts index 863e1a33afb3..b929a0061af7 100644 --- a/libs/langchain-baidu-qianfan/src/tests/chat_models.int.test.ts +++ b/libs/langchain-baidu-qianfan/src/tests/chat_models.int.test.ts @@ -9,7 +9,7 @@ test("invoke", async () => { }); const message = new 
HumanMessage("北京天气"); const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); expect(res.content.length).toBeGreaterThan(10); }); @@ -20,6 +20,6 @@ test("invokeWithStream", async () => { }); const message = new HumanMessage("等额本金和等额本息有什么区别?"); const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); expect(res.content.length).toBeGreaterThan(10); }); diff --git a/libs/langchain-baidu-qianfan/src/tests/embeddings.int.test.ts b/libs/langchain-baidu-qianfan/src/tests/embeddings.int.test.ts index 8ee42f508521..95c8c6ca0c63 100644 --- a/libs/langchain-baidu-qianfan/src/tests/embeddings.int.test.ts +++ b/libs/langchain-baidu-qianfan/src/tests/embeddings.int.test.ts @@ -4,13 +4,13 @@ import { BaiduQianfanEmbeddings } from "../embeddings.js"; test("embedQuery", async () => { const embeddings = new BaiduQianfanEmbeddings(); const res = await embeddings.embedQuery("Introduce the city Beijing"); - console.log({ res }); + // console.log({ res }); expect(res.length).toBeGreaterThan(10); }); test("embedDocuments", async () => { const embeddings = new BaiduQianfanEmbeddings(); const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]); - console.log({ res }); + // console.log({ res }); expect(res.length).toBe(2); }); diff --git a/libs/langchain-cloudflare/src/tests/chat_models.int.test.ts b/libs/langchain-cloudflare/src/tests/chat_models.int.test.ts index 201a515a933f..3e1c85f38883 100644 --- a/libs/langchain-cloudflare/src/tests/chat_models.int.test.ts +++ b/libs/langchain-cloudflare/src/tests/chat_models.int.test.ts @@ -19,15 +19,19 @@ describe("ChatCloudflareWorkersAI", () => { test("call", async () => { const chat = new ChatCloudflareWorkersAI(); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.call([message]); - console.log({ res }); + // console.log({ res }); }); 
test("generate", async () => { const chat = new ChatCloudflareWorkersAI(); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.generate([[message]]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("generate with streaming true", async () => { @@ -65,11 +69,11 @@ describe("ChatCloudflareWorkersAI", () => { const stream = await chat.stream([message]); const chunks = []; for await (const chunk of stream) { - console.log(chunk.content); + // console.log(chunk.content); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); - console.log(chunks.map((chunk) => chunk.content).join("")); + // console.log(chunks.map((chunk) => chunk.content).join("")); expect( chunks.map((chunk) => chunk.content).join("").length ).toBeGreaterThan(1); @@ -77,8 +81,10 @@ describe("ChatCloudflareWorkersAI", () => { test("custom messages", async () => { const chat = new ChatCloudflareWorkersAI(); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.call([new ChatMessage("Hello!", "user")]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("prompt templates", async () => { @@ -94,6 +100,8 @@ describe("ChatCloudflareWorkersAI", () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ job: "pirate", @@ -101,7 +109,7 @@ describe("ChatCloudflareWorkersAI", () => { }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); test("longer chain of messages", async () => { @@ -113,13 +121,15 @@ describe("ChatCloudflareWorkersAI", () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // 
@eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ text: "What did I just say my name was?", }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); test.skip("custom base url", async () => { @@ -135,12 +145,14 @@ describe("ChatCloudflareWorkersAI", () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ text: "What did I just say my name was?", }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); }); diff --git a/libs/langchain-cloudflare/src/tests/llms.int.test.ts b/libs/langchain-cloudflare/src/tests/llms.int.test.ts index 18c4665cb059..37e8db9398d8 100644 --- a/libs/langchain-cloudflare/src/tests/llms.int.test.ts +++ b/libs/langchain-cloudflare/src/tests/llms.int.test.ts @@ -9,8 +9,10 @@ const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND; test("Test CloudflareWorkersAI", async () => { const model = new CloudflareWorkersAI({}); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("1 + 1 ="); - console.log(res); + // console.log(res); }, 50000); test("generate with streaming true", async () => { @@ -28,7 +30,7 @@ test("generate with streaming true", async () => { callbacks: [ { handleLLMNewToken: (token) => { - console.log(token); + // console.log(token); tokens.push(token); }, }, @@ -48,10 +50,10 @@ test("Test CloudflareWorkersAI streaming", async () => { const chunks = []; for await (const chunk of stream) { chunks.push(chunk); - console.log(chunk); + // console.log(chunk); } expect(chunks.length).toBeGreaterThan(1); - console.log(chunks.join("")); + // console.log(chunks.join("")); }, 
50000); test.skip("Test custom base url", async () => { @@ -60,6 +62,8 @@ test.skip("Test custom base url", async () => { "CLOUDFLARE_ACCOUNT_ID" )}/lang-chainjs/workers-ai/`, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("1 + 1 ="); - console.log(res); + // console.log(res); }); diff --git a/libs/langchain-cohere/src/tests/chat_models.int.test.ts b/libs/langchain-cohere/src/tests/chat_models.int.test.ts index 857283937850..22c00832c86b 100644 --- a/libs/langchain-cohere/src/tests/chat_models.int.test.ts +++ b/libs/langchain-cohere/src/tests/chat_models.int.test.ts @@ -12,7 +12,7 @@ import { ChatCohere } from "../chat_models.js"; test("ChatCohere can invoke", async () => { const model = new ChatCohere(); const response = await model.invoke([new HumanMessage("Hello world")]); - console.log(response.additional_kwargs); + // console.log(response.additional_kwargs); expect(response.content).toBeTruthy(); expect(response.additional_kwargs).toBeTruthy(); }); @@ -22,7 +22,7 @@ test("ChatCohere can invoke", async () => { test("ChatCohere can count tokens", async () => { const model = new ChatCohere(); const response = await model.generate([[new HumanMessage("Hello world")]]); - console.log(response); + // console.log(response); expect(response.llmOutput?.estimatedTokenUsage).toBeTruthy(); expect( response.llmOutput?.estimatedTokenUsage.completionTokens @@ -44,7 +44,7 @@ test("ChatCohere can stream", async () => { for await (const streamItem of stream) { tokens += streamItem.content; streamIters += 1; - console.log(tokens); + // console.log(tokens); } expect(streamIters).toBeGreaterThan(1); }); @@ -82,7 +82,7 @@ test("Stream token count usage_metadata", async () => { } lastRes = chunk; } - console.log(res); + // console.log(res); expect(res?.usage_metadata).toBeDefined(); if (!res?.usage_metadata) { return; @@ -117,7 +117,7 @@ test("streamUsage excludes token usage", async () => { } 
lastRes = chunk; } - console.log(res); + // console.log(res); expect(res?.usage_metadata).not.toBeDefined(); if (res?.usage_metadata) { return; @@ -135,7 +135,7 @@ test("Invoke token count usage_metadata", async () => { temperature: 0, }); const res = await model.invoke("Why is the sky blue? Be concise."); - console.log(res); + // console.log(res); expect(res?.usage_metadata).toBeDefined(); if (!res?.usage_metadata) { return; @@ -172,7 +172,7 @@ test("Test model tool calling", async () => { ), ]; const res = await modelWithTools.invoke(messages); - console.log(res); + // console.log(res); expect(res?.usage_metadata).toBeDefined(); if (!res?.usage_metadata) { return; @@ -192,7 +192,7 @@ test("Test model tool calling", async () => { ) ); const resWithToolResults = await modelWithTools.invoke(messages); - console.log(resWithToolResults); + // console.log(resWithToolResults); expect(resWithToolResults?.usage_metadata).toBeDefined(); if (!resWithToolResults?.usage_metadata) { return; diff --git a/libs/langchain-cohere/src/tests/llms.int.test.ts b/libs/langchain-cohere/src/tests/llms.int.test.ts index 69a115a4e458..36620c5eaeec 100644 --- a/libs/langchain-cohere/src/tests/llms.int.test.ts +++ b/libs/langchain-cohere/src/tests/llms.int.test.ts @@ -8,10 +8,12 @@ const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND; test("test invoke", async () => { const cohere = new Cohere({}); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await cohere.invoke( "What is a good name for a company that makes colorful socks?" 
); - console.log({ result }); + // console.log({ result }); }); test("test invoke with callback", async () => { diff --git a/libs/langchain-cohere/src/tests/rerank.int.test.ts b/libs/langchain-cohere/src/tests/rerank.int.test.ts index ce09c662d610..01eb44bddf8a 100644 --- a/libs/langchain-cohere/src/tests/rerank.int.test.ts +++ b/libs/langchain-cohere/src/tests/rerank.int.test.ts @@ -27,7 +27,7 @@ test("CohereRerank can indeed rerank documents with compressDocuments method", a documents, query ); - console.log(rerankedDocuments); + // console.log(rerankedDocuments); expect(rerankedDocuments).toHaveLength(3); }); @@ -41,6 +41,6 @@ test("CohereRerank can indeed rerank documents with rerank method", async () => documents.map((doc) => doc.pageContent), query ); - console.log(rerankedDocuments); + // console.log(rerankedDocuments); expect(rerankedDocuments).toHaveLength(3); }); diff --git a/libs/langchain-community/src/callbacks/tests/llmonitor.int.test.ts b/libs/langchain-community/src/callbacks/tests/llmonitor.int.test.ts index 316e652618ad..ee2a1b2a33a7 100644 --- a/libs/langchain-community/src/callbacks/tests/llmonitor.int.test.ts +++ b/libs/langchain-community/src/callbacks/tests/llmonitor.int.test.ts @@ -10,18 +10,22 @@ test.skip("Test traced chat call with tags", async () => { callbacks: [new LLMonitorHandler({ verbose: true })], }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const response = await chat.invoke([ new HumanMessage( "What is a good name for a company that makes colorful socks?" ), ]); - console.log(response.content); + // console.log(response.content); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const response2 = await chat.invoke([ new SystemMessage( "You are a helpful assistant that translates English to French." 
), new HumanMessage("Translate: I love programming."), ]); - console.log(response2.content); + // console.log(response2.content); }); diff --git a/libs/langchain-community/src/chains/graph_qa/tests/cypher.int.test.ts b/libs/langchain-community/src/chains/graph_qa/tests/cypher.int.test.ts index 1607a06926de..5175abb59bd6 100644 --- a/libs/langchain-community/src/chains/graph_qa/tests/cypher.int.test.ts +++ b/libs/langchain-community/src/chains/graph_qa/tests/cypher.int.test.ts @@ -110,8 +110,10 @@ describe.skip("testCypherGeneratingRun", () => { const expectedOutput = "Bruce Willis"; expect(output.result.includes(expectedOutput)).toBeTruthy(); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const { query } = output[INTERMEDIATE_STEPS_KEY][0]; - console.log(query); + // console.log(query); // const expectedQuery = // "\n\nMATCH (a:Actor)-[:ACTED_IN]->" + // "(m:Movie) WHERE m.title = 'Pulp Fiction' RETURN a.name"; diff --git a/libs/langchain-community/src/chat_models/tests/chatalitongyi.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatalitongyi.int.test.ts index 917d8b159bc3..cb6abff6154a 100644 --- a/libs/langchain-community/src/chat_models/tests/chatalitongyi.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatalitongyi.int.test.ts @@ -62,7 +62,7 @@ const runTest = async ({ } const res = await chat.invoke(messages); - console.log({ res }); + // console.log({ res }); // test streaming call const stream = await chat.stream( diff --git a/libs/langchain-community/src/chat_models/tests/chatbaiduwenxin.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatbaiduwenxin.int.test.ts index c1b3e634f611..c286d29eb9b0 100644 --- a/libs/langchain-community/src/chat_models/tests/chatbaiduwenxin.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatbaiduwenxin.int.test.ts @@ -76,7 +76,7 @@ const runTest = async ({ } const res = await chat.invoke(messages); - console.log({ 
res }); + // console.log({ res }); if (config.streaming) { expect(nrNewTokens > 0).toBe(true); diff --git a/libs/langchain-community/src/chat_models/tests/chatbedrock.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatbedrock.int.test.ts index 5bfdc15fc774..926a47e0fd2b 100644 --- a/libs/langchain-community/src/chat_models/tests/chatbedrock.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatbedrock.int.test.ts @@ -186,7 +186,7 @@ async function testChatModel( }); const res = await bedrock.invoke([new HumanMessage(message)]); - console.log(res, res.content); + // console.log(res, res.content); expect(res).toBeDefined(); if (trace && guardrailIdentifier && guardrailVersion) { @@ -255,7 +255,7 @@ async function testChatStreamingModel( ]); const chunks = []; for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); @@ -306,8 +306,8 @@ async function testChatHandleLLMNewToken( handleLLMNewToken: (token) => { tokens.push(token); }, - handleLLMEnd(output) { - console.log(output); + handleLLMEnd(_output) { + // console.log(output); }, }, ], @@ -355,10 +355,12 @@ test.skip("Tool calling agent with Anthropic", async () => { tools, }); const input = "what is the current weather in SF?"; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await agentExecutor.invoke({ input, }); - console.log(result); + // console.log(result); }); test.skip.each([ @@ -382,7 +384,7 @@ test.skip.each([ }); const res = await bedrock.invoke([new HumanMessage("What is your name?")]); - console.log(res); + // console.log(res); expect(res.content.length).toBeGreaterThan(1); }); @@ -440,7 +442,7 @@ test.skip(".bind tools", async () => { const response = await modelWithTools.invoke( "Whats the weather like in san francisco?" 
); - console.log(response); + // console.log(response); if (!response.tool_calls?.[0]) { throw new Error("No tool calls found in response"); } @@ -479,7 +481,7 @@ test.skip(".bindTools with openai tool format", async () => { const response = await modelWithTools.invoke( "Whats the weather like in san francisco?" ); - console.log(response); + // console.log(response); if (!response.tool_calls?.[0]) { throw new Error("No tool calls found in response"); } diff --git a/libs/langchain-community/src/chat_models/tests/chatdeepinfra.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatdeepinfra.int.test.ts index e8b8cfbc97af..b2b324e6744e 100644 --- a/libs/langchain-community/src/chat_models/tests/chatdeepinfra.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatdeepinfra.int.test.ts @@ -6,14 +6,18 @@ describe("ChatDeepInfra", () => { test("call", async () => { const deepInfraChat = new ChatDeepInfra({ maxTokens: 20 }); const message = new HumanMessage("1 + 1 = "); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await deepInfraChat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); test("generate", async () => { const deepInfraChat = new ChatDeepInfra({ maxTokens: 20 }); const message = new HumanMessage("1 + 1 = "); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await deepInfraChat.generate([[message]]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); }); diff --git a/libs/langchain-community/src/chat_models/tests/chatfireworks.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatfireworks.int.test.ts index de7ba0027aa7..7cd8abb2fbf1 100644 --- a/libs/langchain-community/src/chat_models/tests/chatfireworks.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatfireworks.int.test.ts @@ -15,21 +15,27 @@ 
describe.skip("ChatFireworks", () => { test("call", async () => { const chat = new ChatFireworks(); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); test("generate", async () => { const chat = new ChatFireworks(); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.generate([[message]]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("custom messages", async () => { const chat = new ChatFireworks(); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([new ChatMessage("Hello!", "user")]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("prompt templates", async () => { @@ -45,6 +51,8 @@ describe.skip("ChatFireworks", () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ job: "pirate", @@ -52,7 +60,7 @@ describe.skip("ChatFireworks", () => { }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); test("longer chain of messages", async () => { @@ -64,13 +72,15 @@ describe.skip("ChatFireworks", () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ text: "What did I just say my name was?", }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); 
test("Tool calling", async () => { @@ -98,7 +108,9 @@ describe.skip("ChatFireworks", () => { }, ], }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await chat.invoke("What is the current weather in SF?"); - console.log(result); + // console.log(result); }); }); diff --git a/libs/langchain-community/src/chat_models/tests/chatfriendli.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatfriendli.int.test.ts index d59baf7a9b72..765b39bef843 100644 --- a/libs/langchain-community/src/chat_models/tests/chatfriendli.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatfriendli.int.test.ts @@ -6,14 +6,18 @@ describe.skip("ChatFriendli", () => { test("call", async () => { const chatFriendli = new ChatFriendli({ maxTokens: 20 }); const message = new HumanMessage("1 + 1 = "); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chatFriendli.invoke([message]); - console.log({ res }); + // console.log({ res }); }); test("generate", async () => { const chatFriendli = new ChatFriendli({ maxTokens: 20 }); const message = new HumanMessage("1 + 1 = "); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chatFriendli.generate([[message]]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); }); diff --git a/libs/langchain-community/src/chat_models/tests/chatgooglepalm.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatgooglepalm.int.test.ts index 752042420ad2..c9e15956e678 100644 --- a/libs/langchain-community/src/chat_models/tests/chatgooglepalm.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatgooglepalm.int.test.ts @@ -14,8 +14,10 @@ test.skip("Test ChatGooglePalm", async () => { maxRetries: 1, }); const message = new HumanMessage("Hello!"); + // 
@eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); test.skip("Test ChatGooglePalm generate", async () => { @@ -23,8 +25,10 @@ test.skip("Test ChatGooglePalm generate", async () => { maxRetries: 1, }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.generate([[message]]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test.skip("ChatGooglePalm, prompt templates", async () => { @@ -48,6 +52,8 @@ test.skip("ChatGooglePalm, prompt templates", async () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ job: "pirate", @@ -55,7 +61,7 @@ test.skip("ChatGooglePalm, prompt templates", async () => { }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); test.skip("ChatGooglePalm, longer chain of messages", async () => { @@ -74,13 +80,15 @@ test.skip("ChatGooglePalm, longer chain of messages", async () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ text: "What did I just say my name was?", }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); test.skip("ChatGooglePalm, chain of messages on code", async () => { @@ -95,11 +103,13 @@ test.skip("ChatGooglePalm, chain of messages on code", async () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // 
@ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ text: "How can I write a for loop counting to 10?", }), ]); - console.log(JSON.stringify(responseA.generations, null, 1)); + // console.log(JSON.stringify(responseA.generations, null, 1)); }); diff --git a/libs/langchain-community/src/chat_models/tests/chatgooglevertexai.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatgooglevertexai.int.test.ts index 255ff7eb8cf6..93bff88e86c0 100644 --- a/libs/langchain-community/src/chat_models/tests/chatgooglevertexai.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatgooglevertexai.int.test.ts @@ -13,8 +13,10 @@ describe("ChatGoogleVertexAI", () => { test("call", async () => { const chat = new ChatGoogleVertexAI(); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); test("32k", async () => { @@ -22,21 +24,27 @@ describe("ChatGoogleVertexAI", () => { model: "chat-bison-32k", }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); test("generate", async () => { const chat = new ChatGoogleVertexAI(); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.generate([[message]]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("custom messages", async () => { const chat = new ChatGoogleVertexAI(); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([new ChatMessage("Hello!", "user")]); - 
console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("prompt templates", async () => { @@ -52,6 +60,8 @@ describe("ChatGoogleVertexAI", () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ job: "pirate", @@ -59,7 +69,7 @@ describe("ChatGoogleVertexAI", () => { }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); test("longer chain of messages", async () => { @@ -71,13 +81,15 @@ describe("ChatGoogleVertexAI", () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ text: "What did I just say my name was?", }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); test("code, chain of messages", async () => { @@ -90,13 +102,15 @@ describe("ChatGoogleVertexAI", () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ text: "How can I write a for loop counting to 10?", }), ]); - console.log(JSON.stringify(responseA.generations, null, 1)); + // console.log(JSON.stringify(responseA.generations, null, 1)); }); test("stream method", async () => { @@ -106,7 +120,7 @@ describe("ChatGoogleVertexAI", () => { ); const chunks = []; for await (const chunk of stream) { - console.log("chunk", chunk); + // console.log("chunk", chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); diff --git a/libs/langchain-community/src/chat_models/tests/chatgooglevertexai.test.ts 
b/libs/langchain-community/src/chat_models/tests/chatgooglevertexai.test.ts index 9c3ee527d430..23ace5a83958 100644 --- a/libs/langchain-community/src/chat_models/tests/chatgooglevertexai.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatgooglevertexai.test.ts @@ -51,7 +51,7 @@ test("Google examples", async () => { examples, }); const instance = model.createInstance(messages); - console.log(JSON.stringify(instance, null, 2)); + // console.log(JSON.stringify(instance, null, 2)); expect(instance.examples?.[0].input.author).toBe("user"); expect(instance.examples?.[0].output.author).toBe("bot"); }); diff --git a/libs/langchain-community/src/chat_models/tests/chatgooglevertexai_web.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatgooglevertexai_web.int.test.ts index b17238308444..4eec451f13e9 100644 --- a/libs/langchain-community/src/chat_models/tests/chatgooglevertexai_web.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatgooglevertexai_web.int.test.ts @@ -16,8 +16,10 @@ describe("ChatGoogleVertexAIWeb", () => { test("call", async () => { const chat = new ChatGoogleVertexAI(); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); test("32k", async () => { @@ -25,21 +27,27 @@ describe("ChatGoogleVertexAIWeb", () => { model: "chat-bison-32k", }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); test("generate", async () => { const chat = new ChatGoogleVertexAI(); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.generate([[message]]); - 
console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("custom messages", async () => { const chat = new ChatGoogleVertexAI(); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([new ChatMessage("Hello!", "user")]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("prompt templates", async () => { @@ -55,6 +63,8 @@ describe("ChatGoogleVertexAIWeb", () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ job: "pirate", @@ -62,7 +72,7 @@ describe("ChatGoogleVertexAIWeb", () => { }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); test("longer chain of messages", async () => { @@ -74,13 +84,15 @@ describe("ChatGoogleVertexAIWeb", () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ text: "What did I just say my name was?", }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }); test("code, chain of messages", async () => { @@ -93,13 +105,15 @@ describe("ChatGoogleVertexAIWeb", () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ text: "How can I write a for loop counting to 10?", }), ]); - console.log(JSON.stringify(responseA.generations, null, 1)); + // console.log(JSON.stringify(responseA.generations, null, 1)); }); test("stream method", async () => { @@ -107,7 
+121,7 @@ describe("ChatGoogleVertexAIWeb", () => { const stream = await model.stream("Print hello world."); const chunks = []; for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); diff --git a/libs/langchain-community/src/chat_models/tests/chatgooglevertexai_web.test.ts b/libs/langchain-community/src/chat_models/tests/chatgooglevertexai_web.test.ts index 5858e525fd84..cfaa03170dac 100644 --- a/libs/langchain-community/src/chat_models/tests/chatgooglevertexai_web.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatgooglevertexai_web.test.ts @@ -62,7 +62,7 @@ test("Google examples", async () => { }, }); const instance = model.createInstance(messages); - console.log(JSON.stringify(instance, null, 2)); + // console.log(JSON.stringify(instance, null, 2)); expect(instance.examples?.[0].input.author).toBe("user"); expect(instance.examples?.[0].output.author).toBe("bot"); }); diff --git a/libs/langchain-community/src/chat_models/tests/chatllama_cpp.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatllama_cpp.int.test.ts index 0f9b1608461a..488e15c0874e 100644 --- a/libs/langchain-community/src/chat_models/tests/chatllama_cpp.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatllama_cpp.int.test.ts @@ -14,22 +14,28 @@ const llamaPath = getEnvironmentVariable("LLAMA_PATH")!; test.skip("Test predict", async () => { const llamaCpp = new ChatLlamaCpp({ modelPath: llamaPath }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const response = await llamaCpp.invoke("Where do Llamas come from?"); - console.log({ response }); + // console.log({ response }); }); test.skip("Test call", async () => { const llamaCpp = new ChatLlamaCpp({ modelPath: llamaPath }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const response = await llamaCpp.invoke([ new 
HumanMessage({ content: "My name is Nigel." }), ]); - console.log({ response }); + // console.log({ response }); }); test.skip("Test multiple messages", async () => { const llamaCpp = new ChatLlamaCpp({ modelPath: llamaPath }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const response = await llamaCpp.invoke([ new HumanMessage("My name is Nigel."), new AIMessage( @@ -37,19 +43,21 @@ test.skip("Test multiple messages", async () => { ), new HumanMessage("What did I say my name was?"), ]); - console.log({ response }); + // console.log({ response }); }); test.skip("Test system message", async () => { const llamaCpp = new ChatLlamaCpp({ modelPath: llamaPath }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const response = await llamaCpp.invoke([ new SystemMessage( "You are a pirate, responses must be very verbose and in pirate dialect, add 'Arr, m'hearty!' to each sentence." ), new HumanMessage("Tell me where Llamas come from?"), ]); - console.log({ response }); + // console.log({ response }); }); test.skip("test streaming call", async () => { @@ -62,7 +70,7 @@ test.skip("test streaming call", async () => { const chunks = []; for await (const chunk of stream) { chunks.push(chunk.content); - console.log(chunk.content); + // console.log(chunk.content); } expect(chunks.length).toBeGreaterThan(1); @@ -81,7 +89,7 @@ test.skip("test multi-mesage streaming call", async () => { const chunks = []; for await (const chunk of stream) { chunks.push(chunk.content); - console.log(chunk.content); + // console.log(chunk.content); } expect(chunks.length).toBeGreaterThan(1); @@ -107,7 +115,7 @@ test.skip("test multi-mesage streaming call and abort after 5s", async () => { callbacks: [ { handleLLMNewToken(token) { - console.log(token); + // console.log(token); chunks.push(token); }, }, diff --git a/libs/langchain-community/src/chat_models/tests/chatmoonshot.int.test.ts 
b/libs/langchain-community/src/chat_models/tests/chatmoonshot.int.test.ts index c0db3719c3c2..8f0e22cc6b5b 100644 --- a/libs/langchain-community/src/chat_models/tests/chatmoonshot.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatmoonshot.int.test.ts @@ -61,7 +61,7 @@ const runTest = async ({ } const res = await chat.invoke(messages, passedConfig); - console.log({ res }); + // console.log({ res }); if (passedConfig.streaming) { expect(nrNewTokens > 0).toBe(true); diff --git a/libs/langchain-community/src/chat_models/tests/chatollama.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatollama.int.test.ts index ae60c1fc6188..174bd9d99048 100644 --- a/libs/langchain-community/src/chat_models/tests/chatollama.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatollama.int.test.ts @@ -12,10 +12,12 @@ import { ChatOllama } from "../ollama.js"; test.skip("test call", async () => { const ollama = new ChatOllama({}); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await ollama.invoke( "What is a good name for a company that makes colorful socks?" 
); - console.log({ result }); + // console.log({ result }); }); test.skip("test call with callback", async () => { @@ -70,10 +72,14 @@ test.skip("should abort the request", async () => { test.skip("Test multiple messages", async () => { const model = new ChatOllama({ baseUrl: "http://localhost:11434" }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke([ new HumanMessage({ content: "My name is Jonas" }), ]); - console.log({ res }); + // console.log({ res }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res2 = await model.invoke([ new HumanMessage("My name is Jonas"), new AIMessage( @@ -81,7 +87,7 @@ test.skip("Test multiple messages", async () => { ), new HumanMessage("What did I say my name was?"), ]); - console.log({ res2 }); + // console.log({ res2 }); }); test.skip("should stream through with a bytes output parser", async () => { @@ -106,7 +112,7 @@ test.skip("should stream through with a bytes output parser", async () => { for await (const chunk of stream) { chunks.push(chunk); } - console.log(chunks.join("")); + // console.log(chunks.join("")); expect(chunks.length).toBeGreaterThan(1); }); @@ -140,6 +146,8 @@ test.skip("Test ChatOllama with an image", async () => { model: "llava", baseUrl: "http://127.0.0.1:11434", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([ new HumanMessage({ content: [ @@ -154,7 +162,7 @@ test.skip("Test ChatOllama with an image", async () => { ], }), ]); - console.log({ res }); + // console.log({ res }); }); test.skip("test max tokens (numPredict)", async () => { @@ -171,7 +179,7 @@ test.skip("test max tokens (numPredict)", async () => { response += s; } - console.log({ numTokens, response }); + // console.log({ numTokens, response }); // Ollama doesn't always stream back the exact number of tokens, so we // check 
for a number which is slightly above the `numPredict`. expect(numTokens).toBeLessThanOrEqual(12); diff --git a/libs/langchain-community/src/chat_models/tests/chatpremai.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatpremai.int.test.ts index 8f77952cf2ec..fad3583ac4c6 100644 --- a/libs/langchain-community/src/chat_models/tests/chatpremai.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatpremai.int.test.ts @@ -42,7 +42,7 @@ describe.skip("ChatPrem", () => { iters += 1; finalRes += chunk.content; } - console.log({ finalRes, iters }); + // console.log({ finalRes, iters }); expect(iters).toBeGreaterThan(1); }); }); diff --git a/libs/langchain-community/src/chat_models/tests/chattencenthunyuan.int.test.ts b/libs/langchain-community/src/chat_models/tests/chattencenthunyuan.int.test.ts index fa816f49a255..0ed2aa583d49 100644 --- a/libs/langchain-community/src/chat_models/tests/chattencenthunyuan.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chattencenthunyuan.int.test.ts @@ -57,7 +57,7 @@ const runTest = async ({ } const res = await chat.invoke(messages); - console.log({ res }); + // console.log({ res }); if (config.streaming) { expect(nrNewTokens > 0).toBe(true); diff --git a/libs/langchain-community/src/chat_models/tests/chattogetherai.int.test.ts b/libs/langchain-community/src/chat_models/tests/chattogetherai.int.test.ts index 57d1fd00b52e..2dd1c5bbf476 100644 --- a/libs/langchain-community/src/chat_models/tests/chattogetherai.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chattogetherai.int.test.ts @@ -17,7 +17,7 @@ describe("ChatTogetherAI", () => { const chat = new ChatTogetherAI(); const message = new HumanMessage("Hello!"); const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); expect(res.content.length).toBeGreaterThan(10); }); @@ -25,14 +25,14 @@ describe("ChatTogetherAI", () => { const chat = new ChatTogetherAI(); const message = new 
HumanMessage("Hello!"); const res = await chat.generate([[message]]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); expect(res.generations[0][0].text.length).toBeGreaterThan(10); }); test("custom messages", async () => { const chat = new ChatTogetherAI(); const res = await chat.invoke([new ChatMessage("Hello!", "user")]); - console.log({ res }); + // console.log({ res }); expect(res.content.length).toBeGreaterThan(10); }); @@ -56,7 +56,7 @@ describe("ChatTogetherAI", () => { }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); expect(responseA.generations[0][0].text.length).toBeGreaterThan(10); }); @@ -75,7 +75,7 @@ describe("ChatTogetherAI", () => { }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); expect(responseA.generations[0][0].text.length).toBeGreaterThan(10); }); @@ -103,7 +103,7 @@ describe("ChatTogetherAI", () => { ["human", "Please list this output in order of DESC [1, 4, 2, 8]."], ]); const res = await prompt.pipe(chat).invoke({}); - console.log({ res }); + // console.log({ res }); expect(typeof res.content).toEqual("string"); expect(JSON.parse(res.content as string)).toMatchObject({ orderedArray: expect.any(Array), @@ -139,7 +139,7 @@ describe("ChatTogetherAI", () => { ["human", "What is 1273926 times 27251?"], ]); const res = await prompt.pipe(chat).invoke({}); - console.log({ res }); + // console.log({ res }); expect(res.additional_kwargs.tool_calls?.length).toBeGreaterThan(0); expect( JSON.parse(res.additional_kwargs.tool_calls?.[0].function.arguments ?? 
"") diff --git a/libs/langchain-community/src/chat_models/tests/chatzhipuai.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatzhipuai.int.test.ts index 456d21f9911d..637396389e2d 100644 --- a/libs/langchain-community/src/chat_models/tests/chatzhipuai.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatzhipuai.int.test.ts @@ -31,7 +31,7 @@ test.skip("Test chat.stream work fine", async () => { for await (const chunk of stream) { chunks.push(chunk); } - console.log(chunks); + // console.log(chunks); expect(chunks.length).toBeGreaterThan(0); }); @@ -76,7 +76,7 @@ const runTest = async ({ } const res = await chat.invoke(messages, passedConfig); - console.log({ res }); + // console.log({ res }); if (passedConfig.streaming) { expect(nrNewTokens > 0).toBe(true); diff --git a/libs/langchain-community/src/chat_models/tests/minimax.int.test.ts b/libs/langchain-community/src/chat_models/tests/minimax.int.test.ts index 944a6a174beb..4f7aab06bab5 100644 --- a/libs/langchain-community/src/chat_models/tests/minimax.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/minimax.int.test.ts @@ -25,16 +25,20 @@ test.skip("Test ChatMinimax", async () => { ], }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); test.skip("Test ChatMinimax with SystemChatMessage", async () => { const chat = new ChatMinimax(); const system_message = new SystemMessage("You are to chat with a user."); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([system_message, message]); - console.log({ res }); + // console.log({ res }); }); test.skip("Test ChatMinimax Generate", async () => { @@ -52,11 +56,11 @@ test.skip("Test ChatMinimax Generate", async () => { for 
(const generation of res.generations) { expect(generation.length).toBe(1); for (const message of generation) { - console.log(message.text); + // console.log(message.text); expect(typeof message.text).toBe("string"); } } - console.log({ res }); + // console.log({ res }); }); test.skip("Test ChatMinimax Generate throws when one of the calls fails", async () => { @@ -95,8 +99,10 @@ test.skip("Test ChatMinimax tokenUsage", async () => { }), }); const message = new HumanMessage("Hello"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke([message]); - console.log({ res }); + // console.log({ res }); expect(tokenUsage.totalTokens).toBeGreaterThan(0); }); @@ -120,12 +126,14 @@ test.skip("Test ChatMinimax tokenUsage with a batch", async () => { }, }), }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.generate([ [new HumanMessage("Hello")], [new HumanMessage("Hi")], ]); - console.log({ tokenUsage }); - console.log(res); + // console.log({ tokenUsage }); + // console.log(res); expect(tokenUsage.totalTokens).toBeGreaterThan(0); }); @@ -154,7 +162,7 @@ test.skip("Test ChatMinimax in streaming mode", async () => { }); const message = new HumanMessage("Hello!"); const result = await model.invoke([message]); - console.log(result); + // console.log(result); expect(nrNewTokens > 0).toBe(true); expect(result.content).toBe(streamedCompletion); @@ -175,6 +183,8 @@ test.skip("OpenAI Chat, docs, prompt templates", async () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ input_language: "English", @@ -183,7 +193,7 @@ test.skip("OpenAI Chat, docs, prompt templates", async () => { }), ]); - console.log(responseA.generations); + // 
console.log(responseA.generations); }, 5000); test.skip("Test OpenAI with signal in call options", async () => { @@ -207,8 +217,10 @@ test.skip("Test OpenAI with specific roles in ChatMessage", async () => { "system" ); const user_message = new ChatMessage("Hello!", "user"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([system_message, user_message]); - console.log({ res }); + // console.log({ res }); }); test.skip("Function calling ", async () => { @@ -245,7 +257,7 @@ test.skip("Function calling ", async () => { }), ]); - console.log(result); + // console.log(result); expect(result.additional_kwargs.function_call?.name).toBe("get_weather"); }); test.skip("Test ChatMinimax Function calling ", async () => { @@ -282,7 +294,7 @@ test.skip("Test ChatMinimax Function calling ", async () => { }), ]); - console.log(result); + // console.log(result); expect(result.additional_kwargs.function_call?.name).toBe("get_weather"); }); @@ -315,7 +327,7 @@ test.skip("Test ChatMinimax Glyph", async () => { const messages = await messagesTemplate.formatMessages({ text: "你好" }); const result = await model.invoke(messages); - console.log(result); + // console.log(result); expect(result.content).toMatch(/The translated text:.*/); }); test.skip("Test ChatMinimax Plugins", async () => { @@ -331,11 +343,13 @@ test.skip("Test ChatMinimax Plugins", async () => { plugins: ["plugin_web_search"], }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await model.invoke([ new HumanMessage({ content: " What is the weather like in NewYork tomorrow?", }), ]); - console.log(result); + // console.log(result); }); diff --git a/libs/langchain-community/src/document_loaders/tests/figma.int.test.ts b/libs/langchain-community/src/document_loaders/tests/figma.int.test.ts index bb7da95e6887..4926f07c7b66 100644 --- 
a/libs/langchain-community/src/document_loaders/tests/figma.int.test.ts +++ b/libs/langchain-community/src/document_loaders/tests/figma.int.test.ts @@ -9,6 +9,8 @@ test.skip("Test FigmaFileLoader", async () => { nodeIds: (process.env.FIGMA_NODE_IDS ?? "").split(","), fileKey: process.env.FIGMA_FILE_KEY!, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const documents = await loader.load(); - console.log(documents[0].pageContent); + // console.log(documents[0].pageContent); }); diff --git a/libs/langchain-community/src/document_loaders/tests/gitbook.int.test.ts b/libs/langchain-community/src/document_loaders/tests/gitbook.int.test.ts index 04e8b3f26f2f..60c312dc69b2 100644 --- a/libs/langchain-community/src/document_loaders/tests/gitbook.int.test.ts +++ b/libs/langchain-community/src/document_loaders/tests/gitbook.int.test.ts @@ -6,14 +6,18 @@ test("Test GitbookLoader", async () => { "https://docs.gitbook.com/product-tour/navigation" ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const docs = await loader.load(); - console.log("Loaded", docs.length, "Gitbook documents"); + // console.log("Loaded", docs.length, "Gitbook documents"); }); test("Test GitbookLoader with shouldLoadAllPaths", async () => { const loader = new GitbookLoader("https://docs.maildrop.cc", { shouldLoadAllPaths: true, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const docs = await loader.load(); - console.log("Loaded", docs.length, "Gitbook documents"); + // console.log("Loaded", docs.length, "Gitbook documents"); }); diff --git a/libs/langchain-community/src/document_loaders/tests/github.int.test.ts b/libs/langchain-community/src/document_loaders/tests/github.int.test.ts index 33a75fe08b45..245b4c668d5c 100644 --- a/libs/langchain-community/src/document_loaders/tests/github.int.test.ts +++ 
b/libs/langchain-community/src/document_loaders/tests/github.int.test.ts @@ -15,7 +15,7 @@ test("Test GithubRepoLoader", async () => { documents.filter((document) => document.metadata.source === "README.md") .length ).toBe(1); - console.log(documents[0].pageContent); + // console.log(documents[0].pageContent); }); test("Test ignoreFiles with GithubRepoLoader", async () => { @@ -37,7 +37,7 @@ test("Test ignoreFiles with GithubRepoLoader", async () => { documents.filter((document) => document.metadata.source === "README.md") .length ).toBe(0); - console.log(documents[0].pageContent); + // console.log(documents[0].pageContent); }); test("Test ignorePaths with GithubRepoLoader", async () => { @@ -59,7 +59,7 @@ test("Test ignorePaths with GithubRepoLoader", async () => { documents.filter((document) => document.metadata.source.endsWith(".md")) .length ).toBe(0); - console.log(documents[0].pageContent); + // console.log(documents[0].pageContent); }); test("Test streaming documents from GithubRepoLoader", async () => { diff --git a/libs/langchain-community/src/document_loaders/tests/notionapi.int.test.ts b/libs/langchain-community/src/document_loaders/tests/notionapi.int.test.ts index 759e5e556d3a..04bd667d7661 100644 --- a/libs/langchain-community/src/document_loaders/tests/notionapi.int.test.ts +++ b/libs/langchain-community/src/document_loaders/tests/notionapi.int.test.ts @@ -9,17 +9,21 @@ test.skip("Test Notion API Loader Page", async () => { auth: process.env.NOTION_INTEGRATION_TOKEN, }, id: process.env.NOTION_PAGE_ID ?? 
"", + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var onDocumentLoaded: (current, total, currentTitle, rootTitle) => { - console.log( - `Loaded ${currentTitle} in ${rootTitle}: (${current}/${total})` - ); + // console.log( + // `Loaded ${currentTitle} in ${rootTitle}: (${current}/${total})` + // ); }, }); const docs = await loader.load(); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const titles = docs.map((doc) => doc.metadata.properties._title); - console.log("Titles:", titles); - console.log(`Loaded ${docs.length} pages`); + // console.log("Titles:", titles); + // console.log(`Loaded ${docs.length} pages`); }); test.skip("Test Notion API Loader Database", async () => { @@ -28,17 +32,21 @@ test.skip("Test Notion API Loader Database", async () => { auth: process.env.NOTION_INTEGRATION_TOKEN, }, id: process.env.NOTION_DATABASE_ID ?? "", + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var onDocumentLoaded: (current, total, currentTitle, rootTitle) => { - console.log( - `Loaded ${currentTitle} in ${rootTitle}: (${current}/${total})` - ); + // console.log( + // `Loaded ${currentTitle} in ${rootTitle}: (${current}/${total})` + // ); }, }); const docs = await loader.load(); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const titles = docs.map((doc) => doc.metadata.properties._title); - console.log("Titles:", titles); - console.log(`Loaded ${docs.length} pages from the database`); + // console.log("Titles:", titles); + // console.log(`Loaded ${docs.length} pages from the database`); }); test.skip("Test Notion API Loader onDocumentLoad", async () => { @@ -60,7 +68,7 @@ test.skip("Test Notion API Loader onDocumentLoad", async () => { expect(onDocumentLoadedCheck.length).toBe(3); - console.log(onDocumentLoadedCheck); + // console.log(onDocumentLoadedCheck); }); test.skip("Test 
docs with empty database page content", async () => { @@ -101,5 +109,5 @@ test.skip("Test docs with empty database page content and propertiesAsHeader ena expect(docs.length).toBe(3); - console.log(docs); + // console.log(docs); }); diff --git a/libs/langchain-community/src/document_loaders/tests/notionapi.test.ts b/libs/langchain-community/src/document_loaders/tests/notionapi.test.ts index d3b4ac563d0c..ee0172d612d8 100644 --- a/libs/langchain-community/src/document_loaders/tests/notionapi.test.ts +++ b/libs/langchain-community/src/document_loaders/tests/notionapi.test.ts @@ -16,8 +16,8 @@ test("Properties Parser", async () => { auth: process.env.NOTION_INTEGRATION_TOKEN, }, id: process.env.NOTION_PAGE_ID ?? "", - onDocumentLoaded: (current, total, currentTitle) => { - console.log(`Loaded Page: ${currentTitle} (${current}/${total})`); + onDocumentLoaded: (_current, _total, _currentTitle) => { + // console.log(`Loaded Page: ${currentTitle} (${current}/${total})`); }, }); @@ -81,8 +81,8 @@ test("Get Title (page)", async () => { auth: process.env.NOTION_INTEGRATION_TOKEN, }, id: process.env.NOTION_PAGE_ID ?? "", - onDocumentLoaded: (current, total, currentTitle) => { - console.log(`Loaded Page: ${currentTitle} (${current}/${total})`); + onDocumentLoaded: (_current, _total, _currentTitle) => { + // console.log(`Loaded Page: ${currentTitle} (${current}/${total})`); }, }); @@ -108,8 +108,8 @@ test("Get Title (database)", async () => { auth: process.env.NOTION_INTEGRATION_TOKEN, }, id: process.env.NOTION_PAGE_ID ?? 
"", - onDocumentLoaded: (current, total, currentTitle) => { - console.log(`Loaded Page: ${currentTitle} (${current}/${total})`); + onDocumentLoaded: (_current, _total, _currentTitle) => { + // console.log(`Loaded Page: ${currentTitle} (${current}/${total})`); }, }); diff --git a/libs/langchain-community/src/document_loaders/tests/s3.int.test.ts b/libs/langchain-community/src/document_loaders/tests/s3.int.test.ts index 80ba00e18971..3d073669180d 100644 --- a/libs/langchain-community/src/document_loaders/tests/s3.int.test.ts +++ b/libs/langchain-community/src/document_loaders/tests/s3.int.test.ts @@ -11,8 +11,8 @@ const fsMock = { ...fs, mkdtempSync: jest.fn().mockReturnValue("tmp/s3fileloader-12345"), mkdirSync: jest.fn().mockImplementation(() => {}), - writeFileSync: jest.fn().mockImplementation((path, data) => { - console.log(`Writing "${(data as object).toString()}" to ${path}`); + writeFileSync: jest.fn().mockImplementation((_path, _data) => { + // console.log(`Writing "${(data as object).toString()}" to ${path}`); }), }; diff --git a/libs/langchain-community/src/document_loaders/tests/sort_xyz_blockchain.int.test.ts b/libs/langchain-community/src/document_loaders/tests/sort_xyz_blockchain.int.test.ts index dd0d9b9d8030..3b42e6468c9a 100644 --- a/libs/langchain-community/src/document_loaders/tests/sort_xyz_blockchain.int.test.ts +++ b/libs/langchain-community/src/document_loaders/tests/sort_xyz_blockchain.int.test.ts @@ -16,8 +16,10 @@ test.skip("Test Blockchain NFT Metadata Loader", async () => { }, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const response = await nftMetadataLoader.load(); - console.log(response); + // console.log(response); }); test.skip("Test Blockchain Latest Transactions Loader", async () => { @@ -31,8 +33,10 @@ test.skip("Test Blockchain Latest Transactions Loader", async () => { }, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused 
var const response = await latestTransactionsLoader.load(); - console.log(response); + // console.log(response); }); test.skip("Test Blockchain SQL Query Loader", async () => { @@ -41,6 +45,8 @@ test.skip("Test Blockchain SQL Query Loader", async () => { query: `SELECT * FROM ethereum.nft_metadata WHERE contract_address = '${contractAddress}' AND token_id = 1 LIMIT 1`, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const response = await sqlQueryLoader.load(); - console.log(response); + // console.log(response); }); diff --git a/libs/langchain-community/src/document_loaders/tests/taskade.int.test.ts b/libs/langchain-community/src/document_loaders/tests/taskade.int.test.ts index d9389736893f..d38042cace22 100644 --- a/libs/langchain-community/src/document_loaders/tests/taskade.int.test.ts +++ b/libs/langchain-community/src/document_loaders/tests/taskade.int.test.ts @@ -8,6 +8,8 @@ test.skip("Test TaskadeProjectLoader", async () => { personalAccessToken: process.env.TASKADE_PERSONAL_ACCESS_TOKEN!, projectId: process.env.TASKADE_PROJECT_ID!, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const documents = await loader.load(); - console.log(documents[0].pageContent); + // console.log(documents[0].pageContent); }); diff --git a/libs/langchain-community/src/embeddings/tests/googlepalm.int.test.ts b/libs/langchain-community/src/embeddings/tests/googlepalm.int.test.ts index 5257555b7357..f194d547adb3 100644 --- a/libs/langchain-community/src/embeddings/tests/googlepalm.int.test.ts +++ b/libs/langchain-community/src/embeddings/tests/googlepalm.int.test.ts @@ -7,7 +7,7 @@ test.skip("Test GooglePalmEmbeddings.embedQuery", async () => { maxRetries: 1, }); const res = await embeddings.embedQuery("Hello world"); - console.log(res); + // console.log(res); expect(typeof res[0]).toBe("number"); }); @@ -23,7 +23,7 @@ test.skip("Test GooglePalmEmbeddings.embedDocuments", 
async () => { "six documents", "to test pagination", ]); - console.log(res); + // console.log(res); expect(res).toHaveLength(6); res.forEach((r) => { expect(typeof r[0]).toBe("number"); diff --git a/libs/langchain-community/src/embeddings/tests/googlevertexai.int.test.ts b/libs/langchain-community/src/embeddings/tests/googlevertexai.int.test.ts index 879b99d7feb9..98d487b6ab57 100644 --- a/libs/langchain-community/src/embeddings/tests/googlevertexai.int.test.ts +++ b/libs/langchain-community/src/embeddings/tests/googlevertexai.int.test.ts @@ -5,7 +5,7 @@ import { HNSWLib } from "../../vectorstores/hnswlib.js"; test("Test GoogleVertexAIEmbeddings.embedQuery", async () => { const embeddings = new GoogleVertexAIEmbeddings(); const res = await embeddings.embedQuery("Hello world"); - console.log(res); + // console.log(res); expect(typeof res[0]).toBe("number"); }); @@ -19,7 +19,7 @@ test("Test GoogleVertexAIEmbeddings.embedDocuments", async () => { "six documents", "to test pagination", ]); - console.log(res); + // console.log(res); expect(res).toHaveLength(6); res.forEach((r) => { expect(typeof r[0]).toBe("number"); diff --git a/libs/langchain-community/src/experimental/chat_models/tests/ollama_functions.int.test.ts b/libs/langchain-community/src/experimental/chat_models/tests/ollama_functions.int.test.ts index d4f76a0cb461..8b8a4c94945d 100644 --- a/libs/langchain-community/src/experimental/chat_models/tests/ollama_functions.int.test.ts +++ b/libs/langchain-community/src/experimental/chat_models/tests/ollama_functions.int.test.ts @@ -7,8 +7,10 @@ import { OllamaFunctions } from "../ollama_functions.js"; test.skip("Test OllamaFunctions", async () => { const chat = new OllamaFunctions({ model: "mistral" }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); }); 
test.skip("Test OllamaFunctions with functions", async () => { @@ -38,8 +40,10 @@ test.skip("Test OllamaFunctions with functions", async () => { ], }); const message = new HumanMessage("What is the weather in San Francisco?"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); }); test.skip("Test OllamaFunctions with a forced function call", async () => { @@ -76,6 +80,8 @@ test.skip("Test OllamaFunctions with a forced function call", async () => { const message = new HumanMessage( "Extract the desired information from the following passage:\n\nthis is really cool" ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); }); diff --git a/libs/langchain-community/src/experimental/graph_transformers/llm.int.test.ts b/libs/langchain-community/src/experimental/graph_transformers/llm.int.test.ts index 8702c1cff782..c85fad11e763 100644 --- a/libs/langchain-community/src/experimental/graph_transformers/llm.int.test.ts +++ b/libs/langchain-community/src/experimental/graph_transformers/llm.int.test.ts @@ -17,11 +17,13 @@ test.skip("convertToGraphDocuments", async () => { llm: model, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await llmGraphTransformer.convertToGraphDocuments([ new Document({ pageContent: "Elon Musk is suing OpenAI" }), ]); - console.log(result); + // console.log(result); }); test("convertToGraphDocuments with allowed", async () => { @@ -40,7 +42,7 @@ test("convertToGraphDocuments with allowed", async () => { new Document({ pageContent: "Elon Musk is suing OpenAI" }), ]); - console.log(JSON.stringify(result)); + // console.log(JSON.stringify(result)); 
expect(result).toEqual([ new GraphDocument({ @@ -79,7 +81,7 @@ test("convertToGraphDocuments with allowed lowercased", async () => { new Document({ pageContent: "Elon Musk is suing OpenAI" }), ]); - console.log(JSON.stringify(result)); + // console.log(JSON.stringify(result)); expect(result).toEqual([ new GraphDocument({ diff --git a/libs/langchain-community/src/experimental/hubs/makersuite/tests/googlemakersuitehub.int.test.ts b/libs/langchain-community/src/experimental/hubs/makersuite/tests/googlemakersuitehub.int.test.ts index 85dcf1d76405..59a966b2bc85 100644 --- a/libs/langchain-community/src/experimental/hubs/makersuite/tests/googlemakersuitehub.int.test.ts +++ b/libs/langchain-community/src/experimental/hubs/makersuite/tests/googlemakersuitehub.int.test.ts @@ -45,7 +45,7 @@ describe.skip("Google Maker Suite Hub Integration", () => { const result = await chain.invoke({ product: "shoes", }); - console.log("text chain result", result); + // console.log("text chain result", result); expect(result).toBeTruthy(); }); @@ -55,7 +55,7 @@ describe.skip("Google Maker Suite Hub Integration", () => { const result = await chain.invoke({ description: "shoes", }); - console.log("data chain result", result); + // console.log("data chain result", result); expect(result).toBeTruthy(); }); @@ -65,7 +65,7 @@ describe.skip("Google Maker Suite Hub Integration", () => { const message = new HumanMessage("Hello!"); const result = await model.invoke([message]); expect(result).toBeTruthy(); - console.log({ result }); + // console.log({ result }); }); }); @@ -74,9 +74,11 @@ describe.skip("Google Maker Suite Hub Integration", () => { const fileId = "1IAWobj3BYvbj5X3JOAKaoXTcNJlZLdpK"; const caller = new AsyncCaller({}); const connection = new DriveFileReadConnection({ fileId }, caller); - console.log("connection client", connection?.client); + // console.log("connection client", connection?.client); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error 
unused var const result = await connection.request(); - console.log(result); + // console.log(result); }); }); @@ -89,14 +91,14 @@ describe.skip("Google Maker Suite Hub Integration", () => { const result = await model.invoke( "What would be a good name for a company that makes socks" ); - console.log("text chain result", result); + // console.log("text chain result", result); expect(result).toBeTruthy(); }); test("text chain", async () => { const prompt = await hub.pull("1gxLasQIeQdwR4wxtV_nb93b_g9f0GaMm"); const result = await prompt.toChain().invoke({ product: "socks" }); - console.log("text chain result", result); + // console.log("text chain result", result); expect(result).toBeTruthy(); }); }); diff --git a/libs/langchain-community/src/experimental/multimodal_embeddings/tests/googlevertexai.int.test.ts b/libs/langchain-community/src/experimental/multimodal_embeddings/tests/googlevertexai.int.test.ts index 3311befaedac..83c2d59a313a 100644 --- a/libs/langchain-community/src/experimental/multimodal_embeddings/tests/googlevertexai.int.test.ts +++ b/libs/langchain-community/src/experimental/multimodal_embeddings/tests/googlevertexai.int.test.ts @@ -11,7 +11,7 @@ test.skip("embedding text", async () => { const vector: number[] = await e.embedQuery("test 1"); expect(vector).toHaveLength(1408); - console.log(vector); + // console.log(vector); }); test.skip("embedding multiple texts", async () => { @@ -22,7 +22,7 @@ test.skip("embedding multiple texts", async () => { expect(vector).toHaveLength(2); expect(vector[0]).toHaveLength(1408); expect(vector[1]).toHaveLength(1408); - console.log(vector); + // console.log(vector); }); test.skip("embedding image", async () => { @@ -36,7 +36,7 @@ test.skip("embedding image", async () => { const img = fs.readFileSync(pathname); const vector: number[] = await e.embedImageQuery(img); expect(vector).toHaveLength(1408); - console.log(vector); + // console.log(vector); }); test.skip("embedding image with text in a vector store", async 
() => { @@ -48,8 +48,10 @@ test.skip("embedding image with text in a vector store", async () => { e ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const resultOne = await vectorStore.similaritySearch("bird", 2); - console.log(resultOne); + // console.log(resultOne); const pathname = path.join( path.dirname(fileURLToPath(import.meta.url)), @@ -76,9 +78,11 @@ test.skip("embedding image with text in a vector store", async () => { const img2 = fs.readFileSync(pathname2); const vector2: number[] = await e.embedImageQuery(img2); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const resultTwo = await vectorStore.similaritySearchVectorWithScore( vector2, 2 ); - console.log(resultTwo); + // console.log(resultTwo); }); diff --git a/libs/langchain-community/src/graphs/tests/memgraph_graph.int.test.ts b/libs/langchain-community/src/graphs/tests/memgraph_graph.int.test.ts index ddeb17c635b1..a9e682daf6e3 100644 --- a/libs/langchain-community/src/graphs/tests/memgraph_graph.int.test.ts +++ b/libs/langchain-community/src/graphs/tests/memgraph_graph.int.test.ts @@ -30,7 +30,7 @@ describe.skip("Memgraph Graph Tests", () => { ); await graph.refreshSchema(); - console.log(graph.getSchema()); + // console.log(graph.getSchema()); expect(graph.getSchema()).toMatchInlineSnapshot(` "Node properties are the following: diff --git a/libs/langchain-community/src/graphs/tests/neo4j_graph.int.test.ts b/libs/langchain-community/src/graphs/tests/neo4j_graph.int.test.ts index 02fc74bca3b8..f31de3cc8928 100644 --- a/libs/langchain-community/src/graphs/tests/neo4j_graph.int.test.ts +++ b/libs/langchain-community/src/graphs/tests/neo4j_graph.int.test.ts @@ -51,7 +51,7 @@ describe.skip("Neo4j Graph Tests", () => { ); await graph.refreshSchema(); - console.log(graph.getSchema()); + // console.log(graph.getSchema()); // expect(graph.getSchema()).toMatchInlineSnapshot(` // "Node properties are the 
following: @@ -229,7 +229,7 @@ describe.skip("Neo4j Graph with custom config", () => { const output = graphWithEnhancedSchema.getStructuredSchema(); delete output.metadata; - console.log(output); + // console.log(output); expect(output).toEqual({ nodeProps: { foo: [ @@ -258,7 +258,7 @@ describe.skip("Neo4j Graph with custom config", () => { test("Test running on multiple demo databases", async () => { for (const database of DEMO_DATABASES) { - console.log("Connecting demo database:", database); + // console.log("Connecting demo database:", database); const graphDemo = await Neo4jGraph.initialize({ url: DEMO_URL, @@ -270,6 +270,6 @@ describe.skip("Neo4j Graph with custom config", () => { await graphDemo.close(); } - console.log("All database tests completed."); + // console.log("All database tests completed."); }, 10000000); }); diff --git a/libs/langchain-community/src/indexes/tests/postgres.int.test.ts b/libs/langchain-community/src/indexes/tests/postgres.int.test.ts index 79564ced15bd..05c7759364fc 100644 --- a/libs/langchain-community/src/indexes/tests/postgres.int.test.ts +++ b/libs/langchain-community/src/indexes/tests/postgres.int.test.ts @@ -55,11 +55,11 @@ describe.skip("PostgresRecordManager", () => { ); // create new schema for test - console.log("creating new schema in test"); + // console.log("creating new schema in test"); await explicitSchemaRecordManager.pool.query('CREATE SCHEMA "newSchema"'); // create table in new schema - console.log("calling createSchema function from test"); + // console.log("calling createSchema function from test"); await explicitSchemaRecordManager.createSchema(); // drop created schema diff --git a/libs/langchain-community/src/llms/tests/ai21.int.test.ts b/libs/langchain-community/src/llms/tests/ai21.int.test.ts index a4aad18ebc5e..b45e54ae8c67 100644 --- a/libs/langchain-community/src/llms/tests/ai21.int.test.ts +++ b/libs/langchain-community/src/llms/tests/ai21.int.test.ts @@ -4,26 +4,32 @@ import { AI21 } from 
"../ai21.js"; describe.skip("AI21", () => { test("test call", async () => { const ai21 = new AI21({}); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await ai21.invoke( "What is a good name for a company that makes colorful socks?" ); - console.log({ result }); + // console.log({ result }); }); test("test translation call", async () => { const ai21 = new AI21({}); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await ai21.invoke( `Translate "I love programming" into German.` ); - console.log({ result }); + // console.log({ result }); }); test("test JSON output call", async () => { const ai21 = new AI21({}); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await ai21.invoke( `Output a JSON object with three string fields: "name", "birthplace", "bio".` ); - console.log({ result }); + // console.log({ result }); }); test("should abort the request", async () => { diff --git a/libs/langchain-community/src/llms/tests/aleph_alpha.int.test.ts b/libs/langchain-community/src/llms/tests/aleph_alpha.int.test.ts index 510655500e53..f4dd271d0709 100644 --- a/libs/langchain-community/src/llms/tests/aleph_alpha.int.test.ts +++ b/libs/langchain-community/src/llms/tests/aleph_alpha.int.test.ts @@ -4,26 +4,32 @@ import { AlephAlpha } from "../aleph_alpha.js"; describe("Aleph Alpha", () => { test("test call", async () => { const aleph_alpha = new AlephAlpha({}); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await aleph_alpha.invoke( "What is a good name for a company that makes colorful socks?" 
); - console.log({ result }); + // console.log({ result }); }); test("test translation call", async () => { const aleph_alpha = new AlephAlpha({}); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await aleph_alpha.invoke( `Translate "I love programming" into German.` ); - console.log({ result }); + // console.log({ result }); }); test("test JSON output call", async () => { const aleph_alpha = new AlephAlpha({}); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await aleph_alpha.invoke( `Output a JSON object with three string fields: "name", "birthplace", "bio".` ); - console.log({ result }); + // console.log({ result }); }); test("should abort the request", async () => { diff --git a/libs/langchain-community/src/llms/tests/bedrock.int.test.ts b/libs/langchain-community/src/llms/tests/bedrock.int.test.ts index 4d7e7b5050c6..b05c1c1f962c 100644 --- a/libs/langchain-community/src/llms/tests/bedrock.int.test.ts +++ b/libs/langchain-community/src/llms/tests/bedrock.int.test.ts @@ -24,7 +24,7 @@ test("Test Bedrock LLM: AI21", async () => { const res = await bedrock.invoke(prompt); expect(typeof res).toBe("string"); - console.log(res); + // console.log(res); }); test.skip("Test Bedrock LLM: Meta Llama2", async () => { @@ -47,7 +47,7 @@ test.skip("Test Bedrock LLM: Meta Llama2", async () => { const res = await bedrock.invoke(prompt); expect(typeof res).toBe("string"); - console.log(res); + // console.log(res); }); test.skip("Test Bedrock LLM streaming: Meta Llama2", async () => { @@ -70,7 +70,7 @@ test.skip("Test Bedrock LLM streaming: Meta Llama2", async () => { const stream = await bedrock.stream(prompt); const chunks = []; for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); @@ -95,7 +95,7 @@ test("Test Bedrock LLM: Claude-v2", async () => { const res 
= await bedrock.invoke(prompt); expect(typeof res).toBe("string"); - console.log(res); + // console.log(res); }); test("Test Bedrock LLM streaming: AI21", async () => { @@ -118,7 +118,7 @@ test("Test Bedrock LLM streaming: AI21", async () => { const stream = await bedrock.stream(prompt); const chunks = []; for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); chunks.push(chunk); } expect(chunks.length).toEqual(1); @@ -175,7 +175,7 @@ test("Test Bedrock LLM streaming: Claude-v2", async () => { const stream = await bedrock.stream(prompt); const chunks = []; for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); diff --git a/libs/langchain-community/src/llms/tests/cohere.int.test.ts b/libs/langchain-community/src/llms/tests/cohere.int.test.ts index fbabe666b645..9990d5701217 100644 --- a/libs/langchain-community/src/llms/tests/cohere.int.test.ts +++ b/libs/langchain-community/src/llms/tests/cohere.int.test.ts @@ -3,6 +3,8 @@ import { Cohere } from "../cohere.js"; test("Test Cohere", async () => { const model = new Cohere({ maxTokens: 20 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("1 + 1 ="); - console.log(res); + // console.log(res); }, 50000); diff --git a/libs/langchain-community/src/llms/tests/deepinfra.int.test.ts b/libs/langchain-community/src/llms/tests/deepinfra.int.test.ts index 1c8853d2782a..ede4813a3f48 100644 --- a/libs/langchain-community/src/llms/tests/deepinfra.int.test.ts +++ b/libs/langchain-community/src/llms/tests/deepinfra.int.test.ts @@ -3,6 +3,8 @@ import { DeepInfraLLM } from "../deepinfra.js"; test("Test DeepInfra", async () => { const model = new DeepInfraLLM({ maxTokens: 20 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("1 + 1 ="); - console.log(res); + 
// console.log(res); }, 50000); diff --git a/libs/langchain-community/src/llms/tests/fireworks.int.test.ts b/libs/langchain-community/src/llms/tests/fireworks.int.test.ts index 80b51e629a6a..173d437d5142 100644 --- a/libs/langchain-community/src/llms/tests/fireworks.int.test.ts +++ b/libs/langchain-community/src/llms/tests/fireworks.int.test.ts @@ -4,14 +4,18 @@ import { Fireworks } from "../fireworks.js"; describe("Fireworks", () => { test("call", async () => { const model = new Fireworks({ maxTokens: 50 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("1 + 1 = "); - console.log({ res }); + // console.log({ res }); }); test("generate", async () => { const model = new Fireworks({ maxTokens: 50 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.generate(["1 + 1 = "]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); await expect( async () => await model.generate(["1 + 1 = ", "2 + 2 = "]) diff --git a/libs/langchain-community/src/llms/tests/friendli.int.test.ts b/libs/langchain-community/src/llms/tests/friendli.int.test.ts index 960e2df0e62d..0af071f6767b 100644 --- a/libs/langchain-community/src/llms/tests/friendli.int.test.ts +++ b/libs/langchain-community/src/llms/tests/friendli.int.test.ts @@ -4,13 +4,17 @@ import { Friendli } from "../friendli.js"; describe.skip("Friendli", () => { test("call", async () => { const friendli = new Friendli({ maxTokens: 20 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await friendli.invoke("1 + 1 = "); - console.log({ res }); + // console.log({ res }); }); test("generate", async () => { const friendli = new Friendli({ maxTokens: 20 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await 
friendli.generate(["1 + 1 = "]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); }); diff --git a/libs/langchain-community/src/llms/tests/googlepalm.int.test.ts b/libs/langchain-community/src/llms/tests/googlepalm.int.test.ts index 815bc070e752..ed8dd828056f 100644 --- a/libs/langchain-community/src/llms/tests/googlepalm.int.test.ts +++ b/libs/langchain-community/src/llms/tests/googlepalm.int.test.ts @@ -4,21 +4,21 @@ import { GooglePaLM } from "../googlepalm.js"; test.skip("Test Google Palm", async () => { const model = new GooglePaLM(); const res = await model.invoke("what is 1 + 1?"); - console.log({ res }); + // console.log({ res }); expect(res).toBeTruthy(); }); test.skip("Test Google Palm generation", async () => { const model = new GooglePaLM(); const res = await model.generate(["what is 1 + 1?"]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); expect(res).toBeTruthy(); }); test.skip("Test Google Palm generation", async () => { const model = new GooglePaLM(); const res = await model.generate(["Print hello world."]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); expect(res).toBeTruthy(); }); @@ -27,6 +27,6 @@ test.skip("Test Google Palm generation", async () => { const res = await model.generate([ `Translate "I love programming" into Korean.`, ]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); expect(res).toBeTruthy(); }); diff --git a/libs/langchain-community/src/llms/tests/googlevertexai.int.test.ts b/libs/langchain-community/src/llms/tests/googlevertexai.int.test.ts index 43647528e221..e9c02e415ead 100644 --- a/libs/langchain-community/src/llms/tests/googlevertexai.int.test.ts +++ b/libs/langchain-community/src/llms/tests/googlevertexai.int.test.ts @@ -4,28 +4,36 @@ import { GoogleVertexAI } from "../googlevertexai/index.js"; describe("Vertex AI", () => { test("Test 
Google Vertex", async () => { const model = new GoogleVertexAI({ maxOutputTokens: 50 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("1 + 1 = "); - console.log({ res }); + // console.log({ res }); }); test("Test Google Vertex generation", async () => { const model = new GoogleVertexAI({ maxOutputTokens: 50 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.generate(["1 + 1 = "]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("Test Google Vertex generation", async () => { const model = new GoogleVertexAI({ maxOutputTokens: 50 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.generate(["Print hello world."]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("Test Google Vertex generation", async () => { const model = new GoogleVertexAI({ maxOutputTokens: 50 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.generate([ `Translate "I love programming" into Korean.`, ]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("Test Google Vertex Codey gecko model", async () => { @@ -34,8 +42,10 @@ describe("Vertex AI", () => { expect(model.temperature).toEqual(0.2); expect(model.maxOutputTokens).toEqual(64); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("for( let co = 0"); - console.log(res); + // console.log(res); }); test("Test Google Vertex Codey bison model", async () => { @@ -45,8 +55,10 @@ describe("Vertex AI", () => { }); expect(model.model).toEqual("code-bison"); + // 
@eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Count to 10 in JavaScript."); - console.log(res); + // console.log(res); }); test("Test Google Vertex bison-32k model", async () => { @@ -54,8 +66,10 @@ describe("Vertex AI", () => { model: "text-bison-32k", maxOutputTokens: 50, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("1 + 1 = "); - console.log({ res }); + // console.log({ res }); }); test("streaming text", async () => { @@ -70,7 +84,7 @@ describe("Vertex AI", () => { const chunks = []; for await (const chunk of stream) { chunks.push(chunk); - console.log("chunk", chunk); + // console.log("chunk", chunk); } expect(chunks.length).toBeGreaterThan(1); expect(chunks[chunks.length - 1]).toEqual(""); diff --git a/libs/langchain-community/src/llms/tests/googlevertexai_web.int.test.ts b/libs/langchain-community/src/llms/tests/googlevertexai_web.int.test.ts index fd6d15108fbf..6247cf05e2a4 100644 --- a/libs/langchain-community/src/llms/tests/googlevertexai_web.int.test.ts +++ b/libs/langchain-community/src/llms/tests/googlevertexai_web.int.test.ts @@ -4,28 +4,36 @@ import { GoogleVertexAI } from "../googlevertexai/web.js"; describe("Web Vertex AI", () => { test("Test Google Vertex", async () => { const model = new GoogleVertexAI({ maxOutputTokens: 50 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("1 + 1 = "); - console.log({ res }); + // console.log({ res }); }); test("Test Google Vertex generation", async () => { const model = new GoogleVertexAI({ maxOutputTokens: 50 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.generate(["1 + 1 = "]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("Test 
Google Vertex generation", async () => { const model = new GoogleVertexAI({ maxOutputTokens: 50 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.generate(["Print hello world."]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("Test Google Vertex generation", async () => { const model = new GoogleVertexAI({ maxOutputTokens: 50 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.generate([ `Translate "I love programming" into Korean.`, ]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); }); test("Test Google Vertex Codey gecko model", async () => { @@ -34,8 +42,10 @@ describe("Web Vertex AI", () => { expect(model.temperature).toEqual(0.2); expect(model.maxOutputTokens).toEqual(64); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("for( let co = 0"); - console.log(res); + // console.log(res); }); test("Test Google Vertex Codey bison model", async () => { @@ -45,8 +55,10 @@ describe("Web Vertex AI", () => { }); expect(model.model).toEqual("code-bison"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Count to 10 in JavaScript."); - console.log(res); + // console.log(res); }); test("Test Google Vertex bison-32k model", async () => { @@ -54,8 +66,10 @@ describe("Web Vertex AI", () => { model: "text-bison-32k", maxOutputTokens: 50, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("1 + 1 = "); - console.log({ res }); + // console.log({ res }); }); test("Test Google Vertex stream returns one chunk", async () => { @@ -70,7 +84,7 @@ describe("Web Vertex AI", () => { const chunks = []; for 
await (const chunk of stream) { chunks.push(chunk); - console.log(chunk); + // console.log(chunk); } expect(chunks.length).toBeGreaterThan(1); }); diff --git a/libs/langchain-community/src/llms/tests/huggingface_hub.int.test.ts b/libs/langchain-community/src/llms/tests/huggingface_hub.int.test.ts index 86ae8b9d8d51..6e4e4565949d 100644 --- a/libs/langchain-community/src/llms/tests/huggingface_hub.int.test.ts +++ b/libs/langchain-community/src/llms/tests/huggingface_hub.int.test.ts @@ -3,8 +3,10 @@ import { HuggingFaceInference } from "../hf.js"; test.skip("Test HuggingFace", async () => { const model = new HuggingFaceInference({ temperature: 0.1, topP: 0.5 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("1 + 1 ="); - console.log(res); + // console.log(res); }, 50000); test.skip("Test HuggingFace with streaming", async () => { @@ -18,9 +20,9 @@ test.skip("Test HuggingFace with streaming", async () => { const chunks = []; for await (const chunk of stream) { chunks.push(chunk); - console.log(chunk); + // console.log(chunk); } - console.log(chunks.join("")); + // console.log(chunks.join("")); expect(chunks.length).toBeGreaterThan(1); }, 50000); @@ -30,10 +32,12 @@ test.skip("Test HuggingFace with stop sequence", async () => { temperature: 0.1, topP: 0.5, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model .bind({ stop: ["ramento"], }) .invoke(`What is the capital of California?`); - console.log(res); + // console.log(res); }, 50000); diff --git a/libs/langchain-community/src/llms/tests/llama_cpp.int.test.ts b/libs/langchain-community/src/llms/tests/llama_cpp.int.test.ts index 30f55350f467..8f8d34e70a52 100644 --- a/libs/langchain-community/src/llms/tests/llama_cpp.int.test.ts +++ b/libs/langchain-community/src/llms/tests/llama_cpp.int.test.ts @@ -7,14 +7,18 @@ const llamaPath = 
getEnvironmentVariable("LLAMA_PATH")!; test.skip("Test Llama_CPP", async () => { const model = new LlamaCpp({ modelPath: llamaPath }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Where do Llamas live?"); - console.log(res); + // console.log(res); }, 100000); test.skip("Test Llama_CPP", async () => { const model = new LlamaCpp({ modelPath: llamaPath }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Where do Pandas live?"); - console.log(res); + // console.log(res); }, 100000); test.skip("Test Llama_CPP", async () => { @@ -52,10 +56,12 @@ const gbnfListGrammer = test.skip("Test Llama_CPP", async () => { const model = new LlamaCpp({ modelPath: llamaPath, gbnf: gbnfListGrammer }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke( "Can you give me a list of 3 cute llama names?" 
); - console.log(res); + // console.log(res); }, 100000); // JSON schema test @@ -77,6 +83,8 @@ const schemaJSON = { test.skip("Test Llama_CPP", async () => { const model = new LlamaCpp({ modelPath: llamaPath, jsonSchema: schemaJSON }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Where do llamas live?"); - console.log(res); + // console.log(res); }, 100000); diff --git a/libs/langchain-community/src/llms/tests/ollama.int.test.ts b/libs/langchain-community/src/llms/tests/ollama.int.test.ts index 24d3686c7d97..0777028f14b8 100644 --- a/libs/langchain-community/src/llms/tests/ollama.int.test.ts +++ b/libs/langchain-community/src/llms/tests/ollama.int.test.ts @@ -11,10 +11,12 @@ import { Ollama } from "../ollama.js"; test.skip("test call", async () => { const ollama = new Ollama({}); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await ollama.invoke( "What is a good name for a company that makes colorful socks?" 
); - console.log({ result }); + // console.log({ result }); }); test.skip("test call with callback", async () => { @@ -49,7 +51,7 @@ test.skip("test streaming call", async () => { for await (const chunk of stream) { chunks.push(chunk); } - console.log(chunks.join("")); + // console.log(chunks.join("")); expect(chunks.length).toBeGreaterThan(1); }); @@ -89,7 +91,7 @@ test.skip("should stream through with a bytes output parser", async () => { for await (const chunk of stream) { chunks.push(chunk); } - console.log(chunks.join("")); + // console.log(chunks.join("")); expect(chunks.length).toBeGreaterThan(1); }); @@ -112,7 +114,7 @@ test.skip("JSON mode", async () => { const res = await chain.invoke({ input: `Translate "I love programming" into German.`, }); - console.log(res); + // console.log(res); expect(JSON.parse(res).response).toBeDefined(); }); @@ -126,6 +128,8 @@ test.skip("Test Ollama with an image", async () => { }).bind({ images: [imageData.toString("base64")], }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("What's in this image?"); - console.log({ res }); + // console.log({ res }); }); diff --git a/libs/langchain-community/src/llms/tests/replicate.int.test.ts b/libs/langchain-community/src/llms/tests/replicate.int.test.ts index c2fdca08567e..ae76ed1ed7a8 100644 --- a/libs/langchain-community/src/llms/tests/replicate.int.test.ts +++ b/libs/langchain-community/src/llms/tests/replicate.int.test.ts @@ -13,7 +13,7 @@ test.skip("Test Replicate", async () => { const res = await model.invoke("Hello, my name is "); - console.log({ res }); + // console.log({ res }); expect(typeof res).toBe("string"); }); @@ -33,7 +33,7 @@ test.skip("Test Replicate streaming", async () => { for await (const chunk of stream) { chunks.push(chunk); } - console.log(chunks); + // console.log(chunks); expect(chunks.length).toBeGreaterThan(1); }); diff --git 
a/libs/langchain-community/src/llms/tests/togetherai.int.test.ts b/libs/langchain-community/src/llms/tests/togetherai.int.test.ts index 2a7c3445c1d6..8e47a0f94a84 100644 --- a/libs/langchain-community/src/llms/tests/togetherai.int.test.ts +++ b/libs/langchain-community/src/llms/tests/togetherai.int.test.ts @@ -10,8 +10,10 @@ test.skip("TogetherAI can make a request to an LLM", async () => { ["human", "Tell me a joke about bears."], ]); const chain = prompt.pipe(model); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await chain.invoke({}); - console.log("result", result); + // console.log("result", result); }); test.skip("TogetherAI can stream responses", async () => { @@ -28,10 +30,10 @@ test.skip("TogetherAI can stream responses", async () => { let numItems = 0; let fullText = ""; for await (const item of result) { - console.log("stream item", item); + // console.log("stream item", item); fullText += item; numItems += 1; } - console.log(fullText); + // console.log(fullText); expect(numItems).toBeGreaterThan(1); }); diff --git a/libs/langchain-community/src/llms/tests/writer.int.test.ts b/libs/langchain-community/src/llms/tests/writer.int.test.ts index 9c45c789aafe..adb532ee8562 100644 --- a/libs/langchain-community/src/llms/tests/writer.int.test.ts +++ b/libs/langchain-community/src/llms/tests/writer.int.test.ts @@ -3,6 +3,8 @@ import { Writer } from "../writer.js"; test.skip("Test Writer", async () => { const model = new Writer({ maxTokens: 20 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("1 + 1 ="); - console.log(res); + // console.log(res); }, 50000); diff --git a/libs/langchain-community/src/memory/tests/zep_memory.int.test.ts b/libs/langchain-community/src/memory/tests/zep_memory.int.test.ts index 4f78cef9f5e0..24402e9c1741 100644 --- a/libs/langchain-community/src/memory/tests/zep_memory.int.test.ts +++ 
b/libs/langchain-community/src/memory/tests/zep_memory.int.test.ts @@ -23,7 +23,7 @@ test.skip("addMemory to Zep memory", async () => { test.skip("getMessages from Zep memory", async () => { const memoryVariables = await zepMemory.loadMemoryVariables({}); - console.log("memoryVariables", memoryVariables); + // console.log("memoryVariables", memoryVariables); // Check if memoryKey exists in the memoryVariables expect(memoryVariables).toHaveProperty(zepMemory.memoryKey); @@ -37,7 +37,7 @@ test.skip("getMessages from Zep memory", async () => { } else if (Array.isArray(messages)) { expect(messages.length).toBeGreaterThanOrEqual(1); } else { - console.log("failed to get messages: ", messages); + // console.log("failed to get messages: ", messages); // Fail the test because messages is neither string nor array throw new Error("Returned messages is neither string nor array"); } diff --git a/libs/langchain-community/src/retrievers/tests/amazon_kendra.int.test.ts b/libs/langchain-community/src/retrievers/tests/amazon_kendra.int.test.ts index 604930097aad..2e6d194f0789 100644 --- a/libs/langchain-community/src/retrievers/tests/amazon_kendra.int.test.ts +++ b/libs/langchain-community/src/retrievers/tests/amazon_kendra.int.test.ts @@ -20,5 +20,5 @@ test.skip("AmazonKendraRetriever", async () => { expect(docs.length).toBeGreaterThan(0); - console.log(docs); + // console.log(docs); }); diff --git a/libs/langchain-community/src/retrievers/tests/dria.int.test.ts b/libs/langchain-community/src/retrievers/tests/dria.int.test.ts index 5b6c5350a64e..293e2b60799a 100644 --- a/libs/langchain-community/src/retrievers/tests/dria.int.test.ts +++ b/libs/langchain-community/src/retrievers/tests/dria.int.test.ts @@ -12,5 +12,5 @@ test.skip("DriaRetriever", async () => { const docs = await retriever.getRelevantDocuments("What is a union type?"); expect(docs.length).toBe(topK); - console.log(docs[0].pageContent); + // console.log(docs[0].pageContent); }); diff --git 
a/libs/langchain-community/src/retrievers/tests/metal.int.test.ts b/libs/langchain-community/src/retrievers/tests/metal.int.test.ts index f462984ba66f..ab1dbd8cc4c9 100644 --- a/libs/langchain-community/src/retrievers/tests/metal.int.test.ts +++ b/libs/langchain-community/src/retrievers/tests/metal.int.test.ts @@ -18,5 +18,5 @@ test("MetalRetriever", async () => { expect(docs.length).toBeGreaterThan(0); - console.log(docs); + // console.log(docs); }); diff --git a/libs/langchain-community/src/retrievers/tests/tavily_search_api.int.test.ts b/libs/langchain-community/src/retrievers/tests/tavily_search_api.int.test.ts index fd71faed7ee3..a7943594b0bc 100644 --- a/libs/langchain-community/src/retrievers/tests/tavily_search_api.int.test.ts +++ b/libs/langchain-community/src/retrievers/tests/tavily_search_api.int.test.ts @@ -13,5 +13,5 @@ test.skip("TavilySearchAPIRetriever", async () => { const docs = await retriever.getRelevantDocuments("what bear is best?"); expect(docs.length).toBeGreaterThan(0); - console.log(docs); + // console.log(docs); }); diff --git a/libs/langchain-community/src/retrievers/tests/vespa.int.test.ts b/libs/langchain-community/src/retrievers/tests/vespa.int.test.ts index 4c02d978df86..df9d6f76e705 100644 --- a/libs/langchain-community/src/retrievers/tests/vespa.int.test.ts +++ b/libs/langchain-community/src/retrievers/tests/vespa.int.test.ts @@ -23,5 +23,5 @@ test.skip("VespaRetriever", async () => { const docs = await retriever.getRelevantDocuments("what is vespa?"); expect(docs.length).toBeGreaterThan(0); - console.log(docs); + // console.log(docs); }); diff --git a/libs/langchain-community/src/retrievers/tests/zep.int.test.ts b/libs/langchain-community/src/retrievers/tests/zep.int.test.ts index a7c4dd26a33c..f8dbce9b050a 100644 --- a/libs/langchain-community/src/retrievers/tests/zep.int.test.ts +++ b/libs/langchain-community/src/retrievers/tests/zep.int.test.ts @@ -34,7 +34,7 @@ test.skip("ZepRetriever - memory exists", async () => { 
expect(docs.length).toBeGreaterThanOrEqual(2); - console.log(docs); + // console.log(docs); }); test.skip("ZepRetriever - does not exist", async () => { @@ -47,5 +47,5 @@ test.skip("ZepRetriever - does not exist", async () => { expect(docs.length).toBe(0); - console.log(docs); + // console.log(docs); }); diff --git a/libs/langchain-community/src/storage/tests/ioredis.int.test.ts b/libs/langchain-community/src/storage/tests/ioredis.int.test.ts index 2d05d50bc0a3..935db6b52e95 100644 --- a/libs/langchain-community/src/storage/tests/ioredis.int.test.ts +++ b/libs/langchain-community/src/storage/tests/ioredis.int.test.ts @@ -28,8 +28,10 @@ describe("RedisByteStore", () => { value1, value2, ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const key of store.yieldKeys()) { - console.log(key); + // console.log(key); } await store.mdelete(["key1", "key2"]); const retrievedValues2 = await store.mget(["key1", "key2"]); @@ -54,7 +56,7 @@ describe("RedisByteStore", () => { for await (const key of store.yieldKeys(prefix)) { yieldedKeys.push(key); } - console.log(yieldedKeys); + // console.log(yieldedKeys); expect(yieldedKeys).toEqual(expect.arrayContaining(prefixedKeys)); }); }); diff --git a/libs/langchain-community/src/storage/tests/upstash_redis.int.test.ts b/libs/langchain-community/src/storage/tests/upstash_redis.int.test.ts index f3635c87ac82..5b425743453e 100644 --- a/libs/langchain-community/src/storage/tests/upstash_redis.int.test.ts +++ b/libs/langchain-community/src/storage/tests/upstash_redis.int.test.ts @@ -79,7 +79,7 @@ describe.skip("UpstashRedisStore", () => { for await (const key of store.yieldKeys(prefix)) { yieldedKeys.push(key); } - console.log("Yielded keys:", yieldedKeys); + // console.log("Yielded keys:", yieldedKeys); expect(yieldedKeys.sort()).toEqual(keysWithPrefix.sort()); // afterEach won't automatically delete these since we're applying a prefix. 
await store.mdelete(keysWithPrefix); diff --git a/libs/langchain-community/src/storage/tests/vercel_kv.int.test.ts b/libs/langchain-community/src/storage/tests/vercel_kv.int.test.ts index 4d8fe74c1207..5172210e8fd4 100644 --- a/libs/langchain-community/src/storage/tests/vercel_kv.int.test.ts +++ b/libs/langchain-community/src/storage/tests/vercel_kv.int.test.ts @@ -39,8 +39,10 @@ describe("VercelKVStore", () => { encoder.encode(value1), encoder.encode(value2), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const key of store.yieldKeys()) { - console.log(key); + // console.log(key); } await store.mdelete(["key1", "key2"]); const retrievedValues2 = await store.mget(["key1", "key2"]); @@ -69,7 +71,7 @@ describe("VercelKVStore", () => { for await (const key of store.yieldKeys(prefix)) { yieldedKeys.push(key); } - console.log(yieldedKeys); + // console.log(yieldedKeys); expect(yieldedKeys).toEqual(expect.arrayContaining(prefixedKeys)); await store.mdelete(prefixedKeys); const retrievedValues2 = await store.mget(prefixedKeys); diff --git a/libs/langchain-community/src/stores/doc/tests/gcs.int.test.ts b/libs/langchain-community/src/stores/doc/tests/gcs.int.test.ts index c136c1c8bf9b..d1e3d0098645 100644 --- a/libs/langchain-community/src/stores/doc/tests/gcs.int.test.ts +++ b/libs/langchain-community/src/stores/doc/tests/gcs.int.test.ts @@ -50,7 +50,7 @@ describe.skip("GoogleCloudStorageDocstore", () => { bucket, }); const document = await store.search(name); - console.log(document); + // console.log(document); expect(document.pageContent).toEqual("This is a test"); }); @@ -60,7 +60,7 @@ describe.skip("GoogleCloudStorageDocstore", () => { bucket, }); const document = await store.search(name); - console.log(document); + // console.log(document); expect(document.pageContent).toEqual("This is a metadata test"); expect(document.metadata.meta1).toEqual("one"); }); @@ -73,7 +73,7 @@ 
describe.skip("GoogleCloudStorageDocstore", () => { prefix, }); const document = await store.search(name); - console.log(document); + // console.log(document); expect(document.pageContent).toEqual("This is a prefix test"); }); }); diff --git a/libs/langchain-community/src/stores/tests/astradb.int.test.ts b/libs/langchain-community/src/stores/tests/astradb.int.test.ts index ffaa318a7064..944750c884d4 100644 --- a/libs/langchain-community/src/stores/tests/astradb.int.test.ts +++ b/libs/langchain-community/src/stores/tests/astradb.int.test.ts @@ -22,7 +22,7 @@ describe.skip("AstraDBChatMessageHistory", () => { try { await db.dropCollection("test_messages"); } catch (e) { - console.debug("Collection doesn't exist yet, skipping drop"); + // console.debug("Collection doesn't exist yet, skipping drop"); } await db.createCollection("test_messages"); diff --git a/libs/langchain-community/src/structured_query/tests/chroma_self_query.int.test.ts b/libs/langchain-community/src/structured_query/tests/chroma_self_query.int.test.ts index acad03175c9a..c8296b551757 100644 --- a/libs/langchain-community/src/structured_query/tests/chroma_self_query.int.test.ts +++ b/libs/langchain-community/src/structured_query/tests/chroma_self_query.int.test.ts @@ -86,17 +86,25 @@ test.skip("Chroma Store Self Query Retriever Test", async () => { structuredQueryTranslator: new ChromaTranslator(), }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" 
); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query4 = await selfQueryRetriever.getRelevantDocuments( "Which movies are either comedy or drama and are less than 90 minutes?" ); - console.log(query1, query2, query3, query4); + // console.log(query1, query2, query3, query4); }); diff --git a/libs/langchain-community/src/structured_query/tests/hnswlib_self_query.int.test.ts b/libs/langchain-community/src/structured_query/tests/hnswlib_self_query.int.test.ts index 28998f2b22c7..175efe6c171f 100644 --- a/libs/langchain-community/src/structured_query/tests/hnswlib_self_query.int.test.ts +++ b/libs/langchain-community/src/structured_query/tests/hnswlib_self_query.int.test.ts @@ -92,19 +92,19 @@ test("HNSWLib Store Self Query Retriever Test", async () => { const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); - console.log(query1); + // console.log(query1); expect(query1.length).toEqual(0); const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); - console.log(query2); + // console.log(query2); expect(query2.length).toEqual(2); const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" ); - console.log(query3); + // console.log(query3); expect(query3.length).toEqual(1); }); @@ -187,6 +187,6 @@ test("HNSWLib shouldn't throw an error if a filter can't be generated, but shoul const query1 = await selfQueryRetriever.getRelevantDocuments( "Which sectionTitle talks about pools?" 
); - console.log(query1); + // console.log(query1); expect(query1.length).toEqual(0); }); diff --git a/libs/langchain-community/src/structured_query/tests/supabase_self_query.int.test.ts b/libs/langchain-community/src/structured_query/tests/supabase_self_query.int.test.ts index ba7859b672bf..a6acd57934bb 100644 --- a/libs/langchain-community/src/structured_query/tests/supabase_self_query.int.test.ts +++ b/libs/langchain-community/src/structured_query/tests/supabase_self_query.int.test.ts @@ -123,21 +123,21 @@ test("Supabase Store Self Query Retriever Test", async () => { const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); - console.log(query1); + // console.log(query1); expect(query1.length).toEqual(0); const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); - console.log(query2); + // console.log(query2); expect(query2.length).toEqual(3); const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" ); - console.log(query3); + // console.log(query3); expect(query3.length).toEqual(1); const query4 = await selfQueryRetriever.getRelevantDocuments("What is what"); // this should return empty since it'll create empty filter - console.log(query4); + // console.log(query4); expect(query4.length).toEqual(0); }); @@ -276,21 +276,21 @@ test("Supabase Store Self Query Retriever Test With Default Filter And Merge Ope const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); - console.log(query1); + // console.log(query1); expect(query1.length).toEqual(0); const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); - console.log(query2); + // console.log(query2); expect(query2.length).toEqual(2); const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" 
); - console.log(query3); + // console.log(query3); expect(query3.length).toEqual(1); const query4 = await selfQueryRetriever.getRelevantDocuments("What is what"); // query4 has to empty document, since we can't use "or" operator - console.log(query4); + // console.log(query4); expect(query4.length).toEqual(0); }); @@ -431,21 +431,21 @@ test("Supabase Store Self Query Retriever Test With Default Filter Or Merge Oper const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); - console.log(query1); + // console.log(query1); expect(query1.length).toEqual(5); const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); - console.log(query2); + // console.log(query2); expect(query2.length).toEqual(6); const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" ); - console.log(query3); + // console.log(query3); expect(query3.length).toEqual(5); const query4 = await selfQueryRetriever.getRelevantDocuments("What is what"); - console.log(query4); + // console.log(query4); expect(query4.length).toEqual(5); }); @@ -585,20 +585,20 @@ test("Supabase Store Self Query Retriever Test With Default Filter And Merge Ope const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); - console.log(query1); + // console.log(query1); expect(query1.length).toEqual(0); const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); - console.log(query2); + // console.log(query2); expect(query2.length).toEqual(2); const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" 
); - console.log(query3); + // console.log(query3); expect(query3.length).toEqual(1); const query4 = await selfQueryRetriever.getRelevantDocuments("What is what"); // query4 has to empty document, since we can't use "or" operator - console.log(query4); + // console.log(query4); expect(query4.length).toEqual(0); }); diff --git a/libs/langchain-community/src/structured_query/tests/vectara_self_query.int.test.ts b/libs/langchain-community/src/structured_query/tests/vectara_self_query.int.test.ts index ae5e2fc92ae7..6ae451af81f9 100644 --- a/libs/langchain-community/src/structured_query/tests/vectara_self_query.int.test.ts +++ b/libs/langchain-community/src/structured_query/tests/vectara_self_query.int.test.ts @@ -105,7 +105,7 @@ test.skip("Vectara Self Query Retriever Test", async () => { const query4 = await selfQueryRetriever.getRelevantDocuments( "Wau wau wau wau hello gello hello?" ); - console.log(query1, query2, query3, query4); + // console.log(query1, query2, query3, query4); expect(query1.length).toBe(2); expect(query2.length).toBe(1); expect(query3.length).toBe(1); diff --git a/libs/langchain-community/src/tools/tests/brave_search.int.test.ts b/libs/langchain-community/src/tools/tests/brave_search.int.test.ts index 6e1e3692c9bd..b0703c0ab108 100644 --- a/libs/langchain-community/src/tools/tests/brave_search.int.test.ts +++ b/libs/langchain-community/src/tools/tests/brave_search.int.test.ts @@ -4,7 +4,9 @@ import { BraveSearch } from "../brave_search.js"; test.skip("BraveSearchTool", async () => { const tool = new BraveSearch(); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await tool.invoke("What is Langchain?"); - console.log({ result }); + // console.log({ result }); }); diff --git a/libs/langchain-community/src/tools/tests/discord.int.test.ts b/libs/langchain-community/src/tools/tests/discord.int.test.ts index b8920e7c5809..08384ef81deb 100644 --- 
a/libs/langchain-community/src/tools/tests/discord.int.test.ts +++ b/libs/langchain-community/src/tools/tests/discord.int.test.ts @@ -10,60 +10,40 @@ import { test.skip("DiscordGetMessagesTool", async () => { const tool = new DiscordGetMessagesTool(); - try { - const result = await tool.invoke("1153400523718938780"); - console.log(result); - expect(result).toBeTruthy(); - expect(result).not.toEqual("Channel not found."); - expect(result).not.toEqual("Error getting messages."); - } catch (error) { - console.error(error); - } + const result = await tool.invoke("1153400523718938780"); + // console.log(result); + expect(result).toBeTruthy(); + expect(result).not.toEqual("Channel not found."); + expect(result).not.toEqual("Error getting messages."); }); test.skip("DiscordGetGuildsTool", async () => { const tool = new DiscordGetGuildsTool(); - try { - const result = await tool.invoke(""); - console.log(result); - expect(result).toBeTruthy(); - expect(result).not.toEqual("Error getting guilds."); - } catch (error) { - console.error(error); - } + const result = await tool.invoke(""); + // console.log(result); + expect(result).toBeTruthy(); + expect(result).not.toEqual("Error getting guilds."); }); test.skip("DiscordChannelSearchTool", async () => { const tool = new DiscordChannelSearchTool(); - try { - const result = await tool.invoke("Test"); - console.log(result); - expect(result).toBeTruthy(); - expect(result).not.toEqual("Error searching through channel."); - } catch (error) { - console.error(error); - } + const result = await tool.invoke("Test"); + // console.log(result); + expect(result).toBeTruthy(); + expect(result).not.toEqual("Error searching through channel."); }); test.skip("DiscordGetTextChannelsTool", async () => { const tool = new DiscordGetTextChannelsTool(); - try { - const result = await tool.invoke("1153400523718938775"); - console.log(result); - expect(result).toBeTruthy(); - expect(result).not.toEqual("Error getting text channels."); - } catch (error) 
{ - console.error(error); - } + const result = await tool.invoke("1153400523718938775"); + // console.log(result); + expect(result).toBeTruthy(); + expect(result).not.toEqual("Error getting text channels."); }); test.skip("DiscordSendMessagesTool", async () => { const tool = new DiscordSendMessagesTool(); - try { - const result = await tool.invoke("test message from new code"); - console.log(result); - expect(result).toEqual("Message sent successfully."); - } catch (err) { - console.log(err); - } + const result = await tool.invoke("test message from new code"); + // console.log(result); + expect(result).toEqual("Message sent successfully."); }); diff --git a/libs/langchain-community/src/tools/tests/google_custom_search.int.test.ts b/libs/langchain-community/src/tools/tests/google_custom_search.int.test.ts index 1b784d65350f..e0e340b6832d 100644 --- a/libs/langchain-community/src/tools/tests/google_custom_search.int.test.ts +++ b/libs/langchain-community/src/tools/tests/google_custom_search.int.test.ts @@ -4,7 +4,9 @@ import { GoogleCustomSearch } from "../google_custom_search.js"; test.skip("GoogleCustomSearchTool", async () => { const tool = new GoogleCustomSearch(); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await tool.invoke("What is Langchain?"); - console.log({ result }); + // console.log({ result }); }); diff --git a/libs/langchain-community/src/tools/tests/wikipedia.int.test.ts b/libs/langchain-community/src/tools/tests/wikipedia.int.test.ts index 0811f3736379..bf3f4cd35b50 100644 --- a/libs/langchain-community/src/tools/tests/wikipedia.int.test.ts +++ b/libs/langchain-community/src/tools/tests/wikipedia.int.test.ts @@ -10,13 +10,13 @@ test.skip("WikipediaQueryRunTool returns a string for valid query", async () => test.skip("WikipediaQueryRunTool returns non-empty string for valid query", async () => { const tool = new WikipediaQueryRun(); const result = await tool.invoke("Langchain"); - 
console.log(result); + // console.log(result); expect(result).not.toBe(""); }); test.skip("WikipediaQueryRunTool returns 'No good Wikipedia Search Result was found' for bad query", async () => { const tool = new WikipediaQueryRun(); const result = await tool.invoke("kjdsfklfjskladjflkdsajflkadsjf"); - console.log(result); + // console.log(result); expect(result).toBe("No good Wikipedia Search Result was found"); }); diff --git a/libs/langchain-community/src/vectorstores/tests/astradb.int.test.ts b/libs/langchain-community/src/vectorstores/tests/astradb.int.test.ts index 530cf598e66e..98febdfffa48 100644 --- a/libs/langchain-community/src/vectorstores/tests/astradb.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/astradb.int.test.ts @@ -36,7 +36,7 @@ describe.skip("AstraDBVectorStore", () => { try { await db.dropCollection(astraConfig.collection); } catch (e) { - console.debug("Collection doesn't exist yet, skipping drop"); + // console.debug("Collection doesn't exist yet, skipping drop"); } }); diff --git a/libs/langchain-community/src/vectorstores/tests/elasticsearch.int.test.ts b/libs/langchain-community/src/vectorstores/tests/elasticsearch.int.test.ts index 9f883526a584..cc22163c7763 100644 --- a/libs/langchain-community/src/vectorstores/tests/elasticsearch.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/elasticsearch.int.test.ts @@ -78,7 +78,7 @@ describe("ElasticVectorSearch", () => { expect(results3).toHaveLength(3); - console.log(`Upserted:`, results3); + // console.log(`Upserted:`, results3); await store.delete({ ids: ids.slice(2) }); diff --git a/libs/langchain-community/src/vectorstores/tests/faiss.int.test.ts b/libs/langchain-community/src/vectorstores/tests/faiss.int.test.ts index cf3fc3e894a8..ffed4dc7202f 100644 --- a/libs/langchain-community/src/vectorstores/tests/faiss.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/faiss.int.test.ts @@ -64,7 +64,7 @@ test("Test FaissStore.load and 
FaissStore.save", async () => { const tempDirectory = await fs.mkdtemp(path.join(os.tmpdir(), "lcjs-")); - console.log(tempDirectory); + // console.log(tempDirectory); await vectorStore.save(tempDirectory); @@ -165,7 +165,7 @@ And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketan const tempDirectory = await fs.mkdtemp(path.join(os.tmpdir(), "lcjs-")); - console.log(tempDirectory); + // console.log(tempDirectory); await loadedFromPythonVectorStore.save(tempDirectory); diff --git a/libs/langchain-community/src/vectorstores/tests/googlevertexai.int.test.ts b/libs/langchain-community/src/vectorstores/tests/googlevertexai.int.test.ts index 40148aeb3c82..dd29a7c1d561 100644 --- a/libs/langchain-community/src/vectorstores/tests/googlevertexai.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/googlevertexai.int.test.ts @@ -40,7 +40,7 @@ describe("Vertex AI matching", () => { test.skip("public endpoint", async () => { const apiendpoint = await engine.determinePublicAPIEndpoint(); - console.log(apiendpoint); + // console.log(apiendpoint); expect(apiendpoint).toHaveProperty("apiEndpoint"); expect(apiendpoint).toHaveProperty("deployedIndexId"); }); @@ -48,12 +48,12 @@ describe("Vertex AI matching", () => { test.skip("store", async () => { const doc = new Document({ pageContent: "this" }); await engine.addDocuments([doc]); - console.log(store._docs); + // console.log(store._docs); }); test.skip("query", async () => { const results = await engine.similaritySearch("that"); - console.log("query", results); + // console.log("query", results); expect(results?.length).toBeGreaterThanOrEqual(1); }); @@ -65,27 +65,27 @@ describe("Vertex AI matching", () => { }, ]; const results = await engine.similaritySearch("that", 4, filters); - console.log("query", results); + // console.log("query", results); expect(results?.length).toEqual(0); }); test.skip("delete", async () => { const newDoc = new Document({ pageContent: "this" }); await 
engine.addDocuments([newDoc]); - console.log("added", newDoc); + // console.log("added", newDoc); const oldResults: IdDocument[] = await engine.similaritySearch("this", 10); expect(oldResults?.length).toBeGreaterThanOrEqual(1); - console.log(oldResults); + // console.log(oldResults); const oldIds = oldResults.map((doc) => doc.id!); await engine.delete({ ids: oldIds }); - console.log("deleted", oldIds); + // console.log("deleted", oldIds); const newResults: IdDocument[] = await engine.similaritySearch("this", 10); expect(newResults).not.toEqual(oldResults); - console.log(newResults); + // console.log(newResults); }); describe("restrictions", () => { diff --git a/libs/langchain-community/src/vectorstores/tests/googlevertexai.test.ts b/libs/langchain-community/src/vectorstores/tests/googlevertexai.test.ts index 2553790bee2d..826954ad1f1a 100644 --- a/libs/langchain-community/src/vectorstores/tests/googlevertexai.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/googlevertexai.test.ts @@ -48,7 +48,7 @@ describe("Vertex AI matching", () => { charlie: ["e", "f"], }; const flat = engine.cleanMetadata(m); - console.log("flatten metadata", flat); + // console.log("flatten metadata", flat); expect(flat.alpha).toEqual("a"); expect(flat["bravo.uno"]).toEqual(1); expect(flat["bravo.dos"]).toEqual("two"); @@ -75,7 +75,7 @@ describe("Vertex AI matching", () => { charlie: ["e", "f"], }; const r = engine.metadataToRestrictions(m); - console.log("restrictions", r); + // console.log("restrictions", r); expect(r[0].namespace).toEqual("alpha"); expect(r[0].allowList).toEqual(["a"]); expect(r[4].namespace).toEqual("bravo.quatro"); diff --git a/libs/langchain-community/src/vectorstores/tests/hanavector.int.test.ts b/libs/langchain-community/src/vectorstores/tests/hanavector.int.test.ts index 465d7736c2e2..42a1fa945576 100644 --- a/libs/langchain-community/src/vectorstores/tests/hanavector.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/hanavector.int.test.ts 
@@ -67,13 +67,13 @@ async function connectToHANA() { if (err) { reject(err); } else { - console.log("Connected to SAP HANA successfully."); + // console.log("Connected to SAP HANA successfully."); resolve(); } }); }); } catch (error) { - console.error("Connect error", error); + // console.error("Connect error", error); } } @@ -523,7 +523,7 @@ describe("Deletion tests", () => { await vectorStore.delete({}); } catch (error) { exceptionOccurred = true; - console.log(error); + // console.log(error); } expect(exceptionOccurred).toBe(true); @@ -536,7 +536,7 @@ describe("Deletion tests", () => { }); } catch (error) { exceptionOccurred = true; - console.log(error); + // console.log(error); } expect(exceptionOccurred).toBe(true); }); @@ -628,7 +628,7 @@ describe("Tests on HANA side", () => { await vectordb.initialize(); } catch (error) { // An Error is expected here - console.log(error); + // console.log(error); exceptionOccurred = true; } @@ -662,7 +662,7 @@ describe("Tests on HANA side", () => { await vectordb.initialize(); } catch (error) { // An Error is expected here - console.log(error); + // console.log(error); exceptionOccurred = true; } @@ -821,7 +821,7 @@ describe("Tests on HANA side", () => { args ); } catch (error) { - console.log(error); + // console.log(error); exceptionOccurred = true; } expect(exceptionOccurred).toBe(true); @@ -839,7 +839,7 @@ describe("Tests on HANA side", () => { args ); } catch (error) { - console.log(error); + // console.log(error); exceptionOccurred = true; } expect(exceptionOccurred).toBe(true); @@ -862,7 +862,7 @@ describe("Tests on HANA side", () => { try { await vector.similaritySearch("foo", 3, { wrong_type: 0.1 }); } catch (error) { - console.log(error); + // console.log(error); exceptionOccurred = true; } expect(exceptionOccurred).toBe(true); diff --git a/libs/langchain-community/src/vectorstores/tests/hnswlib.int.test.ts b/libs/langchain-community/src/vectorstores/tests/hnswlib.int.test.ts index 79872e8cf474..020276e2d063 100644 
--- a/libs/langchain-community/src/vectorstores/tests/hnswlib.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/hnswlib.int.test.ts @@ -65,7 +65,7 @@ test("Test HNSWLib.load, HNSWLib.save, and HNSWLib.delete", async () => { const tempDirectory = await fs.mkdtemp(path.join(os.tmpdir(), "lcjs-")); - console.log(tempDirectory); + // console.log(tempDirectory); await vectorStore.save(tempDirectory); diff --git a/libs/langchain-community/src/vectorstores/tests/neo4j_vector.int.test.ts b/libs/langchain-community/src/vectorstores/tests/neo4j_vector.int.test.ts index 4f492b55463d..ae61646514d1 100644 --- a/libs/langchain-community/src/vectorstores/tests/neo4j_vector.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/neo4j_vector.int.test.ts @@ -711,8 +711,8 @@ describe("Neo4j Vector", () => { } } - console.log("OUTPUT:", output); - console.log("EXPECTED OUTPUT:", expectedOutput); + // console.log("OUTPUT:", output); + // console.log("EXPECTED OUTPUT:", expectedOutput); expect(output.length).toEqual(expectedOutput.length); expect(output).toEqual(expect.arrayContaining(expectedOutput)); diff --git a/libs/langchain-community/src/vectorstores/tests/pgvector/pgvector.int.test.ts b/libs/langchain-community/src/vectorstores/tests/pgvector/pgvector.int.test.ts index a1a241c80e8a..5f01d3012b25 100644 --- a/libs/langchain-community/src/vectorstores/tests/pgvector/pgvector.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/pgvector/pgvector.int.test.ts @@ -75,26 +75,21 @@ describe("PGVectorStore", () => { }); test("PGvector can save documents with a list greater than default chunk size", async () => { - try { - // Extract the default chunk size and add one. 
- const docsToGenerate = pgvectorVectorStore.chunkSize + 1; - const documents = []; - for (let i = 1; i <= docsToGenerate; i += 1) { - documents.push({ pageContent: "Lorem Ipsum", metadata: { a: i } }); - } - await pgvectorVectorStore.addDocuments(documents); - - // Query the table to check the number of rows - const result = await pgvectorVectorStore.pool.query( - `SELECT COUNT(*) FROM "${tableName}"` - ); - const rowCount = parseInt(result.rows[0].count, 10); - // Check if the number of rows is equal to the number of documents added - expect(rowCount).toEqual(docsToGenerate); - } catch (e) { - console.error("Error: ", e); - throw e; + // Extract the default chunk size and add one. + const docsToGenerate = pgvectorVectorStore.chunkSize + 1; + const documents = []; + for (let i = 1; i <= docsToGenerate; i += 1) { + documents.push({ pageContent: "Lorem Ipsum", metadata: { a: i } }); } + await pgvectorVectorStore.addDocuments(documents); + + // Query the table to check the number of rows + const result = await pgvectorVectorStore.pool.query( + `SELECT COUNT(*) FROM "${tableName}"` + ); + const rowCount = parseInt(result.rows[0].count, 10); + // Check if the number of rows is equal to the number of documents added + expect(rowCount).toEqual(docsToGenerate); }); test("PGvector can save documents with ids", async () => { @@ -189,82 +184,72 @@ describe("PGVectorStore", () => { }); test("PGvector can delete document by id", async () => { - try { - const documents = [ - { pageContent: "Lorem Ipsum", metadata: { a: 1 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 2 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 3 } }, - ]; - await pgvectorVectorStore.addDocuments(documents); - - const result = await pgvectorVectorStore.pool.query( - `SELECT id FROM "${tableName}"` - ); - - const initialIds = result.rows.map((row) => row.id); - const firstIdToDelete = initialIds[0]; - const secondIdToDelete = initialIds[1]; - const idToKeep = initialIds[2]; - - await 
pgvectorVectorStore.delete({ - ids: [firstIdToDelete, secondIdToDelete], - }); - - const result2 = await pgvectorVectorStore.pool.query( - `SELECT id FROM "${tableName}"` - ); - - // Only one row should be left - expect(result2.rowCount).toEqual(1); - - // The deleted ids should not be in the result - const idsAfterDelete = result2.rows.map((row) => row.id); - expect(idsAfterDelete).not.toContain(firstIdToDelete); - expect(idsAfterDelete).not.toContain(secondIdToDelete); - - expect(idsAfterDelete).toContain(idToKeep); - } catch (e) { - console.error("Error: ", e); - throw e; - } + const documents = [ + { pageContent: "Lorem Ipsum", metadata: { a: 1 } }, + { pageContent: "Lorem Ipsum", metadata: { a: 2 } }, + { pageContent: "Lorem Ipsum", metadata: { a: 3 } }, + ]; + await pgvectorVectorStore.addDocuments(documents); + + const result = await pgvectorVectorStore.pool.query( + `SELECT id FROM "${tableName}"` + ); + + const initialIds = result.rows.map((row) => row.id); + const firstIdToDelete = initialIds[0]; + const secondIdToDelete = initialIds[1]; + const idToKeep = initialIds[2]; + + await pgvectorVectorStore.delete({ + ids: [firstIdToDelete, secondIdToDelete], + }); + + const result2 = await pgvectorVectorStore.pool.query( + `SELECT id FROM "${tableName}"` + ); + + // Only one row should be left + expect(result2.rowCount).toEqual(1); + + // The deleted ids should not be in the result + const idsAfterDelete = result2.rows.map((row) => row.id); + expect(idsAfterDelete).not.toContain(firstIdToDelete); + expect(idsAfterDelete).not.toContain(secondIdToDelete); + + expect(idsAfterDelete).toContain(idToKeep); }); test("PGvector can delete document by metadata", async () => { - try { - const documents = [ - { pageContent: "Lorem Ipsum", metadata: { a: 1, b: 1 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 2, b: 1 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 1, c: 1 } }, - ]; - await pgvectorVectorStore.addDocuments(documents); - const result = await 
pgvectorVectorStore.pool.query( - `SELECT id FROM "${tableName}"` - ); - - const initialIds = result.rows.map((row) => row.id); - - // Filter Matches 1st document - await pgvectorVectorStore.delete({ filter: { a: 1, b: 1 } }); - - const result2 = await pgvectorVectorStore.pool.query( - `SELECT id FROM "${tableName}"` - ); - - // Two rows should be left - expect(result2.rowCount).toEqual(2); - - const idsAfterDelete = result2.rows.map((row) => row.id); - - // The document with matching metadata should not be in the database - expect(idsAfterDelete).not.toContainEqual(initialIds[0]); - - // All other documents should still be in database - expect(idsAfterDelete).toContainEqual(initialIds[1]); - expect(idsAfterDelete).toContainEqual(initialIds[2]); - } catch (e) { - console.error("Error: ", e); - throw e; - } + const documents = [ + { pageContent: "Lorem Ipsum", metadata: { a: 1, b: 1 } }, + { pageContent: "Lorem Ipsum", metadata: { a: 2, b: 1 } }, + { pageContent: "Lorem Ipsum", metadata: { a: 1, c: 1 } }, + ]; + await pgvectorVectorStore.addDocuments(documents); + const result = await pgvectorVectorStore.pool.query( + `SELECT id FROM "${tableName}"` + ); + + const initialIds = result.rows.map((row) => row.id); + + // Filter Matches 1st document + await pgvectorVectorStore.delete({ filter: { a: 1, b: 1 } }); + + const result2 = await pgvectorVectorStore.pool.query( + `SELECT id FROM "${tableName}"` + ); + + // Two rows should be left + expect(result2.rowCount).toEqual(2); + + const idsAfterDelete = result2.rows.map((row) => row.id); + + // The document with matching metadata should not be in the database + expect(idsAfterDelete).not.toContainEqual(initialIds[0]); + + // All other documents should still be in database + expect(idsAfterDelete).toContainEqual(initialIds[1]); + expect(idsAfterDelete).toContainEqual(initialIds[2]); }); test.skip("PGvector supports different vector types", async () => { @@ -372,26 +357,21 @@ describe("PGVectorStore with collection", () => 
{ }); test("PGvector can save documents with a list greater than default chunk size", async () => { - try { - // Extract the default chunk size and add one. - const docsToGenerate = pgvectorVectorStore.chunkSize + 1; - const documents = []; - for (let i = 1; i <= docsToGenerate; i += 1) { - documents.push({ pageContent: "Lorem Ipsum", metadata: { a: i } }); - } - await pgvectorVectorStore.addDocuments(documents); - - // Query the table to check the number of rows - const result = await pgvectorVectorStore.pool.query( - `SELECT COUNT(*) FROM "${tableName}"` - ); - const rowCount = parseInt(result.rows[0].count, 10); - // Check if the number of rows is equal to the number of documents added - expect(rowCount).toEqual(docsToGenerate); - } catch (e) { - console.error("Error: ", e); - throw e; + // Extract the default chunk size and add one. + const docsToGenerate = pgvectorVectorStore.chunkSize + 1; + const documents = []; + for (let i = 1; i <= docsToGenerate; i += 1) { + documents.push({ pageContent: "Lorem Ipsum", metadata: { a: i } }); } + await pgvectorVectorStore.addDocuments(documents); + + // Query the table to check the number of rows + const result = await pgvectorVectorStore.pool.query( + `SELECT COUNT(*) FROM "${tableName}"` + ); + const rowCount = parseInt(result.rows[0].count, 10); + // Check if the number of rows is equal to the number of documents added + expect(rowCount).toEqual(docsToGenerate); }); test("PGvector can save documents with ids", async () => { @@ -448,82 +428,72 @@ describe("PGVectorStore with collection", () => { }); test("PGvector can delete document by id", async () => { - try { - const documents = [ - { pageContent: "Lorem Ipsum", metadata: { a: 1 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 2 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 3 } }, - ]; - await pgvectorVectorStore.addDocuments(documents); - - const result = await pgvectorVectorStore.pool.query( - `SELECT id FROM "${tableName}"` - ); - - const initialIds = 
result.rows.map((row) => row.id); - const firstIdToDelete = initialIds[0]; - const secondIdToDelete = initialIds[1]; - const idToKeep = initialIds[2]; - - await pgvectorVectorStore.delete({ - ids: [firstIdToDelete, secondIdToDelete], - }); - - const result2 = await pgvectorVectorStore.pool.query( - `SELECT id FROM "${tableName}"` - ); - - // Only one row should be left - expect(result2.rowCount).toEqual(1); - - // The deleted ids should not be in the result - const idsAfterDelete = result2.rows.map((row) => row.id); - expect(idsAfterDelete).not.toContain(firstIdToDelete); - expect(idsAfterDelete).not.toContain(secondIdToDelete); - - expect(idsAfterDelete).toContain(idToKeep); - } catch (e) { - console.error("Error: ", e); - throw e; - } + const documents = [ + { pageContent: "Lorem Ipsum", metadata: { a: 1 } }, + { pageContent: "Lorem Ipsum", metadata: { a: 2 } }, + { pageContent: "Lorem Ipsum", metadata: { a: 3 } }, + ]; + await pgvectorVectorStore.addDocuments(documents); + + const result = await pgvectorVectorStore.pool.query( + `SELECT id FROM "${tableName}"` + ); + + const initialIds = result.rows.map((row) => row.id); + const firstIdToDelete = initialIds[0]; + const secondIdToDelete = initialIds[1]; + const idToKeep = initialIds[2]; + + await pgvectorVectorStore.delete({ + ids: [firstIdToDelete, secondIdToDelete], + }); + + const result2 = await pgvectorVectorStore.pool.query( + `SELECT id FROM "${tableName}"` + ); + + // Only one row should be left + expect(result2.rowCount).toEqual(1); + + // The deleted ids should not be in the result + const idsAfterDelete = result2.rows.map((row) => row.id); + expect(idsAfterDelete).not.toContain(firstIdToDelete); + expect(idsAfterDelete).not.toContain(secondIdToDelete); + + expect(idsAfterDelete).toContain(idToKeep); }); test("PGvector can delete document by metadata", async () => { - try { - const documents = [ - { pageContent: "Lorem Ipsum", metadata: { a: 1, b: 1 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 2, 
b: 1 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 1, c: 1 } }, - ]; - await pgvectorVectorStore.addDocuments(documents); - const result = await pgvectorVectorStore.pool.query( - `SELECT id FROM "${tableName}"` - ); - - const initialIds = result.rows.map((row) => row.id); - - // Filter Matches 1st document - await pgvectorVectorStore.delete({ filter: { a: 1, b: 1 } }); - - const result2 = await pgvectorVectorStore.pool.query( - `SELECT id FROM "${tableName}"` - ); - - // Two rows should be left - expect(result2.rowCount).toEqual(2); - - const idsAfterDelete = result2.rows.map((row) => row.id); - - // The document with matching metadata should not be in the database - expect(idsAfterDelete).not.toContainEqual(initialIds[0]); - - // All other documents should still be in database - expect(idsAfterDelete).toContainEqual(initialIds[1]); - expect(idsAfterDelete).toContainEqual(initialIds[2]); - } catch (e) { - console.error("Error: ", e); - throw e; - } + const documents = [ + { pageContent: "Lorem Ipsum", metadata: { a: 1, b: 1 } }, + { pageContent: "Lorem Ipsum", metadata: { a: 2, b: 1 } }, + { pageContent: "Lorem Ipsum", metadata: { a: 1, c: 1 } }, + ]; + await pgvectorVectorStore.addDocuments(documents); + const result = await pgvectorVectorStore.pool.query( + `SELECT id FROM "${tableName}"` + ); + + const initialIds = result.rows.map((row) => row.id); + + // Filter Matches 1st document + await pgvectorVectorStore.delete({ filter: { a: 1, b: 1 } }); + + const result2 = await pgvectorVectorStore.pool.query( + `SELECT id FROM "${tableName}"` + ); + + // Two rows should be left + expect(result2.rowCount).toEqual(2); + + const idsAfterDelete = result2.rows.map((row) => row.id); + + // The document with matching metadata should not be in the database + expect(idsAfterDelete).not.toContainEqual(initialIds[0]); + + // All other documents should still be in database + expect(idsAfterDelete).toContainEqual(initialIds[1]); + 
expect(idsAfterDelete).toContainEqual(initialIds[2]); }); }); @@ -615,26 +585,21 @@ describe("PGVectorStore with schema", () => { }); test("PGvector can save documents with a list greater than default chunk size", async () => { - try { - // Extract the default chunk size and add one. - const docsToGenerate = pgvectorVectorStore.chunkSize + 1; - const documents = []; - for (let i = 1; i <= docsToGenerate; i += 1) { - documents.push({ pageContent: "Lorem Ipsum", metadata: { a: i } }); - } - await pgvectorVectorStore.addDocuments(documents); - - // Query the table to check the number of rows - const result = await pgvectorVectorStore.pool.query( - `SELECT COUNT(*) FROM ${computedTableName}` - ); - const rowCount = parseInt(result.rows[0].count, 10); - // Check if the number of rows is equal to the number of documents added - expect(rowCount).toEqual(docsToGenerate); - } catch (e) { - console.error("Error: ", e); - throw e; + // Extract the default chunk size and add one. + const docsToGenerate = pgvectorVectorStore.chunkSize + 1; + const documents = []; + for (let i = 1; i <= docsToGenerate; i += 1) { + documents.push({ pageContent: "Lorem Ipsum", metadata: { a: i } }); } + await pgvectorVectorStore.addDocuments(documents); + + // Query the table to check the number of rows + const result = await pgvectorVectorStore.pool.query( + `SELECT COUNT(*) FROM ${computedTableName}` + ); + const rowCount = parseInt(result.rows[0].count, 10); + // Check if the number of rows is equal to the number of documents added + expect(rowCount).toEqual(docsToGenerate); }); test("PGvector can save documents with ids", async () => { @@ -691,82 +656,72 @@ describe("PGVectorStore with schema", () => { }); test("PGvector can delete document by id", async () => { - try { - const documents = [ - { pageContent: "Lorem Ipsum", metadata: { a: 1 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 2 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 3 } }, - ]; - await 
pgvectorVectorStore.addDocuments(documents); - - const result = await pgvectorVectorStore.pool.query( - `SELECT id FROM ${computedTableName}` - ); - - const initialIds = result.rows.map((row) => row.id); - const firstIdToDelete = initialIds[0]; - const secondIdToDelete = initialIds[1]; - const idToKeep = initialIds[2]; - - await pgvectorVectorStore.delete({ - ids: [firstIdToDelete, secondIdToDelete], - }); - - const result2 = await pgvectorVectorStore.pool.query( - `SELECT id FROM ${computedTableName}` - ); - - // Only one row should be left - expect(result2.rowCount).toEqual(1); - - // The deleted ids should not be in the result - const idsAfterDelete = result2.rows.map((row) => row.id); - expect(idsAfterDelete).not.toContain(firstIdToDelete); - expect(idsAfterDelete).not.toContain(secondIdToDelete); - - expect(idsAfterDelete).toContain(idToKeep); - } catch (e) { - console.error("Error: ", e); - throw e; - } + const documents = [ + { pageContent: "Lorem Ipsum", metadata: { a: 1 } }, + { pageContent: "Lorem Ipsum", metadata: { a: 2 } }, + { pageContent: "Lorem Ipsum", metadata: { a: 3 } }, + ]; + await pgvectorVectorStore.addDocuments(documents); + + const result = await pgvectorVectorStore.pool.query( + `SELECT id FROM ${computedTableName}` + ); + + const initialIds = result.rows.map((row) => row.id); + const firstIdToDelete = initialIds[0]; + const secondIdToDelete = initialIds[1]; + const idToKeep = initialIds[2]; + + await pgvectorVectorStore.delete({ + ids: [firstIdToDelete, secondIdToDelete], + }); + + const result2 = await pgvectorVectorStore.pool.query( + `SELECT id FROM ${computedTableName}` + ); + + // Only one row should be left + expect(result2.rowCount).toEqual(1); + + // The deleted ids should not be in the result + const idsAfterDelete = result2.rows.map((row) => row.id); + expect(idsAfterDelete).not.toContain(firstIdToDelete); + expect(idsAfterDelete).not.toContain(secondIdToDelete); + + expect(idsAfterDelete).toContain(idToKeep); }); test("PGvector 
can delete document by metadata", async () => { - try { - const documents = [ - { pageContent: "Lorem Ipsum", metadata: { a: 1, b: 1 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 2, b: 1 } }, - { pageContent: "Lorem Ipsum", metadata: { a: 1, c: 1 } }, - ]; - await pgvectorVectorStore.addDocuments(documents); - const result = await pgvectorVectorStore.pool.query( - `SELECT id FROM ${computedTableName}` - ); - - const initialIds = result.rows.map((row) => row.id); - - // Filter Matches 1st document - await pgvectorVectorStore.delete({ filter: { a: 1, b: 1 } }); - - const result2 = await pgvectorVectorStore.pool.query( - `SELECT id FROM ${computedTableName}` - ); - - // Two rows should be left - expect(result2.rowCount).toEqual(2); - - const idsAfterDelete = result2.rows.map((row) => row.id); - - // The document with matching metadata should not be in the database - expect(idsAfterDelete).not.toContainEqual(initialIds[0]); - - // All other documents should still be in database - expect(idsAfterDelete).toContainEqual(initialIds[1]); - expect(idsAfterDelete).toContainEqual(initialIds[2]); - } catch (e) { - console.error("Error: ", e); - throw e; - } + const documents = [ + { pageContent: "Lorem Ipsum", metadata: { a: 1, b: 1 } }, + { pageContent: "Lorem Ipsum", metadata: { a: 2, b: 1 } }, + { pageContent: "Lorem Ipsum", metadata: { a: 1, c: 1 } }, + ]; + await pgvectorVectorStore.addDocuments(documents); + const result = await pgvectorVectorStore.pool.query( + `SELECT id FROM ${computedTableName}` + ); + + const initialIds = result.rows.map((row) => row.id); + + // Filter Matches 1st document + await pgvectorVectorStore.delete({ filter: { a: 1, b: 1 } }); + + const result2 = await pgvectorVectorStore.pool.query( + `SELECT id FROM ${computedTableName}` + ); + + // Two rows should be left + expect(result2.rowCount).toEqual(2); + + const idsAfterDelete = result2.rows.map((row) => row.id); + + // The document with matching metadata should not be in the database + 
expect(idsAfterDelete).not.toContainEqual(initialIds[0]); + + // All other documents should still be in database + expect(idsAfterDelete).toContainEqual(initialIds[1]); + expect(idsAfterDelete).toContainEqual(initialIds[2]); }); }); diff --git a/libs/langchain-community/src/vectorstores/tests/turbopuffer.int.test.ts b/libs/langchain-community/src/vectorstores/tests/turbopuffer.int.test.ts index 345aa5116d19..3a1958408b0e 100644 --- a/libs/langchain-community/src/vectorstores/tests/turbopuffer.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/turbopuffer.int.test.ts @@ -34,7 +34,7 @@ test("similaritySearchVectorWithScore", async () => { { pageContent: "bye", metadata: { a: createdAt } }, { pageContent: "what's this", metadata: { a: createdAt } }, ]); - console.log("added docs"); + // console.log("added docs"); const results = await store.similaritySearch(createdAt.toString(), 1); expect(results).toHaveLength(1); diff --git a/libs/langchain-exa/src/tests/retrievers.int.test.ts b/libs/langchain-exa/src/tests/retrievers.int.test.ts index 263faf8177f9..724beb32af9b 100644 --- a/libs/langchain-exa/src/tests/retrievers.int.test.ts +++ b/libs/langchain-exa/src/tests/retrievers.int.test.ts @@ -12,7 +12,7 @@ test("ExaRetriever can retrieve some data", async () => { "What does the AI company LangChain do?" 
); - console.log("results:", JSON.stringify(results, null, 2)); + // console.log("results:", JSON.stringify(results, null, 2)); expect(results.length).toBeGreaterThan(0); // verify metadata fields are populated expect(results[0].metadata.url.length).toBeGreaterThan(1); diff --git a/libs/langchain-exa/src/tests/tools.int.test.ts b/libs/langchain-exa/src/tests/tools.int.test.ts index 529691560614..fd723cc85c55 100644 --- a/libs/langchain-exa/src/tests/tools.int.test.ts +++ b/libs/langchain-exa/src/tests/tools.int.test.ts @@ -14,7 +14,7 @@ test("ExaSearchResults can perform a search given a string query", async () => { const parsedData = JSON.parse(toolData); expect("results" in parsedData).toBeTruthy(); - console.log("results:", parsedData.results); + // console.log("results:", parsedData.results); expect(parsedData.results.length).toBeGreaterThan(0); }); @@ -28,6 +28,6 @@ test("ExaFindSimilarResults can perform a simalitaty search with a provided URL" const parsedData = JSON.parse(toolData); expect("results" in parsedData).toBeTruthy(); - console.log("results:", parsedData.results); + // console.log("results:", parsedData.results); expect(parsedData.results.length).toBeGreaterThan(0); }); diff --git a/libs/langchain-google-common/src/tests/chat_models.test.ts b/libs/langchain-google-common/src/tests/chat_models.test.ts index 6f0b07806304..71f3aa876ffe 100644 --- a/libs/langchain-google-common/src/tests/chat_models.test.ts +++ b/libs/langchain-google-common/src/tests/chat_models.test.ts @@ -140,9 +140,11 @@ describe("Mock ChatGoogle", () => { new AIMessage("H"), new HumanMessage("Flip it again"), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await model.invoke(messages); - console.log("record", JSON.stringify(record, null, 1)); - console.log("result", JSON.stringify(result, null, 1)); + // console.log("record", JSON.stringify(record, null, 1)); + // console.log("result", JSON.stringify(result, 
null, 1)); expect(record.opts).toBeDefined(); expect(record.opts.data).toBeDefined(); @@ -176,9 +178,11 @@ describe("Mock ChatGoogle", () => { new AIMessage("H"), new HumanMessage("Flip it again"), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await model.invoke(messages); - console.log("record", JSON.stringify(record, null, 1)); - console.log("result", JSON.stringify(result, null, 1)); + // console.log("record", JSON.stringify(record, null, 1)); + // console.log("result", JSON.stringify(result, null, 1)); expect(record.opts).toBeDefined(); expect(record.opts.data).toBeDefined(); @@ -269,9 +273,11 @@ describe("Mock ChatGoogle", () => { new AIMessage("H"), new HumanMessage("Flip it again"), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await model.invoke(messages); - console.log("record", JSON.stringify(record, null, 1)); - console.log("result", JSON.stringify(result, null, 1)); + // console.log("record", JSON.stringify(record, null, 1)); + // console.log("result", JSON.stringify(result, null, 1)); expect(record.opts).toBeDefined(); expect(record.opts.data).toBeDefined(); @@ -312,9 +318,11 @@ describe("Mock ChatGoogle", () => { new AIMessage("H"), new HumanMessage("Flip it again"), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await model.invoke(messages); - console.log("record", JSON.stringify(record, null, 1)); - console.log("result", JSON.stringify(result, null, 1)); + // console.log("record", JSON.stringify(record, null, 1)); + // console.log("result", JSON.stringify(result, null, 1)); expect(record.opts).toBeDefined(); expect(record.opts.data).toBeDefined(); @@ -355,9 +363,11 @@ describe("Mock ChatGoogle", () => { new AIMessage("H"), new HumanMessage("Flip it again"), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // 
@ts-expect-error unused var const result = await model.invoke(messages); - console.log("record", JSON.stringify(record, null, 1)); - console.log("result", JSON.stringify(result, null, 1)); + // console.log("record", JSON.stringify(record, null, 1)); + // console.log("result", JSON.stringify(result, null, 1)); expect(record.opts).toBeDefined(); expect(record.opts.data).toBeDefined(); @@ -396,9 +406,11 @@ describe("Mock ChatGoogle", () => { new AIMessage("H"), new HumanMessage("Flip it again"), ]; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await model.invoke(messages); - console.log("record", JSON.stringify(record, null, 1)); - console.log("result", JSON.stringify(result, null, 1)); + // console.log("record", JSON.stringify(record, null, 1)); + // console.log("result", JSON.stringify(result, null, 1)); expect(record.opts).toBeDefined(); expect(record.opts.data).toBeDefined(); @@ -441,8 +453,10 @@ describe("Mock ChatGoogle", () => { let caught = false; try { + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await model.invoke(messages); - console.log(result); + // console.log(result); } catch (xx) { caught = true; } @@ -471,8 +485,10 @@ describe("Mock ChatGoogle", () => { let caught = false; try { + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const result = await model.invoke(messages); - console.log(result); + // console.log(result); } catch (xx) { caught = true; } @@ -607,7 +623,7 @@ describe("Mock ChatGoogle", () => { const result = await model.invoke("What?"); - console.log(JSON.stringify(record, null, 1)); + // console.log(JSON.stringify(record, null, 1)); expect(result).toBeDefined(); @@ -681,7 +697,7 @@ describe("Mock ChatGoogle", () => { await model.invoke("What?"); - console.log(JSON.stringify(record, null, 1)); + // console.log(JSON.stringify(record, null, 1)); const 
toolsResult = record?.opts?.data?.tools; expect(toolsResult).toBeDefined(); @@ -760,7 +776,7 @@ describe("Mock ChatGoogle", () => { const result = await model.invoke("What?"); - console.log(JSON.stringify(result, null, 1)); + // console.log(JSON.stringify(result, null, 1)); expect(result).toHaveProperty("content"); expect(result.content).toBe(""); const args = result?.lc_kwargs?.additional_kwargs; @@ -840,7 +856,7 @@ describe("Mock ChatGoogle", () => { const result = await model.invoke(messages); expect(result).toBeDefined(); - console.log(JSON.stringify(record?.opts?.data, null, 1)); + // console.log(JSON.stringify(record?.opts?.data, null, 1)); }); }); diff --git a/libs/langchain-google-common/src/tests/llms.test.ts b/libs/langchain-google-common/src/tests/llms.test.ts index 829456f50b11..ff68b776e844 100644 --- a/libs/langchain-google-common/src/tests/llms.test.ts +++ b/libs/langchain-google-common/src/tests/llms.test.ts @@ -191,7 +191,7 @@ describe("Mock Google LLM", () => { "1. Sock it to Me!\n2. Heel Yeah Socks\n3. Sole Mates\n4. Happy Soles\n5. Toe-tally Awesome Socks\n6. Sock Appeal\n7. Footsie Wootsies\n8. Thread Heads\n9. Sock Squad\n10. Sock-a-licious\n11. Darn Good Socks\n12. Sockcessories\n13. Sole Searching\n14. Sockstar\n15. Socktopia\n16. Sockology\n17. Elevated Toes\n18. The Urban Sole\n19. The Hippie Sole\n20. Sole Fuel" ); // expect(record.opts.url).toEqual(`https://us-central1-aiplatform.googleapis.com/v1/projects/${projectId}/locations/us-central1/publishers/google/models/gemini-pro:generateContent`) - console.log("record", JSON.stringify(record, null, 2)); + // console.log("record", JSON.stringify(record, null, 2)); }); test("1: invoke", async () => { @@ -211,14 +211,14 @@ describe("Mock Google LLM", () => { "1. Sock it to Me!\n2. Heel Yeah Socks\n3. Sole Mates\n4. Happy Soles\n5. Toe-tally Awesome Socks\n6. Sock Appeal\n7. Footsie Wootsies\n8. Thread Heads\n9. Sock Squad\n10. Sock-a-licious\n11. Darn Good Socks\n12. Sockcessories\n13. 
Sole Searching\n14. Sockstar\n15. Socktopia\n16. Sockology\n17. Elevated Toes\n18. The Urban Sole\n19. The Hippie Sole\n20. Sole Fuel" ); // expect(record.opts.url).toEqual(`https://us-central1-aiplatform.googleapis.com/v1/projects/${projectId}/locations/us-central1/publishers/google/models/gemini-pro:generateContent`) - console.log("record", JSON.stringify(record, null, 2)); + // console.log("record", JSON.stringify(record, null, 2)); expect(record.opts).toHaveProperty("data"); expect(record.opts.data).toHaveProperty("contents"); expect(record.opts.data.contents).toHaveLength(1); expect(record.opts.data.contents[0]).toHaveProperty("parts"); const parts = record?.opts?.data?.contents[0]?.parts; - console.log(parts); + // console.log(parts); expect(parts).toHaveLength(1); expect(parts[0]).toHaveProperty("text"); expect(parts[0].text).toEqual("Hello world"); @@ -243,7 +243,7 @@ describe("Mock Google LLM", () => { expect(record.opts.url).toEqual( `https://us-central1-aiplatform.googleapis.com/v1/projects/${projectId}/locations/us-central1/publishers/google/models/gemini-pro:streamGenerateContent` ); - console.log("record", JSON.stringify(record, null, 2)); + // console.log("record", JSON.stringify(record, null, 2)); }); test("3: streamGenerateContent - streaming", async () => { @@ -266,7 +266,7 @@ describe("Mock Google LLM", () => { } expect(responseArray).toHaveLength(6); - console.log("record", JSON.stringify(record, null, 2)); + // console.log("record", JSON.stringify(record, null, 2)); }); test("4: streamGenerateContent - non-streaming - safety exception", async () => { @@ -342,7 +342,7 @@ describe("Mock Google LLM", () => { } expect(responseArray).toHaveLength(4); - console.log("record", JSON.stringify(record, null, 2)); + // console.log("record", JSON.stringify(record, null, 2)); expect(caught).toEqual(true); }); @@ -372,7 +372,7 @@ describe("Mock Google LLM", () => { expect(responseArray).toHaveLength(6); expect(responseArray[4]).toEqual("I'm sorry Dave, but I 
can't do that."); - console.log("record", JSON.stringify(record, null, 2)); + // console.log("record", JSON.stringify(record, null, 2)); }); test("6: predictMessages image blue-square", async () => { @@ -406,14 +406,14 @@ describe("Mock Google LLM", () => { ]; const res = await model.predictMessages(messages); - console.log("record", record); + // console.log("record", record); expect(record.opts).toHaveProperty("data"); expect(record.opts.data).toHaveProperty("contents"); expect(record.opts.data.contents).toHaveLength(1); expect(record.opts.data.contents[0]).toHaveProperty("parts"); const parts = record?.opts?.data?.contents[0]?.parts; - console.log(parts); + // console.log(parts); expect(parts).toHaveLength(2); expect(parts[0]).toHaveProperty("text"); expect(parts[1]).toHaveProperty("inlineData"); @@ -459,14 +459,14 @@ describe("Mock Google LLM", () => { const input = new ChatPromptValue(messages); const res = await model.invoke(input); - console.log("record", record); + // console.log("record", record); expect(record.opts).toHaveProperty("data"); expect(record.opts.data).toHaveProperty("contents"); expect(record.opts.data.contents).toHaveLength(1); expect(record.opts.data.contents[0]).toHaveProperty("parts"); const parts = record?.opts?.data?.contents[0]?.parts; - console.log(parts); + // console.log(parts); expect(parts).toHaveLength(2); expect(parts[0]).toHaveProperty("text"); expect(parts[1]).toHaveProperty("inlineData"); @@ -519,7 +519,7 @@ describe("Mock Google LLM", () => { } expect(responseArray).toHaveLength(3); - console.log("record", JSON.stringify(record, null, 2)); + // console.log("record", JSON.stringify(record, null, 2)); }); test("8: streamGenerateContent - streaming - json responseMimeType", async () => { @@ -545,7 +545,7 @@ describe("Mock Google LLM", () => { expect(responseArray).toHaveLength(10); expect(typeof JSON.parse(responseArray.join(""))).toEqual("object"); - console.log("record", JSON.stringify(record, null, 2)); + // 
console.log("record", JSON.stringify(record, null, 2)); }); test("9: streamGenerateContent - non-streaming - check json responseMimeType", async () => { @@ -568,6 +568,6 @@ describe("Mock Google LLM", () => { "application/json" ); - console.log("record", JSON.stringify(record, null, 2)); + // console.log("record", JSON.stringify(record, null, 2)); }); }); diff --git a/libs/langchain-google-genai/src/tests/chat_models.test.ts b/libs/langchain-google-genai/src/tests/chat_models.test.ts index ca41b8b5e100..97015725fc7e 100644 --- a/libs/langchain-google-genai/src/tests/chat_models.test.ts +++ b/libs/langchain-google-genai/src/tests/chat_models.test.ts @@ -184,7 +184,7 @@ test("convertMessageContentToParts correctly handles message types", () => { const messagesAsGoogleParts = messages .map((msg) => convertMessageContentToParts(msg, false)) .flat(); - console.log(messagesAsGoogleParts); + // console.log(messagesAsGoogleParts); expect(messagesAsGoogleParts).toEqual([ { text: "You are a helpful assistant" }, { text: "What's the weather like in new york?" }, @@ -224,7 +224,7 @@ test("convertBaseMessagesToContent correctly creates properly formatted content" ]; const messagesAsGoogleContent = convertBaseMessagesToContent(messages, false); - console.log(messagesAsGoogleContent); + // console.log(messagesAsGoogleContent); // Google Generative AI API only allows for 'model' and 'user' roles // This means that 'system', 'human' and 'tool' messages are converted // to 'user' messages, and ai messages are converted to 'model' messages. 
diff --git a/libs/langchain-google-genai/src/tests/embeddings.int.test.ts b/libs/langchain-google-genai/src/tests/embeddings.int.test.ts index a91bc147f395..0689f285709f 100644 --- a/libs/langchain-google-genai/src/tests/embeddings.int.test.ts +++ b/libs/langchain-google-genai/src/tests/embeddings.int.test.ts @@ -6,7 +6,7 @@ test("Test GooglePalmEmbeddings.embedQuery", async () => { maxRetries: 1, }); const res = await embeddings.embedQuery("Hello world"); - console.log(res); + // console.log(res); expect(typeof res[0]).toBe("number"); }); @@ -22,7 +22,7 @@ test("Test GooglePalmEmbeddings.embedDocuments", async () => { "six documents", "to test pagination", ]); - console.log(res); + // console.log(res); expect(res).toHaveLength(6); res.forEach((r) => { expect(typeof r[0]).toBe("number"); diff --git a/libs/langchain-google-vertexai-web/src/tests/chat_models.int.test.ts b/libs/langchain-google-vertexai-web/src/tests/chat_models.int.test.ts index 38db387d36dc..96680092203c 100644 --- a/libs/langchain-google-vertexai-web/src/tests/chat_models.int.test.ts +++ b/libs/langchain-google-vertexai-web/src/tests/chat_models.int.test.ts @@ -29,7 +29,7 @@ class WeatherTool extends StructuredTool { name = "get_weather"; async _call(input: z.infer) { - console.log(`WeatherTool called with input: ${input}`); + // console.log(`WeatherTool called with input: ${input}`); return `The weather in ${JSON.stringify(input.locations)} is 25°C`; } } @@ -37,101 +37,71 @@ class WeatherTool extends StructuredTool { describe("Google APIKey Chat", () => { test("invoke", async () => { const model = new ChatVertexAI(); - try { - const res = await model.invoke("What is 1 + 1?"); - console.log(res); - expect(res).toBeDefined(); - expect(res._getType()).toEqual("ai"); - - const aiMessage = res as AIMessageChunk; - console.log(aiMessage); - expect(aiMessage.content).toBeDefined(); - expect(aiMessage.content.length).toBeGreaterThan(0); - expect(aiMessage.content[0]).toBeDefined(); - - // const content = 
aiMessage.content[0] as MessageContentComplex; - // expect(content).toHaveProperty("type"); - // expect(content.type).toEqual("text"); - - // const textContent = content as MessageContentText; - // expect(textContent.text).toBeDefined(); - // expect(textContent.text).toEqual("2"); - } catch (e) { - console.error(e); - throw e; - } + const res = await model.invoke("What is 1 + 1?"); + // console.log(res); + expect(res).toBeDefined(); + expect(res._getType()).toEqual("ai"); + + const aiMessage = res as AIMessageChunk; + // console.log(aiMessage); + expect(aiMessage.content).toBeDefined(); + expect(aiMessage.content.length).toBeGreaterThan(0); + expect(aiMessage.content[0]).toBeDefined(); }); test("generate", async () => { const model = new ChatVertexAI(); - try { - const messages: BaseMessage[] = [ - new SystemMessage( - "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails." - ), - new HumanMessage("Flip it"), - new AIMessage("T"), - new HumanMessage("Flip the coin again"), - ]; - const res = await model.predictMessages(messages); - expect(res).toBeDefined(); - expect(res._getType()).toEqual("ai"); - - const aiMessage = res as AIMessageChunk; - expect(aiMessage.content).toBeDefined(); - expect(aiMessage.content.length).toBeGreaterThan(0); - expect(aiMessage.content[0]).toBeDefined(); - console.log(aiMessage); - - // const content = aiMessage.content[0] as MessageContentComplex; - // expect(content).toHaveProperty("type"); - // expect(content.type).toEqual("text"); - - // const textContent = content as MessageContentText; - // expect(textContent.text).toBeDefined(); - // expect(["H", "T"]).toContainEqual(textContent.text); - } catch (e) { - console.error(e); - throw e; - } + const messages: BaseMessage[] = [ + new SystemMessage( + "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails." 
+ ), + new HumanMessage("Flip it"), + new AIMessage("T"), + new HumanMessage("Flip the coin again"), + ]; + const res = await model.predictMessages(messages); + expect(res).toBeDefined(); + expect(res._getType()).toEqual("ai"); + + const aiMessage = res as AIMessageChunk; + expect(aiMessage.content).toBeDefined(); + expect(aiMessage.content.length).toBeGreaterThan(0); + expect(aiMessage.content[0]).toBeDefined(); }); test("stream", async () => { const model = new ChatVertexAI(); - try { - const input: BaseLanguageModelInput = new ChatPromptValue([ - new SystemMessage( - "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails." - ), - new HumanMessage("Flip it"), - new AIMessage("T"), - new HumanMessage("Flip the coin again"), - ]); - const res = await model.stream(input); - const resArray: BaseMessageChunk[] = []; - for await (const chunk of res) { - resArray.push(chunk); - } - expect(resArray).toBeDefined(); - expect(resArray.length).toBeGreaterThanOrEqual(1); - - const lastChunk = resArray[resArray.length - 1]; - expect(lastChunk).toBeDefined(); - expect(lastChunk._getType()).toEqual("ai"); - const aiChunk = lastChunk as AIMessageChunk; - console.log(aiChunk); - - console.log(JSON.stringify(resArray, null, 2)); - } catch (e) { - console.error(e); - throw e; + const input: BaseLanguageModelInput = new ChatPromptValue([ + new SystemMessage( + "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails." 
+ ), + new HumanMessage("Flip it"), + new AIMessage("T"), + new HumanMessage("Flip the coin again"), + ]); + const res = await model.stream(input); + const resArray: BaseMessageChunk[] = []; + for await (const chunk of res) { + resArray.push(chunk); } + expect(resArray).toBeDefined(); + expect(resArray.length).toBeGreaterThanOrEqual(1); + + const lastChunk = resArray[resArray.length - 1]; + expect(lastChunk).toBeDefined(); + expect(lastChunk._getType()).toEqual("ai"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var + const aiChunk = lastChunk as AIMessageChunk; + // console.log(aiChunk); + + // console.log(JSON.stringify(resArray, null, 2)); }); test("Tool call", async () => { const chat = new ChatVertexAI().bindTools([new WeatherTool()]); const res = await chat.invoke("What is the weather in SF and LA"); - console.log(res); + // console.log(res); expect(res.tool_calls?.length).toEqual(1); expect(res.tool_calls?.[0].args).toEqual( JSON.parse(res.additional_kwargs.tool_calls?.[0].function.arguments ?? 
"") @@ -140,8 +110,10 @@ describe("Google APIKey Chat", () => { test("Few shotting with tool calls", async () => { const chat = new ChatVertexAI().bindTools([new WeatherTool()]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke("What is the weather in SF"); - console.log(res); + // console.log(res); const res2 = await chat.invoke([ new HumanMessage("What is the weather in SF?"), new AIMessage({ @@ -163,7 +135,7 @@ describe("Google APIKey Chat", () => { new AIMessage("It is currently 24 degrees in SF with hail in SF."), new HumanMessage("What did you say the weather was?"), ]); - console.log(res2); + // console.log(res2); expect(res2.content).toContain("24"); }); @@ -192,93 +164,62 @@ describe("Google APIKey Chat", () => { describe("Google Webauth Chat", () => { test("invoke", async () => { const model = new ChatVertexAI(); - try { - const res = await model.invoke("What is 1 + 1?"); - expect(res).toBeDefined(); - expect(res._getType()).toEqual("ai"); - - const aiMessage = res as AIMessageChunk; - expect(aiMessage.content).toBeDefined(); - expect(aiMessage.content.length).toBeGreaterThan(0); - expect(aiMessage.content[0]).toBeDefined(); - console.log(aiMessage); - - // const content = aiMessage.content[0] as MessageContentComplex; - // expect(content).toHaveProperty("type"); - // expect(content.type).toEqual("text"); - - // const textContent = content as MessageContentText; - // expect(textContent.text).toBeDefined(); - // expect(textContent.text).toEqual("2"); - } catch (e) { - console.error(e); - throw e; - } + const res = await model.invoke("What is 1 + 1?"); + expect(res).toBeDefined(); + expect(res._getType()).toEqual("ai"); + + const aiMessage = res as AIMessageChunk; + expect(aiMessage.content).toBeDefined(); + expect(aiMessage.content.length).toBeGreaterThan(0); + expect(aiMessage.content[0]).toBeDefined(); + // console.log(aiMessage); }); test("generate", async () => { const model 
= new ChatVertexAI(); - try { - const messages: BaseMessage[] = [ - new SystemMessage( - "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails." - ), - new HumanMessage("Flip it"), - new AIMessage("T"), - new HumanMessage("Flip the coin again"), - ]; - const res = await model.predictMessages(messages); - expect(res).toBeDefined(); - expect(res._getType()).toEqual("ai"); - - const aiMessage = res as AIMessageChunk; - expect(aiMessage.content).toBeDefined(); - expect(aiMessage.content.length).toBeGreaterThan(0); - expect(aiMessage.content[0]).toBeDefined(); - console.log(aiMessage); - - // const content = aiMessage.content[0] as MessageContentComplex; - // expect(content).toHaveProperty("type"); - // expect(content.type).toEqual("text"); - - // const textContent = content as MessageContentText; - // expect(textContent.text).toBeDefined(); - // expect(["H", "T"]).toContainEqual(textContent.text); - } catch (e) { - console.error(e); - throw e; - } + const messages: BaseMessage[] = [ + new SystemMessage( + "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails." + ), + new HumanMessage("Flip it"), + new AIMessage("T"), + new HumanMessage("Flip the coin again"), + ]; + const res = await model.predictMessages(messages); + expect(res).toBeDefined(); + expect(res._getType()).toEqual("ai"); + + const aiMessage = res as AIMessageChunk; + expect(aiMessage.content).toBeDefined(); + expect(aiMessage.content.length).toBeGreaterThan(0); + expect(aiMessage.content[0]).toBeDefined(); + // console.log(aiMessage); }); test("stream", async () => { const model = new ChatVertexAI(); - try { - const input: BaseLanguageModelInput = new ChatPromptValue([ - new SystemMessage( - "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails." 
- ), - new HumanMessage("Flip it"), - new AIMessage("T"), - new HumanMessage("Flip the coin again"), - ]); - const res = await model.stream(input); - const resArray: BaseMessageChunk[] = []; - for await (const chunk of res) { - resArray.push(chunk); - } - expect(resArray).toBeDefined(); - expect(resArray.length).toBeGreaterThanOrEqual(1); - - const lastChunk = resArray[resArray.length - 1]; - expect(lastChunk).toBeDefined(); - expect(lastChunk._getType()).toEqual("ai"); - const aiChunk = lastChunk as AIMessageChunk; - console.log(aiChunk); - - console.log(JSON.stringify(resArray, null, 2)); - } catch (e) { - console.error(e); - throw e; + const input: BaseLanguageModelInput = new ChatPromptValue([ + new SystemMessage( + "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails." + ), + new HumanMessage("Flip it"), + new AIMessage("T"), + new HumanMessage("Flip the coin again"), + ]); + const res = await model.stream(input); + const resArray: BaseMessageChunk[] = []; + for await (const chunk of res) { + resArray.push(chunk); } + expect(resArray).toBeDefined(); + expect(resArray.length).toBeGreaterThanOrEqual(1); + + const lastChunk = resArray[resArray.length - 1]; + expect(lastChunk).toBeDefined(); + expect(lastChunk._getType()).toEqual("ai"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var + const aiChunk = lastChunk as AIMessageChunk; + // console.log(aiChunk); }); }); diff --git a/libs/langchain-google-vertexai-web/src/tests/llms.int.test.ts b/libs/langchain-google-vertexai-web/src/tests/llms.int.test.ts index cf92f36d2bae..0d02a3ae27d4 100644 --- a/libs/langchain-google-vertexai-web/src/tests/llms.int.test.ts +++ b/libs/langchain-google-vertexai-web/src/tests/llms.int.test.ts @@ -29,20 +29,15 @@ describe("Google APIKey LLM", () => { expect(res).toBe("2"); } else { expect(res.length).toBeGreaterThan(0); - console.log("call result:", res); + // console.log("call 
result:", res); } }); test("call", async () => { const model = new VertexAI(); - try { - const res = await model.invoke("If the time is 1:00, what time is it?"); - expect(res.length).toBeGreaterThan(0); - expect(res.substring(0, 4)).toEqual("1:00"); - } catch (xx) { - console.error(xx); - throw xx; - } + const res = await model.invoke("If the time is 1:00, what time is it?"); + expect(res.length).toBeGreaterThan(0); + expect(res.substring(0, 4)).toEqual("1:00"); }); test("stream", async () => { @@ -79,7 +74,7 @@ describe("Google APIKey LLM", () => { expect(res).toBeInstanceOf(AIMessage); expect(Array.isArray(res.content)).toEqual(true); expect(res.content[0]).toHaveProperty("text"); - console.log("res", res); + // console.log("res", res); }); test("invoke image", async () => { @@ -104,7 +99,7 @@ describe("Google APIKey LLM", () => { const res = await model.invoke(input); expect(res).toBeDefined(); expect(res.length).toBeGreaterThan(0); - console.log("res", res); + // console.log("res", res); }); }); @@ -128,7 +123,7 @@ describe("Google WebAuth gai LLM", () => { expect(res).toBe("2"); } else { expect(res.length).toBeGreaterThan(0); - console.log("call result:", res); + // console.log("call result:", res); } }); @@ -136,14 +131,9 @@ describe("Google WebAuth gai LLM", () => { const model = new VertexAI({ platformType: "gai", }); - try { - const res = await model.invoke("If the time is 1:00, what time is it?"); - expect(res.length).toBeGreaterThan(0); - expect(res.substring(0, 4)).toEqual("1:00"); - } catch (xx) { - console.error(xx); - throw xx; - } + const res = await model.invoke("If the time is 1:00, what time is it?"); + expect(res.length).toBeGreaterThan(0); + expect(res.substring(0, 4)).toEqual("1:00"); }); test("stream", async () => { @@ -183,7 +173,7 @@ describe("Google WebAuth gai LLM", () => { expect(res).toBeInstanceOf(AIMessage); expect(Array.isArray(res.content)).toEqual(true); expect(res.content[0]).toHaveProperty("text"); - console.log("res", res); + // 
console.log("res", res); }); test("invoke image", async () => { @@ -209,6 +199,6 @@ describe("Google WebAuth gai LLM", () => { const res = await model.invoke(input); expect(res).toBeDefined(); expect(res.length).toBeGreaterThan(0); - console.log("res", res); + // console.log("res", res); }); }); diff --git a/libs/langchain-google-vertexai/src/tests/llms.int.test.ts b/libs/langchain-google-vertexai/src/tests/llms.int.test.ts index 1393539424ab..a09dd7c880d5 100644 --- a/libs/langchain-google-vertexai/src/tests/llms.int.test.ts +++ b/libs/langchain-google-vertexai/src/tests/llms.int.test.ts @@ -21,17 +21,12 @@ describe("GAuth LLM", () => { test("call", async () => { const model = new VertexAI(); - try { - const res = await model.invoke("1 + 1 = "); - if (res.length === 1) { - expect(res).toBe("2"); - } else { - expect(res.length).toBeGreaterThan(0); - console.log("call result:", res); - } - } catch (xx) { - console.error(xx); - throw xx; + const res = await model.invoke("1 + 1 = "); + if (res.length === 1) { + expect(res).toBe("2"); + } else { + expect(res.length).toBeGreaterThan(0); + // console.log("call result:", res); } }); @@ -42,7 +37,7 @@ describe("GAuth LLM", () => { expect(res.generations.length).toBeGreaterThan(0); expect(res.generations[0].length).toBeGreaterThan(0); expect(res.generations[0][0]).toHaveProperty("text"); - console.log("generate result:", JSON.stringify(res, null, 2)); + // console.log("generate result:", JSON.stringify(res, null, 2)); }); test("stream", async () => { @@ -79,7 +74,7 @@ describe("GAuth LLM", () => { expect(res).toBeInstanceOf(AIMessage); expect(Array.isArray(res.content)).toEqual(true); expect(res.content[0]).toHaveProperty("text"); - console.log("res", res); + // console.log("res", res); }); test("invoke image", async () => { @@ -104,7 +99,7 @@ describe("GAuth LLM", () => { const res = await model.invoke(input); expect(res).toBeDefined(); expect(res.length).toBeGreaterThan(0); - console.log("res", res); + // 
console.log("res", res); }); }); @@ -123,17 +118,12 @@ describe("GAuth LLM gai", () => { const model = new VertexAI({ platformType: "gai", }); - try { - const res = await model.invoke("1 + 1 = "); - if (res.length === 1) { - expect(res).toBe("2"); - } else { - console.log("call result:", res); - expect(res.length).toBeGreaterThan(0); - } - } catch (xx) { - console.error(xx); - throw xx; + const res = await model.invoke("1 + 1 = "); + if (res.length === 1) { + expect(res).toBe("2"); + } else { + // console.log("call result:", res); + expect(res.length).toBeGreaterThan(0); } }); @@ -141,14 +131,9 @@ describe("GAuth LLM gai", () => { const model = new VertexAI({ platformType: "gai", }); - try { - const res = await model.invoke("If the time is 1:00, what time is it?"); - expect(res.length).toBeGreaterThan(0); - expect(res.substring(0, 4)).toEqual("1:00"); - } catch (xx) { - console.error(xx); - throw xx; - } + const res = await model.invoke("If the time is 1:00, what time is it?"); + expect(res.length).toBeGreaterThan(0); + expect(res.substring(0, 4)).toEqual("1:00"); }); test("generate", async () => { @@ -160,7 +145,7 @@ describe("GAuth LLM gai", () => { expect(res.generations.length).toBeGreaterThan(0); expect(res.generations[0].length).toBeGreaterThan(0); expect(res.generations[0][0]).toHaveProperty("text"); - console.log("generate result:", JSON.stringify(res, null, 2)); + // console.log("generate result:", JSON.stringify(res, null, 2)); }); test("stream", async () => { @@ -205,7 +190,7 @@ describe("GAuth LLM gai", () => { expect(res).toBeInstanceOf(AIMessage); expect(Array.isArray(res.content)).toEqual(true); expect(res.content[0]).toHaveProperty("text"); - console.log("res", res); + // console.log("res", res); }); test("invoke image", async () => { @@ -231,6 +216,6 @@ describe("GAuth LLM gai", () => { const res = await model.invoke(input); expect(res).toBeDefined(); expect(res.length).toBeGreaterThan(0); - console.log("res", res); + // console.log("res", res); 
}); }); diff --git a/libs/langchain-groq/src/tests/chat_models.int.test.ts b/libs/langchain-groq/src/tests/chat_models.int.test.ts index c2839786a39b..20b6d482356b 100644 --- a/libs/langchain-groq/src/tests/chat_models.int.test.ts +++ b/libs/langchain-groq/src/tests/chat_models.int.test.ts @@ -8,7 +8,7 @@ test("invoke", async () => { }); const message = new HumanMessage("What color is the sky?"); const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); expect(res.content.length).toBeGreaterThan(10); }); @@ -18,7 +18,7 @@ test("invoke with stop sequence", async () => { }); const message = new HumanMessage("Count to ten."); const res = await chat.bind({ stop: ["5", "five"] }).invoke([message]); - console.log({ res }); + // console.log({ res }); expect((res.content as string).toLowerCase()).not.toContain("6"); expect((res.content as string).toLowerCase()).not.toContain("six"); }); @@ -51,7 +51,7 @@ test("generate", async () => { const chat = new ChatGroq(); const message = new HumanMessage("Hello!"); const res = await chat.generate([[message]]); - console.log(JSON.stringify(res, null, 2)); + // console.log(JSON.stringify(res, null, 2)); expect(res.generations[0][0].text.length).toBeGreaterThan(10); }); @@ -65,7 +65,7 @@ test("streaming", async () => { iters += 1; finalRes += chunk.content; } - console.log({ finalRes, iters }); + // console.log({ finalRes, iters }); expect(iters).toBeGreaterThan(1); }); @@ -100,7 +100,7 @@ test("invoke with bound tools", async () => { tool_choice: "auto", }) .invoke([message]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls?.length).toEqual(1); expect( JSON.parse( @@ -139,8 +139,10 @@ test("stream with bound tools, yielding a single chunk", async () => { tool_choice: "auto", }) .stream([message]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - 
console.log(JSON.stringify(chunk)); + // console.log(JSON.stringify(chunk)); } }); @@ -192,6 +194,6 @@ test("Few shotting with tool calls", async () => { new AIMessage("It is currently 24 degrees in SF with hail in SF."), new HumanMessage("What did you say the weather was?"), ]); - console.log(res); + // console.log(res); expect(res.content).toContain("24"); }); diff --git a/libs/langchain-groq/src/tests/chat_models_structured_output.int.test.ts b/libs/langchain-groq/src/tests/chat_models_structured_output.int.test.ts index 215058c9f531..7c200d236d98 100644 --- a/libs/langchain-groq/src/tests/chat_models_structured_output.int.test.ts +++ b/libs/langchain-groq/src/tests/chat_models_structured_output.int.test.ts @@ -28,7 +28,7 @@ test("withStructuredOutput zod schema function calling", async () => { ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -67,7 +67,7 @@ Respond with a JSON object containing three keys: ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -97,7 +97,7 @@ test("withStructuredOutput JSON schema function calling", async () => { ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -127,7 +127,7 @@ test("withStructuredOutput OpenAI function definition function calling", async ( ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // 
console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -166,7 +166,7 @@ Respond with a JSON object containing three keys: ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -207,7 +207,7 @@ Respond with a JSON object containing three keys: ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -238,7 +238,7 @@ test("withStructuredOutput includeRaw true", async () => { ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("parsed" in result).toBe(true); // Need to make TS happy :) diff --git a/libs/langchain-mistralai/src/tests/chat_models.int.test.ts b/libs/langchain-mistralai/src/tests/chat_models.int.test.ts index 7280ceddacf4..93029ad4ecb1 100644 --- a/libs/langchain-mistralai/src/tests/chat_models.int.test.ts +++ b/libs/langchain-mistralai/src/tests/chat_models.int.test.ts @@ -23,7 +23,7 @@ test("Test ChatMistralAI can invoke", async () => { const response = await prompt.pipe(model).invoke({ input: "Hello", }); - console.log("response", response); + // console.log("response", response); expect(response.content.length).toBeGreaterThan(1); }); @@ -39,11 +39,11 @@ test("Test ChatMistralAI can stream", async () => { let itters = 0; let fullMessage = ""; for await (const item of response) { - console.log(item); + // console.log(item); itters += 1; fullMessage += item.content; } - console.log("fullMessage", fullMessage); + // console.log("fullMessage", 
fullMessage); expect(itters).toBeGreaterThan(1); }); @@ -81,7 +81,7 @@ test("Can call tools using structured tools", async () => { const chain = prompt.pipe(model); const response = await chain.invoke({}); expect("tool_calls" in response.additional_kwargs).toBe(true); - console.log(response.additional_kwargs.tool_calls?.[0]); + // console.log(response.additional_kwargs.tool_calls?.[0]); expect(response.additional_kwargs.tool_calls?.[0].function.name).toBe( "calculator" ); @@ -128,7 +128,7 @@ test("Can call tools", async () => { ]); const chain = prompt.pipe(model); const response = await chain.invoke({}); - console.log(response); + // console.log(response); expect(response.tool_calls?.length).toEqual(1); expect(response.tool_calls?.[0].args).toEqual( JSON.parse( @@ -181,7 +181,7 @@ test("Can call .stream with tool calling", async () => { const response = await chain.stream({}); let finalRes: BaseMessage | null = null; for await (const chunk of response) { - console.log(chunk); + // console.log(chunk); finalRes = chunk; } if (!finalRes) { @@ -189,7 +189,7 @@ test("Can call .stream with tool calling", async () => { } expect("tool_calls" in finalRes.additional_kwargs).toBe(true); - console.log(finalRes.additional_kwargs.tool_calls?.[0]); + // console.log(finalRes.additional_kwargs.tool_calls?.[0]); expect(finalRes.additional_kwargs.tool_calls?.[0].function.name).toBe( "calculator" ); @@ -222,7 +222,7 @@ To use a calculator respond with valid JSON containing a single key: 'calculator const chain = prompt.pipe(model); const response = await chain.invoke({}); - console.log(response); + // console.log(response); const parsedRes = JSON.parse(response.content as string); expect(parsedRes.calculator).toBeDefined(); }); @@ -250,11 +250,11 @@ To use a calculator respond with valid JSON containing a single key: 'calculator const response = await chain.stream({}); let finalRes = ""; for await (const chunk of response) { - console.log(chunk); + // console.log(chunk); finalRes += 
chunk.content; } - console.log(finalRes); + // console.log(finalRes); const parsedRes = JSON.parse(finalRes); expect(parsedRes.calculator).toBeDefined(); }); @@ -304,7 +304,7 @@ test("Can stream and concat responses for a complex tool", async () => { const response = await chain.stream({}); let finalRes: BaseMessage[] = []; for await (const chunk of response) { - console.log(chunk); + // console.log(chunk); finalRes = finalRes.concat(chunk); } if (!finalRes) { @@ -371,7 +371,7 @@ test("Few shotting with tool calls", async () => { new AIMessage("It is currently 24 degrees in SF with hail in SF."), new HumanMessage("What did you say the weather was?"), ]); - console.log(res); + // console.log(res); expect(res.content).toContain("24"); }); @@ -406,7 +406,7 @@ describe("withStructuredOutput", () => { ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -444,7 +444,7 @@ describe("withStructuredOutput", () => { ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -481,7 +481,7 @@ describe("withStructuredOutput", () => { ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -516,7 +516,7 @@ describe("withStructuredOutput", () => { ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" 
in result).toBe(true); expect("number2" in result).toBe(true); @@ -554,7 +554,7 @@ describe("withStructuredOutput", () => { ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -591,7 +591,7 @@ describe("withStructuredOutput", () => { ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("parsed" in result).toBe(true); // Need to make TS happy :) @@ -656,7 +656,7 @@ describe("ChatMistralAI aborting", () => { try { for await (const item of stream) { finalRes += item.content; - console.log(finalRes); + // console.log(finalRes); iters += 1; controller.abort(); } @@ -693,7 +693,7 @@ describe("ChatMistralAI aborting", () => { for await (const item of stream) { finalRes += item.content; - console.log(finalRes); + // console.log(finalRes); } // If the loop completes without error, fail the test fail( @@ -782,7 +782,7 @@ describe("codestral-latest", () => { const response = await prompt.pipe(model).invoke({ input: "How can I log 'Hello, World!' 
in Python?", }); - console.log("response", response); + // console.log("response", response); expect(response.content.length).toBeGreaterThan(1); expect((response.content as string).toLowerCase()).toContain("hello"); expect((response.content as string).toLowerCase()).toContain("world"); @@ -802,11 +802,11 @@ describe("codestral-latest", () => { let itters = 0; let fullMessage = ""; for await (const item of response) { - console.log(item); + // console.log(item); itters += 1; fullMessage += item.content; } - console.log("fullMessage", fullMessage); + // console.log("fullMessage", fullMessage); expect(itters).toBeGreaterThan(1); expect(fullMessage.toLowerCase()).toContain("hello"); expect(fullMessage.toLowerCase()).toContain("world"); @@ -849,9 +849,9 @@ describe("codestral-latest", () => { input: "Write a function that takes in a single argument and logs it to the console. Ensure the code is in Python.", }); - console.log(response); + // console.log(response); expect("tool_calls" in response.additional_kwargs).toBe(true); - console.log(response.additional_kwargs.tool_calls?.[0]); + // console.log(response.additional_kwargs.tool_calls?.[0]); if (!response.additional_kwargs.tool_calls?.[0]) { throw new Error("No tool call found"); } @@ -859,7 +859,7 @@ describe("codestral-latest", () => { expect(sandboxTool.function.name).toBe("code_sandbox"); const parsedArgs = JSON.parse(sandboxTool.function.arguments); expect(parsedArgs.code).toBeDefined(); - console.log(parsedArgs.code); + // console.log(parsedArgs.code); }); }); @@ -879,7 +879,7 @@ test("Stream token count usage_metadata", async () => { res = res.concat(chunk); } } - console.log(res); + // console.log(res); expect(res?.usage_metadata).toBeDefined(); if (!res?.usage_metadata) { return; @@ -907,7 +907,7 @@ test("streamUsage excludes token usage", async () => { res = res.concat(chunk); } } - console.log(res); + // console.log(res); expect(res?.usage_metadata).not.toBeDefined(); }); @@ -918,7 +918,7 @@ test("Invoke 
token count usage_metadata", async () => { maxTokens: 10, }); const res = await model.invoke("Why is the sky blue? Be concise."); - console.log(res); + // console.log(res); expect(res?.usage_metadata).toBeDefined(); if (!res?.usage_metadata) { return; diff --git a/libs/langchain-mistralai/src/tests/embeddings.int.test.ts b/libs/langchain-mistralai/src/tests/embeddings.int.test.ts index 6e8fef25853f..103218b5de33 100644 --- a/libs/langchain-mistralai/src/tests/embeddings.int.test.ts +++ b/libs/langchain-mistralai/src/tests/embeddings.int.test.ts @@ -6,7 +6,7 @@ test("Test MistralAIEmbeddings can embed query", async () => { // "Hello world" in French 🤓 const text = "Bonjour le monde"; const embeddings = await model.embedQuery(text); - console.log("embeddings", embeddings); + // console.log("embeddings", embeddings); expect(embeddings.length).toBe(1024); }); @@ -16,7 +16,7 @@ test("Test MistralAIEmbeddings can embed documents", async () => { const text = "Bonjour le monde"; const documents = [text, text]; const embeddings = await model.embedDocuments(documents); - console.log("embeddings", embeddings); + // console.log("embeddings", embeddings); expect(embeddings.length).toBe(2); expect(embeddings[0].length).toBe(1024); expect(embeddings[1].length).toBe(1024); diff --git a/libs/langchain-mistralai/src/tests/llms.int.test.ts b/libs/langchain-mistralai/src/tests/llms.int.test.ts index 252be8a6ba42..6fc263a0eb00 100644 --- a/libs/langchain-mistralai/src/tests/llms.int.test.ts +++ b/libs/langchain-mistralai/src/tests/llms.int.test.ts @@ -15,7 +15,7 @@ test("Test MistralAI", async () => { const res = await model.invoke( "Log 'Hello world' to the console in javascript: " ); - console.log({ res }, "Test MistralAI"); + // console.log({ res }, "Test MistralAI"); expect(res.length).toBeGreaterThan(1); }); @@ -24,10 +24,12 @@ test("Test MistralAI with stop in object", async () => { maxTokens: 5, model: "codestral-latest", }); + // 
@eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("console.log 'Hello world' in javascript:", { stop: ["world"], }); - console.log({ res }, "Test MistralAI with stop in object"); + // console.log({ res }, "Test MistralAI with stop in object"); }); test("Test MistralAI with timeout in call options", async () => { @@ -70,8 +72,10 @@ test("Test MistralAI with signal in call options", async () => { } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of ret) { - console.log({ chunk }, "Test MistralAI with signal in call options"); + // console.log({ chunk }, "Test MistralAI with signal in call options"); controller.abort(); } @@ -103,7 +107,7 @@ test("Test MistralAI in streaming mode", async () => { const res = await model.invoke( "Log 'Hello world' to the console in javascript: " ); - console.log({ res }, "Test MistralAI in streaming mode"); + // console.log({ res }, "Test MistralAI in streaming mode"); expect(nrNewTokens > 0).toBe(true); expect(res).toBe(streamedCompletion); @@ -141,8 +145,10 @@ test("Test MistralAI stream method with abort", async () => { signal: AbortSignal.timeout(1000), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log({ chunk }, "Test MistralAI stream method with abort"); + // console.log({ chunk }, "Test MistralAI stream method with abort"); } }).rejects.toThrow(); }); @@ -156,8 +162,10 @@ test("Test MistralAI stream method with early break", async () => { "How is your day going? Be extremely verbose." 
); let i = 0; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log({ chunk }, "Test MistralAI stream method with early break"); + // console.log({ chunk }, "Test MistralAI stream method with early break"); i += 1; if (i > 5) { break; diff --git a/libs/langchain-mixedbread-ai/src/tests/reranker.int.test.ts b/libs/langchain-mixedbread-ai/src/tests/reranker.int.test.ts index 4d945d2d43c6..9f07a28e07c5 100644 --- a/libs/langchain-mixedbread-ai/src/tests/reranker.int.test.ts +++ b/libs/langchain-mixedbread-ai/src/tests/reranker.int.test.ts @@ -26,7 +26,7 @@ test("MixedbreadAIReranker can indeed rerank documents with compressDocuments me documents, query ); - console.log(rerankedDocuments); + // console.log(rerankedDocuments); expect(rerankedDocuments).toHaveLength(3); }); @@ -37,6 +37,6 @@ test("MixedbreadAIReranker can indeed rerank documents with rerank method", asyn documents.map((doc) => doc.pageContent), query ); - console.log(rerankedDocuments); + // console.log(rerankedDocuments); expect(rerankedDocuments).toHaveLength(3); }); diff --git a/libs/langchain-mongodb/src/tests/storage.int.test.ts b/libs/langchain-mongodb/src/tests/storage.int.test.ts index a7ae12ca7720..93aeb03a9f9d 100644 --- a/libs/langchain-mongodb/src/tests/storage.int.test.ts +++ b/libs/langchain-mongodb/src/tests/storage.int.test.ts @@ -18,7 +18,7 @@ test("MongoDBStore can set and retrieve", async () => { try { await client.connect(); } catch (e) { - console.error("Failed to connect"); + // console.error("Failed to connect"); throw Error(e as string); } diff --git a/libs/langchain-mongodb/src/tests/vectorstores.int.test.ts b/libs/langchain-mongodb/src/tests/vectorstores.int.test.ts index 069bb4ef3aff..ba204cd8c0fa 100644 --- a/libs/langchain-mongodb/src/tests/vectorstores.int.test.ts +++ b/libs/langchain-mongodb/src/tests/vectorstores.int.test.ts @@ -223,7 +223,7 @@ test("MongoDBAtlasVectorSearch 
upsert", async () => { "Sandwich", 1 ); - console.log(results2); + // console.log(results2); expect(results2.length).toEqual(1); expect(results2[0].pageContent).not.toContain("sandwich"); diff --git a/libs/langchain-openai/src/tests/azure/chat_models.int.test.ts b/libs/langchain-openai/src/tests/azure/chat_models.int.test.ts index 31de416cbfd4..1cbbe902ca9a 100644 --- a/libs/langchain-openai/src/tests/azure/chat_models.int.test.ts +++ b/libs/langchain-openai/src/tests/azure/chat_models.int.test.ts @@ -52,8 +52,10 @@ test("Test Azure ChatOpenAI call method", async () => { maxTokens: 10, }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.call([message]); - console.log({ res }); + // console.log({ res }); }); test("Test Azure ChatOpenAI with SystemChatMessage", async () => { @@ -63,8 +65,10 @@ test("Test Azure ChatOpenAI with SystemChatMessage", async () => { }); const system_message = new SystemMessage("You are to chat with a user."); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.call([system_message, message]); - console.log({ res }); + // console.log({ res }); }); test("Test Azure ChatOpenAI Generate", async () => { @@ -79,11 +83,11 @@ test("Test Azure ChatOpenAI Generate", async () => { for (const generation of res.generations) { expect(generation.length).toBe(2); for (const message of generation) { - console.log(message.text); + // console.log(message.text); expect(typeof message.text).toBe("string"); } } - console.log({ res }); + // console.log({ res }); }); test("Test Azure ChatOpenAI Generate throws when one of the calls fails", async () => { @@ -118,14 +122,16 @@ test("Test Azure ChatOpenAI tokenUsage", async () => { maxTokens: 10, callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { - 
console.log(output); + // console.log(output); tokenUsage = output.llmOutput?.tokenUsage; }, }), }); const message = new HumanMessage("Hello"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke([message]); - console.log({ res }); + // console.log({ res }); expect(tokenUsage.promptTokens).toBeGreaterThan(0); } finally { @@ -156,11 +162,13 @@ test("Test Azure ChatOpenAI tokenUsage with a batch", async () => { }, }), }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.generate([ [new HumanMessage("Hello")], [new HumanMessage("Hi")], ]); - console.log(res); + // console.log(res); expect(tokenUsage.promptTokens).toBeGreaterThan(0); } finally { @@ -255,11 +263,13 @@ test("Test Azure ChatOpenAI prompt value", async () => { expect(res.generations.length).toBe(1); for (const generation of res.generations) { expect(generation.length).toBe(2); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for (const g of generation) { - console.log(g.text); + // console.log(g.text); } } - console.log({ res }); + // console.log({ res }); }); test("Test Azure OpenAI Chat, docs, prompt templates", async () => { @@ -274,6 +284,8 @@ test("Test Azure OpenAI Chat, docs, prompt templates", async () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ input_language: "English", @@ -282,24 +294,28 @@ test("Test Azure OpenAI Chat, docs, prompt templates", async () => { }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }, 5000); test("Test Azure ChatOpenAI with stop", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); + // 
@eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.call( [new HumanMessage("Print hello world")], ["world"] ); - console.log({ res }); + // console.log({ res }); }); test("Test Azure ChatOpenAI with stop in object", async () => { const model = new AzureChatOpenAI({ maxTokens: 5 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke([new HumanMessage("Print hello world")], { stop: ["world"], }); - console.log({ res }); + // console.log({ res }); }); test("Test Azure ChatOpenAI with timeout in call options", async () => { @@ -357,8 +373,10 @@ test("Test Azure ChatOpenAI with specific roles in ChatMessage", async () => { "system" ); const user_message = new ChatMessage("Hello!", "user"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.call([system_message, user_message]); - console.log({ res }); + // console.log({ res }); }); test("Test Azure ChatOpenAI stream method", async () => { @@ -369,7 +387,7 @@ test("Test Azure ChatOpenAI stream method", async () => { const stream = await model.stream("Print hello world."); const chunks = []; for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); @@ -387,8 +405,10 @@ test("Test Azure ChatOpenAI stream method with abort", async () => { signal: AbortSignal.timeout(500), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); } }).rejects.toThrow(); }); @@ -402,8 +422,10 @@ test("Test Azure ChatOpenAI stream method with early break", async () => { "How is your day going? Be extremely verbose." 
); let i = 0; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); i += 1; if (i > 10) { break; @@ -422,8 +444,10 @@ test("Test Azure ChatOpenAI stream method, timeout error thrown from SDK", async const stream = await model.stream( "How is your day going? Be extremely verbose." ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); } }).rejects.toThrow(); }); @@ -490,10 +514,10 @@ test("Test Azure ChatOpenAI Function calling with streaming", async () => { expect(finalResult?.additional_kwargs?.function_call?.name).toBe( "get_current_weather" ); - console.log( - JSON.parse(finalResult?.additional_kwargs?.function_call?.arguments ?? "") - .location - ); + // console.log( + // JSON.parse(finalResult?.additional_kwargs?.function_call?.arguments ?? 
"") + // .location + // ); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; @@ -714,9 +738,11 @@ test("Test Azure ChatOpenAI token usage reporting for streaming function calls", try { let streamingTokenUsed = -1; + let nonStreamingTokenUsed = -1; const humanMessage = "What a beautiful day!"; + const extractionFunctionSchema = { name: "extractor", description: "Extracts fields from the input.", @@ -753,13 +779,13 @@ test("Test Azure ChatOpenAI token usage reporting for streaming function calls", handleLLMEnd: async (output) => { streamingTokenUsed = output.llmOutput?.estimatedTokenUsage?.totalTokens; - console.log( - "streaming usage", - output.llmOutput?.estimatedTokenUsage - ); + // console.log( + // "streaming usage", + // output.llmOutput?.estimatedTokenUsage + // ); }, - handleLLMError: async (err) => { - console.error(err); + handleLLMError: async (_err) => { + // console.error(err); }, }, ], @@ -780,10 +806,10 @@ test("Test Azure ChatOpenAI token usage reporting for streaming function calls", { handleLLMEnd: async (output) => { nonStreamingTokenUsed = output.llmOutput?.tokenUsage?.totalTokens; - console.log("non-streaming usage", output.llmOutput?.tokenUsage); + // console.log("non-streaming usage", output.llmOutput?.tokenUsage); }, - handleLLMError: async (err) => { - console.error(err); + handleLLMError: async (_err) => { + // console.error(err); }, }, ], @@ -801,11 +827,11 @@ test("Test Azure ChatOpenAI token usage reporting for streaming function calls", nonStreamingResult.additional_kwargs.function_call?.arguments && streamingResult.additional_kwargs.function_call?.arguments ) { - console.log( - `Function Call: ${JSON.stringify( - nonStreamingResult.additional_kwargs.function_call - )}` - ); + // console.log( + // `Function Call: ${JSON.stringify( + // nonStreamingResult.additional_kwargs.function_call + // )}` + // ); const nonStreamingArguments = JSON.stringify( 
JSON.parse(nonStreamingResult.additional_kwargs.function_call.arguments) ); @@ -832,8 +858,11 @@ test("Test Azure ChatOpenAI token usage reporting for streaming calls", async () try { let streamingTokenUsed = -1; + let nonStreamingTokenUsed = -1; + const systemPrompt = "You are a helpful assistant"; + const question = "What is the color of the night sky?"; const streamingModel = new AzureChatOpenAI({ @@ -848,13 +877,13 @@ test("Test Azure ChatOpenAI token usage reporting for streaming calls", async () handleLLMEnd: async (output) => { streamingTokenUsed = output.llmOutput?.estimatedTokenUsage?.totalTokens; - console.log( - "streaming usage", - output.llmOutput?.estimatedTokenUsage - ); + // console.log( + // "streaming usage", + // output.llmOutput?.estimatedTokenUsage + // ); }, - handleLLMError: async (err) => { - console.error(err); + handleLLMError: async (_err) => { + // console.error(err); }, }, ], @@ -871,10 +900,10 @@ test("Test Azure ChatOpenAI token usage reporting for streaming calls", async () { handleLLMEnd: async (output) => { nonStreamingTokenUsed = output.llmOutput?.tokenUsage?.totalTokens; - console.log("non-streaming usage", output.llmOutput?.estimated); + // console.log("non-streaming usage", output.llmOutput?.estimated); }, - handleLLMError: async (err) => { - console.error(err); + handleLLMError: async (_err) => { + // console.error(err); }, }, ], @@ -912,8 +941,8 @@ const clientSecret: string = // eslint-disable-next-line @typescript-eslint/no-explicit-any let testFn: any = test; if (!tenantId || !clientId || !clientSecret) { - console.warn(`One or more required environment variables are not set. -Skipping "Test Azure ChatOpenAI with bearer token provider".`); + // console.warn(`One or more required environment variables are not set. 
+ // Skipping "Test Azure ChatOpenAI with bearer token provider".`); testFn = test.skip; } @@ -934,6 +963,8 @@ testFn("Test Azure ChatOpenAI with bearer token provider", async () => { azureADTokenProvider, }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([["system", "Say hi"], message]); - console.log(res); + // console.log(res); }); diff --git a/libs/langchain-openai/src/tests/azure/llms.int.test.ts b/libs/langchain-openai/src/tests/azure/llms.int.test.ts index 42f84c163429..5ce8369aad1c 100644 --- a/libs/langchain-openai/src/tests/azure/llms.int.test.ts +++ b/libs/langchain-openai/src/tests/azure/llms.int.test.ts @@ -38,8 +38,10 @@ test("Test Azure OpenAI invoke", async () => { maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); }); test("Test Azure OpenAI call", async () => { @@ -47,8 +49,10 @@ test("Test Azure OpenAI call", async () => { maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.call("Print hello world", ["world"]); - console.log({ res }); + // console.log({ res }); }); test("Test Azure OpenAI with stop in object", async () => { @@ -56,8 +60,10 @@ test("Test Azure OpenAI with stop in object", async () => { maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world", { stop: ["world"] }); - console.log({ res }); + // console.log({ res }); }); test("Test Azure OpenAI with timeout in call options", async () => { @@ -124,11 +130,13 @@ test("Test Azure OpenAI with 
concurrency == 1", async () => { modelName: "gpt-3.5-turbo-instruct", maxConcurrency: 1, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await Promise.all([ model.invoke("Print hello world"), model.invoke("Print hello world"), ]); - console.log({ res }); + // console.log({ res }); }); test("Test Azure OpenAI with maxTokens -1", async () => { @@ -136,15 +144,17 @@ test("Test Azure OpenAI with maxTokens -1", async () => { maxTokens: -1, modelName: "gpt-3.5-turbo-instruct", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.call("Print hello world", ["world"]); - console.log({ res }); + // console.log({ res }); }); test("Test Azure OpenAI with model name", async () => { const model = new AzureOpenAI({ modelName: "gpt-3.5-turbo-instruct" }); expect(model).toBeInstanceOf(AzureOpenAI); const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); expect(typeof res).toBe("string"); }); @@ -154,7 +164,7 @@ test("Test Azure OpenAI with versioned instruct model returns Azure OpenAI", asy }); expect(model).toBeInstanceOf(AzureOpenAI); const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); expect(typeof res).toBe("string"); }); @@ -180,8 +190,10 @@ test("Test Azure OpenAI tokenUsage", async () => { }, }), }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Hello"); - console.log({ res }); + // console.log({ res }); expect(tokenUsage.promptTokens).toBe(1); } finally { @@ -206,7 +218,7 @@ test("Test Azure OpenAI in streaming mode", async () => { }), }); const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); expect(nrNewTokens > 0).toBe(true); expect(res).toBe(streamedCompletion); @@ -232,10 +244,10 @@ test("Test Azure 
OpenAI in streaming mode with multiple prompts", async () => { }), }); const res = await model.generate(["Print hello world", "print hello sea"]); - console.log( - res.generations, - res.generations.map((g) => g[0].generationInfo) - ); + // console.log( + // res.generations, + // res.generations.map((g) => g[0].generationInfo) + // ); expect(nrNewTokens > 0).toBe(true); expect(res.generations.length).toBe(2); @@ -261,10 +273,10 @@ test("Test Azure OpenAI in streaming mode with multiple prompts", async () => { }), }); const res = await model.generate(["Print hello world", "print hello sea"]); - console.log( - res.generations, - res.generations.map((g) => g[0].generationInfo) - ); + // console.log( + // res.generations, + // res.generations.map((g) => g[0].generationInfo) + // ); expect(nrNewTokens > 0).toBe(true); expect(res.generations.length).toBe(2); @@ -284,11 +296,13 @@ test("Test Azure OpenAI prompt value", async () => { expect(res.generations.length).toBe(1); for (const generation of res.generations) { expect(generation.length).toBe(1); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for (const g of generation) { - console.log(g.text); + // console.log(g.text); } } - console.log({ res }); + // console.log({ res }); }); test("Test Azure OpenAI stream method", async () => { @@ -316,8 +330,10 @@ test("Test Azure OpenAI stream method with abort", async () => { signal: AbortSignal.timeout(1000), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); } }).rejects.toThrow(); }); @@ -331,8 +347,10 @@ test("Test Azure OpenAI stream method with early break", async () => { "How is your day going? Be extremely verbose." 
); let i = 0; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); i += 1; if (i > 5) { break; @@ -361,6 +379,8 @@ test("Test Azure OpenAI with bearer token credentials", async () => { modelName: "davinci-002", azureADTokenProvider, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); }); diff --git a/libs/langchain-openai/src/tests/chat_models-extended.int.test.ts b/libs/langchain-openai/src/tests/chat_models-extended.int.test.ts index cf32376df624..99c8dc9c7c00 100644 --- a/libs/langchain-openai/src/tests/chat_models-extended.int.test.ts +++ b/libs/langchain-openai/src/tests/chat_models-extended.int.test.ts @@ -13,8 +13,10 @@ test("Test ChatOpenAI JSON mode", async () => { }, }); const message = new HumanMessage("Hello!"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([["system", "Only return JSON"], message]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); }); test("Test ChatOpenAI seed", async () => { @@ -81,7 +83,7 @@ test("Test ChatOpenAI tool calling", async () => { const res = await chat.invoke([ ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"], ]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls?.length).toEqual(3); expect(res.tool_calls?.[0].args).toEqual( JSON.parse(res.additional_kwargs.tool_calls?.[0].function.arguments ?? 
"{}") @@ -102,7 +104,7 @@ test("Test ChatOpenAI streaming logprobs", async () => { logprobs: true, }); const res = await model.invoke("Print hello world."); - console.log(res.response_metadata.logprobs.content); + // console.log(res.response_metadata.logprobs.content); expect(res.response_metadata.logprobs.content.length).toBeGreaterThan(0); }); @@ -149,7 +151,7 @@ test("Test ChatOpenAI tool calling with ToolMessages", async () => { const res = await chat.invoke([ ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"], ]); - console.log(JSON.stringify(res)); + // console.log(JSON.stringify(res)); expect(res.additional_kwargs.tool_calls?.length).toBeGreaterThan(1); // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const toolMessages = res.additional_kwargs.tool_calls!.map( @@ -162,12 +164,14 @@ test("Test ChatOpenAI tool calling with ToolMessages", async () => { ), }) ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const finalResponse = await chat.invoke([ ["human", "What's the weather like in San Francisco, Tokyo, and Paris?"], res, ...toolMessages, ]); - console.log(finalResponse); + // console.log(finalResponse); }); test("Test ChatOpenAI tool calling with streaming", async () => { @@ -203,7 +207,7 @@ test("Test ChatOpenAI tool calling with streaming", async () => { let finalChunk; const chunks = []; for await (const chunk of stream) { - console.log(chunk.additional_kwargs.tool_calls); + // console.log(chunk.additional_kwargs.tool_calls); chunks.push(chunk); if (!finalChunk) { finalChunk = chunk; @@ -212,7 +216,7 @@ test("Test ChatOpenAI tool calling with streaming", async () => { } } expect(chunks.length).toBeGreaterThan(1); - console.log(finalChunk?.additional_kwargs.tool_calls); + // console.log(finalChunk?.additional_kwargs.tool_calls); expect(finalChunk?.additional_kwargs.tool_calls?.length).toBeGreaterThan(1); }); @@ -233,10 +237,10 @@ test("ChatOpenAI in JSON mode can 
cache generations", async () => { "Respond with a JSON object containing arbitrary fields." ); const res = await chat.invoke([message]); - console.log(res); + // console.log(res); const res2 = await chat.invoke([message]); - console.log(res2); + // console.log(res2); expect(res).toEqual(res2); @@ -295,7 +299,7 @@ test("Few shotting with tool calls", async () => { new AIMessage("It is currently 24 degrees in SF with hail in SF."), new HumanMessage("What did you say the weather was?"), ]); - console.log(res); + // console.log(res); expect(res.content).toContain("24"); }); diff --git a/libs/langchain-openai/src/tests/chat_models-vision.int.test.ts b/libs/langchain-openai/src/tests/chat_models-vision.int.test.ts index 94fa4c1cc998..23c00bbd0602 100644 --- a/libs/langchain-openai/src/tests/chat_models-vision.int.test.ts +++ b/libs/langchain-openai/src/tests/chat_models-vision.int.test.ts @@ -27,8 +27,10 @@ test("Test ChatOpenAI with a file", async () => { }, ], }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); test("Test ChatOpenAI with a URL", async () => { @@ -49,6 +51,8 @@ test("Test ChatOpenAI with a URL", async () => { }, ], }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([message]); - console.log({ res }); + // console.log({ res }); }); diff --git a/libs/langchain-openai/src/tests/chat_models.int.test.ts b/libs/langchain-openai/src/tests/chat_models.int.test.ts index fcd58ca4ed81..f51b5121a98c 100644 --- a/libs/langchain-openai/src/tests/chat_models.int.test.ts +++ b/libs/langchain-openai/src/tests/chat_models.int.test.ts @@ -36,11 +36,11 @@ test("Test ChatOpenAI Generate", async () => { for (const generation of res.generations) { expect(generation.length).toBe(2); for (const message of generation) { - console.log(message.text); + // 
console.log(message.text); expect(typeof message.text).toBe("string"); } } - console.log({ res }); + // console.log({ res }); }); test("Test ChatOpenAI Generate throws when one of the calls fails", async () => { @@ -148,7 +148,7 @@ test("Test ChatOpenAI in streaming mode", async () => { }); const message = new HumanMessage("Hello!"); const result = await model.invoke([message]); - console.log(result); + // console.log(result); expect(nrNewTokens > 0).toBe(true); expect(result.content).toBe(streamedCompletion); @@ -188,7 +188,7 @@ test("Test ChatOpenAI in streaming mode with n > 1 and multiple prompts", async const message1 = new HumanMessage("Hello!"); const message2 = new HumanMessage("Bye!"); const result = await model.generate([[message1], [message2]]); - console.log(result.generations); + // console.log(result.generations); expect(nrNewTokens > 0).toBe(true); expect(result.generations.map((g) => g.map((gg) => gg.text))).toEqual( @@ -211,11 +211,13 @@ test("Test ChatOpenAI prompt value", async () => { expect(res.generations.length).toBe(1); for (const generation of res.generations) { expect(generation.length).toBe(2); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for (const g of generation) { - console.log(g.text); + // console.log(g.text); } } - console.log({ res }); + // console.log({ res }); }); test("OpenAI Chat, docs, prompt templates", async () => { @@ -230,6 +232,8 @@ test("OpenAI Chat, docs, prompt templates", async () => { HumanMessagePromptTemplate.fromTemplate("{text}"), ]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const responseA = await chat.generatePrompt([ await chatPrompt.formatPromptValue({ input_language: "English", @@ -238,23 +242,27 @@ test("OpenAI Chat, docs, prompt templates", async () => { }), ]); - console.log(responseA.generations); + // console.log(responseA.generations); }, 5000); test("Test OpenAI with stop", async () => { 
const model = new ChatOpenAI({ maxTokens: 5 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke([new HumanMessage("Print hello world")], { stop: ["world"], }); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with stop in object", async () => { const model = new ChatOpenAI({ maxTokens: 5 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke([new HumanMessage("Print hello world")], { stop: ["world"], }); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with timeout in call options", async () => { @@ -373,8 +381,10 @@ test("Test OpenAI with specific roles in ChatMessage", async () => { "system" ); const user_message = new ChatMessage("Hello!", "user"); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await chat.invoke([system_message, user_message]); - console.log({ res }); + // console.log({ res }); }); test("Test ChatOpenAI stream method", async () => { @@ -382,7 +392,7 @@ test("Test ChatOpenAI stream method", async () => { const stream = await model.stream("Print hello world."); const chunks = []; for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); chunks.push(chunk); } expect(chunks.length).toBeGreaterThan(1); @@ -400,8 +410,10 @@ test("Test ChatOpenAI stream method with abort", async () => { signal: AbortSignal.timeout(500), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); } }).rejects.toThrow(); }); @@ -412,8 +424,10 @@ test("Test ChatOpenAI stream method with early break", async () => { "How is your day going? Be extremely verbose." 
); let i = 0; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); i += 1; if (i > 10) { break; @@ -432,8 +446,10 @@ test("Test ChatOpenAI stream method, timeout error thrown from SDK", async () => const stream = await model.stream( "How is your day going? Be extremely verbose." ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); } }).rejects.toThrow(); }); @@ -500,10 +516,10 @@ test("Function calling with streaming", async () => { expect(finalResult?.additional_kwargs?.function_call?.name).toBe( "get_current_weather" ); - console.log( - JSON.parse(finalResult?.additional_kwargs?.function_call?.arguments ?? "") - .location - ); + // console.log( + // JSON.parse(finalResult?.additional_kwargs?.function_call?.arguments ?? "") + // .location + // ); } finally { // Reset the environment variable process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; @@ -745,8 +761,11 @@ test("Test ChatOpenAI token usage reporting for streaming calls", async () => { try { let streamingTokenUsed = -1; + let nonStreamingTokenUsed = -1; + const systemPrompt = "You are a helpful assistant"; + const question = "What is the color of the night sky?"; const streamingModel = new ChatOpenAI({ @@ -761,13 +780,13 @@ test("Test ChatOpenAI token usage reporting for streaming calls", async () => { handleLLMEnd: async (output) => { streamingTokenUsed = output.llmOutput?.estimatedTokenUsage?.totalTokens; - console.log( - "streaming usage", - output.llmOutput?.estimatedTokenUsage - ); + // console.log( + // "streaming usage", + // output.llmOutput?.estimatedTokenUsage + // ); }, - handleLLMError: async (err) => { - console.error(err); + handleLLMError: async (_err) => { + // console.error(err); }, }, ], @@ -784,10 +803,10 @@ test("Test 
ChatOpenAI token usage reporting for streaming calls", async () => { { handleLLMEnd: async (output) => { nonStreamingTokenUsed = output.llmOutput?.tokenUsage?.totalTokens; - console.log("non-streaming usage", output.llmOutput?.estimated); + // console.log("non-streaming usage", output.llmOutput?.estimated); }, - handleLLMError: async (err) => { - console.error(err); + handleLLMError: async (_err) => { + // console.error(err); }, }, ], @@ -841,9 +860,9 @@ test("Streaming tokens can be found in usage_metadata field", async () => { finalResult = chunk; } } - console.log({ - usage_metadata: finalResult?.usage_metadata, - }); + // console.log({ + // usage_metadata: finalResult?.usage_metadata, + // }); expect(finalResult).toBeTruthy(); expect(finalResult?.usage_metadata).toBeTruthy(); expect(finalResult?.usage_metadata?.input_tokens).toBeGreaterThan(0); @@ -860,9 +879,9 @@ test("streaming: true tokens can be found in usage_metadata field", async () => include_usage: true, }, }); - console.log({ - usage_metadata: response?.usage_metadata, - }); + // console.log({ + // usage_metadata: response?.usage_metadata, + // }); expect(response).toBeTruthy(); expect(response?.usage_metadata).toBeTruthy(); expect(response?.usage_metadata?.input_tokens).toBeGreaterThan(0); @@ -877,9 +896,9 @@ test("streaming: streamUsage will not override stream_options", async () => { const response = await model.invoke("Hello, how are you?", { stream_options: { include_usage: false }, }); - console.log({ - usage_metadata: response?.usage_metadata, - }); + // console.log({ + // usage_metadata: response?.usage_metadata, + // }); expect(response).toBeTruthy(); expect(response?.usage_metadata).toBeFalsy(); }); @@ -887,9 +906,9 @@ test("streaming: streamUsage will not override stream_options", async () => { test("streaming: streamUsage default is true", async () => { const model = new ChatOpenAI(); const response = await model.invoke("Hello, how are you?"); - console.log({ - usage_metadata: 
response?.usage_metadata, - }); + // console.log({ + // usage_metadata: response?.usage_metadata, + // }); expect(response).toBeTruthy(); expect(response?.usage_metadata).toBeTruthy(); expect(response?.usage_metadata?.input_tokens).toBeGreaterThan(0); @@ -900,9 +919,9 @@ test("streaming: streamUsage default is true", async () => { test("populates ID field on AIMessage", async () => { const model = new ChatOpenAI(); const response = await model.invoke("Hell"); - console.log({ - invokeId: response.id, - }); + // console.log({ + // invokeId: response.id, + // }); expect(response.id?.length).toBeGreaterThan(1); expect(response?.id?.startsWith("chatcmpl-")).toBe(true); @@ -915,9 +934,9 @@ test("populates ID field on AIMessage", async () => { finalChunk = finalChunk.concat(chunk); } } - console.log({ - streamId: finalChunk?.id, - }); + // console.log({ + // streamId: finalChunk?.id, + // }); expect(finalChunk?.id?.length).toBeGreaterThan(1); expect(finalChunk?.id?.startsWith("chatcmpl-")).toBe(true); }); diff --git a/libs/langchain-openai/src/tests/chat_models_structured_output.int.test.ts b/libs/langchain-openai/src/tests/chat_models_structured_output.int.test.ts index dd511ff7e549..86bf0247bd49 100644 --- a/libs/langchain-openai/src/tests/chat_models_structured_output.int.test.ts +++ b/libs/langchain-openai/src/tests/chat_models_structured_output.int.test.ts @@ -30,7 +30,7 @@ test("withStructuredOutput zod schema function calling", async () => { ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -68,7 +68,7 @@ Respond with a JSON object containing three keys: ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); 
expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -98,7 +98,7 @@ test("withStructuredOutput JSON schema function calling", async () => { ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -128,7 +128,7 @@ test("withStructuredOutput OpenAI function definition function calling", async ( ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -166,7 +166,7 @@ Respond with a JSON object containing three keys: ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -206,7 +206,7 @@ Respond with a JSON object containing three keys: ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("operation" in result).toBe(true); expect("number1" in result).toBe(true); expect("number2" in result).toBe(true); @@ -239,7 +239,7 @@ test("withStructuredOutput includeRaw true", async () => { ]); const chain = prompt.pipe(modelWithStructuredOutput); const result = await chain.invoke({}); - console.log(result); + // console.log(result); expect("parsed" in result).toBe(true); // Need to make TS happy :) @@ -317,6 +317,6 @@ test("parallelToolCalls param", async () => { parallel_tool_calls: false, } ); - console.log(response.tool_calls); + // console.log(response.tool_calls); expect(response.tool_calls?.length).toBe(1); }); 
diff --git a/libs/langchain-openai/src/tests/legacy.int.test.ts b/libs/langchain-openai/src/tests/legacy.int.test.ts index 533a83fa488c..b1cd78d3152a 100644 --- a/libs/langchain-openai/src/tests/legacy.int.test.ts +++ b/libs/langchain-openai/src/tests/legacy.int.test.ts @@ -9,8 +9,10 @@ const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND; test("Test OpenAI", async () => { const model = new OpenAIChat({ modelName: "gpt-3.5-turbo", maxTokens: 10 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with prefix messages", async () => { @@ -21,8 +23,10 @@ test("Test OpenAI with prefix messages", async () => { ], maxTokens: 10, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("What is my name"); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI in streaming mode", async () => { @@ -47,7 +51,7 @@ test("Test OpenAI in streaming mode", async () => { }), }); const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); expect(nrNewTokens > 0).toBe(true); expect(res).toBe(streamedCompletion); @@ -59,14 +63,18 @@ test("Test OpenAI in streaming mode", async () => { test("Test OpenAI with stop", async () => { const model = new OpenAIChat({ maxTokens: 5 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.call("Print hello world", ["world"]); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with stop in object", async () => { const model = new OpenAIChat({ maxTokens: 5 }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world", { stop: ["world"] }); - 
console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with timeout in call options", async () => { @@ -121,7 +129,7 @@ test("Test OpenAIChat stream method", async () => { const chunks = []; for await (const chunk of stream) { chunks.push(chunk); - console.log(chunks); + // console.log(chunks); } expect(chunks.length).toBeGreaterThan(1); }); @@ -135,8 +143,10 @@ test("Test OpenAIChat stream method with abort", async () => { signal: AbortSignal.timeout(1000), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); } }).rejects.toThrow(); }); @@ -147,8 +157,10 @@ test("Test OpenAIChat stream method with early break", async () => { "How is your day going? Be extremely verbose." ); let i = 0; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); i += 1; if (i > 5) { break; diff --git a/libs/langchain-openai/src/tests/llms.int.test.ts b/libs/langchain-openai/src/tests/llms.int.test.ts index 8706e1634c55..66b4bf8638bf 100644 --- a/libs/langchain-openai/src/tests/llms.int.test.ts +++ b/libs/langchain-openai/src/tests/llms.int.test.ts @@ -16,8 +16,10 @@ test("Test OpenAI", async () => { maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with stop", async () => { @@ -25,8 +27,10 @@ test("Test OpenAI with stop", async () => { maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.call("Print hello world", ["world"]); - console.log({ res }); + // console.log({ res 
}); }); test("Test OpenAI with stop in object", async () => { @@ -34,8 +38,10 @@ test("Test OpenAI with stop in object", async () => { maxTokens: 5, modelName: "gpt-3.5-turbo-instruct", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Print hello world", { stop: ["world"] }); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with timeout in call options", async () => { @@ -104,11 +110,13 @@ test("Test OpenAI with concurrency == 1", async () => { modelName: "gpt-3.5-turbo-instruct", maxConcurrency: 1, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await Promise.all([ model.invoke("Print hello world"), model.invoke("Print hello world"), ]); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with maxTokens -1", async () => { @@ -116,15 +124,17 @@ test("Test OpenAI with maxTokens -1", async () => { maxTokens: -1, modelName: "gpt-3.5-turbo-instruct", }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.call("Print hello world", ["world"]); - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI with chat model returns OpenAIChat", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo" }); expect(model).toBeInstanceOf(OpenAIChat); const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); expect(typeof res).toBe("string"); }); @@ -132,7 +142,7 @@ test("Test OpenAI with instruct model returns OpenAI", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" }); expect(model).toBeInstanceOf(OpenAI); const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); expect(typeof res).toBe("string"); }); @@ -140,7 +150,7 @@ test("Test OpenAI with versioned instruct model returns 
OpenAI", async () => { const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct-0914" }); expect(model).toBeInstanceOf(OpenAI); const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); expect(typeof res).toBe("string"); }); @@ -166,8 +176,10 @@ test("Test ChatOpenAI tokenUsage", async () => { }, }), }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = await model.invoke("Hello"); - console.log({ res }); + // console.log({ res }); expect(tokenUsage.promptTokens).toBe(1); } finally { @@ -192,7 +204,7 @@ test("Test OpenAI in streaming mode", async () => { }), }); const res = await model.invoke("Print hello world"); - console.log({ res }); + // console.log({ res }); expect(nrNewTokens > 0).toBe(true); expect(res).toBe(streamedCompletion); @@ -218,10 +230,10 @@ test("Test OpenAI in streaming mode with multiple prompts", async () => { }), }); const res = await model.generate(["Print hello world", "print hello sea"]); - console.log( - res.generations, - res.generations.map((g) => g[0].generationInfo) - ); + // console.log( + // res.generations, + // res.generations.map((g) => g[0].generationInfo) + // ); expect(nrNewTokens > 0).toBe(true); expect(res.generations.length).toBe(2); @@ -247,10 +259,10 @@ test("Test OpenAIChat in streaming mode with multiple prompts", async () => { }), }); const res = await model.generate(["Print hello world", "print hello sea"]); - console.log( - res.generations, - res.generations.map((g) => g[0].generationInfo) - ); + // console.log( + // res.generations, + // res.generations.map((g) => g[0].generationInfo) + // ); expect(nrNewTokens > 0).toBe(true); expect(res.generations.length).toBe(2); @@ -270,11 +282,13 @@ test("Test OpenAI prompt value", async () => { expect(res.generations.length).toBe(1); for (const generation of res.generations) { expect(generation.length).toBe(1); + // 
@eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for (const g of generation) { - console.log(g.text); + // console.log(g.text); } } - console.log({ res }); + // console.log({ res }); }); test("Test OpenAI stream method", async () => { @@ -303,8 +317,10 @@ test("Test OpenAI stream method with abort", async () => { signal: AbortSignal.timeout(1000), } ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); } }).rejects.toThrow(); }); @@ -318,8 +334,10 @@ test("Test OpenAI stream method with early break", async () => { "How is your day going? Be extremely verbose." ); let i = 0; + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var for await (const chunk of stream) { - console.log(chunk); + // console.log(chunk); i += 1; if (i > 5) { break; diff --git a/libs/langchain-pinecone/src/tests/translator.int.test.ts b/libs/langchain-pinecone/src/tests/translator.int.test.ts index 43b2751fbb23..69a2b8fca663 100644 --- a/libs/langchain-pinecone/src/tests/translator.int.test.ts +++ b/libs/langchain-pinecone/src/tests/translator.int.test.ts @@ -144,22 +144,30 @@ describe("Pinecone self query", () => { }, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" 
); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query4 = await selfQueryRetriever.getRelevantDocuments( "Which movies are either comedy or drama and are less than 90 minutes?" ); const query5 = await selfQueryRetriever.getRelevantDocuments( "Awawawawa hello hello hello huh where am i?" ); - console.log(query1, query2, query3, query4, query5); // query 5 should return documents + // console.log(query1, query2, query3, query4, query5); // query 5 should return documents expect(query5.length).toBeGreaterThan(0); }); @@ -296,22 +304,30 @@ describe("Pinecone self query", () => { }, }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query1 = await selfQueryRetriever.getRelevantDocuments( "Which movies are less than 90 minutes?" ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query4 = await selfQueryRetriever.getRelevantDocuments( "Which movies are either comedy or drama and are less than 90 minutes?" ); const query5 = await selfQueryRetriever.getRelevantDocuments( "Awawawawa hello hello hello huh where am i?" 
); - console.log(query1, query2, query3, query4, query5); // query 5 should return documents + // console.log(query1, query2, query3, query4, query5); // query 5 should return documents expect(query5.length).toBeGreaterThan(0); }); }); diff --git a/libs/langchain-scripts/package.json b/libs/langchain-scripts/package.json index 4c183c1d436c..388c5222a2d6 100644 --- a/libs/langchain-scripts/package.json +++ b/libs/langchain-scripts/package.json @@ -67,7 +67,8 @@ "jest-environment-node": "^29.6.4", "prettier": "^2.8.3", "release-it": "^15.10.1", - "ts-jest": "^29.1.0" + "ts-jest": "^29.1.0", + "tsx": "^4.16.2" }, "publishConfig": { "access": "public" diff --git a/libs/langchain-scripts/src/tests/check_broken_links.test.ts b/libs/langchain-scripts/src/tests/check_broken_links.test.ts index ee1da33c8aad..d525e54821d9 100644 --- a/libs/langchain-scripts/src/tests/check_broken_links.test.ts +++ b/libs/langchain-scripts/src/tests/check_broken_links.test.ts @@ -23,6 +23,6 @@ test("Regex can find links in md files", () => { structured outputs from models more generally.`; const links = extractLinks(mdWithLinks); - console.log(links); + // console.log(links); expect(links).toEqual([link1, link2, link3]); }); diff --git a/libs/langchain-textsplitters/src/tests/text_splitter.test.ts b/libs/langchain-textsplitters/src/tests/text_splitter.test.ts index 104efd090320..772509a7269e 100644 --- a/libs/langchain-textsplitters/src/tests/text_splitter.test.ts +++ b/libs/langchain-textsplitters/src/tests/text_splitter.test.ts @@ -76,8 +76,10 @@ describe("Character text splitter", () => { test("Test invalid arguments.", () => { expect(() => { + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const res = new CharacterTextSplitter({ chunkSize: 2, chunkOverlap: 4 }); - console.log(res); + // console.log(res); }).toThrow(); }); diff --git a/libs/langchain-weaviate/src/tests/translator.int.test.ts 
b/libs/langchain-weaviate/src/tests/translator.int.test.ts index 4c3a85867f0a..f66c7de45923 100644 --- a/libs/langchain-weaviate/src/tests/translator.int.test.ts +++ b/libs/langchain-weaviate/src/tests/translator.int.test.ts @@ -105,16 +105,20 @@ test.skip("Weaviate Self Query Retriever Test", async () => { structuredQueryTranslator: new WeaviateTranslator(), }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query2 = await selfQueryRetriever.getRelevantDocuments( "Which movies are rated higher than 8.5?" ); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var const query3 = await selfQueryRetriever.getRelevantDocuments( "Which movies are directed by Greta Gerwig?" ); const query4 = await selfQueryRetriever.getRelevantDocuments( "Wau wau wau wau hello gello hello?" ); - console.log(query2, query3, query4); // query4 has to return empty array + // console.log(query2, query3, query4); // query4 has to return empty array expect(query4.length).toBe(0); }); @@ -261,7 +265,7 @@ test.skip("Weaviate Vector Store Self Query Retriever Test With Default Filter O const query4 = await selfQueryRetriever.getRelevantDocuments( "Wau wau wau wau hello gello hello?" ); - console.log(query4); // query4 has to return documents, since the default filter takes over with + // console.log(query4); // query4 has to return documents, since the default filter takes over with expect(query4.length).toEqual(7); }); @@ -408,6 +412,6 @@ test.skip("Weaviate Vector Store Self Query Retriever Test With Default Filter A const query4 = await selfQueryRetriever.getRelevantDocuments( "Wau wau wau wau hello gello hello?" 
); - console.log(query4); // query4 has to return empty array, since the default filter takes over with and filter + // console.log(query4); // query4 has to return empty array, since the default filter takes over with and filter expect(query4.length).toEqual(0); }); diff --git a/yarn.lock b/yarn.lock index 94223547d02b..c387d6eb2ded 100644 --- a/yarn.lock +++ b/yarn.lock @@ -9251,6 +9251,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/aix-ppc64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/aix-ppc64@npm:0.21.5" + conditions: os=aix & cpu=ppc64 + languageName: node + linkType: hard + "@esbuild/android-arm64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/android-arm64@npm:0.17.11" @@ -9265,6 +9272,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/android-arm64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/android-arm64@npm:0.21.5" + conditions: os=android & cpu=arm64 + languageName: node + linkType: hard + "@esbuild/android-arm@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/android-arm@npm:0.17.11" @@ -9279,6 +9293,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/android-arm@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/android-arm@npm:0.21.5" + conditions: os=android & cpu=arm + languageName: node + linkType: hard + "@esbuild/android-x64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/android-x64@npm:0.17.11" @@ -9293,6 +9314,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/android-x64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/android-x64@npm:0.21.5" + conditions: os=android & cpu=x64 + languageName: node + linkType: hard + "@esbuild/darwin-arm64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/darwin-arm64@npm:0.17.11" @@ -9307,6 +9335,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/darwin-arm64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/darwin-arm64@npm:0.21.5" + conditions: os=darwin & cpu=arm64 + languageName: node + 
linkType: hard + "@esbuild/darwin-x64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/darwin-x64@npm:0.17.11" @@ -9321,6 +9356,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/darwin-x64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/darwin-x64@npm:0.21.5" + conditions: os=darwin & cpu=x64 + languageName: node + linkType: hard + "@esbuild/freebsd-arm64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/freebsd-arm64@npm:0.17.11" @@ -9335,6 +9377,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/freebsd-arm64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/freebsd-arm64@npm:0.21.5" + conditions: os=freebsd & cpu=arm64 + languageName: node + linkType: hard + "@esbuild/freebsd-x64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/freebsd-x64@npm:0.17.11" @@ -9349,6 +9398,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/freebsd-x64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/freebsd-x64@npm:0.21.5" + conditions: os=freebsd & cpu=x64 + languageName: node + linkType: hard + "@esbuild/linux-arm64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/linux-arm64@npm:0.17.11" @@ -9363,6 +9419,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-arm64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/linux-arm64@npm:0.21.5" + conditions: os=linux & cpu=arm64 + languageName: node + linkType: hard + "@esbuild/linux-arm@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/linux-arm@npm:0.17.11" @@ -9377,6 +9440,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-arm@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/linux-arm@npm:0.21.5" + conditions: os=linux & cpu=arm + languageName: node + linkType: hard + "@esbuild/linux-ia32@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/linux-ia32@npm:0.17.11" @@ -9391,6 +9461,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-ia32@npm:0.21.5": + version: 0.21.5 + resolution: 
"@esbuild/linux-ia32@npm:0.21.5" + conditions: os=linux & cpu=ia32 + languageName: node + linkType: hard + "@esbuild/linux-loong64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/linux-loong64@npm:0.17.11" @@ -9405,6 +9482,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-loong64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/linux-loong64@npm:0.21.5" + conditions: os=linux & cpu=loong64 + languageName: node + linkType: hard + "@esbuild/linux-mips64el@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/linux-mips64el@npm:0.17.11" @@ -9419,6 +9503,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-mips64el@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/linux-mips64el@npm:0.21.5" + conditions: os=linux & cpu=mips64el + languageName: node + linkType: hard + "@esbuild/linux-ppc64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/linux-ppc64@npm:0.17.11" @@ -9433,6 +9524,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-ppc64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/linux-ppc64@npm:0.21.5" + conditions: os=linux & cpu=ppc64 + languageName: node + linkType: hard + "@esbuild/linux-riscv64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/linux-riscv64@npm:0.17.11" @@ -9447,6 +9545,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-riscv64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/linux-riscv64@npm:0.21.5" + conditions: os=linux & cpu=riscv64 + languageName: node + linkType: hard + "@esbuild/linux-s390x@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/linux-s390x@npm:0.17.11" @@ -9461,6 +9566,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/linux-s390x@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/linux-s390x@npm:0.21.5" + conditions: os=linux & cpu=s390x + languageName: node + linkType: hard + "@esbuild/linux-x64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/linux-x64@npm:0.17.11" @@ -9475,6 +9587,13 @@ 
__metadata: languageName: node linkType: hard +"@esbuild/linux-x64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/linux-x64@npm:0.21.5" + conditions: os=linux & cpu=x64 + languageName: node + linkType: hard + "@esbuild/netbsd-x64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/netbsd-x64@npm:0.17.11" @@ -9489,6 +9608,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/netbsd-x64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/netbsd-x64@npm:0.21.5" + conditions: os=netbsd & cpu=x64 + languageName: node + linkType: hard + "@esbuild/openbsd-x64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/openbsd-x64@npm:0.17.11" @@ -9503,6 +9629,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/openbsd-x64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/openbsd-x64@npm:0.21.5" + conditions: os=openbsd & cpu=x64 + languageName: node + linkType: hard + "@esbuild/sunos-x64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/sunos-x64@npm:0.17.11" @@ -9517,6 +9650,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/sunos-x64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/sunos-x64@npm:0.21.5" + conditions: os=sunos & cpu=x64 + languageName: node + linkType: hard + "@esbuild/win32-arm64@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/win32-arm64@npm:0.17.11" @@ -9531,6 +9671,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/win32-arm64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/win32-arm64@npm:0.21.5" + conditions: os=win32 & cpu=arm64 + languageName: node + linkType: hard + "@esbuild/win32-ia32@npm:0.17.11": version: 0.17.11 resolution: "@esbuild/win32-ia32@npm:0.17.11" @@ -9545,6 +9692,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/win32-ia32@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/win32-ia32@npm:0.21.5" + conditions: os=win32 & cpu=ia32 + languageName: node + linkType: hard + "@esbuild/win32-x64@npm:0.17.11": version: 0.17.11 resolution: 
"@esbuild/win32-x64@npm:0.17.11" @@ -9559,6 +9713,13 @@ __metadata: languageName: node linkType: hard +"@esbuild/win32-x64@npm:0.21.5": + version: 0.21.5 + resolution: "@esbuild/win32-x64@npm:0.21.5" + conditions: os=win32 & cpu=x64 + languageName: node + linkType: hard + "@eslint-community/eslint-utils@npm:^4.2.0, @eslint-community/eslint-utils@npm:^4.4.0": version: 4.4.0 resolution: "@eslint-community/eslint-utils@npm:4.4.0" @@ -12149,6 +12310,7 @@ __metadata: rollup: ^4.5.2 ts-jest: ^29.1.0 ts-morph: ^21.0.1 + tsx: ^4.16.2 typescript: ^5.4.5 bin: lc-build: bin/build.js @@ -24949,6 +25111,86 @@ __metadata: languageName: node linkType: hard +"esbuild@npm:~0.21.5": + version: 0.21.5 + resolution: "esbuild@npm:0.21.5" + dependencies: + "@esbuild/aix-ppc64": 0.21.5 + "@esbuild/android-arm": 0.21.5 + "@esbuild/android-arm64": 0.21.5 + "@esbuild/android-x64": 0.21.5 + "@esbuild/darwin-arm64": 0.21.5 + "@esbuild/darwin-x64": 0.21.5 + "@esbuild/freebsd-arm64": 0.21.5 + "@esbuild/freebsd-x64": 0.21.5 + "@esbuild/linux-arm": 0.21.5 + "@esbuild/linux-arm64": 0.21.5 + "@esbuild/linux-ia32": 0.21.5 + "@esbuild/linux-loong64": 0.21.5 + "@esbuild/linux-mips64el": 0.21.5 + "@esbuild/linux-ppc64": 0.21.5 + "@esbuild/linux-riscv64": 0.21.5 + "@esbuild/linux-s390x": 0.21.5 + "@esbuild/linux-x64": 0.21.5 + "@esbuild/netbsd-x64": 0.21.5 + "@esbuild/openbsd-x64": 0.21.5 + "@esbuild/sunos-x64": 0.21.5 + "@esbuild/win32-arm64": 0.21.5 + "@esbuild/win32-ia32": 0.21.5 + "@esbuild/win32-x64": 0.21.5 + dependenciesMeta: + "@esbuild/aix-ppc64": + optional: true + "@esbuild/android-arm": + optional: true + "@esbuild/android-arm64": + optional: true + "@esbuild/android-x64": + optional: true + "@esbuild/darwin-arm64": + optional: true + "@esbuild/darwin-x64": + optional: true + "@esbuild/freebsd-arm64": + optional: true + "@esbuild/freebsd-x64": + optional: true + "@esbuild/linux-arm": + optional: true + "@esbuild/linux-arm64": + optional: true + "@esbuild/linux-ia32": + optional: true + 
"@esbuild/linux-loong64": + optional: true + "@esbuild/linux-mips64el": + optional: true + "@esbuild/linux-ppc64": + optional: true + "@esbuild/linux-riscv64": + optional: true + "@esbuild/linux-s390x": + optional: true + "@esbuild/linux-x64": + optional: true + "@esbuild/netbsd-x64": + optional: true + "@esbuild/openbsd-x64": + optional: true + "@esbuild/sunos-x64": + optional: true + "@esbuild/win32-arm64": + optional: true + "@esbuild/win32-ia32": + optional: true + "@esbuild/win32-x64": + optional: true + bin: + esbuild: bin/esbuild + checksum: 2911c7b50b23a9df59a7d6d4cdd3a4f85855787f374dce751148dbb13305e0ce7e880dde1608c2ab7a927fc6cec3587b80995f7fc87a64b455f8b70b55fd8ec1 + languageName: node + linkType: hard + "escalade@npm:^3.1.1": version: 3.1.1 resolution: "escalade@npm:3.1.1" @@ -26924,6 +27166,16 @@ __metadata: languageName: node linkType: hard +"fsevents@npm:~2.3.3": + version: 2.3.3 + resolution: "fsevents@npm:2.3.3" + dependencies: + node-gyp: latest + checksum: 11e6ea6fea15e42461fc55b4b0e4a0a3c654faa567f1877dbd353f39156f69def97a69936d1746619d656c4b93de2238bf731f6085a03a50cabf287c9d024317 + conditions: os=darwin + languageName: node + linkType: hard + "fsevents@patch:fsevents@2.3.2#~builtin, fsevents@patch:fsevents@^2.3.2#~builtin, fsevents@patch:fsevents@~2.3.2#~builtin": version: 2.3.2 resolution: "fsevents@patch:fsevents@npm%3A2.3.2#~builtin::version=2.3.2&hash=df0bf1" @@ -26933,6 +27185,15 @@ __metadata: languageName: node linkType: hard +"fsevents@patch:fsevents@~2.3.3#~builtin": + version: 2.3.3 + resolution: "fsevents@patch:fsevents@npm%3A2.3.3#~builtin::version=2.3.3&hash=df0bf1" + dependencies: + node-gyp: latest + conditions: os=darwin + languageName: node + linkType: hard + "ftp@npm:^0.3.10": version: 0.3.10 resolution: "ftp@npm:0.3.10" @@ -27255,6 +27516,15 @@ __metadata: languageName: node linkType: hard +"get-tsconfig@npm:^4.7.5": + version: 4.7.6 + resolution: "get-tsconfig@npm:4.7.6" + dependencies: + resolve-pkg-maps: ^1.0.0 + checksum: 
ebfd86f0b356cde98e2a7afe63b58d92e02b8e413ff95551933d277702bf725386ee82c5c0092fe45fb2ba60002340c94ee70777b3220bbfeca83ab45dda1544 + languageName: node + linkType: hard + "get-uri@npm:3": version: 3.0.2 resolution: "get-uri@npm:3.0.2" @@ -39428,6 +39698,22 @@ __metadata: languageName: node linkType: hard +"tsx@npm:^4.16.2": + version: 4.16.2 + resolution: "tsx@npm:4.16.2" + dependencies: + esbuild: ~0.21.5 + fsevents: ~2.3.3 + get-tsconfig: ^4.7.5 + dependenciesMeta: + fsevents: + optional: true + bin: + tsx: dist/cli.mjs + checksum: bd481097d4614b9d40e7e2c44f7078d9f92b0050e959574a7a88ad8af33327d787f9d76c89689ee19b1275270038705fb9851055ae1a20e15d1c62a34d51a8fd + languageName: node + linkType: hard + "tunnel-agent@npm:^0.6.0": version: 0.6.0 resolution: "tunnel-agent@npm:0.6.0"