diff --git a/e2e/extractor_template.spec.ts b/e2e/extractor_template.spec.ts
index 4644cd799..0818e7c7b 100644
--- a/e2e/extractor_template.spec.ts
+++ b/e2e/extractor_template.spec.ts
@@ -32,16 +32,16 @@ if (
     cwd = await createTestDir();
     frontendPort = Math.floor(Math.random() * 10000) + 10000;
     backendPort = frontendPort + 1;
-    const result = await runCreateLlama(
+    const result = await runCreateLlama({
       cwd,
-      "extractor",
-      "fastapi",
-      "--example-file",
-      "none",
-      frontendPort,
-      backendPort,
-      "runApp",
-    );
+      templateType: "extractor",
+      templateFramework: "fastapi",
+      dataSource: "--example-file",
+      vectorDb: "none",
+      port: frontendPort,
+      externalPort: backendPort,
+      postInstallAction: "runApp",
+    });
     name = result.projectName;
     appProcess = result.appProcess;
   });
diff --git a/e2e/multiagent_template.spec.ts b/e2e/multiagent_template.spec.ts
index c69e34c2a..619b8cd15 100644
--- a/e2e/multiagent_template.spec.ts
+++ b/e2e/multiagent_template.spec.ts
@@ -36,18 +36,18 @@ test.describe(`Test multiagent template ${templateFramework} ${dataSource} ${tem
     port = Math.floor(Math.random() * 10000) + 10000;
     externalPort = port + 1;
     cwd = await createTestDir();
-    const result = await runCreateLlama(
+    const result = await runCreateLlama({
       cwd,
-      "multiagent",
+      templateType: "multiagent",
       templateFramework,
       dataSource,
       vectorDb,
       port,
       externalPort,
-      templatePostInstallAction,
+      postInstallAction: templatePostInstallAction,
       templateUI,
       appType,
-    );
+    });
     name = result.projectName;
     appProcess = result.appProcess;
   });
diff --git a/e2e/resolve_python_dependencies.spec.ts b/e2e/resolve_python_dependencies.spec.ts
index d48e82d5c..b678a107e 100644
--- a/e2e/resolve_python_dependencies.spec.ts
+++ b/e2e/resolve_python_dependencies.spec.ts
@@ -53,21 +53,21 @@ if (
 
         test(`options: ${optionDescription}`, async () => {
           const cwd = await createTestDir();
-          const result = await runCreateLlama(
+          const result = await runCreateLlama({
             cwd,
-            "streaming",
-            "fastapi",
+            templateType: "streaming",
+            templateFramework: "fastapi",
             dataSource,
             vectorDb,
-            3000, // port
-            8000, // externalPort
-            "none", // postInstallAction
-            undefined, // ui
-            "--no-frontend", // appType
-            undefined, // llamaCloudProjectName
-            undefined, // llamaCloudIndexName
-            tool,
-          );
+            port: 3000, // port
+            externalPort: 8000, // externalPort
+            postInstallAction: "none", // postInstallAction
+            templateUI: undefined, // ui
+            appType: "--no-frontend", // appType
+            llamaCloudProjectName: undefined, // llamaCloudProjectName
+            llamaCloudIndexName: undefined, // llamaCloudIndexName
+            tools: tool,
+          });
           const name = result.projectName;
 
           // Check if the app folder exists
diff --git a/e2e/resolve_ts_dependencies.spec.ts b/e2e/resolve_ts_dependencies.spec.ts
index d8cfd7314..7e7753034 100644
--- a/e2e/resolve_ts_dependencies.spec.ts
+++ b/e2e/resolve_ts_dependencies.spec.ts
@@ -19,6 +19,7 @@ if (
   templateFramework == "nextjs" ||
   templateFramework == "express" // test is only relevant for TS projects
 ) {
+  const llamaParseOptions = [true, false];
   // vectorDBs combinations to test
   const vectorDbs: TemplateVectorDB[] = [
     "mongo",
@@ -33,67 +34,69 @@ if (
   ];
 
   test.describe("Test resolve TS dependencies", () => {
-    for (const vectorDb of vectorDbs) {
-      const optionDescription = `vectorDb: ${vectorDb}, dataSource: ${dataSource}`;
+    for (const llamaParseOpt of llamaParseOptions) {
+      for (const vectorDb of vectorDbs) {
+        const optionDescription = `vectorDb: ${vectorDb}, dataSource: ${dataSource}, llamaParse: ${llamaParseOpt}`;
 
-      test(`options: ${optionDescription}`, async () => {
-        const cwd = await createTestDir();
+        test(`options: ${optionDescription}`, async () => {
+          const cwd = await createTestDir();
 
-        const result = await runCreateLlama(
-          cwd,
-          "streaming",
-          templateFramework,
-          dataSource,
-          vectorDb,
-          3000, // port
-          8000, // externalPort
-          "none", // postInstallAction
-          undefined, // ui
-          templateFramework === "nextjs" ? "" : "--no-frontend", // appType
-          undefined, // llamaCloudProjectName
-          undefined, // llamaCloudIndexName
-          undefined, // tools
-          true, // useLlamaParse
-        );
-        const name = result.projectName;
+          const result = await runCreateLlama({
+            cwd: cwd,
+            templateType: "streaming",
+            templateFramework: templateFramework,
+            dataSource: dataSource,
+            vectorDb: vectorDb,
+            port: 3000,
+            externalPort: 8000,
+            postInstallAction: "none",
+            templateUI: undefined,
+            appType: templateFramework === "nextjs" ? "" : "--no-frontend",
+            llamaCloudProjectName: undefined,
+            llamaCloudIndexName: undefined,
+            tools: undefined,
+            useLlamaParse: llamaParseOpt,
+          });
+          const name = result.projectName;
 
-        // Check if the app folder exists
-        const appDir = path.join(cwd, name);
-        const dirExists = fs.existsSync(appDir);
-        expect(dirExists).toBeTruthy();
+          // Check if the app folder exists
+          const appDir = path.join(cwd, name);
+          const dirExists = fs.existsSync(appDir);
+          expect(dirExists).toBeTruthy();
 
-        // Install dependencies using pnpm
-        try {
-          const { stderr: installStderr } = await execAsync(
-            "pnpm install --prefer-offline",
-            {
-              cwd: appDir,
-            },
-          );
-          expect(installStderr).toBeFalsy();
-        } catch (error) {
-          console.error("Error installing dependencies:", error);
-          throw error;
-        }
+          // Install dependencies using pnpm
+          try {
+            const { stderr: installStderr } = await execAsync(
+              "pnpm install --prefer-offline",
+              {
+                cwd: appDir,
+              },
+            );
+            expect(installStderr).toBeFalsy();
+          } catch (error) {
+            console.error("Error installing dependencies:", error);
+            throw error;
+          }
 
-        // Run tsc type check and capture the output
-        try {
-          const { stdout, stderr } = await execAsync(
-            "pnpm exec tsc -b --diagnostics",
-            {
-              cwd: appDir,
-            },
-          );
-          // Check if there's any error output
-          expect(stderr).toBeFalsy();
+          // Run tsc type check and capture the output
+          try {
+            const { stdout, stderr } = await execAsync(
+              "pnpm exec tsc -b --diagnostics",
+              {
+                cwd: appDir,
+              },
+            );
+            // Check if there's any error output
+            expect(stderr).toBeFalsy();
 
-          // Log the stdout for debugging purposes
-          console.log("TypeScript type-check output:", stdout);
-        } catch (error) {
-          console.error("Error running tsc:", error);
-          throw error;
-        }
-      });
+            // Log the stdout for debugging purposes
+            console.log("TypeScript type-check output:", stdout);
+          } catch (error) {
+            console.error("Error running tsc:", error);
+            throw error;
+          }
+        });
+      }
     }
   });
 }
diff --git a/e2e/streaming_template.spec.ts b/e2e/streaming_template.spec.ts
index 73c5b1465..53eb2318f 100644
--- a/e2e/streaming_template.spec.ts
+++ b/e2e/streaming_template.spec.ts
@@ -39,20 +39,20 @@ test.describe(`Test streaming template ${templateFramework} ${dataSource} ${temp
     port = Math.floor(Math.random() * 10000) + 10000;
     externalPort = port + 1;
     cwd = await createTestDir();
-    const result = await runCreateLlama(
+    const result = await runCreateLlama({
       cwd,
-      "streaming",
+      templateType: "streaming",
       templateFramework,
       dataSource,
       vectorDb,
       port,
       externalPort,
-      templatePostInstallAction,
+      postInstallAction: templatePostInstallAction,
       templateUI,
       appType,
       llamaCloudProjectName,
       llamaCloudIndexName,
-    );
+    });
     name = result.projectName;
     appProcess = result.appProcess;
   });
diff --git a/e2e/utils.ts b/e2e/utils.ts
index 50f7da3d5..301d57be8 100644
--- a/e2e/utils.ts
+++ b/e2e/utils.ts
@@ -18,23 +18,40 @@ export type CreateLlamaResult = {
   appProcess: ChildProcess;
 };
 
+export type RunCreateLlamaOptions = {
+  cwd: string;
+  templateType: TemplateType;
+  templateFramework: TemplateFramework;
+  dataSource: string;
+  vectorDb: TemplateVectorDB;
+  port: number;
+  externalPort: number;
+  postInstallAction: TemplatePostInstallAction;
+  templateUI?: TemplateUI;
+  appType?: AppType;
+  llamaCloudProjectName?: string;
+  llamaCloudIndexName?: string;
+  tools?: string;
+  useLlamaParse?: boolean;
+};
+
 // eslint-disable-next-line max-params
-export async function runCreateLlama(
-  cwd: string,
-  templateType: TemplateType,
-  templateFramework: TemplateFramework,
-  dataSource: string,
-  vectorDb: TemplateVectorDB,
-  port: number,
-  externalPort: number,
-  postInstallAction: TemplatePostInstallAction,
-  templateUI?: TemplateUI,
-  appType?: AppType,
-  llamaCloudProjectName?: string,
-  llamaCloudIndexName?: string,
-  tools?: string,
-  useLlamaParse?: boolean,
-): Promise<CreateLlamaResult> {
+export async function runCreateLlama({
+  cwd,
+  templateType,
+  templateFramework,
+  dataSource,
+  vectorDb,
+  port,
+  externalPort,
+  postInstallAction,
+  templateUI,
+  appType,
+  llamaCloudProjectName,
+  llamaCloudIndexName,
+  tools,
+  useLlamaParse,
+}: RunCreateLlamaOptions): Promise<CreateLlamaResult> {
   if (!process.env.OPENAI_API_KEY || !process.env.LLAMA_CLOUD_API_KEY) {
     throw new Error(
       "Setting the OPENAI_API_KEY and LLAMA_CLOUD_API_KEY is mandatory to run tests",