From 1d5b847645223078f7ea2aacffb33cd17f0697be Mon Sep 17 00:00:00 2001
From: Marcus Schiesser
Date: Thu, 19 Jun 2025 11:15:59 +0700
Subject: [PATCH 1/6] chore: clean e2e tests

---
 .../e2e/python/resolve_dependencies.spec.ts   | 17 +----
 .../shared/llamaindexserver_template.spec.ts  | 44 ++----------
 .../create-llama/e2e/typescript/eject.spec.ts | 70 +++++++++++++++++++
 .../typescript/resolve_dependencies.spec.ts   | 16 +----
 packages/create-llama/helpers/types.ts        |  9 +++
 5 files changed, 92 insertions(+), 64 deletions(-)
 create mode 100644 packages/create-llama/e2e/typescript/eject.spec.ts

diff --git a/packages/create-llama/e2e/python/resolve_dependencies.spec.ts b/packages/create-llama/e2e/python/resolve_dependencies.spec.ts
index 74106828c..7a97aa450 100644
--- a/packages/create-llama/e2e/python/resolve_dependencies.spec.ts
+++ b/packages/create-llama/e2e/python/resolve_dependencies.spec.ts
@@ -4,18 +4,16 @@ import fs from "fs";
 import path from "path";
 import util from "util";
 import {
+  ALL_USE_CASES,
   TemplateFramework,
   TemplateType,
-  TemplateUseCase,
   TemplateVectorDB,
 } from "../../helpers/types";
 import { RunCreateLlamaOptions, createTestDir, runCreateLlama } from "../utils";
 
 const execAsync = util.promisify(exec);
 
-const templateFramework: TemplateFramework = process.env.FRAMEWORK
-  ? (process.env.FRAMEWORK as TemplateFramework)
-  : "fastapi";
+const templateFramework: TemplateFramework = "fastapi";
 const templateType: TemplateType = process.env.TEMPLATE_TYPE
   ? (process.env.TEMPLATE_TYPE as TemplateType)
   : "llamaindexserver";
@@ -23,20 +21,11 @@ const vectorDb: TemplateVectorDB = process.env.VECTORDB
   ? (process.env.VECTORDB as TemplateVectorDB)
   : "none";
 
-const useCases: TemplateUseCase[] = [
-  "agentic_rag",
-  "deep_research",
-  "financial_report",
-  "code_generator",
-  "document_generator",
-  "hitl",
-];
-
 test.describe("Mypy check", () => {
   test.describe.configure({ retries: 0 });
 
   test.describe("LlamaIndexServer", async () => {
-    for (const useCase of useCases) {
+    for (const useCase of ALL_USE_CASES) {
       test(`should pass mypy for use case: ${useCase}`, async () => {
         const cwd = await createTestDir();
         await createAndCheckLlamaProject({
diff --git a/packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts b/packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts
index 0077a8e30..6905468e8 100644
--- a/packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts
+++ b/packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts
@@ -1,8 +1,12 @@
 import { expect, test } from "@playwright/test";
-import { ChildProcess, execSync } from "child_process";
+import { ChildProcess } from "child_process";
 import fs from "fs";
 import path from "path";
-import type { TemplateFramework, TemplateVectorDB } from "../../helpers";
+import {
+  ALL_USE_CASES,
+  type TemplateFramework,
+  type TemplateVectorDB,
+} from "../../helpers";
 import { createTestDir, runCreateLlama } from "../utils";
 
 const templateFramework: TemplateFramework = process.env.FRAMEWORK
@@ -15,16 +19,8 @@ const llamaCloudProjectName = "create-llama";
 const llamaCloudIndexName = "e2e-test";
 const userMessage = "Write a blog post about physical standards for letters";
 
-const templateUseCases = [
-  "agentic_rag",
-  "financial_report",
-  "deep_research",
-  "code_generator",
-  "hitl",
-];
-const ejectDir = "next";
 
-for (const useCase of templateUseCases) {
+for (const useCase of ALL_USE_CASES) {
   test.describe(`Test use case ${useCase} ${templateFramework} ${vectorDb}`, async () => {
     let port: number;
    let cwd: string;
@@ -65,10 +61,6 @@ for (const useCase of ALL_USE_CASES) {
     test("Frontend should be able to submit a message and receive the start of a streamed response", async ({
       page,
     }) => {
-      test.skip(
-        useCase === "financial_report" || useCase === "deep_research",
-        "Skip chat tests for financial report and deep research.",
-      );
       await page.goto(`http://localhost:${port}`);
 
       await page.fill("form textarea", userMessage);
@@ -88,28 +80,6 @@ for (const useCase of ALL_USE_CASES) {
       expect(response.ok()).toBeTruthy();
     });
 
-    test("Should successfully eject, install dependencies and build without errors", async () => {
-      test.skip(
-        templateFramework !== "nextjs" ||
-          useCase !== "code_generator" ||
-          vectorDb === "llamacloud",
-        "Eject test only applies to Next.js framework, code generator use case, and non-llamacloud",
-      );
-
-      // Run eject command
-      execSync("npm run eject", { cwd: path.join(cwd, name) });
-
-      // Verify next directory exists
-      const nextDirExists = fs.existsSync(path.join(cwd, name, ejectDir));
-      expect(nextDirExists).toBeTruthy();
-
-      // Install dependencies in next directory
-      execSync("npm install", { cwd: path.join(cwd, name, ejectDir) });
-
-      // Run build
-      execSync("npm run build", { cwd: path.join(cwd, name, ejectDir) });
-    });
-
     // clean processes
     test.afterAll(async () => {
       appProcess?.kill();
diff --git a/packages/create-llama/e2e/typescript/eject.spec.ts b/packages/create-llama/e2e/typescript/eject.spec.ts
new file mode 100644
index 000000000..f5679f080
--- /dev/null
+++ b/packages/create-llama/e2e/typescript/eject.spec.ts
@@ -0,0 +1,70 @@
+import { expect, test } from "@playwright/test";
+import { ChildProcess, execSync } from "child_process";
+import fs from "fs";
+import path from "path";
+import { type TemplateFramework, type TemplateVectorDB } from "../../helpers";
+import { createTestDir, runCreateLlama } from "../utils";
+
+const templateFramework: TemplateFramework = "nextjs";
+const useCase = "code_generator";
+const vectorDb: TemplateVectorDB = process.env.VECTORDB
+  ? (process.env.VECTORDB as TemplateVectorDB)
+  : "none";
+
+const llamaCloudProjectName = "create-llama";
+const llamaCloudIndexName = "e2e-test";
+
+const ejectDir = "next";
+
+test.describe(`Test eject command for ${useCase} ${templateFramework} ${vectorDb}`, async () => {
+  let port: number;
+  let cwd: string;
+  let name: string;
+  let appProcess: ChildProcess;
+
+  test.beforeAll(async () => {
+    port = Math.floor(Math.random() * 10000) + 10000;
+    cwd = await createTestDir();
+    const result = await runCreateLlama({
+      cwd,
+      templateType: "llamaindexserver",
+      templateFramework,
+      vectorDb,
+      port,
+      postInstallAction: "runApp",
+      useCase,
+      llamaCloudProjectName,
+      llamaCloudIndexName,
+      useLlamaParse: false,
+    });
+    name = result.projectName;
+    appProcess = result.appProcess;
+  });
+
+  test("Should successfully eject, install dependencies and build without errors", async ({
+    page,
+  }) => {
+    await page.goto(`http://localhost:${port}`);
+    await expect(page.getByText("Built by LlamaIndex")).toBeVisible({
+      timeout: 5 * 60 * 1000,
+    });
+
+    // Run eject command
+    execSync("pnpm run eject", { cwd: path.join(cwd, name) });
+
+    // Verify next directory exists
+    const nextDirExists = fs.existsSync(path.join(cwd, name, ejectDir));
+    expect(nextDirExists).toBeTruthy();
+
+    // Install dependencies in next directory
+    execSync("pnpm install", { cwd: path.join(cwd, name, ejectDir) });
+
+    // Run build
+    execSync("pnpm run build", { cwd: path.join(cwd, name, ejectDir) });
+  });
+
+  // clean processes
+  test.afterAll(async () => {
+    appProcess?.kill();
+  });
+});
diff --git a/packages/create-llama/e2e/typescript/resolve_dependencies.spec.ts b/packages/create-llama/e2e/typescript/resolve_dependencies.spec.ts
index 5ba85d7e8..2b4ff9945 100644
--- a/packages/create-llama/e2e/typescript/resolve_dependencies.spec.ts
+++ b/packages/create-llama/e2e/typescript/resolve_dependencies.spec.ts
@@ -4,6 +4,7 @@ import fs from "fs";
 import path from "path";
 import util from "util";
 import {
+  ALL_USE_CASES,
   TemplateFramework,
   TemplateType,
   TemplateUseCase,
@@ -13,9 +14,7 @@ import { createTestDir, runCreateLlama } from "../utils";
 
 const execAsync = util.promisify(exec);
 
-const templateFramework: TemplateFramework = process.env.FRAMEWORK
-  ? (process.env.FRAMEWORK as TemplateFramework)
-  : "nextjs";
+const templateFramework: TemplateFramework = "nextjs";
 const templateType: TemplateType = process.env.TEMPLATE_TYPE
   ? (process.env.TEMPLATE_TYPE as TemplateType)
   : "llamaindexserver";
@@ -23,19 +22,10 @@ const vectorDb: TemplateVectorDB = process.env.VECTORDB
   ? (process.env.VECTORDB as TemplateVectorDB)
   : "none";
 
-const useCases: TemplateUseCase[] = [
-  "agentic_rag",
-  "deep_research",
-  "financial_report",
-  "code_generator",
-  "document_generator",
-  "hitl",
-];
-
 test.describe("Test resolve TS dependencies", () => {
   test.describe.configure({ retries: 0 });
 
-  for (const useCase of useCases) {
+  for (const useCase of ALL_USE_CASES) {
     const optionDescription = `templateType: ${templateType}, useCase: ${useCase}, vectorDb: ${vectorDb}, llamaParse: ${vectorDb === "llamacloud"}`;
     test.describe(`${optionDescription}`, () => {
       test(`${optionDescription}`, async () => {
diff --git a/packages/create-llama/helpers/types.ts b/packages/create-llama/helpers/types.ts
index ab5dd1a0f..0a428ccbb 100644
--- a/packages/create-llama/helpers/types.ts
+++ b/packages/create-llama/helpers/types.ts
@@ -48,6 +48,15 @@ export type TemplateUseCase =
   | "code_generator"
   | "document_generator"
   | "hitl";
+
+export const ALL_USE_CASES: TemplateUseCase[] = [
+  "agentic_rag",
+  "deep_research",
+  "financial_report",
+  "code_generator",
+  "document_generator",
+  "hitl",
+];
 // Config for both file and folder
 export type FileSourceConfig =
   | {

From 59ee68f5212532f6e81e944a6a3e4a3a615a0e25 Mon Sep 17 00:00:00 2001
From: Marcus Schiesser
Date: Thu, 19 Jun 2025 12:06:44 +0700
Subject: [PATCH 2/6] remove non-working test

---
 .../create-llama/e2e/shared/llamaindexserver_template.spec.ts | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts b/packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts
index 6905468e8..05bff3235 100644
--- a/packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts
+++ b/packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts
@@ -61,6 +61,10 @@ for (const useCase of ALL_USE_CASES) {
     test("Frontend should be able to submit a message and receive the start of a streamed response", async ({
       page,
     }) => {
+      test.skip(
+        useCase === "financial_report" || useCase === "deep_research",
+        "Skip chat tests for financial report and deep research.",
+      );
       await page.goto(`http://localhost:${port}`);
 
       await page.fill("form textarea", userMessage);

From bb146bdcc546013ebe7af12c6eaa791b2c6c77b2 Mon Sep 17 00:00:00 2001
From: Marcus Schiesser
Date: Thu, 19 Jun 2025 12:34:11 +0700
Subject: [PATCH 3/6] fix test

---
 .../create-llama/e2e/typescript/eject.spec.ts | 17 ++++++++---------
 packages/create-llama/package.json            |  2 +-
 2 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/packages/create-llama/e2e/typescript/eject.spec.ts b/packages/create-llama/e2e/typescript/eject.spec.ts
index f5679f080..6d1916e90 100644
--- a/packages/create-llama/e2e/typescript/eject.spec.ts
+++ b/packages/create-llama/e2e/typescript/eject.spec.ts
@@ -31,7 +31,7 @@ test.describe(`Test eject command for ${useCase} ${templateFramework} ${vectorDb
       templateFramework,
       vectorDb,
       port,
-      postInstallAction: "runApp",
+      postInstallAction: "dependencies",
       useCase,
       llamaCloudProjectName,
       llamaCloudIndexName,
@@ -44,23 +44,22 @@ test.describe(`Test eject command for ${useCase} ${templateFramework} ${vectorDb
   test("Should successfully eject, install dependencies and build without errors", async ({
     page,
   }) => {
-    await page.goto(`http://localhost:${port}`);
-    await expect(page.getByText("Built by LlamaIndex")).toBeVisible({
-      timeout: 5 * 60 * 1000,
-    });
-
+    test.skip(
+      vectorDb === "llamacloud",
+      "Eject test only works with non-llamacloud",
+    );
     // Run eject command
-    execSync("pnpm run eject", { cwd: path.join(cwd, name) });
+    execSync("npm run eject", { cwd: path.join(cwd, name) });
 
     // Verify next directory exists
     const nextDirExists = fs.existsSync(path.join(cwd, name, ejectDir));
     expect(nextDirExists).toBeTruthy();
 
     // Install dependencies in next directory
-    execSync("pnpm install", { cwd: path.join(cwd, name, ejectDir) });
+    execSync("npm install", { cwd: path.join(cwd, name, ejectDir) });
 
     // Run build
-    execSync("pnpm run build", { cwd: path.join(cwd, name, ejectDir) });
+    execSync("npm run build", { cwd: path.join(cwd, name, ejectDir) });
   });
 
   // clean processes
diff --git a/packages/create-llama/package.json b/packages/create-llama/package.json
index d3a238e7f..45a4df590 100644
--- a/packages/create-llama/package.json
+++ b/packages/create-llama/package.json
@@ -30,7 +30,7 @@
     "dev": "ncc build ./index.ts -w -o dist/",
     "e2e": "playwright test",
     "e2e:python": "playwright test e2e/shared e2e/python",
-    "e2e:ts:server": "playwright test e2e/shared/llamaindexserver_template.spec.ts e2e/typescript",
+    "e2e:ts:server": "playwright test e2e/typescript/eject.spec.ts",
     "pack-install": "bash ./scripts/pack.sh"
   },
   "dependencies": {

From aa52bbcd027f45d1b69a119dc98163634caa3dd6 Mon Sep 17 00:00:00 2001
From: Marcus Schiesser
Date: Thu, 19 Jun 2025 13:41:20 +0700
Subject: [PATCH 4/6] reactivate test

---
 .github/workflows/e2e.yml          | 2 +-
 packages/create-llama/package.json | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index cb4720ce8..d2fc2c845 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -152,7 +152,7 @@ jobs:
       - name: Run Playwright tests for TypeScript
        run: |
-          pnpm run e2e:ts:server
+          pnpm run e2e:ts
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          LLAMA_CLOUD_API_KEY: ${{ secrets.LLAMA_CLOUD_API_KEY }}
diff --git a/packages/create-llama/package.json b/packages/create-llama/package.json
index 45a4df590..3b2757f4b 100644
--- a/packages/create-llama/package.json
+++ b/packages/create-llama/package.json
@@ -30,7 +30,7 @@
     "dev": "ncc build ./index.ts -w -o dist/",
     "e2e": "playwright test",
     "e2e:python": "playwright test e2e/shared e2e/python",
-    "e2e:ts:server": "playwright test e2e/typescript/eject.spec.ts",
+    "e2e:ts": "playwright test e2e/shared e2e/typescript",
     "pack-install": "bash ./scripts/pack.sh"
   },
   "dependencies": {

From 5db360af5d0f6c192b75c6ab4108b95309c1be30 Mon Sep 17 00:00:00 2001
From: Marcus Schiesser
Date: Thu, 19 Jun 2025 14:33:00 +0700
Subject: [PATCH 5/6] chore: remove template and useLlamaParse params

---
 .github/workflows/e2e.yml                     |   8 +-
 README.md                                     |  26 ++--
 .../e2e/python/resolve_dependencies.spec.ts   |   5 -
 .../shared/llamaindexserver_template.spec.ts  |   2 -
 .../create-llama/e2e/typescript/eject.spec.ts |   2 -
 .../typescript/resolve_dependencies.spec.ts   |  12 +-
 packages/create-llama/e2e/utils.ts            |  28 +---
 packages/create-llama/index.ts                |  23 +--
 packages/create-llama/questions/index.ts      | 147 +++++++++++++++++-
 packages/create-llama/questions/pro.ts        |  26 ----
 packages/create-llama/questions/simple.ts     | 147 ------------------
 packages/create-llama/questions/types.ts      |  17 +-
 packages/create-llama/questions/usecases.ts   |  20 +--
 13 files changed, 181 insertions(+), 282 deletions(-)
 delete mode 100644 packages/create-llama/questions/pro.ts
 delete mode 100644 packages/create-llama/questions/simple.ts

diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index d2fc2c845..b0183c642 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -23,7 +23,6 @@ jobs:
         os: [macos-latest, windows-latest, ubuntu-22.04]
         frameworks: ["fastapi"]
         vectordbs: ["none", "llamacloud"]
-        template-types: ["llamaindexserver"]
     defaults:
       run:
        shell: bash
@@ -80,7 +79,6 @@ jobs:
          LLAMA_CLOUD_API_KEY: ${{ secrets.LLAMA_CLOUD_API_KEY }}
          FRAMEWORK: ${{ matrix.frameworks }}
          VECTORDB: ${{ matrix.vectordbs }}
-          TEMPLATE_TYPE: ${{ matrix.template-types }}
          PYTHONIOENCODING: utf-8
          PYTHONLEGACYWINDOWSSTDIO: utf-8
          SERVER_PACKAGE_PATH: ${{ env.SERVER_PACKAGE_PATH }}
@@ -89,7 +87,7 @@ jobs:
      - uses: actions/upload-artifact@v4
        if: always()
        with:
-          name: playwright-report-python-${{ matrix.os }}-${{ matrix.frameworks }}-${{ matrix.vectordbs }}-${{ matrix.template-types }}
+          name: playwright-report-python-${{ matrix.os }}-${{ matrix.frameworks }}-${{ matrix.vectordbs }}
          path: packages/create-llama/playwright-report/
          overwrite: true
          retention-days: 30
@@ -104,7 +102,6 @@ jobs:
        os: [macos-latest, windows-latest, ubuntu-22.04]
        frameworks: ["nextjs"]
        vectordbs: ["none", "llamacloud"]
-        template-types: ["llamaindexserver"]
    defaults:
      run:
        shell: bash
@@ -158,14 +155,13 @@ jobs:
          LLAMA_CLOUD_API_KEY: ${{ secrets.LLAMA_CLOUD_API_KEY }}
          FRAMEWORK: ${{ matrix.frameworks }}
          VECTORDB: ${{ matrix.vectordbs }}
-          TEMPLATE_TYPE: ${{ matrix.template-types }}
          SERVER_PACKAGE_PATH: ${{ runner.temp }}/llamaindex-server.tgz
        working-directory: packages/create-llama
      - uses: actions/upload-artifact@v4
        if: always()
        with:
-          name: playwright-report-typescript-${{ matrix.os }}-${{ matrix.frameworks }}-${{ matrix.vectordbs}}-node${{ matrix.node-version }}-${{ matrix.template-types }}
+          name: playwright-report-typescript-${{ matrix.os }}-${{ matrix.frameworks }}-${{ matrix.vectordbs}}-node${{ matrix.node-version }}
          path: packages/create-llama/playwright-report/
          overwrite: true
          retention-days: 30
diff --git a/README.md b/README.md
index dd35d0833..dd391f925 100644
--- a/README.md
+++ b/README.md
@@ -25,13 +25,10 @@ to start the development server. You can then visit [http://localhost:3000](http
 ## What you'll get
 
 - A set of pre-configured use cases to get you started, e.g. Agentic RAG, Data Analysis, Report Generation, etc.
-- A Next.js-powered front-end using components from [shadcn/ui](https://ui.shadcn.com/). The app is set up as a chat interface that can answer questions about your data or interact with your agent
-- Your choice of two back-ends:
-  - **Next.js**: if you select this option, you’ll have a full-stack Next.js application that you can deploy to a host like [Vercel](https://vercel.com/) in just a few clicks. This uses [LlamaIndex.TS](https://www.npmjs.com/package/llamaindex), our TypeScript library.
-  - **Python FastAPI**: if you select this option, you’ll get a separate backend powered by the [llama-index Python package](https://pypi.org/project/llama-index/), which you can deploy to a service like [Render](https://render.com/) or [fly.io](https://fly.io/). The separate Next.js front-end will connect to this backend.
-- Each back-end has two endpoints:
-  - One streaming chat endpoint, that allow you to send the state of your chat and receive additional responses
-  - One endpoint to upload private files which can be used in your chat
+- A front-end using components from [shadcn/ui](https://ui.shadcn.com/). The app is set up as a chat interface that can answer questions about your data or interact with your agent
+- Your choice of two frameworks:
+  - **Next.js**: if you select this option, you’ll have a full-stack Next.js application that you can deploy to a host like [Vercel](https://vercel.com/) in just a few clicks. This uses [LlamaIndex.TS](https://www.npmjs.com/package/llamaindex), our TypeScript library with [LlamaIndex Server for TS](https://npmjs.com/package/@llamaindex/server).
+  - **Python FastAPI**: if you select this option, you’ll get a full-stack Python application powered by the [llama-index Python package](https://pypi.org/project/llama-index/) and [LlamaIndex Server for Python](https://pypi.org/project/llama-index-server/)
 - The app uses OpenAI by default, so you'll need an OpenAI API key, or you can customize it to use any of the dozens of LLMs we support.
 
 Here's how it looks like:
@@ -40,11 +37,11 @@ https://github.com/user-attachments/assets/d57af1a1-d99b-4e9c-98d9-4cbd1327eff8
 
 ## Using your data
 
-Optionally, you can supply your own data; the app will index it and make use of it, e.g. to answer questions. Your generated app will have a folder called `data` (If you're using Express or Python and generate a frontend, it will be `./backend/data`).
+Optionally, you can supply your own data; the app will index it and make use of it, e.g. to answer questions. Your generated app will have a folder called `data`.
 
-The app will ingest any supported files you put in this directory. Your Next.js and Express apps use LlamaIndex.TS, so they will be able to ingest any PDF, text, CSV, Markdown, Word and HTML files. The Python backend can read even more types, including video and audio files.
+The app will ingest any supported files you put in this directory. Your Next.js app uses LlamaIndex.TS, so it will be able to ingest any PDF, text, CSV, Markdown, Word and HTML files. The Python backend can read even more types, including video and audio files.
 
-Before you can use your data, you need to index it. If you're using the Next.js or Express apps, run:
+Before you can use your data, you need to index it. If you're using the Next.js app, run:
 
 ```bash
 npm run generate
 ```
@@ -60,11 +57,11 @@ uv run generate
 
 ## Customizing the AI models
 
-The app will default to OpenAI's `gpt-4o-mini` LLM and `text-embedding-3-large` embedding model.
+The app will default to OpenAI's `gpt-4.1` LLM and `text-embedding-3-large` embedding model.
 
-If you want to use different OpenAI models, add the `--ask-models` CLI parameter.
+If you want to use different models, add the `--ask-models` CLI parameter.
 
-You can also replace OpenAI with one of our [dozens of other supported LLMs](https://docs.llamaindex.ai/en/stable/module_guides/models/llms/modules.html).
+You can also replace one of the default models with one of our [dozens of other supported LLMs](https://docs.llamaindex.ai/en/stable/module_guides/models/llms/modules.html).
 
 To do so, you have to manually change the generated code (edit the `settings.ts` file for Typescript projects or the `settings.py` file for Python projects)
@@ -90,11 +87,10 @@
 Need to install the following packages:
 create-llama@latest
 Ok to proceed? (y) y
 
 ✔ What is your project named? … my-app
-✔ What app do you want to build? › Agentic RAG
+✔ What use case do you want to build? › Agentic RAG
 ✔ What language do you want to use? › Python (FastAPI)
 ✔ Do you want to use LlamaCloud services? … No / Yes
 ✔ Please provide your LlamaCloud API key (leave blank to skip): …
-✔ Please provide your OpenAI API key (leave blank to skip): …
 ? How would you like to proceed? › - Use arrow-keys. Return to submit.
    Just generate code (~1 sec)
 ❯  Start in VSCode (~1 sec)
diff --git a/packages/create-llama/e2e/python/resolve_dependencies.spec.ts b/packages/create-llama/e2e/python/resolve_dependencies.spec.ts
index 7a97aa450..6075cf121 100644
--- a/packages/create-llama/e2e/python/resolve_dependencies.spec.ts
+++ b/packages/create-llama/e2e/python/resolve_dependencies.spec.ts
@@ -6,7 +6,6 @@ import util from "util";
 import {
   ALL_USE_CASES,
   TemplateFramework,
-  TemplateType,
   TemplateVectorDB,
 } from "../../helpers/types";
 import { RunCreateLlamaOptions, createTestDir, runCreateLlama } from "../utils";
@@ -14,9 +13,6 @@ import { RunCreateLlamaOptions, createTestDir, runCreateLlama } from "../utils";
 const execAsync = util.promisify(exec);
 
 const templateFramework: TemplateFramework = "fastapi";
-const templateType: TemplateType = process.env.TEMPLATE_TYPE
-  ? (process.env.TEMPLATE_TYPE as TemplateType)
-  : "llamaindexserver";
 const vectorDb: TemplateVectorDB = process.env.VECTORDB
   ? (process.env.VECTORDB as TemplateVectorDB)
   : "none";
@@ -31,7 +27,6 @@ test.describe("Mypy check", () => {
         await createAndCheckLlamaProject({
           options: {
             cwd,
-            templateType,
             templateFramework,
             vectorDb,
             port: 3000,
diff --git a/packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts b/packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts
index 05bff3235..1c6d9343d 100644
--- a/packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts
+++ b/packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts
@@ -32,7 +32,6 @@ for (const useCase of ALL_USE_CASES) {
       cwd = await createTestDir();
       const result = await runCreateLlama({
         cwd,
-        templateType: "llamaindexserver",
         templateFramework,
         vectorDb,
         port,
@@ -40,7 +39,6 @@ for (const useCase of ALL_USE_CASES) {
         useCase,
         llamaCloudProjectName,
         llamaCloudIndexName,
-        useLlamaParse: vectorDb === "llamacloud",
       });
       name = result.projectName;
       appProcess = result.appProcess;
diff --git a/packages/create-llama/e2e/typescript/eject.spec.ts b/packages/create-llama/e2e/typescript/eject.spec.ts
index 6d1916e90..50bb1d25e 100644
--- a/packages/create-llama/e2e/typescript/eject.spec.ts
+++ b/packages/create-llama/e2e/typescript/eject.spec.ts
@@ -27,7 +27,6 @@ test.describe(`Test eject command for ${useCase} ${templateFramework} ${vectorDb
     cwd = await createTestDir();
     const result = await runCreateLlama({
       cwd,
-      templateType: "llamaindexserver",
       templateFramework,
       vectorDb,
       port,
@@ -35,7 +34,6 @@ test.describe(`Test eject command for ${useCase} ${templateFramework} ${vectorDb
       useCase,
       llamaCloudProjectName,
       llamaCloudIndexName,
-      useLlamaParse: false,
     });
     name = result.projectName;
     appProcess = result.appProcess;
diff --git a/packages/create-llama/e2e/typescript/resolve_dependencies.spec.ts b/packages/create-llama/e2e/typescript/resolve_dependencies.spec.ts
index 2b4ff9945..a19456f7a 100644
--- a/packages/create-llama/e2e/typescript/resolve_dependencies.spec.ts
+++ b/packages/create-llama/e2e/typescript/resolve_dependencies.spec.ts
@@ -6,7 +6,6 @@ import util from "util";
 import {
   ALL_USE_CASES,
   TemplateFramework,
-  TemplateType,
   TemplateUseCase,
   TemplateVectorDB,
 } from "../../helpers/types";
@@ -15,9 +14,6 @@ import { createTestDir, runCreateLlama } from "../utils";
 
 const execAsync = util.promisify(exec);
 
"nextjs"; -const templateType: TemplateType = process.env.TEMPLATE_TYPE - ? (process.env.TEMPLATE_TYPE as TemplateType) - : "llamaindexserver"; const vectorDb: TemplateVectorDB = process.env.VECTORDB ? (process.env.VECTORDB as TemplateVectorDB) : "none"; @@ -26,12 +22,10 @@ test.describe("Test resolve TS dependencies", () => { test.describe.configure({ retries: 0 }); for (const useCase of ALL_USE_CASES) { - const optionDescription = `templateType: ${templateType}, useCase: ${useCase}, vectorDb: ${vectorDb}, llamaParse: ${vectorDb === "llamacloud"}`; + const optionDescription = `useCase: ${useCase}, vectorDb: ${vectorDb}`; test.describe(`${optionDescription}`, () => { test(`${optionDescription}`, async () => { await runTest({ - templateType: templateType, - useLlamaParse: vectorDb === "llamacloud", useCase: useCase, vectorDb: vectorDb, }); @@ -41,8 +35,6 @@ test.describe("Test resolve TS dependencies", () => { }); async function runTest(options: { - templateType: TemplateType; - useLlamaParse: boolean; useCase: TemplateUseCase; vectorDb: TemplateVectorDB; }) { @@ -50,14 +42,12 @@ async function runTest(options: { const result = await runCreateLlama({ cwd: cwd, - templateType: options.templateType, templateFramework: templateFramework, vectorDb: options.vectorDb, port: 3000, postInstallAction: "none", llamaCloudProjectName: undefined, llamaCloudIndexName: undefined, - useLlamaParse: options.useLlamaParse, useCase: options.useCase, }); const name = result.projectName; diff --git a/packages/create-llama/e2e/utils.ts b/packages/create-llama/e2e/utils.ts index 2534a02d9..b80516228 100644 --- a/packages/create-llama/e2e/utils.ts +++ b/packages/create-llama/e2e/utils.ts @@ -6,7 +6,6 @@ import waitPort from "wait-port"; import { TemplateFramework, TemplatePostInstallAction, - TemplateType, TemplateVectorDB, } from "../helpers"; @@ -17,43 +16,34 @@ export type CreateLlamaResult = { export type RunCreateLlamaOptions = { cwd: string; - templateType: TemplateType; templateFramework: TemplateFramework; vectorDb: TemplateVectorDB; port: number; postInstallAction: TemplatePostInstallAction; + useCase: string; llamaCloudProjectName?: string; llamaCloudIndexName?: string; - useLlamaParse?: boolean; - useCase?: string; }; export async function runCreateLlama({ cwd, - templateType, templateFramework, vectorDb, port, postInstallAction, + useCase, llamaCloudProjectName, llamaCloudIndexName, - useLlamaParse, - useCase, }: RunCreateLlamaOptions): Promise { if (!process.env.OPENAI_API_KEY || !process.env.LLAMA_CLOUD_API_KEY) { throw new Error( "Setting the OPENAI_API_KEY and LLAMA_CLOUD_API_KEY is mandatory to run tests", ); } - const name = [templateType, templateFramework].join("-"); - - // Handle different data source types - + const name = [templateFramework, useCase, vectorDb].join("-"); const commandArgs = [ "create-llama", name, - "--template", - templateType, "--framework", templateFramework, "--vector-db", @@ -63,18 +53,10 @@ export async function runCreateLlama({ port, "--post-install-action", postInstallAction, + "--use-case", + useCase, ]; - if (useLlamaParse) { - commandArgs.push("--use-llama-parse"); - } else { - commandArgs.push("--no-llama-parse"); - } - - if (useCase) { - commandArgs.push("--use-case", useCase); - } - const command = commandArgs.join(" "); console.log(`running command '${command}' in ${cwd}`); const appProcess = exec(command, { diff --git a/packages/create-llama/index.ts b/packages/create-llama/index.ts index 9de4606da..4a6242f1c 100644 --- a/packages/create-llama/index.ts +++ 
+++ b/packages/create-llama/index.ts
@@ -54,13 +54,6 @@ const program = new Command(packageJson.name)
 `
 
   Explicitly tell the CLI to bootstrap the application using Yarn
-`,
-  )
-  .option(
-    "--template