10 changes: 3 additions & 7 deletions .github/workflows/e2e.yml
@@ -23,7 +23,6 @@ jobs:
os: [macos-latest, windows-latest, ubuntu-22.04]
frameworks: ["fastapi"]
vectordbs: ["none", "llamacloud"]
template-types: ["llamaindexserver"]
defaults:
run:
shell: bash
@@ -80,7 +79,6 @@ jobs:
LLAMA_CLOUD_API_KEY: ${{ secrets.LLAMA_CLOUD_API_KEY }}
FRAMEWORK: ${{ matrix.frameworks }}
VECTORDB: ${{ matrix.vectordbs }}
TEMPLATE_TYPE: ${{ matrix.template-types }}
PYTHONIOENCODING: utf-8
PYTHONLEGACYWINDOWSSTDIO: utf-8
SERVER_PACKAGE_PATH: ${{ env.SERVER_PACKAGE_PATH }}
@@ -89,7 +87,7 @@ jobs:
- uses: actions/upload-artifact@v4
if: always()
with:
name: playwright-report-python-${{ matrix.os }}-${{ matrix.frameworks }}-${{ matrix.vectordbs }}-${{ matrix.template-types }}
name: playwright-report-python-${{ matrix.os }}-${{ matrix.frameworks }}-${{ matrix.vectordbs }}
path: packages/create-llama/playwright-report/
overwrite: true
retention-days: 30
@@ -104,7 +102,6 @@ jobs:
os: [macos-latest, windows-latest, ubuntu-22.04]
frameworks: ["nextjs"]
vectordbs: ["none", "llamacloud"]
template-types: ["llamaindexserver"]
defaults:
run:
shell: bash
@@ -152,20 +149,19 @@ jobs:

- name: Run Playwright tests for TypeScript
run: |
pnpm run e2e:ts:server
pnpm run e2e:ts
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
LLAMA_CLOUD_API_KEY: ${{ secrets.LLAMA_CLOUD_API_KEY }}
FRAMEWORK: ${{ matrix.frameworks }}
VECTORDB: ${{ matrix.vectordbs }}
TEMPLATE_TYPE: ${{ matrix.template-types }}
SERVER_PACKAGE_PATH: ${{ runner.temp }}/llamaindex-server.tgz
working-directory: packages/create-llama

- uses: actions/upload-artifact@v4
if: always()
with:
name: playwright-report-typescript-${{ matrix.os }}-${{ matrix.frameworks }}-${{ matrix.vectordbs}}-node${{ matrix.node-version }}-${{ matrix.template-types }}
name: playwright-report-typescript-${{ matrix.os }}-${{ matrix.frameworks }}-${{ matrix.vectordbs}}-node${{ matrix.node-version }}
path: packages/create-llama/playwright-report/
overwrite: true
retention-days: 30
26 changes: 11 additions & 15 deletions README.md
@@ -25,13 +25,10 @@ to start the development server. You can then visit [http://localhost:3000](http
## What you'll get

- A set of pre-configured use cases to get you started, e.g. Agentic RAG, Data Analysis, Report Generation, etc.
- A Next.js-powered front-end using components from [shadcn/ui](https://ui.shadcn.com/). The app is set up as a chat interface that can answer questions about your data or interact with your agent
- Your choice of two back-ends:
- **Next.js**: if you select this option, you’ll have a full-stack Next.js application that you can deploy to a host like [Vercel](https://vercel.com/) in just a few clicks. This uses [LlamaIndex.TS](https://www.npmjs.com/package/llamaindex), our TypeScript library.
- **Python FastAPI**: if you select this option, you’ll get a separate backend powered by the [llama-index Python package](https://pypi.org/project/llama-index/), which you can deploy to a service like [Render](https://render.com/) or [fly.io](https://fly.io/). The separate Next.js front-end will connect to this backend.
- Each back-end has two endpoints:
- One streaming chat endpoint, that allow you to send the state of your chat and receive additional responses
- One endpoint to upload private files which can be used in your chat
- A front-end using components from [shadcn/ui](https://ui.shadcn.com/). The app is set up as a chat interface that can answer questions about your data or interact with your agent
- Your choice of two frameworks:
- **Next.js**: if you select this option, you’ll have a full-stack Next.js application that you can deploy to a host like [Vercel](https://vercel.com/) in just a few clicks. This uses [LlamaIndex.TS](https://www.npmjs.com/package/llamaindex), our TypeScript library with [LlamaIndex Server for TS](https://npmjs.com/package/@llamaindex/server).
- **Python FastAPI**: if you select this option, you’ll get a full-stack Python application powered by the [llama-index Python package](https://pypi.org/project/llama-index/) and [LlamaIndex Server for Python](https://pypi.org/project/llama-index-server/).
- The app uses OpenAI by default, so you'll need an OpenAI API key, or you can customize it to use any of the dozens of LLMs we support.

Here's what it looks like:
@@ -40,11 +37,11 @@ https://github.com/user-attachments/assets/d57af1a1-d99b-4e9c-98d9-4cbd1327eff8

## Using your data

Optionally, you can supply your own data; the app will index it and make use of it, e.g. to answer questions. Your generated app will have a folder called `data` (If you're using Express or Python and generate a frontend, it will be `./backend/data`).
Optionally, you can supply your own data; the app will index it and make use of it, e.g. to answer questions. Your generated app will have a folder called `data`.

The app will ingest any supported files you put in this directory. Your Next.js and Express apps use LlamaIndex.TS, so they will be able to ingest any PDF, text, CSV, Markdown, Word and HTML files. The Python backend can read even more types, including video and audio files.
The app will ingest any supported files you put in this directory. Your Next.js apps use LlamaIndex.TS, so they will be able to ingest any PDF, text, CSV, Markdown, Word and HTML files. The Python backend can read even more types, including video and audio files.

Before you can use your data, you need to index it. If you're using the Next.js or Express apps, run:
Before you can use your data, you need to index it. If you're using the Next.js apps, run:

```bash
npm run generate
@@ -60,11 +57,11 @@ uv run generate

## Customizing the AI models

The app will default to OpenAI's `gpt-4o-mini` LLM and `text-embedding-3-large` embedding model.
The app will default to OpenAI's `gpt-4.1` LLM and `text-embedding-3-large` embedding model.

If you want to use different OpenAI models, add the `--ask-models` CLI parameter.
If you want to use different models, add the `--ask-models` CLI parameter.

You can also replace OpenAI with one of our [dozens of other supported LLMs](https://docs.llamaindex.ai/en/stable/module_guides/models/llms/modules.html).
You can also replace one of the default models with one of our [dozens of other supported LLMs](https://docs.llamaindex.ai/en/stable/module_guides/models/llms/modules.html).

To do so, you have to manually change the generated code (edit the `settings.ts` file for TypeScript projects or the `settings.py` file for Python projects).

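For illustration (this is not part of the PR diff), here is a minimal sketch of what that edit might look like in a generated `settings.ts`, assuming the template uses LlamaIndex.TS's `Settings` singleton and the OpenAI adapters; exact imports and file layout vary by template and version:

```ts
// settings.ts: hypothetical sketch of a generated file; adjust to your template.
import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
import { Settings } from "llamaindex";

export function initSettings() {
  // Swap the default gpt-4.1 for another OpenAI model, or replace these
  // classes with any other supported LLM / embedding adapter.
  Settings.llm = new OpenAI({ model: "gpt-4o", temperature: 0.2 });
  Settings.embedModel = new OpenAIEmbedding({
    model: "text-embedding-3-large",
  });
}
```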
@@ -90,11 +87,10 @@ Need to install the following packages:
create-llama@latest
Ok to proceed? (y) y
✔ What is your project named? … my-app
✔ What app do you want to build? › Agentic RAG
✔ What use case do you want to build? › Agentic RAG
✔ What language do you want to use? › Python (FastAPI)
✔ Do you want to use LlamaCloud services? … No / Yes
✔ Please provide your LlamaCloud API key (leave blank to skip): …
✔ Please provide your OpenAI API key (leave blank to skip): …
? How would you like to proceed? › - Use arrow-keys. Return to submit.
Just generate code (~1 sec)
❯ Start in VSCode (~1 sec)
22 changes: 3 additions & 19 deletions packages/create-llama/e2e/python/resolve_dependencies.spec.ts
@@ -4,45 +4,29 @@ import fs from "fs";
import path from "path";
import util from "util";
import {
ALL_USE_CASES,
TemplateFramework,
TemplateType,
TemplateUseCase,
TemplateVectorDB,
} from "../../helpers/types";
import { RunCreateLlamaOptions, createTestDir, runCreateLlama } from "../utils";

const execAsync = util.promisify(exec);

const templateFramework: TemplateFramework = process.env.FRAMEWORK
? (process.env.FRAMEWORK as TemplateFramework)
: "fastapi";
const templateType: TemplateType = process.env.TEMPLATE_TYPE
? (process.env.TEMPLATE_TYPE as TemplateType)
: "llamaindexserver";
const templateFramework: TemplateFramework = "fastapi";
const vectorDb: TemplateVectorDB = process.env.VECTORDB
? (process.env.VECTORDB as TemplateVectorDB)
: "none";

const useCases: TemplateUseCase[] = [
"agentic_rag",
"deep_research",
"financial_report",
"code_generator",
"document_generator",
"hitl",
];

test.describe("Mypy check", () => {
test.describe.configure({ retries: 0 });

test.describe("LlamaIndexServer", async () => {
for (const useCase of useCases) {
for (const useCase of ALL_USE_CASES) {
test(`should pass mypy for use case: ${useCase}`, async () => {
const cwd = await createTestDir();
await createAndCheckLlamaProject({
options: {
cwd,
templateType,
templateFramework,
vectorDb,
port: 3000,
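For reference (not part of the diff itself), the hardcoded use-case arrays removed above are replaced by a shared `ALL_USE_CASES` constant imported from `helpers/types`. A plausible sketch of that constant, inferred from the removed list (the actual helpers file is not shown in this PR):

```ts
// helpers/types.ts (hypothetical sketch; the real file is not included in this diff)
export type TemplateUseCase =
  | "agentic_rag"
  | "deep_research"
  | "financial_report"
  | "code_generator"
  | "document_generator"
  | "hitl";

// Single source of truth that the e2e specs iterate over instead of per-file arrays.
export const ALL_USE_CASES: TemplateUseCase[] = [
  "agentic_rag",
  "deep_research",
  "financial_report",
  "code_generator",
  "document_generator",
  "hitl",
];
```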
42 changes: 7 additions & 35 deletions packages/create-llama/e2e/shared/llamaindexserver_template.spec.ts
@@ -1,8 +1,12 @@
import { expect, test } from "@playwright/test";
import { ChildProcess, execSync } from "child_process";
import { ChildProcess } from "child_process";
import fs from "fs";
import path from "path";
import type { TemplateFramework, TemplateVectorDB } from "../../helpers";
import {
ALL_USE_CASES,
type TemplateFramework,
type TemplateVectorDB,
} from "../../helpers";
import { createTestDir, runCreateLlama } from "../utils";

const templateFramework: TemplateFramework = process.env.FRAMEWORK
@@ -15,16 +19,8 @@ const llamaCloudProjectName = "create-llama";
const llamaCloudIndexName = "e2e-test";

const userMessage = "Write a blog post about physical standards for letters";
const templateUseCases = [
"agentic_rag",
"financial_report",
"deep_research",
"code_generator",
"hitl",
];
const ejectDir = "next";

for (const useCase of templateUseCases) {
for (const useCase of ALL_USE_CASES) {
test.describe(`Test use case ${useCase} ${templateFramework} ${vectorDb}`, async () => {
let port: number;
let cwd: string;
@@ -36,15 +32,13 @@ for (const useCase of templateUseCases) {
cwd = await createTestDir();
const result = await runCreateLlama({
cwd,
templateType: "llamaindexserver",
templateFramework,
vectorDb,
port,
postInstallAction: "runApp",
useCase,
llamaCloudProjectName,
llamaCloudIndexName,
useLlamaParse: vectorDb === "llamacloud",
});
name = result.projectName;
appProcess = result.appProcess;
@@ -88,28 +82,6 @@
expect(response.ok()).toBeTruthy();
});

test("Should successfully eject, install dependencies and build without errors", async () => {
test.skip(
templateFramework !== "nextjs" ||
useCase !== "code_generator" ||
vectorDb === "llamacloud",
"Eject test only applies to Next.js framework, code generator use case, and non-llamacloud",
);

// Run eject command
execSync("npm run eject", { cwd: path.join(cwd, name) });

// Verify next directory exists
const nextDirExists = fs.existsSync(path.join(cwd, name, ejectDir));
expect(nextDirExists).toBeTruthy();

// Install dependencies in next directory
execSync("npm install", { cwd: path.join(cwd, name, ejectDir) });

// Run build
execSync("npm run build", { cwd: path.join(cwd, name, ejectDir) });
});

// clean processes
test.afterAll(async () => {
appProcess?.kill();
70 changes: 70 additions & 0 deletions packages/create-llama/e2e/typescript/eject.spec.ts
@@ -0,0 +1,70 @@
import { expect, test } from "@playwright/test";
import { ChildProcess, execSync } from "child_process";
import fs from "fs";
import path from "path";
import { type TemplateFramework, type TemplateVectorDB } from "../../helpers";
import { createTestDir, runCreateLlama } from "../utils";

const templateFramework: TemplateFramework = "nextjs";
const useCase = "code_generator";
const vectorDb: TemplateVectorDB = process.env.VECTORDB
? (process.env.VECTORDB as TemplateVectorDB)
: "none";

const llamaCloudProjectName = "create-llama";
const llamaCloudIndexName = "e2e-test";

const ejectDir = "next";

test.describe.skip(
`Test eject command for ${useCase} ${templateFramework} ${vectorDb}`,
async () => {
let port: number;
let cwd: string;
let name: string;
let appProcess: ChildProcess;

test.beforeAll(async () => {
port = Math.floor(Math.random() * 10000) + 10000;
cwd = await createTestDir();
const result = await runCreateLlama({
cwd,
templateFramework,
vectorDb,
port,
postInstallAction: "dependencies",
useCase,
llamaCloudProjectName,
llamaCloudIndexName,
});
name = result.projectName;
appProcess = result.appProcess;
});

test("Should successfully eject, install dependencies and build without errors", async ({
page,
}) => {
test.skip(
vectorDb === "llamacloud",
"Eject test only works with non-llamacloud",
);
// Run eject command
execSync("npm run eject", { cwd: path.join(cwd, name) });

// Verify next directory exists
const nextDirExists = fs.existsSync(path.join(cwd, name, ejectDir));
expect(nextDirExists).toBeTruthy();

// Install dependencies in next directory
execSync("npm install", { cwd: path.join(cwd, name, ejectDir) });

// Run build
execSync("npm run build", { cwd: path.join(cwd, name, ejectDir) });
});

// clean processes
test.afterAll(async () => {
appProcess?.kill();
});
},
);
28 changes: 4 additions & 24 deletions packages/create-llama/e2e/typescript/resolve_dependencies.spec.ts
@@ -4,44 +4,28 @@ import fs from "fs";
import path from "path";
import util from "util";
import {
ALL_USE_CASES,
TemplateFramework,
TemplateType,
TemplateUseCase,
TemplateVectorDB,
} from "../../helpers/types";
import { createTestDir, runCreateLlama } from "../utils";

const execAsync = util.promisify(exec);

const templateFramework: TemplateFramework = process.env.FRAMEWORK
? (process.env.FRAMEWORK as TemplateFramework)
: "nextjs";
const templateType: TemplateType = process.env.TEMPLATE_TYPE
? (process.env.TEMPLATE_TYPE as TemplateType)
: "llamaindexserver";
const templateFramework: TemplateFramework = "nextjs";
const vectorDb: TemplateVectorDB = process.env.VECTORDB
? (process.env.VECTORDB as TemplateVectorDB)
: "none";

const useCases: TemplateUseCase[] = [
"agentic_rag",
"deep_research",
"financial_report",
"code_generator",
"document_generator",
"hitl",
];

test.describe("Test resolve TS dependencies", () => {
test.describe.configure({ retries: 0 });

for (const useCase of useCases) {
const optionDescription = `templateType: ${templateType}, useCase: ${useCase}, vectorDb: ${vectorDb}, llamaParse: ${vectorDb === "llamacloud"}`;
for (const useCase of ALL_USE_CASES) {
const optionDescription = `useCase: ${useCase}, vectorDb: ${vectorDb}`;
test.describe(`${optionDescription}`, () => {
test(`${optionDescription}`, async () => {
await runTest({
templateType: templateType,
useLlamaParse: vectorDb === "llamacloud",
useCase: useCase,
vectorDb: vectorDb,
});
@@ -51,23 +35,19 @@ test.describe("Test resolve TS dependencies", () => {
});

async function runTest(options: {
templateType: TemplateType;
useLlamaParse: boolean;
useCase: TemplateUseCase;
vectorDb: TemplateVectorDB;
}) {
const cwd = await createTestDir();

const result = await runCreateLlama({
cwd: cwd,
templateType: options.templateType,
templateFramework: templateFramework,
vectorDb: options.vectorDb,
port: 3000,
postInstallAction: "none",
llamaCloudProjectName: undefined,
llamaCloudIndexName: undefined,
useLlamaParse: options.useLlamaParse,
useCase: options.useCase,
});
const name = result.projectName;