Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changeset/chilly-foxes-remain.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
"create-llama": patch
---

Add artifacts use case (python)
2 changes: 1 addition & 1 deletion packages/create-llama/helpers/python.ts
Original file line number Diff line number Diff line change
Expand Up @@ -562,7 +562,7 @@ const installLlamaIndexServerTemplate = async ({
process.exit(1);
}

await copy("workflow.py", path.join(root, "app"), {
await copy("*.py", path.join(root, "app"), {
parents: true,
cwd: path.join(templatesDir, "components", "workflows", "python", useCase),
});
Expand Down
3 changes: 2 additions & 1 deletion packages/create-llama/helpers/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,8 @@ export type TemplateUseCase =
| "form_filling"
| "extractor"
| "contract_review"
| "agentic_rag";
| "agentic_rag"
| "artifacts";
// Config for both file and folder
export type FileSourceConfig =
| {
Expand Down
28 changes: 22 additions & 6 deletions packages/create-llama/questions/simple.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,11 @@ import { ModelConfig, TemplateFramework } from "../helpers/types";
import { PureQuestionArgs, QuestionResults } from "./types";
import { askPostInstallAction, questionHandlers } from "./utils";

type AppType = "agentic_rag" | "financial_report" | "deep_research";
type AppType =
| "agentic_rag"
| "financial_report"
| "deep_research"
| "artifacts";

type SimpleAnswers = {
appType: AppType;
Expand Down Expand Up @@ -42,6 +46,12 @@ export const askSimpleQuestions = async (
description:
"Researches and analyzes provided documents from multiple perspectives, generating a comprehensive report with citations to support key findings and insights.",
},
{
title: "Artifacts",
value: "artifacts",
description:
"Build your own Vercel's v0 or OpenAI's canvas-styled UI.",
},
],
},
questionHandlers,
Expand All @@ -52,7 +62,7 @@ export const askSimpleQuestions = async (

let useLlamaCloud = false;

if (appType !== "extractor" && appType !== "contract_review") {
if (appType !== "artifacts") {
const { language: newLanguage } = await prompts(
{
type: "select",
Expand Down Expand Up @@ -111,10 +121,10 @@ const convertAnswers = async (
args: PureQuestionArgs,
answers: SimpleAnswers,
): Promise<QuestionResults> => {
const MODEL_GPT4o: ModelConfig = {
const MODEL_GPT41: ModelConfig = {
provider: "openai",
apiKey: args.openAiKey,
model: "gpt-4o",
model: "gpt-4.1",
embeddingModel: "text-embedding-3-large",
dimensions: 1536,
isConfigured(): boolean {
Expand All @@ -135,13 +145,19 @@ const convertAnswers = async (
template: "llamaindexserver",
dataSources: EXAMPLE_10K_SEC_FILES,
tools: getTools(["interpreter", "document_generator"]),
modelConfig: MODEL_GPT4o,
modelConfig: MODEL_GPT41,
},
deep_research: {
template: "llamaindexserver",
dataSources: EXAMPLE_10K_SEC_FILES,
tools: [],
modelConfig: MODEL_GPT4o,
modelConfig: MODEL_GPT41,
},
artifacts: {
template: "llamaindexserver",
dataSources: [],
tools: [],
modelConfig: MODEL_GPT41,
},
};

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,137 @@
import { Badge } from "@/components/ui/badge";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { Progress } from "@/components/ui/progress";
import { Skeleton } from "@/components/ui/skeleton";
import { cn } from "@/lib/utils";
import { Markdown } from "@llamaindex/chat-ui/widgets";
import { ListChecks, Loader2, Wand2 } from "lucide-react";
import { useEffect, useState } from "react";

// Per-stage presentation metadata for the workflow progress card.
// Keys correspond to the `state` field of incoming workflow events;
// a state with no entry here causes the card to render nothing.
const STAGE_META = {
  // Stage 1: the request is being analyzed and the artifact is planned.
  plan: {
    icon: ListChecks, // header icon component (lucide-react)
    badgeText: "Step 1/2: Planning",
    gradient: "from-blue-100 via-blue-50 to-white", // card background gradient
    progress: 33, // progress-bar value (percent)
    iconBg: "bg-blue-100 text-blue-600", // icon container colors
    badge: "bg-blue-100 text-blue-700", // badge colors
  },
  // Stage 2: the artifact is being generated from the planned requirement.
  generate: {
    icon: Wand2,
    badgeText: "Step 2/2: Generating",
    gradient: "from-violet-100 via-violet-50 to-white",
    progress: 66,
    iconBg: "bg-violet-100 text-violet-600",
    badge: "bg-violet-100 text-violet-700",
  },
};

function ArtifactWorkflowCard({ event }) {
const [visible, setVisible] = useState(event?.state !== "completed");
const [fade, setFade] = useState(false);

useEffect(() => {
if (event?.state === "completed") {
setVisible(false);
} else {
setVisible(true);
setFade(false);
}
}, [event?.state]);

if (!event || !visible) return null;

const { state, requirement } = event;
const meta = STAGE_META[state];

if (!meta) return null;

return (
<div className="flex justify-center items-center w-full min-h-[180px] py-2">
<Card
className={cn(
"w-full shadow-md rounded-xl transition-all duration-500",
"border-0",
fade && "opacity-0 pointer-events-none",
`bg-gradient-to-br ${meta.gradient}`,
)}
style={{
boxShadow:
"0 2px 12px 0 rgba(80, 80, 120, 0.08), 0 1px 3px 0 rgba(80, 80, 120, 0.04)",
}}
>
<CardHeader className="flex flex-row items-center gap-2 pb-1 pt-2 px-3">
<div
className={cn(
"rounded-full p-1 flex items-center justify-center",
meta.iconBg,
)}
>
<meta.icon className="w-5 h-5" />
</div>
<CardTitle className="text-base font-semibold flex items-center gap-2">
<Badge className={cn("ml-1", meta.badge, "text-xs px-2 py-0.5")}>
{meta.badgeText}
</Badge>
</CardTitle>
</CardHeader>
<CardContent className="px-3 py-1">
{state === "plan" && (
<div className="flex flex-col items-center gap-2 py-2">
<Loader2 className="animate-spin text-blue-400 w-6 h-6 mb-1" />
<div className="text-sm text-blue-900 font-medium text-center">
Analyzing your request...
</div>
<Skeleton className="w-1/2 h-3 rounded-full mt-1" />
</div>
)}
{state === "generate" && (
<div className="flex flex-col gap-2 py-2">
<div className="flex items-center gap-1">
<Loader2 className="animate-spin text-violet-400 w-4 h-4" />
<span className="text-violet-900 font-medium text-sm">
Working on the requirement:
</span>
</div>
<div className="rounded-lg border border-violet-200 bg-violet-50 px-2 py-1 max-h-24 overflow-auto text-xs">
{requirement ? (
<Markdown content={requirement} />
) : (
<span className="text-violet-400 italic">
No requirements available yet.
</span>
)}
</div>
</div>
)}
</CardContent>
<div className="px-3 pb-2 pt-1">
<Progress
value={meta.progress}
className={cn(
"h-1 rounded-full bg-gray-200",
state === "plan" && "bg-blue-200",
state === "generate" && "bg-violet-200",
)}
indicatorClassName={cn(
"transition-all duration-500",
state === "plan" && "bg-blue-500",
state === "generate" && "bg-violet-500",
)}
/>
</div>
</Card>
</div>
);
}

export default function Component({ events }) {
const aggregateEvents = () => {
if (!events || events.length === 0) return null;
return events[events.length - 1];
};

const event = aggregateEvents();

return <ArtifactWorkflowCard event={event} />;
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
This is a [LlamaIndex](https://www.llamaindex.ai/) project using [Workflows](https://docs.llamaindex.ai/en/stable/understanding/workflows/).

## Getting Started

First, set up the environment with uv:

> **_Note:_** This step is not needed if you are using the dev-container.

```shell
uv sync
```

Then check the parameters that have been pre-configured in the `.env` file in this directory.
Make sure you have set the `OPENAI_API_KEY` for the LLM.

Then, run the development server:

```shell
uv run fastapi dev
```

Then open [http://localhost:8000](http://localhost:8000) with your browser to start the chat UI.

To start the app optimized for **production**, run:

```
uv run fastapi run
```

## Configure LLM and Embedding Model

You can configure [LLM model](https://docs.llamaindex.ai/en/stable/module_guides/models/llms) and [embedding model](https://docs.llamaindex.ai/en/stable/module_guides/models/embeddings) in [settings.py](app/settings.py).

## Use Case

We have prepared two artifact workflows:

- [Code Workflow](app/code_workflow.py): To generate code and display it in the UI like Vercel's v0.
- [Document Workflow](app/document_workflow.py): Generate and update a document like OpenAI's canvas.

Modify the factory method in [`workflow.py`](app/workflow.py) to decide which artifact workflow to use. Without any changes, the Code Workflow is used by default.

You can start by sending a request in the [chat UI](http://localhost:8000), or you can test the `/api/chat` endpoint with the following curl request:

```
curl --location 'localhost:8000/api/chat' \
--header 'Content-Type: application/json' \
--data '{ "messages": [{ "role": "user", "content": "Create a report comparing the finances of Apple and Tesla" }] }'
```

## Customize the UI

To customize the UI, you can start by modifying the [./components/ui_event.jsx](./components/ui_event.jsx) file.

You can also generate new code for the workflow UI using an LLM by running the following command:

```
uv run generate_ui
```

## Learn More

To learn more about LlamaIndex, take a look at the following resources:

- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex.
- [Workflows Introduction](https://docs.llamaindex.ai/en/stable/understanding/workflows/) - learn about LlamaIndex workflows.
- [LlamaIndex Server](https://pypi.org/project/llama-index-server/)

You can check out [the LlamaIndex GitHub repository](https://github.com/run-llama/llama_index) - your feedback and contributions are welcome!
Loading
Loading