|
| 1 | +import { createWorkflow, getContext, workflowEvent } from "@llama-flow/core"; |
| 2 | +import { withStore } from "@llama-flow/core/middleware/store"; |
| 3 | +import { extractLastArtifact } from "@llamaindex/server"; |
| 4 | +import { |
| 5 | + ChatMemoryBuffer, |
| 6 | + ChatMessage, |
| 7 | + ChatResponseChunk, |
| 8 | + LLM, |
| 9 | + PromptTemplate, |
| 10 | + Settings, |
| 11 | +} from "llamaindex"; |
| 12 | + |
| 13 | +import { z } from "zod"; |
| 14 | + |
// Structured plan produced by the planning LLM call. The model is instructed
// to reply with a fenced ```json block; that block is JSON.parse'd and then
// validated against this schema before any handler acts on it.
export const RequirementSchema = z.object({
  // Which handler runs next: generate/update code ("coding") or just reply
  // to the user ("answering").
  next_step: z.enum(["answering", "coding"]),
  // Target language — the prompt restricts it to "typescript" | "python";
  // null/omitted when unknown or when next_step is "answering".
  language: z.string().nullable().optional(),
  // Suggested artifact file name; null/omitted when not applicable.
  file_name: z.string().nullable().optional(),
  // Detailed description of what the next step must accomplish.
  requirement: z.string(),
});

// Inferred TypeScript type used throughout the workflow handlers.
export type Requirement = z.infer<typeof RequirementSchema>;
| 23 | + |
| 24 | +const planEvent = workflowEvent<{ |
| 25 | + userInput: string; |
| 26 | + context?: string | undefined; |
| 27 | +}>(); |
| 28 | + |
| 29 | +const generateArtifactEvent = workflowEvent<{ |
| 30 | + requirement: Requirement; |
| 31 | +}>(); |
| 32 | + |
| 33 | +const synthesizeAnswerEvent = workflowEvent<{}>(); |
| 34 | + |
| 35 | +const startEvent = workflowEvent<{ |
| 36 | + userInput: string; |
| 37 | + chatHistory: ChatMessage[]; |
| 38 | +}>(); |
| 39 | +const stopEvent = workflowEvent<AsyncIterable<ChatResponseChunk>>(); |
| 40 | + |
| 41 | +const uiEvent = workflowEvent<{ |
| 42 | + type: "ui_event"; |
| 43 | + data: { |
| 44 | + state: "plan" | "generate" | "completed"; |
| 45 | + requirement?: string | undefined; |
| 46 | + }; |
| 47 | +}>(); |
| 48 | + |
| 49 | +const artifactEvent = workflowEvent<{ |
| 50 | + type: "artifact"; |
| 51 | + data: { |
| 52 | + type: "code"; |
| 53 | + created_at: number; |
| 54 | + data: { |
| 55 | + language: string; |
| 56 | + file_name: string; |
| 57 | + code: string; |
| 58 | + }; |
| 59 | + }; |
| 60 | +}>(); |
| 61 | + |
| 62 | +export function getWorkflow(reqBody: any, llm?: LLM) { |
| 63 | + if (!llm) { |
| 64 | + llm = Settings.llm; |
| 65 | + } |
| 66 | + const workflow = withStore(() => { |
| 67 | + return { |
| 68 | + memory: new ChatMemoryBuffer({ |
| 69 | + llm, |
| 70 | + chatHistory: reqBody.chatHistory, |
| 71 | + }), |
| 72 | + lastArtifact: extractLastArtifact(reqBody), |
| 73 | + }; |
| 74 | + }, createWorkflow()); |
| 75 | + const { getStore } = workflow.createContext(); |
| 76 | + |
| 77 | + workflow.handle([startEvent], async ({ data: { userInput } }) => { |
| 78 | + // Prepare chat history |
| 79 | + const { memory } = getStore(); |
| 80 | + // Put user input to the memory |
| 81 | + memory.put({ |
| 82 | + role: "user", |
| 83 | + content: userInput, |
| 84 | + }); |
| 85 | + return planEvent.with({ |
| 86 | + userInput, |
| 87 | + }); |
| 88 | + }); |
| 89 | + |
| 90 | + workflow.handle([planEvent], async ({ data: planData }) => { |
| 91 | + const { sendEvent } = getContext(); |
| 92 | + const { memory } = getStore(); |
| 93 | + sendEvent( |
| 94 | + uiEvent.with({ |
| 95 | + type: "ui_event", |
| 96 | + data: { |
| 97 | + state: "plan", |
| 98 | + }, |
| 99 | + }), |
| 100 | + ); |
| 101 | + const prompt = new PromptTemplate({ |
| 102 | + template: ` |
| 103 | +You are a product analyst responsible for analyzing the user's request and providing the next step for code or document generation. |
| 104 | + You are helping user with their code artifact. To update the code, you need to plan a coding step. |
| 105 | + |
| 106 | + Follow these instructions: |
| 107 | + 1. Carefully analyze the conversation history and the user's request to determine what has been done and what the next step should be. |
| 108 | + 2. The next step must be one of the following two options: |
| 109 | + - "coding": To make the changes to the current code. |
| 110 | + - "answering": If you don't need to update the current code or need clarification from the user. |
| 111 | + Important: Avoid telling the user to update the code themselves, you are the one who will update the code (by planning a coding step). |
| 112 | + 3. If the next step is "coding", you may specify the language ("typescript" or "python") and file_name if known, otherwise set them to null. |
| 113 | + 4. The requirement must be provided clearly what is the user request and what need to be done for the next step in details |
| 114 | + as precise and specific as possible, don't be stingy with in the requirement. |
| 115 | + 5. If the next step is "answering", set language and file_name to null, and the requirement should describe what to answer or explain to the user. |
| 116 | + 6. Be concise; only return the requirements for the next step. |
| 117 | + 7. The requirements must be in the following format: |
| 118 | + \`\`\`json |
| 119 | + { |
| 120 | + "next_step": "answering" | "coding", |
| 121 | + "language": "typescript" | "python" | null, |
| 122 | + "file_name": string | null, |
| 123 | + "requirement": string |
| 124 | + } |
| 125 | + \`\`\` |
| 126 | +
|
| 127 | + ## Example 1: |
| 128 | + User request: Create a calculator app. |
| 129 | + You should return: |
| 130 | + \`\`\`json |
| 131 | + { |
| 132 | + "next_step": "coding", |
| 133 | + "language": "typescript", |
| 134 | + "file_name": "calculator.tsx", |
| 135 | + "requirement": "Generate code for a calculator app that has a simple UI with a display and button layout. The display should show the current input and the result. The buttons should include basic operators, numbers, clear, and equals. The calculation should work correctly." |
| 136 | + } |
| 137 | + \`\`\` |
| 138 | +
|
| 139 | + ## Example 2: |
| 140 | + User request: Explain how the game loop works. |
| 141 | + Context: You have already generated the code for a snake game. |
| 142 | + You should return: |
| 143 | + \`\`\`json |
| 144 | + { |
| 145 | + "next_step": "answering", |
| 146 | + "language": null, |
| 147 | + "file_name": null, |
| 148 | + "requirement": "The user is asking about the game loop. Explain how the game loop works." |
| 149 | + } |
| 150 | + \`\`\` |
| 151 | +
|
| 152 | + {context} |
| 153 | +
|
| 154 | + Now, plan the user's next step for this request: |
| 155 | + {user_msg} |
| 156 | + `, |
| 157 | + templateVars: ["context", "user_msg"], |
| 158 | + }); |
| 159 | + |
| 160 | + const response = await llm.complete({ |
| 161 | + prompt: prompt.format({ |
| 162 | + context: planData.context ?? "", |
| 163 | + user_msg: planData.userInput, |
| 164 | + }), |
| 165 | + }); |
| 166 | + // parse the response to Requirement |
| 167 | + // 1. use regex to find the json block |
| 168 | + const jsonBlock = response.text.match(/```json\s*([\s\S]*?)\s*```/); |
| 169 | + if (!jsonBlock) { |
| 170 | + throw new Error("No JSON block found in the response."); |
| 171 | + } |
| 172 | + const requirement = RequirementSchema.parse(JSON.parse(jsonBlock[1])); |
| 173 | + sendEvent( |
| 174 | + uiEvent.with({ |
| 175 | + type: "ui_event", |
| 176 | + data: { |
| 177 | + state: "generate", |
| 178 | + requirement: requirement.requirement, |
| 179 | + }, |
| 180 | + }), |
| 181 | + ); |
| 182 | + memory.put({ |
| 183 | + role: "assistant", |
| 184 | + content: `The plan for next step: \n${response.text}`, |
| 185 | + }); |
| 186 | + |
| 187 | + if (requirement.next_step === "coding") { |
| 188 | + return generateArtifactEvent.with({ |
| 189 | + requirement, |
| 190 | + }); |
| 191 | + } else { |
| 192 | + return synthesizeAnswerEvent.with({}); |
| 193 | + } |
| 194 | + }); |
| 195 | + |
| 196 | + workflow.handle([generateArtifactEvent], async ({ data: planData }) => { |
| 197 | + const { sendEvent } = getContext(); |
| 198 | + const { memory } = getStore(); |
| 199 | + |
| 200 | + sendEvent( |
| 201 | + uiEvent.with({ |
| 202 | + type: "ui_event", |
| 203 | + data: { |
| 204 | + state: "generate", |
| 205 | + requirement: planData.requirement.requirement, |
| 206 | + }, |
| 207 | + }), |
| 208 | + ); |
| 209 | + |
| 210 | + const previousArtifact = getStore().lastArtifact |
| 211 | + ? JSON.stringify(getStore().lastArtifact) |
| 212 | + : "There is no previous artifact"; |
| 213 | + const requirementText = planData.requirement.requirement; |
| 214 | + |
| 215 | + // TODO: Check why PromptTemplate is not working |
| 216 | + // Error: Replacement index 0 out of range for positional args tuple |
| 217 | + const prompt = ` |
| 218 | + You are a skilled developer who can help user with coding. |
| 219 | + You are given a task to generate or update a code for a given requirement. |
| 220 | +
|
| 221 | + ## Follow these instructions: |
| 222 | + **1. Carefully read the user's requirements.** |
| 223 | + If any details are ambiguous or missing, make reasonable assumptions and clearly reflect those in your output. |
| 224 | + If the previous code is provided: |
| 225 | + + Carefully analyze the code with the request to make the right changes. |
| 226 | + + Avoid making a lot of changes from the previous code if the request is not to write the code from scratch again. |
| 227 | + **2. For code requests:** |
| 228 | + - If the user does not specify a framework or language, default to a React component using the Next.js framework. |
| 229 | + - For Next.js, use Shadcn UI components, Typescript, @types/node, @types/react, @types/react-dom, PostCSS, and TailwindCSS. |
| 230 | + The import pattern should be: |
| 231 | + \`\`\`typescript |
| 232 | + import { ComponentName } from "@/components/ui/component-name" |
| 233 | + import { Markdown } from "@llamaindex/chat-ui" |
| 234 | + import { cn } from "@/lib/utils" |
| 235 | + \`\`\` |
| 236 | + - Ensure the code is idiomatic, production-ready, and includes necessary imports. |
| 237 | + - Only generate code relevant to the user's request—do not add extra boilerplate. |
| 238 | + **3. Don't be verbose on response** |
| 239 | + - No other text or comments only return the code which wrapped by \`\`\`language\`\`\` block. |
| 240 | + - If the user's request is to update the code, only return the updated code. |
| 241 | + **4. Only the following languages are allowed: "typescript", "python".** |
| 242 | + **5. If there is no code to update, return the reason without any code block.** |
| 243 | + |
| 244 | + ## Example: |
| 245 | + \`\`\`typescript |
| 246 | + import React from "react"; |
| 247 | + import { Button } from "@/components/ui/button"; |
| 248 | + import { cn } from "@/lib/utils"; |
| 249 | +
|
| 250 | + export default function MyComponent() { |
| 251 | + return ( |
| 252 | + <div className="flex flex-col items-center justify-center h-screen"> |
| 253 | + <Button>Click me</Button> |
| 254 | + </div> |
| 255 | + ); |
| 256 | + } |
| 257 | + \`\`\` |
| 258 | +
|
| 259 | + The previous code is: |
| 260 | + {previousArtifact} |
| 261 | +
|
| 262 | + Now, i have to generate the code for the following requirement: |
| 263 | + {requirement} |
| 264 | + ` |
| 265 | + .replace("{previousArtifact}", previousArtifact) |
| 266 | + .replace("{requirement}", requirementText); |
| 267 | + |
| 268 | + const response = await llm.complete({ |
| 269 | + prompt, |
| 270 | + }); |
| 271 | + |
| 272 | + // Extract the code from the response |
| 273 | + const codeMatch = response.text.match(/```(\w+)([\s\S]*)```/); |
| 274 | + if (!codeMatch) { |
| 275 | + return synthesizeAnswerEvent.with({}); |
| 276 | + } |
| 277 | + |
| 278 | + const code = codeMatch[2].trim(); |
| 279 | + |
| 280 | + // Put the generated code to the memory |
| 281 | + memory.put({ |
| 282 | + role: "assistant", |
| 283 | + content: `Updated the code: \n${response.text}`, |
| 284 | + }); |
| 285 | + |
| 286 | + // To show the Canvas panel for the artifact |
| 287 | + sendEvent( |
| 288 | + artifactEvent.with({ |
| 289 | + type: "artifact", |
| 290 | + data: { |
| 291 | + type: "code", |
| 292 | + created_at: Date.now(), |
| 293 | + data: { |
| 294 | + language: planData.requirement.language || "", |
| 295 | + file_name: planData.requirement.file_name || "", |
| 296 | + code, |
| 297 | + }, |
| 298 | + }, |
| 299 | + }), |
| 300 | + ); |
| 301 | + |
| 302 | + return synthesizeAnswerEvent.with({}); |
| 303 | + }); |
| 304 | + |
| 305 | + workflow.handle([synthesizeAnswerEvent], async () => { |
| 306 | + const { sendEvent } = getContext(); |
| 307 | + const { memory } = getStore(); |
| 308 | + |
| 309 | + const chatHistory = await memory.getMessages(); |
| 310 | + const messages = [ |
| 311 | + ...chatHistory, |
| 312 | + { |
| 313 | + role: "system" as const, |
| 314 | + content: ` |
| 315 | + You are a helpful assistant who is responsible for explaining the work to the user. |
| 316 | + Based on the conversation history, provide an answer to the user's question. |
| 317 | + The user has access to the code so avoid mentioning the whole code again in your response. |
| 318 | + `, |
| 319 | + }, |
| 320 | + ]; |
| 321 | + |
| 322 | + const responseStream = await llm.chat({ |
| 323 | + messages, |
| 324 | + stream: true, |
| 325 | + }); |
| 326 | + |
| 327 | + sendEvent( |
| 328 | + uiEvent.with({ |
| 329 | + type: "ui_event", |
| 330 | + data: { |
| 331 | + state: "completed", |
| 332 | + }, |
| 333 | + }), |
| 334 | + ); |
| 335 | + |
| 336 | + return stopEvent.with(responseStream); |
| 337 | + }); |
| 338 | + |
| 339 | + return workflow; |
| 340 | +} |
| 341 | + |
| 342 | +// Test the workflow |
| 343 | +const run = async () => { |
| 344 | + const workflow = getWorkflow({ |
| 345 | + userMsg: "Create a calculator app.", |
| 346 | + chatHistory: [], |
| 347 | + }); |
| 348 | + const { stream, sendEvent } = workflow.createContext(); |
| 349 | + sendEvent( |
| 350 | + startEvent.with({ |
| 351 | + userInput: "Create a calculator app.", |
| 352 | + chatHistory: [], |
| 353 | + }), |
| 354 | + ); |
| 355 | + for await (const event of stream) { |
| 356 | + console.log(event.data); |
| 357 | + // If the event is a stop event and contains an async iterable, consume and print its chunks |
| 358 | + if (event.data && typeof event.data[Symbol.asyncIterator] === "function") { |
| 359 | + for await (const chunk of event.data) { |
| 360 | + console.log(chunk.delta); |
| 361 | + } |
| 362 | + } |
| 363 | + } |
| 364 | +}; |
| 365 | + |
| 366 | +run(); |
0 commit comments