
Commit fe90a7e

chore: bump ai v4 (#449)
1 parent 02b2473 commit fe90a7e

File tree

8 files changed: +56 −33 lines changed


.changeset/chilly-eels-retire.md

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+chore: bump ai v4

templates/components/multiagent/typescript/express/chat.controller.ts

Lines changed: 15 additions & 2 deletions
@@ -1,4 +1,4 @@
-import { Message, streamToResponse } from "ai";
+import { LlamaIndexAdapter, Message } from "ai";
 import { Request, Response } from "express";
 import {
   convertToChatHistory,
@@ -28,7 +28,20 @@ export const chat = async (req: Request, res: Response) => {
     const { stream, dataStream } =
       await createStreamFromWorkflowContext(context);
 
-    return streamToResponse(stream, res, {}, dataStream);
+    const streamResponse = LlamaIndexAdapter.toDataStreamResponse(stream, {
+      data: dataStream,
+    });
+    if (streamResponse.body) {
+      const reader = streamResponse.body.getReader();
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) {
+          res.end();
+          return;
+        }
+        res.write(value);
+      }
+    }
   } catch (error) {
     console.error("[LlamaIndex]", error);
     return res.status(500).json({
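Note: the manual read-and-write loop above also reappears in the streaming Express controller further down. A hedged sketch of how it could be factored into a shared helper (the helper name and module layout are illustrative and not part of this commit):

import { Response as ExpressResponse } from "express";

// Illustrative helper (not part of this commit): forward a web Response body
// to an Express response chunk by chunk, ending the response when the stream
// is done.
async function pipeResponseBodyToExpress(
  streamResponse: Response,
  res: ExpressResponse,
): Promise<void> {
  if (!streamResponse.body) {
    res.end();
    return;
  }
  const reader = streamResponse.body.getReader();
  while (true) {
    const { done, value } = await reader.read();
    if (done) {
      res.end();
      return;
    }
    res.write(value);
  }
}

With a helper like this, both Express controllers touched by this commit would reduce to a single awaited call after building streamResponse.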

templates/components/multiagent/typescript/nextjs/route.ts

Lines changed: 4 additions & 4 deletions
@@ -1,5 +1,5 @@
 import { initObservability } from "@/app/observability";
-import { StreamingTextResponse, type Message } from "ai";
+import { LlamaIndexAdapter, type Message } from "ai";
 import { NextRequest, NextResponse } from "next/server";
 import { initSettings } from "./engine/settings";
 import {
@@ -41,9 +41,9 @@ export async function POST(request: NextRequest) {
     });
     const { stream, dataStream } =
       await createStreamFromWorkflowContext(context);
-
-    // Return the two streams in one response
-    return new StreamingTextResponse(stream, {}, dataStream);
+    return LlamaIndexAdapter.toDataStreamResponse(stream, {
+      data: dataStream,
+    });
   } catch (error) {
     console.error("[LlamaIndex]", error);
     return NextResponse.json(
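For context, a self-contained sketch of the v4 call shape used above, with a hand-built stream standing in for the workflow stream. Only LlamaIndexAdapter.toDataStreamResponse, StreamData, the EngineResponse chunk shape, and the data option come from this commit; the sample chunks are illustrative:

import { LlamaIndexAdapter, StreamData } from "ai";
import { ReadableStream } from "stream/web";
import type { EngineResponse } from "llamaindex";

// Hand-built stand-in for the workflow stream (illustrative chunks only).
const dataStream = new StreamData();
const stream = new ReadableStream<EngineResponse>({
  start(controller) {
    controller.enqueue({ delta: "Hello, " } as EngineResponse);
    controller.enqueue({ delta: "world" } as EngineResponse);
    controller.close();
    dataStream.close();
  },
});

// In ai v4, StreamingTextResponse is gone; the adapter builds a standard web
// Response, which a Next.js route handler can return directly.
const response = LlamaIndexAdapter.toDataStreamResponse(stream, {
  data: dataStream,
});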

templates/components/multiagent/typescript/workflow/stream.ts

Lines changed: 10 additions & 18 deletions
@@ -3,31 +3,26 @@ import {
   WorkflowContext,
   WorkflowEvent,
 } from "@llamaindex/workflow";
-import {
-  StreamData,
-  createStreamDataTransformer,
-  trimStartOfStreamHelper,
-} from "ai";
-import { ChatResponseChunk } from "llamaindex";
+import { StreamData } from "ai";
+import { ChatResponseChunk, EngineResponse } from "llamaindex";
+import { ReadableStream } from "stream/web";
 import { AgentRunEvent } from "./type";
 
 export async function createStreamFromWorkflowContext<Input, Output, Context>(
   context: WorkflowContext<Input, Output, Context>,
-): Promise<{ stream: ReadableStream<string>; dataStream: StreamData }> {
-  const trimStartOfStream = trimStartOfStreamHelper();
+): Promise<{ stream: ReadableStream<EngineResponse>; dataStream: StreamData }> {
   const dataStream = new StreamData();
-  const encoder = new TextEncoder();
   let generator: AsyncGenerator<ChatResponseChunk> | undefined;
 
   const closeStreams = (controller: ReadableStreamDefaultController) => {
     controller.close();
     dataStream.close();
   };
 
-  const mainStream = new ReadableStream({
+  const stream = new ReadableStream<EngineResponse>({
     async start(controller) {
       // Kickstart the stream by sending an empty string
-      controller.enqueue(encoder.encode(""));
+      controller.enqueue({ delta: "" } as EngineResponse);
     },
     async pull(controller) {
       while (!generator) {
@@ -46,17 +41,14 @@ export async function createStreamFromWorkflowContext<Input, Output, Context>(
         closeStreams(controller);
         return;
       }
-      const text = trimStartOfStream(chunk.delta ?? "");
-      if (text) {
-        controller.enqueue(encoder.encode(text));
+      const delta = chunk.delta ?? "";
+      if (delta) {
+        controller.enqueue({ delta } as EngineResponse);
       }
     },
   });
 
-  return {
-    stream: mainStream.pipeThrough(createStreamDataTransformer()),
-    dataStream,
-  };
+  return { stream, dataStream };
 }
 
 function handleEvent(
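For illustration, a hedged sketch of consuming the EngineResponse stream produced by the function above directly; the logging consumer itself is not part of the commit, only the { delta } chunk shape is:

import { WorkflowContext } from "@llamaindex/workflow";
import { createStreamFromWorkflowContext } from "./stream";

// Illustrative consumer (not part of the commit): print the text deltas of
// the EngineResponse chunks as they arrive.
async function logWorkflowText<Input, Output, Context>(
  context: WorkflowContext<Input, Output, Context>,
) {
  const { stream } = await createStreamFromWorkflowContext(context);
  const reader = stream.getReader();
  while (true) {
    const { done, value } = await reader.read();
    if (done) return;
    process.stdout.write(value.delta ?? "");
  }
}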

templates/types/streaming/express/package.json

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@
     "lint": "eslint ."
   },
   "dependencies": {
-    "ai": "3.3.42",
+    "ai": "4.0.3",
     "cors": "^2.8.5",
     "dotenv": "^16.3.1",
     "duck-duck-scrape": "^2.2.5",

templates/types/streaming/express/src/controllers/chat.controller.ts

Lines changed: 17 additions & 4 deletions
@@ -1,4 +1,4 @@
-import { LlamaIndexAdapter, Message, StreamData, streamToResponse } from "ai";
+import { LlamaIndexAdapter, Message, StreamData } from "ai";
 import { Request, Response } from "express";
 import { ChatMessage, Settings } from "llamaindex";
 import { createChatEngine } from "./engine/chat";
@@ -43,7 +43,7 @@ export const chat = async (req: Request, res: Response) => {
       });
     });
 
-    const onFinal = (content: string) => {
+    const onCompletion = (content: string) => {
      chatHistory.push({ role: "assistant", content: content });
      generateNextQuestions(chatHistory)
        .then((questions: string[]) => {
@@ -59,8 +59,21 @@ export const chat = async (req: Request, res: Response) => {
        });
    };
 
-    const stream = LlamaIndexAdapter.toDataStream(response, { onFinal });
-    return streamToResponse(stream, res, {}, vercelStreamData);
+    const streamResponse = LlamaIndexAdapter.toDataStreamResponse(response, {
+      data: vercelStreamData,
+      callbacks: { onCompletion },
+    });
+    if (streamResponse.body) {
+      const reader = streamResponse.body.getReader();
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) {
+          res.end();
+          return;
+        }
+        res.write(value);
+      }
+    }
   } catch (error) {
     console.error("[LlamaIndex]", error);
     return res.status(500).json({
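An alternative to the manual read loop (not what this commit does, and only a sketch assuming Node 18+): bridge the web stream into a Node Readable and let pipe handle backpressure and completion:

import { Readable } from "stream";
import type { ReadableStream as NodeReadableStream } from "stream/web";
import { Response as ExpressResponse } from "express";

// Alternative sketch, not part of the commit: convert the web stream returned
// by LlamaIndexAdapter.toDataStreamResponse into a Node Readable and pipe it
// straight into the Express response.
function pipeWithNodeStream(streamResponse: Response, res: ExpressResponse) {
  if (!streamResponse.body) {
    res.end();
    return;
  }
  Readable.fromWeb(streamResponse.body as unknown as NodeReadableStream).pipe(res);
}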

templates/types/streaming/nextjs/app/api/chat/route.ts

Lines changed: 2 additions & 2 deletions
@@ -56,7 +56,7 @@ export async function POST(request: NextRequest) {
       });
     });
 
-    const onFinal = (content: string) => {
+    const onCompletion = (content: string) => {
       chatHistory.push({ role: "assistant", content: content });
       generateNextQuestions(chatHistory)
         .then((questions: string[]) => {
@@ -74,7 +74,7 @@ export async function POST(request: NextRequest) {
 
     return LlamaIndexAdapter.toDataStreamResponse(response, {
       data: vercelStreamData,
-      callbacks: { onFinal },
+      callbacks: { onCompletion },
     });
   } catch (error) {
     console.error("[LlamaIndex]", error);

templates/types/streaming/nextjs/package.json

Lines changed: 2 additions & 2 deletions
@@ -16,8 +16,8 @@
     "@radix-ui/react-select": "^2.1.1",
     "@radix-ui/react-slot": "^1.0.2",
     "@radix-ui/react-tabs": "^1.1.0",
-    "@llamaindex/chat-ui": "0.0.9",
-    "ai": "3.4.33",
+    "@llamaindex/chat-ui": "0.0.11",
+    "ai": "4.0.3",
     "ajv": "^8.12.0",
     "class-variance-authority": "^0.7.0",
     "clsx": "^2.1.1",
