Commit 820b86f

feat: use LlamaIndexAdapter
1 parent 8c1087f commit 820b86f

File tree

3 files changed: 46 additions & 78 deletions

templates/components/llamaindex/typescript/streaming/stream.ts

Lines changed: 0 additions & 57 deletions
This file was deleted.
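The custom stream bridge this file provided is superseded by LlamaIndexAdapter from the ai package. Its actual contents are collapsed in this view, but a hypothetical sketch of the kind of glue such a helper has to hand-roll, assuming the chat engine yields chunks carrying a string delta (all names below are illustrative), looks like this:

// Hypothetical sketch: convert LlamaIndex's async iterator of text deltas
// into a web ReadableStream, the kind of conversion the deleted helper
// performed by hand and that LlamaIndexAdapter now handles internally.
type TextChunk = { delta: string }; // assumed chunk shape

function toReadableStream(
  stream: AsyncIterable<TextChunk>,
): ReadableStream<string> {
  return new ReadableStream<string>({
    async start(controller) {
      // Drain the engine's iterator and forward each delta to the consumer.
      for await (const { delta } of stream) {
        controller.enqueue(delta);
      }
      controller.close();
    },
  });
}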

templates/types/streaming/express/src/controllers/chat.controller.ts

Lines changed: 23 additions & 10 deletions
@@ -1,4 +1,4 @@
-import { JSONValue, Message, StreamData, streamToResponse } from "ai";
+import { JSONValue, LlamaIndexAdapter, Message, StreamData } from "ai";
 import { Request, Response } from "express";
 import { ChatMessage, Settings } from "llamaindex";
 import { createChatEngine } from "./engine/chat";
@@ -10,7 +10,7 @@ import {
   createCallbackManager,
   createStreamTimeout,
 } from "./llamaindex/streaming/events";
-import { LlamaIndexStream } from "./llamaindex/streaming/stream";
+import { generateNextQuestions } from "./llamaindex/streaming/suggestion";
 
 export const chat = async (req: Request, res: Response) => {
   // Init Vercel AI StreamData and timeout
@@ -56,24 +56,37 @@ export const chat = async (req: Request, res: Response) => {
 
     // Setup callbacks
     const callbackManager = createCallbackManager(vercelStreamData);
+    const chatHistory: ChatMessage[] = messages as ChatMessage[];
 
     // Calling LlamaIndex's ChatEngine to get a streamed response
     const response = await Settings.withCallbackManager(callbackManager, () => {
       return chatEngine.chat({
         message: userMessageContent,
-        chatHistory: messages as ChatMessage[],
+        chatHistory,
         stream: true,
       });
     });
 
-    // Return a stream, which can be consumed by the Vercel/AI client
-    const stream = LlamaIndexStream(
-      response,
-      vercelStreamData,
-      messages as ChatMessage[],
-    );
+    const onFinal = (content: string) => {
+      chatHistory.push({ role: "assistant", content: content });
+      generateNextQuestions(chatHistory)
+        .then((questions: string[]) => {
+          if (questions.length > 0) {
+            vercelStreamData.appendMessageAnnotation({
+              type: "suggested_questions",
+              data: questions,
+            });
+          }
+        })
+        .finally(() => {
+          vercelStreamData.close();
+        });
+    };
 
-    return streamToResponse(stream, res, {}, vercelStreamData);
+    return LlamaIndexAdapter.toDataStreamResponse(response, {
+      data: vercelStreamData,
+      callbacks: { onFinal },
+    });
   } catch (error) {
     console.error("[LlamaIndex]", error);
     return res.status(500).json({
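Both controllers now import generateNextQuestions from the template's suggestion module, whose body is outside this diff; only its signature is visible above (ChatMessage[] in, Promise<string[]> out). A hypothetical sketch of such a helper using Settings.llm from llamaindex (prompt wording and parsing are illustrative, not the template's actual code):

import { ChatMessage, Settings } from "llamaindex";

// Hypothetical implementation sketch; the real module ships with the template.
export async function generateNextQuestions(
  chatHistory: ChatMessage[],
): Promise<string[]> {
  const prompt: ChatMessage = {
    role: "user",
    content:
      "Suggest up to 3 short follow-up questions for the conversation above. " +
      "Return one question per line.",
  };
  // Ask the configured LLM for follow-ups based on the full history.
  const response = await Settings.llm.chat({
    messages: [...chatHistory, prompt],
  });
  const text =
    typeof response.message.content === "string" ? response.message.content : "";
  // One question per line, trimmed, capped at three.
  return text
    .split("\n")
    .map((line) => line.trim())
    .filter((line) => line.length > 0)
    .slice(0, 3);
}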

templates/types/streaming/nextjs/app/api/chat/route.ts

Lines changed: 23 additions & 11 deletions
@@ -1,5 +1,5 @@
 import { initObservability } from "@/app/observability";
-import { JSONValue, Message, StreamData, StreamingTextResponse } from "ai";
+import { JSONValue, LlamaIndexAdapter, Message, StreamData } from "ai";
 import { ChatMessage, Settings } from "llamaindex";
 import { NextRequest, NextResponse } from "next/server";
 import { createChatEngine } from "./engine/chat";
@@ -12,7 +12,7 @@ import {
   createCallbackManager,
   createStreamTimeout,
 } from "./llamaindex/streaming/events";
-import { LlamaIndexStream } from "./llamaindex/streaming/stream";
+import { generateNextQuestions } from "./llamaindex/streaming/suggestion";
 
 initObservability();
 initSettings();
@@ -69,25 +69,37 @@ export async function POST(request: NextRequest) {
 
     // Setup callbacks
     const callbackManager = createCallbackManager(vercelStreamData);
+    const chatHistory: ChatMessage[] = messages as ChatMessage[];
 
     // Calling LlamaIndex's ChatEngine to get a streamed response
     const response = await Settings.withCallbackManager(callbackManager, () => {
       return chatEngine.chat({
         message: userMessageContent,
-        chatHistory: messages as ChatMessage[],
+        chatHistory,
         stream: true,
       });
     });
 
-    // Transform LlamaIndex stream to Vercel/AI format
-    const stream = LlamaIndexStream(
-      response,
-      vercelStreamData,
-      messages as ChatMessage[],
-    );
+    const onFinal = (content: string) => {
+      chatHistory.push({ role: "assistant", content: content });
+      generateNextQuestions(chatHistory)
+        .then((questions: string[]) => {
+          if (questions.length > 0) {
+            vercelStreamData.appendMessageAnnotation({
+              type: "suggested_questions",
+              data: questions,
+            });
+          }
+        })
+        .finally(() => {
+          vercelStreamData.close();
+        });
+    };
 
-    // Return a StreamingTextResponse, which can be consumed by the Vercel/AI client
-    return new StreamingTextResponse(stream, {}, vercelStreamData);
+    return LlamaIndexAdapter.toDataStreamResponse(response, {
+      data: vercelStreamData,
+      callbacks: { onFinal },
+    });
   } catch (error) {
     console.error("[LlamaIndex]", error);
     return NextResponse.json(
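On the client, annotations appended through vercelStreamData.appendMessageAnnotation arrive attached to the assistant message. A hypothetical helper for reading the suggested_questions payload back out, assuming the ai package's Message type exposes an optional annotations array (only the annotation shape comes from this commit; the helper and guard names are illustrative):

import type { JSONValue, Message } from "ai";

// Shape appended by the route handlers above.
type SuggestedQuestionsAnnotation = {
  type: "suggested_questions";
  data: string[];
};

function isSuggestedQuestions(
  annotation: JSONValue,
): annotation is SuggestedQuestionsAnnotation {
  return (
    typeof annotation === "object" &&
    annotation !== null &&
    !Array.isArray(annotation) &&
    (annotation as { type?: unknown }).type === "suggested_questions"
  );
}

// Hypothetical helper: pull the suggestions off an assistant message,
// e.g. one returned by the useChat hook from ai/react.
export function getSuggestedQuestions(message: Message): string[] {
  const annotation = (message.annotations ?? []).find(isSuggestedQuestions);
  return annotation?.data ?? [];
}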
