-
-
Notifications
You must be signed in to change notification settings - Fork 710
Implement example of streamText fallback using an AI data stream #2068
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,9 +1,16 @@ | ||
import { anthropic } from "@ai-sdk/anthropic"; | ||
import { openai } from "@ai-sdk/openai"; | ||
import { ai } from "@trigger.dev/sdk/ai"; | ||
import { logger, metadata, schemaTask, tasks, wait } from "@trigger.dev/sdk/v3"; | ||
import { logger, metadata, runs, schemaTask, tasks, wait } from "@trigger.dev/sdk/v3"; | ||
import { sql } from "@vercel/postgres"; | ||
import { streamText, TextStreamPart, tool } from "ai"; | ||
import { | ||
CoreMessage, | ||
createDataStream, | ||
DataStreamWriter, | ||
streamText, | ||
TextStreamPart, | ||
tool, | ||
} from "ai"; | ||
import { nanoid } from "nanoid"; | ||
import { z } from "zod"; | ||
import { sendSQLApprovalMessage } from "../lib/slack"; | ||
|
@@ -267,3 +274,347 @@ export const interruptibleChat = schemaTask({ | |
} | ||
}, | ||
}); | ||
|
||
async function createStreamWithProvider(params: { | ||
model: ReturnType<typeof anthropic> | ReturnType<typeof openai>; | ||
messages: CoreMessage[]; | ||
message_request_id: string; | ||
chat_id: string; | ||
userId?: string; | ||
}) { | ||
const { model, messages, message_request_id, chat_id, userId } = params; | ||
|
||
return new Promise<string>((resolve, reject) => { | ||
const dataStreamResponse = createDataStream({ | ||
execute: async (dataStream) => { | ||
const result = streamText({ | ||
model, | ||
system: "This is the system prompt, please be nice.", | ||
messages, | ||
maxSteps: 20, | ||
toolCallStreaming: true, | ||
onError: (error) => { | ||
logger.error("Error in chatStream task (streamText)", { | ||
error: error instanceof Error ? error.message : "Unknown error", | ||
stack: error instanceof Error ? error.stack : undefined, | ||
provider: model.provider, | ||
}); | ||
reject(error); | ||
}, | ||
onChunk: async (chunk) => { | ||
console.log("Chunk:", chunk); | ||
}, | ||
onFinish: async ({ response, reasoning }) => { | ||
metadata.flush(); | ||
logger.info("AI stream finished", { | ||
chat_id, | ||
userId, | ||
messageCount: response.messages.length, | ||
provider: model.provider, | ||
}); | ||
|
||
if (userId) { | ||
try { | ||
// Pretend to save messages | ||
await new Promise((resolve) => setTimeout(resolve, 1000)); | ||
|
||
logger.info("Successfully saved AI response messages", { | ||
chat_id, | ||
userId, | ||
messageCount: response.messages.length, | ||
message: JSON.stringify(response.messages, null, 2), | ||
provider: model.provider, | ||
}); | ||
} catch (error) { | ||
logger.error("Failed to save AI response messages", { | ||
error: error instanceof Error ? error.message : "Unknown error", | ||
stack: error instanceof Error ? error.stack : undefined, | ||
chat_id, | ||
userId, | ||
provider: model.provider, | ||
}); | ||
} | ||
} | ||
}, | ||
}); | ||
|
||
result.consumeStream(); | ||
|
||
result.mergeIntoDataStream(dataStream, { | ||
sendReasoning: true, | ||
}); | ||
}, | ||
onError: (error) => { | ||
logger.error("Error in chatStream task (createDataStream)", { | ||
error: error instanceof Error ? error.message : "Unknown error", | ||
stack: error instanceof Error ? error.stack : undefined, | ||
provider: model.provider, | ||
}); | ||
reject(error); | ||
return error instanceof Error ? error.message : String(error); | ||
}, | ||
}); | ||
|
||
// Process the stream | ||
(async () => { | ||
try { | ||
const stream = await metadata.stream("dataStream", dataStreamResponse); | ||
let fullResponse = ""; | ||
|
||
for await (const chunk of stream) { | ||
fullResponse += chunk; | ||
} | ||
|
||
// Only resolve if we haven't rejected due to an error | ||
resolve(fullResponse); | ||
} catch (error) { | ||
reject(error); | ||
} | ||
})(); | ||
}); | ||
} | ||
|
||
export const chatStream = schemaTask({ | ||
id: "chat-stream", | ||
description: "Stream data from the AI SDK and use tools", | ||
schema: z.object({ | ||
chat_id: z.string().default("chat"), | ||
messages: z.array(z.unknown()).describe("Array of chat messages"), | ||
message_request_id: z.string().describe("Unique identifier for the message request"), | ||
model: z.string().default("claude-3-7-sonnet-20250219"), | ||
userId: z.string().optional().describe("User ID for authentication"), | ||
existingProject: z.boolean().default(false).describe("Whether the project already exists"), | ||
}), | ||
Comment on lines
+381
to
+387
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 🛠️ Refactor suggestion Strengthen the Zod schema for
Consider validating the expected shape: - messages: z.array(z.unknown()).describe("Array of chat messages"),
+ messages: z.array(
+ z.object({
+ role: z.enum(["system", "assistant", "user", "tool"]),
+ content: z.string(),
+ })
+ ).describe("Array of chat messages"), This prevents malformed inputs from reaching the model and removes the need for 🤖 Prompt for AI Agents
|
||
machine: "large-2x", | ||
run: async ({ chat_id, messages, model, userId, message_request_id }) => { | ||
logger.info("Running chat stream", { | ||
chat_id, | ||
messages, | ||
model, | ||
userId, | ||
message_request_id, | ||
}); | ||
|
||
try { | ||
// First try with Anthropic | ||
return await createStreamWithProvider({ | ||
model: anthropic(model), | ||
messages: messages as CoreMessage[], | ||
message_request_id, | ||
chat_id, | ||
userId, | ||
}); | ||
} catch (error) { | ||
logger.info("Anthropic stream failed, falling back to OpenAI", { | ||
error: error instanceof Error ? error.message : "Unknown error", | ||
stack: error instanceof Error ? error.stack : undefined, | ||
chat_id, | ||
userId, | ||
message_request_id, | ||
}); | ||
|
||
try { | ||
// Fallback to OpenAI | ||
return await createStreamWithProvider({ | ||
model: openai("gpt-4"), | ||
messages: messages as CoreMessage[], | ||
message_request_id, | ||
chat_id, | ||
userId, | ||
}); | ||
} catch (fallbackError) { | ||
logger.error("Both Anthropic and OpenAI streams failed", { | ||
error: fallbackError instanceof Error ? fallbackError.message : "Unknown error", | ||
stack: fallbackError instanceof Error ? fallbackError.stack : undefined, | ||
chat_id, | ||
userId, | ||
message_request_id, | ||
}); | ||
throw fallbackError; | ||
} | ||
} | ||
}, | ||
}); | ||
|
||
export const chatStreamCaller = schemaTask({ | ||
id: "chat-stream-caller", | ||
description: "Call the chat stream", | ||
schema: z.object({ | ||
prompt: z.string().describe("The prompt to chat with the AI"), | ||
}), | ||
run: async ({ prompt }, { ctx }) => { | ||
const result = await chatStream.trigger({ | ||
messages: [ | ||
{ | ||
role: "user", | ||
content: prompt, | ||
}, | ||
], | ||
message_request_id: ctx.run.id, | ||
}); | ||
|
||
const stream = await runs.fetchStream(result.id, "dataStream"); | ||
|
||
for await (const chunk of stream) { | ||
console.log("Chunk:", chunk); | ||
} | ||
|
||
return result; | ||
}, | ||
}); | ||
|
||
export const streamFetcher = schemaTask({ | ||
id: "stream-fetcher", | ||
description: "Fetch a stream", | ||
schema: z.object({ | ||
runId: z.string().describe("The run ID to fetch the stream for"), | ||
streamId: z.string().describe("The stream ID to fetch"), | ||
}), | ||
run: async ({ runId, streamId }) => { | ||
const result = await runs.fetchStream(runId, streamId); | ||
|
||
for await (const chunk of result) { | ||
console.log("Chunk:", chunk); | ||
} | ||
|
||
return result; | ||
}, | ||
}); | ||
|
||
export const chatStream2 = schemaTask({ | ||
id: "chat-stream-2", | ||
description: "Stream data from the AI SDK and use tools", | ||
schema: z.object({ | ||
chat_id: z.string().default("chat"), | ||
messages: z.array(z.unknown()).describe("Array of chat messages"), | ||
message_request_id: z.string().describe("Unique identifier for the message request"), | ||
model: z.string().default("claude-3-7-sonnet-20250219"), | ||
userId: z.string().optional().describe("User ID for authentication"), | ||
existingProject: z.boolean().default(false).describe("Whether the project already exists"), | ||
}), | ||
machine: "large-2x", | ||
run: async ({ chat_id, messages, model, userId, message_request_id }) => { | ||
logger.info("Running chat stream", { | ||
chat_id, | ||
messages, | ||
model, | ||
userId, | ||
message_request_id, | ||
}); | ||
|
||
const dataStreamResponse = createDataStream({ | ||
execute: async (dataStream) => { | ||
streamTextWithModel( | ||
dataStream, | ||
anthropic(model), | ||
messages as CoreMessage[], | ||
chat_id, | ||
openai("gpt-4"), | ||
userId | ||
); | ||
}, | ||
}); | ||
|
||
const stream = await metadata.stream("dataStream", dataStreamResponse); | ||
|
||
for await (const chunk of stream) { | ||
console.log("Chunk:", chunk); | ||
} | ||
}, | ||
}); | ||
|
||
function streamTextWithModel( | ||
dataStream: DataStreamWriter, | ||
model: ReturnType<typeof anthropic> | ReturnType<typeof openai>, | ||
messages: CoreMessage[], | ||
chat_id: string, | ||
fallbackModel?: ReturnType<typeof anthropic> | ReturnType<typeof openai>, | ||
userId?: string | ||
) { | ||
const result = streamText({ | ||
model, | ||
system: "This is the system prompt, please be nice.", | ||
messages, | ||
maxSteps: 20, | ||
toolCallStreaming: true, | ||
onError: (error) => { | ||
logger.error("Error in chatStream task (streamText)", { | ||
error: error instanceof Error ? error.message : "Unknown error", | ||
stack: error instanceof Error ? error.stack : undefined, | ||
provider: model.provider, | ||
}); | ||
|
||
if (fallbackModel) { | ||
streamTextWithModel(dataStream, fallbackModel, messages, chat_id, undefined, userId); | ||
} | ||
}, | ||
onChunk: async (chunk) => { | ||
Comment on lines
+540
to
+551
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 🛠️ Refactor suggestion Fallback recursion can leak streams & duplicate output
Introduce an const controller = new AbortController();
...
onError: (err) => {
controller.abort(); // cancel first stream
if (fallbackModel) {
streamTextWithModel(dataStream, fallbackModel, messages, chat_id, undefined, userId);
}
},
...
const result = streamText({ ..., abortSignal: controller.signal, ... }); This guarantees a clean hand-over to the fallback provider. 🤖 Prompt for AI Agents
|
||
console.log("Chunk:", chunk); | ||
}, | ||
onFinish: async ({ response, reasoning }) => { | ||
metadata.flush(); | ||
logger.info("AI stream finished", { | ||
chat_id, | ||
userId, | ||
messageCount: response.messages.length, | ||
provider: model.provider, | ||
}); | ||
|
||
if (userId) { | ||
try { | ||
// Pretend to save messages | ||
await new Promise((resolve) => setTimeout(resolve, 1000)); | ||
|
||
logger.info("Successfully saved AI response messages", { | ||
chat_id, | ||
userId, | ||
messageCount: response.messages.length, | ||
message: JSON.stringify(response.messages, null, 2), | ||
provider: model.provider, | ||
}); | ||
} catch (error) { | ||
logger.error("Failed to save AI response messages", { | ||
error: error instanceof Error ? error.message : "Unknown error", | ||
stack: error instanceof Error ? error.stack : undefined, | ||
chat_id, | ||
userId, | ||
provider: model.provider, | ||
}); | ||
} | ||
} | ||
}, | ||
}); | ||
|
||
result.consumeStream(); | ||
|
||
result.mergeIntoDataStream(dataStream, { | ||
sendReasoning: true, | ||
}); | ||
} | ||
|
||
export const chatStreamCaller2 = schemaTask({ | ||
id: "chat-stream-caller-2", | ||
description: "Call the chat stream", | ||
schema: z.object({ | ||
prompt: z.string().describe("The prompt to chat with the AI"), | ||
}), | ||
run: async ({ prompt }, { ctx }) => { | ||
const result = await chatStream2.trigger({ | ||
messages: [ | ||
{ | ||
role: "user", | ||
content: prompt, | ||
}, | ||
], | ||
message_request_id: ctx.run.id, | ||
}); | ||
|
||
const stream = await runs.fetchStream(result.id, "dataStream"); | ||
|
||
for await (const chunk of stream) { | ||
console.log("Chunk:", chunk); | ||
} | ||
|
||
return result; | ||
}, | ||
}); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Guard against multiple `resolve`/`reject` executions in `createStreamWithProvider`.
`streamText` and `createDataStream` can emit both `onError` and `onFinish`; additionally, the outer IIFE that reads the stream can complete even after `onError` has fired. Because the promise returned by `createStreamWithProvider` calls `reject()` inside `onError` and later calls `resolve()` when the reader loop ends, the same promise may attempt to settle twice, which is illegal (the second call is silently ignored) and makes debugging harder.
A single-settle guard ensures only the first outcome wins, preventing confusing "handled after rejection" warnings and potential memory leaks.
📝 Committable suggestion
🤖 Prompt for AI Agents