feat (rsc): add streamUI onFinish callback #1920

Merged · 13 commits · Jun 19, 2024
5 changes: 5 additions & 0 deletions .changeset/violet-horses-accept.md
@@ -0,0 +1,5 @@
---
'ai': patch
---

feat (rsc): add streamUI onFinish callback
71 changes: 71 additions & 0 deletions content/docs/07-reference/ai-sdk-rsc/01-stream-ui.mdx
@@ -333,6 +333,77 @@ A helper function to create a streamable UI from LLM providers. This function is
},
],
},
{
name: 'onFinish',
type: '(result: OnFinishResult) => void',
isOptional: true,
description:
'Callback that is called when the LLM response and all requested tool executions (for tools that have a `generate` function) are finished.',
properties: [
{
type: 'OnFinishResult',
parameters: [
{
name: 'usage',
type: 'TokenUsage',
description: 'The token usage of the generated text.',
properties: [
{
type: 'TokenUsage',
parameters: [
{
name: 'promptTokens',
type: 'number',
description: 'The total number of tokens in the prompt.',
},
{
name: 'completionTokens',
type: 'number',
description:
'The total number of tokens in the completion.',
},
{
name: 'totalTokens',
type: 'number',
description: 'The total number of tokens used (promptTokens + completionTokens).',
},
],
},
],
},
{
name: 'value',
type: 'ReactNode',
description: 'The final UI node that was generated.',
},
{
name: 'warnings',
type: 'Warning[] | undefined',
description:
'Warnings from the model provider (e.g. unsupported settings).',
},
{
name: 'rawResponse',
type: 'RawResponse',
description: 'Optional raw response data.',
properties: [
{
type: 'RawResponse',
parameters: [
{
name: 'headers',
optional: true,
type: 'Record<string, string>',
description: 'Response headers.',
},
],
},
],
},
],
},
],
},
]}
/>
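
For illustration, here is a minimal sketch of an `onFinish` handler that reads the fields documented above. The `recordUsage` helper is a hypothetical stand-in for your own persistence logic, not part of the SDK:

```tsx
'use server';

import { openai } from '@ai-sdk/openai';
import { streamUI } from 'ai/rsc';
import { recordUsage } from '@/lib/usage'; // hypothetical billing helper

export async function generateJoke() {
  const result = await streamUI({
    model: openai('gpt-3.5-turbo'),
    prompt: 'Tell me a joke.',
    text: ({ content }) => <div>{content}</div>,
    onFinish: ({ usage, value, warnings }) => {
      // usage contains promptTokens, completionTokens, and totalTokens
      recordUsage(usage); // hypothetical: persist token counts
      if (warnings?.length) {
        console.warn('Provider warnings:', warnings);
      }
    },
  });

  return result.value;
}
```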

156 changes: 156 additions & 0 deletions content/examples/01-next-app/05-interface/03-token-usage.mdx
@@ -0,0 +1,156 @@
---
title: Recording Token Usage
description: Examples of how to record token usage when streaming user interfaces.
---

# Recording Token Usage

When you're streaming user interfaces with [`streamUI`](/docs/reference/ai-sdk-rsc/stream-ui),
you may want to record the token usage for billing purposes.

## `onFinish` Callback

You can use the `onFinish` callback to record token usage.
It is called when the LLM response and all tool executions have finished.

```tsx filename='app/page.tsx'
'use client';

import { useState } from 'react';
import { ClientMessage } from './actions';
import { useActions, useUIState } from 'ai/rsc';
import { generateId } from 'ai';

// Force the page to be dynamic and allow streaming responses up to 30 seconds
export const dynamic = 'force-dynamic';
export const maxDuration = 30;

export default function Home() {
const [input, setInput] = useState<string>('');
const [conversation, setConversation] = useUIState();
const { continueConversation } = useActions();

return (
<div>
<div>
{conversation.map((message: ClientMessage) => (
<div key={message.id}>
{message.role}: {message.display}
</div>
))}
</div>

<div>
<input
type="text"
value={input}
onChange={event => {
setInput(event.target.value);
}}
/>
<button
onClick={async () => {
setConversation((currentConversation: ClientMessage[]) => [
...currentConversation,
{ id: generateId(), role: 'user', display: input },
]);

const message = await continueConversation(input);

setConversation((currentConversation: ClientMessage[]) => [
...currentConversation,
message,
]);
}}
>
Send Message
</button>
</div>
</div>
);
}
```

## Server

```tsx filename='app/actions.tsx' highlight={"57-63"}
'use server';

import { createAI, getMutableAIState, streamUI } from 'ai/rsc';
import { openai } from '@ai-sdk/openai';
import { ReactNode } from 'react';
import { z } from 'zod';
import { generateId } from 'ai';

export interface ServerMessage {
role: 'user' | 'assistant';
content: string;
}

export interface ClientMessage {
id: string;
role: 'user' | 'assistant';
display: ReactNode;
}

export async function continueConversation(
input: string,
): Promise<ClientMessage> {
'use server';

const history = getMutableAIState();

const result = await streamUI({
model: openai('gpt-3.5-turbo'),
messages: [...history.get(), { role: 'user', content: input }],
text: ({ content, done }) => {
if (done) {
history.done((messages: ServerMessage[]) => [
...messages,
{ role: 'assistant', content },
]);
}

return <div>{content}</div>;
},
tools: {
deploy: {
description: 'Deploy repository to vercel',
parameters: z.object({
repositoryName: z
.string()
.describe('The name of the repository, example: vercel/ai-chatbot'),
}),
generate: async function* ({ repositoryName }) {
yield <div>Cloning repository {repositoryName}...</div>; // [!code highlight:5]
await new Promise(resolve => setTimeout(resolve, 3000));
yield <div>Building repository {repositoryName}...</div>;
await new Promise(resolve => setTimeout(resolve, 2000));
return <div>{repositoryName} deployed!</div>;
},
},
},
onFinish: ({ usage }) => {
const { promptTokens, completionTokens, totalTokens } = usage;
// your own logic, e.g. for saving the chat history or recording usage
console.log('Prompt tokens:', promptTokens);
console.log('Completion tokens:', completionTokens);
console.log('Total tokens:', totalTokens);
},
});

return {
id: generateId(),
role: 'assistant',
display: result.value,
};
}

export const AI = createAI<ServerMessage[], ClientMessage[]>({
actions: {
continueConversation,
},
initialAIState: [],
initialUIState: [],
});
```
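
To actually record usage for billing rather than just logging it, you could persist the counts inside `onFinish`. The following sketch is a drop-in replacement for the `onFinish` handler above; `saveUsage` is a hypothetical helper backed by your own database, not part of the SDK:

```tsx
import { saveUsage } from '@/lib/usage'; // hypothetical persistence helper

// drop-in replacement for the onFinish handler in the streamUI call above
onFinish: async ({ usage }) => {
  const { promptTokens, completionTokens, totalTokens } = usage;
  // persist the token counts; saveUsage is assumed, not part of the SDK
  await saveUsage({ promptTokens, completionTokens, totalTokens });
},
```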
119 changes: 119 additions & 0 deletions examples/next-openai/app/stream-ui/actions.tsx
@@ -0,0 +1,119 @@
import { openai } from '@ai-sdk/openai';
import { CoreMessage, generateId } from 'ai';
import {
createAI,
createStreamableValue,
getMutableAIState as $getMutableAIState,
streamUI,
} from 'ai/rsc';
import { Message, BotMessage } from './message';
import { z } from 'zod';

type AIProviderNoActions = ReturnType<typeof createAI<AIState, UIState>>;
// typed wrapper *without* actions defined to avoid circular dependencies
const getMutableAIState = $getMutableAIState<AIProviderNoActions>;

// mock function to fetch weather data
const fetchWeatherData = async (location: string) => {
await new Promise(resolve => setTimeout(resolve, 1000));
return { temperature: '72°F' };
};

export async function submitUserMessage(content: string) {
'use server';

const aiState = getMutableAIState();

aiState.update({
...aiState.get(),
messages: [
...aiState.get().messages,
{ id: generateId(), role: 'user', content },
],
});

let textStream: undefined | ReturnType<typeof createStreamableValue<string>>;
let textNode: React.ReactNode;

const result = await streamUI({
model: openai('gpt-4-turbo'),
initial: <Message role="assistant">Working on that...</Message>,
system: 'You are a weather assistant.',
messages: aiState
.get()
.messages.map(({ role, content }) => ({ role, content } as CoreMessage)),

text: ({ content, done, delta }) => {
if (!textStream) {
textStream = createStreamableValue('');
textNode = <BotMessage textStream={textStream.value} />;
}

if (done) {
textStream.done();
aiState.update({
...aiState.get(),
messages: [
...aiState.get().messages,
{ id: generateId(), role: 'assistant', content },
],
});
} else {
textStream.append(delta);
}

return textNode;
},
tools: {
get_current_weather: {
description: 'Get the current weather',
parameters: z.object({
location: z.string(),
}),
generate: async function* ({ location }) {
yield (
<Message role="assistant">Loading weather for {location}</Message>
);
const { temperature } = await fetchWeatherData(location);
return (
<Message role="assistant">
<span>
The temperature in {location} is{' '}
<span className="font-semibold">{temperature}</span>
</span>
</Message>
);
},
},
},
onFinish: event => {
// your own logic, e.g. for saving the chat history or recording usage
console.log(`[onFinish]: ${JSON.stringify(event, null, 2)}`);
},
});

return {
id: generateId(),
display: result.value,
};
}

export type ClientMessage = CoreMessage & {
id: string;
};

export type AIState = {
chatId: string;
messages: ClientMessage[];
};

export type UIState = {
id: string;
display: React.ReactNode;
}[];

export const AI = createAI({
actions: { submitUserMessage },
initialUIState: [] as UIState,
initialAIState: { chatId: generateId(), messages: [] } as AIState,
});
5 changes: 5 additions & 0 deletions examples/next-openai/app/stream-ui/layout.tsx
@@ -0,0 +1,5 @@
import { AI } from './actions';

export default function Layout({ children }: { children: React.ReactNode }) {
return <AI>{children}</AI>;
}
25 changes: 25 additions & 0 deletions examples/next-openai/app/stream-ui/message.tsx
@@ -0,0 +1,25 @@
'use client';

import { StreamableValue, useStreamableValue } from 'ai/rsc';

export function BotMessage({ textStream }: { textStream: StreamableValue }) {
const [text] = useStreamableValue(textStream);
return <Message role="assistant">{text}</Message>;
}

export function Message({
role,
children,
}: {
role: string;
children: React.ReactNode;
}) {
return (
<div className="flex flex-col gap-1 border-b p-2">
<div className="flex flex-row justify-between">
<div className="text-sm text-zinc-500">{role}</div>
</div>
{children}
</div>
);
}