Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 4 additions & 3 deletions packages/sdk/server-ai/examples/vercel-ai/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,10 @@
"author": "LaunchDarkly",
"license": "Apache-2.0",
"dependencies": {
"@ai-sdk/openai": "2.0.30",
"@launchdarkly/node-server-sdk": "9.7.1",
"@launchdarkly/server-sdk-ai": "0.12.3",
"@ai-sdk/openai": "^2.0.30",
"@launchdarkly/node-server-sdk": "^9.7.1",
"@launchdarkly/server-sdk-ai": "^0.12.3",
"@launchdarkly/server-sdk-ai-vercel": "^0.1.2",
"ai": "5.0.0",
"zod": "^3.23.8"
},
Expand Down
45 changes: 37 additions & 8 deletions packages/sdk/server-ai/examples/vercel-ai/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import { generateText, streamText } from 'ai';

import { init, type LDClient, type LDContext } from '@launchdarkly/node-server-sdk';
import { initAi } from '@launchdarkly/server-sdk-ai';
import { VercelProvider } from '@launchdarkly/server-sdk-ai-vercel';

// Environment variables
const sdkKey = process.env.LAUNCHDARKLY_SDK_KEY ?? '';
Expand Down Expand Up @@ -41,31 +42,59 @@ async function main() {
// Get AI configuration from LaunchDarkly
const aiConfig = await aiClient.config(aiConfigKey, context, { model: { name: 'gpt-4' } });

if (!aiConfig.enabled) {
if (!aiConfig.enabled || !aiConfig.tracker) {
console.log('*** AI configuration is not enabled');
process.exit(0);
}

console.log('Using model:', aiConfig.model?.name);

// Example of using generateText (non-streaming)
console.log('\n*** Generating text:');
try {
const userMessage = {
role: 'user' as const,
content: 'What can you help me with?',
};

const result = await aiConfig.tracker.trackVercelAISDKGenerateTextMetrics(() =>
generateText(aiConfig.toVercelAISDK(openai, { nonInterpolatedMessages: [userMessage] })),
// Example of using generateText (non-streaming)
console.log('\n*** Generating text:');

// Convert config to Vercel AI SDK format
const vercelConfig = VercelProvider.toVercelAISDK(aiConfig, openai, {
nonInterpolatedMessages: [userMessage],
});

// Call the model and track metrics for the ai config
const result = await aiConfig.tracker.trackMetricsOf(
VercelProvider.getAIMetricsFromResponse,
() => generateText(vercelConfig),
);

console.log('Response:', result.text);
} catch (err) {
console.error('Error:', err);
}

// Example 2: Using streamText with trackStreamMetricsOf (streaming)
try {
const userMessage = {
role: 'user' as const,
content: 'Count from 1 to 5.',
};

process.stdout.write('Streaming Response: ');
const streamResult = aiConfig.tracker.trackVercelAISDKStreamTextMetrics(() =>
streamText(aiConfig.toVercelAISDK(openai, { nonInterpolatedMessages: [userMessage] })),
// Example of using streamText (streaming)
console.log('\n*** Streaming text:');
// Convert config to Vercel AI SDK format
const vercelConfig = VercelProvider.toVercelAISDK(aiConfig, openai, {
nonInterpolatedMessages: [userMessage],
});

// Stream is returned immediately (synchronously), metrics tracked in background
const streamResult = aiConfig.tracker.trackStreamMetricsOf(
() => streamText(vercelConfig),
VercelProvider.getAIMetricsFromStream,
);

// Consume the stream immediately - no await needed before this!
// eslint-disable-next-line no-restricted-syntax
for await (const textPart of streamResult.textStream) {
process.stdout.write(textPart);
Expand Down