From d7dead56535f7a789ec278f9ddd4488725c2ddfd Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Mon, 22 Sep 2025 08:43:45 +0200 Subject: [PATCH 1/2] feat(cloudflare,vercel-edge): Add support for Google GenAI instrumentation --- .../suites/tracing/google-genai/index.ts | 61 +++++++++ .../suites/tracing/google-genai/mocks.ts | 128 ++++++++++++++++++ .../suites/tracing/google-genai/test.ts | 75 ++++++++++ .../tracing/google-genai/wrangler.jsonc | 6 + packages/cloudflare/src/index.ts | 1 + packages/core/src/index.ts | 1 + packages/vercel-edge/src/index.ts | 1 + 7 files changed, 273 insertions(+) create mode 100644 dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/index.ts create mode 100644 dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/mocks.ts create mode 100644 dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts create mode 100644 dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/wrangler.jsonc diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/index.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/index.ts new file mode 100644 index 000000000000..4759ec9a107b --- /dev/null +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/index.ts @@ -0,0 +1,61 @@ +import * as Sentry from '@sentry/cloudflare'; +import type { GoogleGenAIClient } from '@sentry/core'; +import { MockGoogleGenAI } from './mocks'; + +interface Env { + SENTRY_DSN: string; +} + +const mockClient = new MockGoogleGenAI({ + apiKey: 'mock-api-key', +}); + +const client: GoogleGenAIClient = Sentry.instrumentGoogleGenAIClient(mockClient); + +export default Sentry.withSentry( + (env: Env) => ({ + dsn: env.SENTRY_DSN, + tracesSampleRate: 1.0, + }), + { + async fetch(_request, _env, _ctx) { + // Test 1: chats.create and sendMessage flow + const chat = client.chats.create({ + model: 'gemini-1.5-pro', + config: { + temperature: 0.8, + topP: 0.9, + 
maxOutputTokens: 150, + }, + history: [ + { + role: 'user', + parts: [{ text: 'Hello, how are you?' }], + }, + ], + }); + + const chatResponse = await chat.sendMessage({ + message: 'Tell me a joke', + }); + + // Test 2: models.generateContent + const modelResponse = await client.models.generateContent({ + model: 'gemini-1.5-flash', + config: { + temperature: 0.7, + topP: 0.9, + maxOutputTokens: 100, + }, + contents: [ + { + role: 'user', + parts: [{ text: 'What is the capital of France?' }], + }, + ], + }); + + return new Response(JSON.stringify({ chatResponse, modelResponse })); + }, + }, +); diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/mocks.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/mocks.ts new file mode 100644 index 000000000000..22ccba15bc36 --- /dev/null +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/mocks.ts @@ -0,0 +1,128 @@ +import type { GoogleGenAIChat, GoogleGenAIClient, GoogleGenAIResponse } from '@sentry/core'; + +export class MockGoogleGenAI implements GoogleGenAIClient { + public models: { + generateContent: (...args: unknown[]) => Promise; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + generateContentStream: (...args: unknown[]) => Promise>; + }; + public chats: { + create: (...args: unknown[]) => GoogleGenAIChat; + }; + public apiKey: string; + + public constructor(config: { apiKey: string }) { + this.apiKey = config.apiKey; + + // models.generateContent functionality + this.models = { + generateContent: async (...args: unknown[]) => { + const params = args[0] as { model: string; contents?: unknown }; + // Simulate processing time + await new Promise(resolve => setTimeout(resolve, 10)); + + if (params.model === 'error-model') { + const error = new Error('Model not found'); + (error as unknown as { status: number }).status = 404; + (error as unknown as { headers: Record }).headers = { 'x-request-id': 'mock-request-123' }; + throw 
error; + } + + return { + candidates: [ + { + content: { + parts: [ + { + text: 'Hello from Google GenAI mock!', + }, + ], + role: 'model', + }, + finishReason: 'stop', + index: 0, + }, + ], + usageMetadata: { + promptTokenCount: 8, + candidatesTokenCount: 12, + totalTokenCount: 20, + }, + }; + }, + generateContentStream: async () => { + // Return a promise that resolves to an async generator + return (async function* (): AsyncGenerator { + yield { + candidates: [ + { + content: { + parts: [{ text: 'Streaming response' }], + role: 'model', + }, + finishReason: 'stop', + index: 0, + }, + ], + }; + })(); + }, + }; + + // chats.create implementation + this.chats = { + create: (...args: unknown[]) => { + const params = args[0] as { model: string; config?: Record }; + const model = params.model; + + return { + modelVersion: model, + sendMessage: async (..._messageArgs: unknown[]) => { + // Simulate processing time + await new Promise(resolve => setTimeout(resolve, 10)); + + return { + candidates: [ + { + content: { + parts: [ + { + text: 'This is a joke from the chat!', + }, + ], + role: 'model', + }, + finishReason: 'stop', + index: 0, + }, + ], + usageMetadata: { + promptTokenCount: 8, + candidatesTokenCount: 12, + totalTokenCount: 20, + }, + modelVersion: model, // Include model version in response + }; + }, + sendMessageStream: async () => { + // Return a promise that resolves to an async generator + return (async function* (): AsyncGenerator { + yield { + candidates: [ + { + content: { + parts: [{ text: 'Streaming chat response' }], + role: 'model', + }, + finishReason: 'stop', + index: 0, + }, + ], + }; + })(); + }, + }; + }, + }; + } +} diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts new file mode 100644 index 000000000000..3c36e832a17a --- /dev/null +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts @@ -0,0 
+1,75 @@ +import { expect, it } from 'vitest'; +import { createRunner } from '../../../runner'; + +// These tests are not exhaustive because the instrumentation is +// already tested in the node integration tests and we merely +// want to test that the instrumentation does not break in our +// cloudflare SDK. + +it('traces Google GenAI chat creation and message sending', async () => { + const runner = createRunner(__dirname) + .ignore('event') + .expect(envelope => { + const transactionEvent = envelope[1]?.[0]?.[1] as any; + + expect(transactionEvent.transaction).toBe('GET /'); + expect(transactionEvent.spans).toEqual( + expect.arrayContaining([ + // First span - chats.create + expect.objectContaining({ + data: expect.objectContaining({ + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', + 'sentry.origin': 'auto.ai.google_genai', + 'gen_ai.system': 'google_genai', + 'gen_ai.request.model': 'gemini-1.5-pro', + 'gen_ai.request.temperature': 0.8, + 'gen_ai.request.top_p': 0.9, + 'gen_ai.request.max_tokens': 150, + }), + description: 'chat gemini-1.5-pro create', + op: 'gen_ai.chat', + origin: 'auto.ai.google_genai', + }), + // Second span - chat.sendMessage + expect.objectContaining({ + data: expect.objectContaining({ + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', + 'sentry.origin': 'auto.ai.google_genai', + 'gen_ai.system': 'google_genai', + 'gen_ai.request.model': 'gemini-1.5-pro', + 'gen_ai.usage.input_tokens': 8, + 'gen_ai.usage.output_tokens': 12, + 'gen_ai.usage.total_tokens': 20, + }), + description: 'chat gemini-1.5-pro', + op: 'gen_ai.chat', + origin: 'auto.ai.google_genai', + }), + // Third span - models.generateContent + expect.objectContaining({ + data: expect.objectContaining({ + 'gen_ai.operation.name': 'models', + 'sentry.op': 'gen_ai.models', + 'sentry.origin': 'auto.ai.google_genai', + 'gen_ai.system': 'google_genai', + 'gen_ai.request.model': 'gemini-1.5-flash', + 'gen_ai.request.temperature': 0.7, + 
'gen_ai.request.top_p': 0.9, + 'gen_ai.request.max_tokens': 100, + 'gen_ai.usage.input_tokens': 8, + 'gen_ai.usage.output_tokens': 12, + 'gen_ai.usage.total_tokens': 20, + }), + description: 'models gemini-1.5-flash', + op: 'gen_ai.models', + origin: 'auto.ai.google_genai', + }), + ]), + ); + }) + .start(); + await runner.makeRequest('get', '/'); + await runner.completed(); +}); diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/wrangler.jsonc b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/wrangler.jsonc new file mode 100644 index 000000000000..d6be01281f0c --- /dev/null +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/wrangler.jsonc @@ -0,0 +1,6 @@ +{ + "name": "worker-name", + "compatibility_date": "2025-06-17", + "main": "index.ts", + "compatibility_flags": ["nodejs_compat"], +} diff --git a/packages/cloudflare/src/index.ts b/packages/cloudflare/src/index.ts index 5a35a994b641..9c7738e1522b 100644 --- a/packages/cloudflare/src/index.ts +++ b/packages/cloudflare/src/index.ts @@ -70,6 +70,7 @@ export { // eslint-disable-next-line deprecation/deprecation inboundFiltersIntegration, instrumentOpenAiClient, + instrumentGoogleGenAIClient, instrumentAnthropicAiClient, eventFiltersIntegration, linkedErrorsIntegration, diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index 631181ccacc8..8a5566948f6e 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -134,6 +134,7 @@ export { instrumentAnthropicAiClient } from './utils/anthropic-ai'; export { ANTHROPIC_AI_INTEGRATION_NAME } from './utils/anthropic-ai/constants'; export { instrumentGoogleGenAIClient } from './utils/google-genai'; export { GOOGLE_GENAI_INTEGRATION_NAME } from './utils/google-genai/constants'; +export type { GoogleGenAIResponse } from './utils/google-genai/types'; export type { OpenAiClient, OpenAiOptions, InstrumentedMethod } from './utils/openai/types'; export type { 
AnthropicAiClient, diff --git a/packages/vercel-edge/src/index.ts b/packages/vercel-edge/src/index.ts index 032e39f1b203..d8362ff31c98 100644 --- a/packages/vercel-edge/src/index.ts +++ b/packages/vercel-edge/src/index.ts @@ -70,6 +70,7 @@ export { // eslint-disable-next-line deprecation/deprecation inboundFiltersIntegration, instrumentOpenAiClient, + instrumentGoogleGenAIClient, instrumentAnthropicAiClient, eventFiltersIntegration, linkedErrorsIntegration, From 097020e7bcc1623a6526153caaafa8cc34cc7b85 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Mon, 22 Sep 2025 08:58:02 +0200 Subject: [PATCH 2/2] fix comment --- packages/core/src/utils/google-genai/index.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/core/src/utils/google-genai/index.ts b/packages/core/src/utils/google-genai/index.ts index cdad221ac60f..13079cb34b49 100644 --- a/packages/core/src/utils/google-genai/index.ts +++ b/packages/core/src/utils/google-genai/index.ts @@ -292,10 +292,10 @@ function createDeepProxy(target: T, currentPath = '', options: * * @example * ```typescript - * import { GoogleGenerativeAI } from '@google/genai'; + * import { GoogleGenAI } from '@google/genai'; * import { instrumentGoogleGenAIClient } from '@sentry/core'; * - * const genAI = new GoogleGenerativeAI({ apiKey: process.env.GOOGLE_GENAI_API_KEY }); + * const genAI = new GoogleGenAI({ apiKey: process.env.GOOGLE_GENAI_API_KEY }); * const instrumentedClient = instrumentGoogleGenAIClient(genAI); * * // Now both chats.create and sendMessage will be instrumented