Skip to content

Commit 7333e18

Browse files
authored
feat(core): Instrument LangChain AI (#17955)
This PR adds automatic instrumentation for LangChain chat clients in the Node SDK, covering the most commonly used providers mentioned in https://python.langchain.com/docs/integrations/chat/. **What's added?** TLDR; a [LangChain Callback Handler ](https://js.langchain.com/docs/concepts/callbacks/) that: - Creates a stateful callback handler that tracks LangChain lifecycle events - Handles LLM/Chat Model events (start, end, error, streaming) - Handles Chain events (start, end, error) - Handles Tool events (start, end, error) - Extracts and normalizes request/response data **How it works?** 1. **Module Patching**: When a LangChain provider package is loaded (e.g., `@langchain/anthropic`), the instrumentation: - Finds the chat model class (e.g., `ChatAnthropic`) - Wraps the `invoke`, `stream`, and `batch` methods on the prototype - Uses a Proxy to intercept method calls 2. **Callback Injection**: When a LangChain method is called: - The wrapper intercepts the call - Augments the `options.callbacks` array with Sentry's callback handler - Calls the original method with the augmented callbacks The integration is **enabled by default** when you initialize Sentry in Node.js: ```javascript import * as Sentry from '@sentry/node'; import { ChatAnthropic } from '@langchain/anthropic'; Sentry.init({ dsn: 'your-dsn', tracesSampleRate: 1.0, sendDefaultPii: true, // Enable to record inputs/outputs }); // LangChain calls are automatically instrumented const model = new ChatAnthropic({ model: 'claude-3-5-sonnet-20241022', }); await model.invoke('What is the capital of France?'); ``` You can configure what data is recorded: ```javascript Sentry.init({ integrations: [ Sentry.langChainIntegration({ recordInputs: true, // Record prompts/messages recordOutputs: true, // Record responses }) ], }); ``` Note: We need to disable integrations for the AI providers that LangChain uses to avoid duplicate spans; this will be handled in a follow-up PR.
1 parent cefcdbc commit 7333e18

File tree

22 files changed

+1851
-18
lines changed

22 files changed

+1851
-18
lines changed

.size-limit.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -240,7 +240,7 @@ module.exports = [
240240
import: createImport('init'),
241241
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
242242
gzip: true,
243-
limit: '156 KB',
243+
limit: '157 KB',
244244
},
245245
{
246246
name: '@sentry/node - without tracing',

dev-packages/node-integration-tests/package.json

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@
2727
"@aws-sdk/client-s3": "^3.552.0",
2828
"@google/genai": "^1.20.0",
2929
"@growthbook/growthbook": "^1.6.1",
30+
"@langchain/anthropic": "^0.3.10",
31+
"@langchain/core": "^0.3.28",
3032
"@hapi/hapi": "^21.3.10",
3133
"@hono/node-server": "^1.19.4",
3234
"@nestjs/common": "^11",
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Sentry setup for the LangChain integration test, with PII recording enabled
// so prompt/response contents are captured on spans.
Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  // Filter out Anthropic integration to avoid duplicate spans with LangChain
  integrations: integrations => integrations.filter(integration => integration.name !== 'Anthropic_AI'),
  beforeSendTransaction: event => {
    // Filter out mock express server transactions.
    // `transaction` can be undefined on some events; optional chaining avoids
    // a TypeError inside the hook (which would drop the event entirely).
    if (event.transaction?.includes('/v1/messages')) {
      return null;
    }
    return event;
  },
});
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Sentry setup for the LangChain integration test, with PII recording disabled
// so prompt/response contents are NOT captured on spans.
Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  // Filter out Anthropic integration to avoid duplicate spans with LangChain
  integrations: integrations => integrations.filter(integration => integration.name !== 'Anthropic_AI'),
  beforeSendTransaction: event => {
    // Filter out mock express server transactions.
    // `transaction` can be undefined on some events; optional chaining avoids
    // a TypeError inside the hook (which would drop the event entirely).
    if (event.transaction?.includes('/v1/messages')) {
      return null;
    }
    return event;
  },
});
Lines changed: 90 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,90 @@
import { ChatAnthropic } from '@langchain/anthropic';
import * as Sentry from '@sentry/node';
import express from 'express';

/**
 * Starts an Express server that mimics the Anthropic Messages API and always
 * answers with a tool-use payload, so the LangChain instrumentation can be
 * exercised without any real network traffic.
 *
 * @returns {Promise<import('http').Server>} the listening server (random port)
 */
function startMockAnthropicServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    const model = req.body.model;

    // Simulate tool call response
    res.json({
      id: 'msg_tool_test_123',
      type: 'message',
      role: 'assistant',
      model: model,
      content: [
        {
          type: 'text',
          text: 'Let me check the weather for you.',
        },
        {
          type: 'tool_use',
          id: 'toolu_01A09q90qw90lq917835lq9',
          name: 'get_weather',
          input: { location: 'San Francisco, CA' },
        },
        {
          type: 'text',
          text: 'The weather looks great!',
        },
      ],
      stop_reason: 'tool_use',
      stop_sequence: null,
      usage: {
        input_tokens: 20,
        output_tokens: 30,
      },
    });
  });

  return new Promise(resolve => {
    const server = app.listen(0, () => {
      resolve(server);
    });
  });
}

/**
 * Invokes a ChatAnthropic model (pointed at the mock server) with a tool
 * definition, inside a root Sentry span, then flushes events.
 */
async function run() {
  const server = await startMockAnthropicServer();
  const baseUrl = `http://localhost:${server.address().port}`;

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      const model = new ChatAnthropic({
        model: 'claude-3-5-sonnet-20241022',
        temperature: 0.7,
        maxTokens: 150,
        apiKey: 'mock-api-key',
        clientOptions: {
          baseURL: baseUrl,
        },
      });

      await model.invoke('What is the weather in San Francisco?', {
        tools: [
          {
            name: 'get_weather',
            description: 'Get the current weather in a given location',
            input_schema: {
              type: 'object',
              properties: {
                location: {
                  type: 'string',
                  description: 'The city and state, e.g. San Francisco, CA',
                },
              },
              required: ['location'],
            },
          },
        ],
      });
    });

    await Sentry.flush(2000);
  } finally {
    // Always close the mock server — even when the invocation throws —
    // so the test process does not hang on an open listener.
    server.close();
  }
}

run().catch(err => {
  // Surface unexpected failures instead of leaving an unhandled rejection.
  // eslint-disable-next-line no-console
  console.error(err);
  process.exitCode = 1;
});
Lines changed: 110 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,110 @@
import { ChatAnthropic } from '@langchain/anthropic';
import * as Sentry from '@sentry/node';
import express from 'express';

/**
 * Starts an Express server that mimics the Anthropic Messages API.
 * The special model name 'error-model' yields a 400 error response; any other
 * model yields a basic text completion.
 *
 * @returns {Promise<import('http').Server>} the listening server (random port)
 */
function startMockAnthropicServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    const model = req.body.model;

    if (model === 'error-model') {
      res
        .status(400)
        .set('request-id', 'mock-request-123')
        .json({
          type: 'error',
          error: {
            type: 'invalid_request_error',
            message: 'Model not found',
          },
        });
      return;
    }

    // Simulate basic response
    res.json({
      id: 'msg_test123',
      type: 'message',
      role: 'assistant',
      content: [
        {
          type: 'text',
          text: 'Mock response from Anthropic!',
        },
      ],
      model: model,
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: {
        input_tokens: 10,
        output_tokens: 15,
      },
    });
  });

  return new Promise(resolve => {
    const server = app.listen(0, () => {
      resolve(server);
    });
  });
}

/**
 * Exercises the LangChain instrumentation against the mock server:
 * two successful invocations with different model configs, plus one
 * deliberately failing invocation for error-span coverage.
 */
async function run() {
  const server = await startMockAnthropicServer();
  const baseUrl = `http://localhost:${server.address().port}`;

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      // Test 1: Basic chat model invocation
      const model1 = new ChatAnthropic({
        model: 'claude-3-5-sonnet-20241022',
        temperature: 0.7,
        maxTokens: 100,
        apiKey: 'mock-api-key',
        clientOptions: {
          baseURL: baseUrl,
        },
      });

      await model1.invoke('Tell me a joke');

      // Test 2: Chat with different model
      const model2 = new ChatAnthropic({
        model: 'claude-3-opus-20240229',
        temperature: 0.9,
        topP: 0.95,
        maxTokens: 200,
        apiKey: 'mock-api-key',
        clientOptions: {
          baseURL: baseUrl,
        },
      });

      await model2.invoke([
        { role: 'system', content: 'You are a helpful assistant' },
        { role: 'user', content: 'What is the capital of France?' },
      ]);

      // Test 3: Error handling
      const errorModel = new ChatAnthropic({
        model: 'error-model',
        apiKey: 'mock-api-key',
        clientOptions: {
          baseURL: baseUrl,
        },
      });

      try {
        await errorModel.invoke('This will fail');
      } catch {
        // Expected error
      }
    });

    await Sentry.flush(2000);
  } finally {
    // Always close the mock server — even when an invocation throws —
    // so the test process does not hang on an open listener.
    server.close();
  }
}

run().catch(err => {
  // Surface unexpected failures instead of leaving an unhandled rejection.
  // eslint-disable-next-line no-console
  console.error(err);
  process.exitCode = 1;
});

0 commit comments

Comments
 (0)