Skip to content

Commit

Permalink
feat: Support Gen AI attributes for Amazon Nova foundational model (#133)
Browse files Browse the repository at this point in the history

*Description of changes:*
Added GenAI inference parameters auto instrumentation support for Amazon
Nova.

<img width="824" alt="image"
src="https://github.com/user-attachments/assets/78c172dc-7d3f-48bf-9795-e3369d8849fd"
/>

Contract test:
<img width="855" alt="image"
src="https://github.com/user-attachments/assets/8f4db5fc-1ab6-44e6-9d1a-20c2320f48e3"
/>

Unit test:
<img width="750" alt="image"
src="https://github.com/user-attachments/assets/f610fb86-85db-4727-971a-e8f929d7cc9a"
/>

By submitting this pull request, I confirm that you can use, modify,
copy, and redistribute this contribution, under the terms of your
choice.
  • Loading branch information
liustve authored Dec 17, 2024
1 parent a35f89c commit 5dd389e
Show file tree
Hide file tree
Showing 4 changed files with 146 additions and 1 deletion.
Original file line number Diff line number Diff line change
Expand Up @@ -232,6 +232,16 @@ export class BedrockRuntimeServiceExtension implements ServiceExtension {
spanAttributes[AwsSpanProcessingUtil.GEN_AI_REQUEST_MAX_TOKENS] =
requestBody.textGenerationConfig.maxTokenCount;
}
} else if (modelId.includes('amazon.nova')) {
if (requestBody.inferenceConfig?.temperature !== undefined) {
spanAttributes[AwsSpanProcessingUtil.GEN_AI_REQUEST_TEMPERATURE] = requestBody.inferenceConfig.temperature;
}
if (requestBody.inferenceConfig?.top_p !== undefined) {
spanAttributes[AwsSpanProcessingUtil.GEN_AI_REQUEST_TOP_P] = requestBody.inferenceConfig.top_p;
}
if (requestBody.inferenceConfig?.max_new_tokens !== undefined) {
spanAttributes[AwsSpanProcessingUtil.GEN_AI_REQUEST_MAX_TOKENS] = requestBody.inferenceConfig.max_new_tokens;
}
} else if (modelId.includes('anthropic.claude')) {
if (requestBody.max_tokens !== undefined) {
spanAttributes[AwsSpanProcessingUtil.GEN_AI_REQUEST_MAX_TOKENS] = requestBody.max_tokens;
Expand Down Expand Up @@ -335,6 +345,18 @@ export class BedrockRuntimeServiceExtension implements ServiceExtension {
responseBody.results[0].completionReason,
]);
}
} else if (currentModelId.includes('amazon.nova')) {
if (responseBody.usage !== undefined) {
if (responseBody.usage.inputTokens !== undefined) {
span.setAttribute(AwsSpanProcessingUtil.GEN_AI_USAGE_INPUT_TOKENS, responseBody.usage.inputTokens);
}
if (responseBody.usage.outputTokens !== undefined) {
span.setAttribute(AwsSpanProcessingUtil.GEN_AI_USAGE_OUTPUT_TOKENS, responseBody.usage.outputTokens);
}
}
if (responseBody.stopReason !== undefined) {
span.setAttribute(AwsSpanProcessingUtil.GEN_AI_RESPONSE_FINISH_REASONS, [responseBody.stopReason]);
}
} else if (currentModelId.includes('anthropic.claude')) {
if (responseBody.usage?.input_tokens !== undefined) {
span.setAttribute(AwsSpanProcessingUtil.GEN_AI_USAGE_INPUT_TOKENS, responseBody.usage.input_tokens);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -400,6 +400,60 @@ describe('BedrockRuntime', () => {
expect(invokeModelSpan.kind).toBe(SpanKind.CLIENT);
});

it('Add Amazon Nova model attributes to span', async () => {
  // Fixtures mirror the Amazon Nova InvokeModel wire shape: inference
  // parameters live under `inferenceConfig` (max_new_tokens/temperature/top_p),
  // usage counters are camelCased (inputTokens/outputTokens), and the finish
  // reason is reported as `stopReason`.
  const modelId: string = 'amazon.nova-pro-v1:0';
  const prompt: string = 'Campfire story';
  const mockRequestBody: string = JSON.stringify({
    inputText: prompt,
    inferenceConfig: {
      max_new_tokens: 500,
      temperature: 0.9,
      top_p: 0.7,
    },
  });
  const mockResponseBody: any = {
    output: { message: { content: [{ text: '' }], role: 'assistant' } },
    stopReason: 'max_tokens',
    usage: { inputTokens: 432, outputTokens: 681 },

    request: {
      commandInput: {
        modelId: modelId,
      },
    },
  };

  nock(`https://bedrock-runtime.${region}.amazonaws.com`)
    .post(`/model/${encodeURIComponent(modelId)}/invoke`)
    .reply(200, mockResponseBody);

  // The mocked call may reject; the instrumentation still records span
  // attributes, so any error is deliberately ignored. (The previous
  // `(err: any) => {}` bound an unused, explicitly-`any` parameter.)
  await bedrock
    .invokeModel({
      modelId: modelId,
      body: mockRequestBody,
    })
    .catch(() => {});

  // Exactly one InvokeModel span should have been produced.
  const testSpans: ReadableSpan[] = getTestSpans();
  const invokeModelSpans: ReadableSpan[] = testSpans.filter((s: ReadableSpan) => {
    return s.name === 'BedrockRuntime.InvokeModel';
  });
  expect(invokeModelSpans.length).toBe(1);
  const invokeModelSpan = invokeModelSpans[0];
  // A plain model invocation must not be tagged with agent/KB/data-source ids.
  expect(invokeModelSpan.attributes[AWS_ATTRIBUTE_KEYS.AWS_BEDROCK_AGENT_ID]).toBeUndefined();
  expect(invokeModelSpan.attributes[AWS_ATTRIBUTE_KEYS.AWS_BEDROCK_KNOWLEDGE_BASE_ID]).toBeUndefined();
  expect(invokeModelSpan.attributes[AWS_ATTRIBUTE_KEYS.AWS_BEDROCK_DATA_SOURCE_ID]).toBeUndefined();
  // Request-side GenAI attributes extracted from `inferenceConfig`.
  expect(invokeModelSpan.attributes[AwsSpanProcessingUtil.GEN_AI_SYSTEM]).toBe('aws.bedrock');
  expect(invokeModelSpan.attributes[AwsSpanProcessingUtil.GEN_AI_REQUEST_MODEL]).toBe(modelId);
  expect(invokeModelSpan.attributes[AwsSpanProcessingUtil.GEN_AI_REQUEST_MAX_TOKENS]).toBe(500);
  expect(invokeModelSpan.attributes[AwsSpanProcessingUtil.GEN_AI_REQUEST_TEMPERATURE]).toBe(0.9);
  expect(invokeModelSpan.attributes[AwsSpanProcessingUtil.GEN_AI_REQUEST_TOP_P]).toBe(0.7);
  // Response-side GenAI attributes extracted from `usage` and `stopReason`.
  expect(invokeModelSpan.attributes[AwsSpanProcessingUtil.GEN_AI_USAGE_INPUT_TOKENS]).toBe(432);
  expect(invokeModelSpan.attributes[AwsSpanProcessingUtil.GEN_AI_USAGE_OUTPUT_TOKENS]).toBe(681);
  expect(invokeModelSpan.attributes[AwsSpanProcessingUtil.GEN_AI_RESPONSE_FINISH_REASONS]).toEqual(['max_tokens']);
  expect(invokeModelSpan.kind).toBe(SpanKind.CLIENT);
});

it('Add Anthropic Claude model attributes to span', async () => {
const modelId: string = 'anthropic.claude-3-5-sonnet-20240620-v1:0';
const prompt: string = 'Complete this text. It was the best of times it was the worst...';
Expand Down
24 changes: 23 additions & 1 deletion contract-tests/images/applications/aws-sdk/server.js
Original file line number Diff line number Diff line change
Expand Up @@ -631,7 +631,29 @@ async function handleBedrockRequest(req, res, path) {
},
],
}

}

if (path.includes("amazon.nova")) {

modelId = "amazon.nova-pro-v1:0"

request_body = {
messages: [{role: "user", content: [{text: "A camping trip"}]}],
inferenceConfig: {
max_new_tokens: 800,
temperature: 0.9,
top_p: 0.7,
},
}

response_body = {
output: {message: {content: [{text: ""}], role: "assistant"}},
stopReason: "max_tokens",
usage: {
inputTokens: 432,
outputTokens: 681
},
}
}

if (path.includes('anthropic.claude')) {
Expand Down
47 changes: 47 additions & 0 deletions contract-tests/tests/test/amazon/aws-sdk/aws_sdk_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -466,6 +466,34 @@ def test_bedrock_runtime_invoke_model_amazon_titan(self):

span_name="BedrockRuntime.InvokeModel"
)

def test_bedrock_runtime_invoke_model_amazon_nova(self):
    """Contract test: GenAI span attributes for an Amazon Nova InvokeModel call.

    Expects request parameters sourced from Nova's ``inferenceConfig``
    (max_new_tokens/temperature/top_p) and response attributes from its
    ``usage`` counters and ``stopReason`` field, as stubbed by the
    contract-test server.
    """
    # do_test_requests performs all assertions itself; its return value was
    # previously bound to an unused `result` local (sibling tests such as the
    # Titan one call it bare), so the dead assignment has been dropped.
    self.do_test_requests(
        "bedrock/invokemodel/invoke-model/amazon.nova-pro-v1:0",
        "GET",
        200,
        0,
        0,
        local_operation="GET /bedrock",
        rpc_service="BedrockRuntime",
        remote_service="AWS::BedrockRuntime",
        remote_operation="InvokeModel",
        remote_resource_type="AWS::Bedrock::Model",
        remote_resource_identifier='amazon.nova-pro-v1:0',
        request_specific_attributes={
            _GEN_AI_REQUEST_MODEL: 'amazon.nova-pro-v1:0',
            _GEN_AI_REQUEST_MAX_TOKENS: 800,
            _GEN_AI_REQUEST_TEMPERATURE: 0.9,
            _GEN_AI_REQUEST_TOP_P: 0.7
        },
        response_specific_attributes={
            _GEN_AI_RESPONSE_FINISH_REASONS: ['max_tokens'],
            _GEN_AI_USAGE_INPUT_TOKENS: 432,
            _GEN_AI_USAGE_OUTPUT_TOKENS: 681
        },

        span_name="BedrockRuntime.InvokeModel"
    )

def test_bedrock_runtime_invoke_model_anthropic_claude(self):
self.do_test_requests(
Expand Down Expand Up @@ -1105,6 +1133,25 @@ def _assert_semantic_conventions_attributes(

for key, value in response_specific_attributes.items():
self._assert_attribute(attributes_dict, key, value)

def _assert_attribute(self, attributes_dict: Dict[str, AnyValue], key, value) -> None:
    """Route an expected value to the type-appropriate assertion helper.

    Dispatch order matters and mirrors the original guard chain:
    str first, then int, then float (note: bools are int subclasses and
    therefore take the int path). Anything else is asserted as an
    array-valued attribute.
    """
    for expected_type, assert_fn in (
        (str, self._assert_str_attribute),
        (int, self._assert_int_attribute),
        (float, self._assert_float_attribute),
    ):
        if isinstance(value, expected_type):
            assert_fn(attributes_dict, key, value)
            return
    self._assert_array_value_ddb_table_name(attributes_dict, key, value)

@override
def _assert_str_attribute(self, attributes_dict: Dict[str, AnyValue], key: str, expected_value: str):
    # Overridden so that expected string values are treated as regular
    # expressions matched against the ENTIRE attribute value (re.fullmatch),
    # letting tests express patterns rather than only exact strings.
    self.assertIn(key, attributes_dict)
    actual_value: AnyValue = attributes_dict[key]
    self.assertIsNotNone(actual_value)
    # NOTE(review): unescaped regex metacharacters in plain expected values
    # (e.g. the '.' and ':' in model ids like 'amazon.nova-pro-v1:0') are
    # interpreted by the regex engine — '.' matches any character. Presumably
    # acceptable for these tests; confirm no false positives are possible.
    pattern = re.compile(expected_value)
    match = pattern.fullmatch(actual_value.string_value)
    self.assertTrue(match is not None, f"Actual: {actual_value.string_value} does not match Expected: {expected_value}")

@override
def _assert_metric_attributes(
Expand Down

0 comments on commit 5dd389e

Please sign in to comment.