Skip to content

Commit

Permalink
Working sanitized VCR for tests, but missing some assertions
Browse files Browse the repository at this point in the history
  • Loading branch information
aabmass committed Dec 5, 2024
1 parent 11a6dc2 commit 0dd1f25
Show file tree
Hide file tree
Showing 4 changed files with 169 additions and 53 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
interactions:
- request:
body: "{\n \"contents\": [\n {\n \"role\": \"user\",\n \"parts\":
[\n {\n \"fileData\": {\n \"mimeType\": \"image/jpeg\",\n
\ \"fileUri\": \"gs://generativeai-downloads/images/scones.jpg\"\n
\ }\n },\n {\n \"text\": \"what is shown in this
image?\"\n }\n ]\n }\n ]\n}"
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '317'
Content-Type:
- application/json
User-Agent:
- python-requests/2.32.3
method: POST
uri: https://us-central1-aiplatform.googleapis.com/v1beta1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-pro-vision:generateContent?%24alt=json%3Benum-encoding%3Dint
response:
body:
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"role\":
\"model\",\n \"parts\": [\n {\n \"text\": \" The
image shows a table with a cup of coffee, a bowl of blueberries, and several
blueberry scones. There are also pink flowers on the table.\"\n }\n
\ ]\n },\n \"finishReason\": 1,\n \"safetyRatings\":
[\n {\n \"category\": 1,\n \"probability\": 1,\n
\ \"probabilityScore\": 0.024780273,\n \"severity\": 1,\n
\ \"severityScore\": 0.072753906\n },\n {\n \"category\":
2,\n \"probability\": 1,\n \"probabilityScore\": 0.025512695,\n
\ \"severity\": 1,\n \"severityScore\": 0.06738281\n },\n
\ {\n \"category\": 3,\n \"probability\": 1,\n \"probabilityScore\":
0.040283203,\n \"severity\": 1,\n \"severityScore\": 0.03515625\n
\ },\n {\n \"category\": 4,\n \"probability\":
1,\n \"probabilityScore\": 0.07910156,\n \"severity\": 1,\n
\ \"severityScore\": 0.083984375\n }\n ],\n \"avgLogprobs\":
-0.068832365671793613\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
265,\n \"candidatesTokenCount\": 30,\n \"totalTokenCount\": 295\n },\n
\ \"modelVersion\": \"gemini-pro-vision\"\n}\n"
headers:
Cache-Control:
- private
Content-Encoding:
- gzip
Content-Type:
- application/json; charset=UTF-8
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
status:
code: 200
message: OK
version: 1
Original file line number Diff line number Diff line change
@@ -1,6 +1,11 @@
"""Unit tests configuration module."""

from os import replace
import re
from typing import Any, Mapping, MutableMapping

import pytest
from google.auth.credentials import AnonymousCredentials

from opentelemetry import trace
from opentelemetry.instrumentation.vertexai_v2 import VertexAIInstrumentor
Expand All @@ -12,6 +17,11 @@

pytest_plugins = []

import vertexai
from vcr import VCR
from vcr.record_mode import RecordMode
from vcr.request import Request


@pytest.fixture(scope="session")
def exporter():
Expand All @@ -32,6 +42,55 @@ def clear_exporter(exporter):
exporter.clear()


@pytest.fixture(autouse=True)
def vertexai_init(vcr: VCR) -> None:
    """Re-initialize the vertexai SDK before every test.

    There is no clean way to reset vertexai's global config per test:
    submodules hold direct references to the global instance
    (https://github.com/googleapis/python-aiplatform/blob/v1.74.0/google/cloud/aiplatform/initializer.py#L687),
    so state would leak between tests unless init() is called each time.
    """
    replaying = vcr.record_mode is RecordMode.NONE
    # During replay (CI), skip auth entirely with anonymous credentials.
    # This avoids reading application default credentials from disk or the
    # metadata server and doing an oauth token exchange — none of which is
    # the interesting part of our instrumentation to test.
    if replaying:
        vertexai.init(credentials=AnonymousCredentials())
    else:
        vertexai.init()


@pytest.fixture(scope="module")
def vcr_config():
    """Return the VCR configuration used to sanitize recorded cassettes.

    The configuration strips identifying/ephemeral headers from both
    requests and responses, replaces the real GCP project id in request
    URIs with ``fake-project``, and skips recording oauth token exchanges.

    Note: a stray early ``return {"filter_headers": ["authorization"]}``
    previously made everything below it unreachable, so no sanitization
    was ever applied; that dead-code return has been removed.
    """
    # Headers matching any of these patterns are dropped from cassettes.
    # Matching is case-insensitive, so "authorization" and "Authorization"
    # are both filtered.
    filter_header_regexes = [
        r"X-.*",
        "Server",
        "Date",
        "Expires",
        "Authorization",
    ]

    def filter_headers(headers: Mapping[str, str]) -> Mapping[str, str]:
        """Return *headers* without any entry matching the filter regexes."""
        return {
            key: val
            for key, val in headers.items()
            if not any(
                re.match(filter_re, key, re.IGNORECASE)
                for filter_re in filter_header_regexes
            )
        }

    def before_record_cb(request: Request):
        """Sanitize an outgoing request before it is written to a cassette."""
        request.headers = filter_headers(request.headers)
        # Scrub the real project id so cassettes are environment-independent.
        request.uri = re.sub(
            r"/projects/[^/]+/", "/projects/fake-project/", request.uri
        )
        return request

    def before_response_cb(response: MutableMapping[str, Any]):
        """Sanitize a recorded response before it is written to a cassette."""
        response["headers"] = filter_headers(response["headers"])
        return response

    return {
        "before_record_request": before_record_cb,
        "before_record_response": before_response_cb,
        # The oauth token exchange is auth plumbing, not instrumentation
        # behavior — don't record it at all.
        "ignore_hosts": ["oauth2.googleapis.com"],
    }

This file was deleted.

Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
import pytest
from vertexai.preview.generative_models import GenerativeModel, Part

# from opentelemetry.semconv_ai import SpanAttributes


@pytest.mark.vcr
def test_vertexai_generate_content(exporter):
    """Generate content from a multimodal prompt and check the emitted span.

    Replays a sanitized VCR cassette of a gemini-pro-vision call and
    asserts that exactly one completion span is produced with the
    expected name.
    """
    multimodal_model = GenerativeModel("gemini-pro-vision")
    # `response` is kept for the attribute assertions below (currently
    # disabled pending the SpanAttributes import).
    response = multimodal_model.generate_content(
        [
            Part.from_uri(
                "gs://generativeai-downloads/images/scones.jpg",
                mime_type="image/jpeg",
            ),
            "what is shown in this image?",
        ]
    )

    spans = exporter.get_finished_spans()
    # Assert the count BEFORE indexing so an empty list fails as a clean
    # assertion rather than an IndexError. The name-list comparison below
    # already pins both the count (exactly one) and the span name.
    assert [span.name for span in spans] == [
        "text_completion gemini-pro-vision"
    ]
    vertexai_span = spans[0]
    # TODO: re-enable once SpanAttributes is importable here.
    # assert (
    #     "what is shown in this image?"
    #     in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"]
    # )
    # assert (
    #     vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL]
    #     == "gemini-pro-vision"
    # )
    # assert (
    #     vertexai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
    #     == response._raw_response.usage_metadata.total_token_count
    # )
    # assert (
    #     vertexai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS]
    #     == response._raw_response.usage_metadata.prompt_token_count
    # )
    # assert (
    #     vertexai_span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]
    #     == response._raw_response.usage_metadata.candidates_token_count
    # )
    # assert (
    #     vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"]
    #     == response.text
    # )

0 comments on commit 0dd1f25

Please sign in to comment.