
Commit

Record duration
alexmojaki committed May 3, 2024
1 parent 45774a0 commit dd4c929
Showing 2 changed files with 42 additions and 35 deletions.
12 changes: 7 additions & 5 deletions logfire/_internal/integrations/openai.py
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-# import time
 from contextlib import contextmanager
 from typing import (
     TYPE_CHECKING,
@@ -24,6 +23,8 @@
 from openai.types.images_response import ImagesResponse
 from opentelemetry import context
 
+from ..constants import ONE_SECOND_IN_NANOSECONDS
+
 if TYPE_CHECKING:
     from openai._models import FinalRequestOptions
     from openai._streaming import AsyncStream, Stream
@@ -81,7 +82,7 @@ def uninstrument_context():
     return uninstrument_context()
 
 
-STEAMING_MSG_TEMPLATE: LiteralString = 'streaming response from {request_data[model]!r}'
+STEAMING_MSG_TEMPLATE: LiteralString = 'streaming response from {request_data[model]!r} took {duration:.2f}s'
 
 
 def instrument_openai_sync(logfire_openai: Logfire, openai_client: openai.OpenAI, suppress_otel: bool) -> None:
@@ -274,14 +275,15 @@ def record_chunk(chunk: Any) -> Any:
         if chunk_content is not None:
             content.append(chunk_content)
 
-    # start = time.monotonic()
+    timer = logfire_openai._config.ns_timestamp_generator
+    start = timer()
     try:
         yield record_chunk
     finally:
-        # duration = time.monotonic() - start
+        duration = (timer() - start) / ONE_SECOND_IN_NANOSECONDS
         logfire_openai.info(
             STEAMING_MSG_TEMPLATE,
             **span_data,
-            # duration=duration,
+            duration=duration,
             response_data={'combined_chunk_content': ''.join(content), 'chunk_count': len(content)},
         )
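
The change above swaps the commented-out time.monotonic() timing for logfire's configured nanosecond timestamp generator and passes the elapsed time, in seconds, as the new duration field of the log. A minimal, self-contained sketch of the same pattern follows; it is an illustration, not logfire's actual implementation: record_streaming, log, and timer are stand-in names, and time.monotonic_ns stands in for the configured ns_timestamp_generator.

import time
from contextlib import contextmanager

ONE_SECOND_IN_NANOSECONDS = 1_000_000_000


@contextmanager
def record_streaming(log, timer=time.monotonic_ns):
    """Collect streamed chunks and log how long the stream took to consume."""
    content: list[str] = []

    def record_chunk(chunk: str) -> None:
        content.append(chunk)

    start = timer()  # nanosecond reading taken before the stream is consumed
    try:
        yield record_chunk
    finally:
        # Convert the elapsed nanoseconds to seconds for the log message.
        duration = (timer() - start) / ONE_SECOND_IN_NANOSECONDS
        log(f'streaming response took {duration:.2f}s ({len(content)} chunks)')


# Example usage: wrap the loop that consumes the streamed chunks.
with record_streaming(print) as record_chunk:
    for chunk in ['The answer ', 'is ', 'Nine']:
        record_chunk(chunk)

Measuring with a monotonic, nanosecond-resolution clock avoids jumps from wall-clock adjustments, and dividing by ONE_SECOND_IN_NANOSECONDS keeps the reported value in seconds, matching the {duration:.2f}s placeholder added to the message template.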
65 changes: 35 additions & 30 deletions tests/otel_integrations/test_openai.py
@@ -392,23 +392,24 @@ def test_sync_chat_empty_response_chunk(instrumented_client: openai.Client, expo
},
},
{
'name': 'streaming response from {request_data[model]!r}',
'name': 'streaming response from {request_data[model]!r} took {duration:.2f}s',
'context': {'trace_id': 2, 'span_id': 3, 'is_remote': False},
'parent': None,
'start_time': 3000000000,
'end_time': 3000000000,
'start_time': 5000000000,
'end_time': 5000000000,
'attributes': {
'code.filepath': 'openai.py',
'logfire.level_num': 9,
'code.function': 'record_streaming',
'code.lineno': 123,
'request_data': '{"messages":[{"role":"system","content":"empty response chunk"}],"model":"gpt-4","stream":true}',
'async': False,
'logfire.msg_template': 'streaming response from {request_data[model]!r}',
'logfire.msg': "streaming response from 'gpt-4'",
'logfire.msg_template': 'streaming response from {request_data[model]!r} took {duration:.2f}s',
'logfire.msg': "streaming response from 'gpt-4' took 1.00s",
'logfire.span_type': 'log',
'duration': 1.0,
'response_data': '{"combined_chunk_content":"","chunk_count":0}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"response_data":{"type":"object"}}}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"duration":{},"response_data":{"type":"object"}}}',
},
},
]
@@ -445,23 +446,24 @@ def test_sync_chat_empty_response_choices(instrumented_client: openai.Client, ex
},
},
{
'name': 'streaming response from {request_data[model]!r}',
'name': 'streaming response from {request_data[model]!r} took {duration:.2f}s',
'context': {'trace_id': 2, 'span_id': 3, 'is_remote': False},
'parent': None,
'start_time': 3000000000,
'end_time': 3000000000,
'start_time': 5000000000,
'end_time': 5000000000,
'attributes': {
'code.filepath': 'openai.py',
'logfire.level_num': 9,
'code.function': 'record_streaming',
'code.lineno': 123,
'request_data': '{"messages":[{"role":"system","content":"empty choices in response chunk"}],"model":"gpt-4","stream":true}',
'async': False,
'logfire.msg_template': 'streaming response from {request_data[model]!r}',
'logfire.msg': "streaming response from 'gpt-4'",
'logfire.msg_template': 'streaming response from {request_data[model]!r} took {duration:.2f}s',
'logfire.msg': "streaming response from 'gpt-4' took 1.00s",
'logfire.span_type': 'log',
'duration': 1.0,
'response_data': '{"combined_chunk_content":"","chunk_count":0}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"response_data":{"type":"object"}}}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"duration":{},"response_data":{"type":"object"}}}',
},
},
]
@@ -500,23 +502,24 @@ def test_sync_chat_completions_stream(instrumented_client: openai.Client, export
},
},
{
'name': 'streaming response from {request_data[model]!r}',
'name': 'streaming response from {request_data[model]!r} took {duration:.2f}s',
'context': {'trace_id': 2, 'span_id': 3, 'is_remote': False},
'parent': None,
'start_time': 3000000000,
'end_time': 3000000000,
'start_time': 5000000000,
'end_time': 5000000000,
'attributes': {
'code.filepath': 'openai.py',
'logfire.level_num': 9,
'code.function': 'record_streaming',
'code.lineno': 123,
'request_data': '{"messages":[{"role":"system","content":"You are a helpful assistant."},{"role":"user","content":"What is four plus five?"}],"model":"gpt-4","stream":true}',
'async': False,
'logfire.msg_template': 'streaming response from {request_data[model]!r}',
'logfire.msg': "streaming response from 'gpt-4'",
'logfire.msg_template': 'streaming response from {request_data[model]!r} took {duration:.2f}s',
'logfire.msg': "streaming response from 'gpt-4' took 1.00s",
'logfire.span_type': 'log',
'duration': 1.0,
'response_data': '{"combined_chunk_content":"The answer is Nine","chunk_count":2}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"response_data":{"type":"object"}}}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"duration":{},"response_data":{"type":"object"}}}',
},
},
]
@@ -558,23 +561,24 @@ async def test_async_chat_completions_stream(
},
},
{
'name': 'streaming response from {request_data[model]!r}',
'name': 'streaming response from {request_data[model]!r} took {duration:.2f}s',
'context': {'trace_id': 2, 'span_id': 3, 'is_remote': False},
'parent': None,
'start_time': 3000000000,
'end_time': 3000000000,
'start_time': 5000000000,
'end_time': 5000000000,
'attributes': {
'code.filepath': 'openai.py',
'logfire.level_num': 9,
'code.function': 'record_streaming',
'code.lineno': 123,
'request_data': '{"messages":[{"role":"system","content":"You are a helpful assistant."},{"role":"user","content":"What is four plus five?"}],"model":"gpt-4","stream":true}',
'async': True,
'logfire.msg_template': 'streaming response from {request_data[model]!r}',
'logfire.msg': "streaming response from 'gpt-4'",
'logfire.msg_template': 'streaming response from {request_data[model]!r} took {duration:.2f}s',
'logfire.msg': "streaming response from 'gpt-4' took 1.00s",
'logfire.span_type': 'log',
'duration': 1.0,
'response_data': '{"combined_chunk_content":"The answer is Nine","chunk_count":2}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"response_data":{"type":"object"}}}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"duration":{},"response_data":{"type":"object"}}}',
},
},
]
@@ -641,23 +645,24 @@ def test_completions_stream(instrumented_client: openai.Client, exporter: TestEx
},
},
{
'name': 'streaming response from {request_data[model]!r}',
'name': 'streaming response from {request_data[model]!r} took {duration:.2f}s',
'context': {'trace_id': 2, 'span_id': 3, 'is_remote': False},
'parent': None,
'start_time': 3000000000,
'end_time': 3000000000,
'start_time': 5000000000,
'end_time': 5000000000,
'attributes': {
'code.filepath': 'openai.py',
'logfire.level_num': 9,
'code.function': 'record_streaming',
'code.lineno': 123,
'request_data': '{"model":"gpt-3.5-turbo-instruct","prompt":"What is four plus five?","stream":true}',
'async': False,
'logfire.msg_template': 'streaming response from {request_data[model]!r}',
'logfire.msg': "streaming response from 'gpt-3.5-turbo-instruct'",
'logfire.msg_template': 'streaming response from {request_data[model]!r} took {duration:.2f}s',
'logfire.msg': "streaming response from 'gpt-3.5-turbo-instruct' took 1.00s",
'logfire.span_type': 'log',
'duration': 1.0,
'response_data': '{"combined_chunk_content":"The answer is Nine","chunk_count":3}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"response_data":{"type":"object"}}}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"duration":{},"response_data":{"type":"object"}}}',
},
},
]
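
In the expected spans above, duration is exactly 1.0 and the streaming log's start_time and end_time shift from 3000000000 to 5000000000 nanoseconds. This is consistent with a test clock that advances by one second on every reading: the instrumentation now takes two extra timer readings per stream, so later timestamps move forward by two seconds and every measured duration comes out as 1.00s. A minimal sketch of such a deterministic clock, with illustrative names that are assumptions rather than logfire's actual test fixture:

ONE_SECOND_IN_NANOSECONDS = 1_000_000_000


class FakeNanosecondClock:
    """Deterministic stand-in for a nanosecond timestamp generator.

    Each call returns a value one second later than the previous one, so a
    duration measured from two consecutive readings is always exactly 1.00s.
    """

    def __init__(self) -> None:
        self.ns = 0

    def __call__(self) -> int:
        self.ns += ONE_SECOND_IN_NANOSECONDS
        return self.ns


clock = FakeNanosecondClock()
start = clock()  # 1_000_000_000
end = clock()    # 2_000_000_000
assert (end - start) / ONE_SECOND_IN_NANOSECONDS == 1.0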
