Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(openai): Handle tool_calls assistant messages #1429

Merged
merged 3 commits into from
Jul 2, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -318,13 +318,19 @@ def _set_prompts(span, messages):

for i, msg in enumerate(messages):
prefix = f"{SpanAttributes.LLM_PROMPTS}.{i}"
if isinstance(msg.get("content"), str):
content = msg.get("content")
elif isinstance(msg.get("content"), list):
content = json.dumps(msg.get("content"))

_set_span_attribute(span, f"{prefix}.role", msg.get("role"))
_set_span_attribute(span, f"{prefix}.content", content)
if msg.get("content"):
content = msg.get("content")
if isinstance(content, list):
content = json.dumps(content)
_set_span_attribute(span, f"{prefix}.content", content)
if msg.get("tool_calls"):
_set_span_attribute(
span, f"{prefix}.tool_calls", json.dumps(msg.get("tool_calls"))
)
if msg.get("tool_call_id"):
_set_span_attribute(span, f"{prefix}.tool_call_id", msg.get("tool_call_id"))


def _set_completions(span, choices):
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
# VCR.py cassette: a recorded HTTP exchange with the OpenAI chat-completions
# endpoint, replayed by the @pytest.mark.vcr-decorated test instead of hitting
# the network. The request body carries an assistant message that has only
# "tool_calls" (no "content") followed by a "tool" role message — the exact
# shape the instrumentation fix under test must handle.
# NOTE: the response body is stored gzip-compressed as a YAML !!binary scalar;
# do not hand-edit it.
interactions:
- request:
    body: '{"messages": [{"role": "assistant", "tool_calls": [{"id": "1", "type":
      "function", "function": {"name": "get_current_weather", "arguments": "{\"location\":
      \"San Francisco\"}"}}]}, {"role": "tool", "tool_call_id": "1", "content": "The
      weather in San Francisco is 70 degrees and sunny."}], "model": "gpt-3.5-turbo"}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '314'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.31.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.31.1
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA1SQUUvDMBSF3/srLnlex1pX6voqiCKIMrehIiNN79pomsTcFB1j/13SdSu+5OGc
        nJPv5BABMFmxAphouBetVfGifnh8K1eb5c2c0lWTPu/W6/zpVX5v7rs7NgkJU36i8OfUVJjWKvTS
        6JMtHHKPoTXJk8V1niXzpDdaU6EKsdr6+Gqaxb5zpYlnSZoNycZIgcQKeI8AAA79GRh1hb+sgNnk
        rLRIxGtkxeUSAHNGBYVxIkmea88moymM9qh77JcG4Qe5b9CB1LDkGm4d10KSMCAJ8hlUWDtEAq4r
        oE7r/ZQNVccLgzK1daYMvLpT6qLvpJbUbB1yMjq8R97YU/wYAXz0W7t/+Mw601q/9eYLdSicD1PZ
        +LujmaSD6Y3natSzNBr4GO3JY7vdSV2js072wwNldIz+AAAA//8DAFgC3/b3AQAA
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89ca0ace187267e5-SJC
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Mon, 01 Jul 2024 23:05:41 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=5KtfCvK8iEaeyiKJ_x.mKoz_lHBPohUO7we2u4JFgvI-1719875141-1.0.1.1-emXTOeWpVHhcRLjSmiFF90wqxIvME72vqSRqI6onZoc0a0MfJIIsD_Bcjns_XecEbRgOzrsytWQKY5rCmUDYpQ;
        path=/; expires=Mon, 01-Jul-24 23:35:41 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=xT_vl23QbJqI6RPCdqTL8JznNz1gHw17O2DziF68JjM-1719875141467-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - aca-labs-1
      openai-processing-ms:
      - '467'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '5000'
      x-ratelimit-limit-tokens:
      - '160000'
      x-ratelimit-remaining-requests:
      - '4999'
      x-ratelimit-remaining-tokens:
      - '159968'
      x-ratelimit-reset-requests:
      - 12ms
      x-ratelimit-reset-tokens:
      - 12ms
      x-request-id:
      - req_cb8fecee9a23c26d9b4ff686171a670b
    status:
      code: 200
      message: OK
version: 1
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import json
import pytest
from opentelemetry.semconv.ai import SpanAttributes

Expand Down Expand Up @@ -33,6 +34,64 @@ def test_chat(exporter, openai_client):
assert open_ai_span.attributes.get(SpanAttributes.LLM_IS_STREAMING) is False


@pytest.mark.vcr
def test_chat_tool_calls(exporter, openai_client):
    """An assistant message carrying only tool_calls (no content) plus the
    matching tool-role reply must be recorded on the span: the first prompt
    gets a serialized ``tool_calls`` attribute and no ``content`` attribute,
    the second keeps its ``content`` and ``tool_call_id``."""
    tool_call = {
        "id": "1",
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "arguments": '{"location": "San Francisco"}',
        },
    }
    tool_reply = {
        "role": "tool",
        "tool_call_id": "1",
        "content": "The weather in San Francisco is 70 degrees and sunny.",
    }

    openai_client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "assistant", "tool_calls": [tool_call]},
            tool_reply,
        ],
    )

    spans = exporter.get_finished_spans()
    assert [span.name for span in spans] == ["openai.chat"]
    open_ai_span = spans[0]

    prompt_0 = f"{SpanAttributes.LLM_PROMPTS}.0"
    prompt_1 = f"{SpanAttributes.LLM_PROMPTS}.1"

    # The assistant message had no content, so no content attribute is emitted;
    # its tool_calls are serialized to JSON instead.
    assert f"{prompt_0}.content" not in open_ai_span.attributes
    assert open_ai_span.attributes[f"{prompt_0}.tool_calls"] == json.dumps(
        [tool_call]
    )

    # The tool reply keeps both its content and the id linking it to the call.
    assert (
        open_ai_span.attributes[f"{prompt_1}.content"]
        == "The weather in San Francisco is 70 degrees and sunny."
    )
    assert open_ai_span.attributes[f"{prompt_1}.tool_call_id"] == "1"


@pytest.mark.vcr
def test_chat_streaming(exporter, openai_client):
response = openai_client.chat.completions.create(
Expand Down