
Commit 7c04ae7

feat(api): support storing chat completions, enabling evals and model distillation in the dashboard (#1766)
Learn more at http://openai.com/devday2024
1 parent b5e39d9 commit 7c04ae7
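
The new `store` and `metadata` parameters introduced here are passed straight to `client.chat.completions.create`, as the test changes below confirm. A minimal usage sketch (the model choice and metadata values are illustrative, not part of this commit):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Opt this request in to storage so it shows up in the dashboard for
# evals and model distillation, and tag it for filtering later.
completion = client.chat.completions.create(
    model="gpt-4o",  # illustrative model choice
    messages=[{"role": "user", "content": "Say hello."}],
    store=True,
    metadata={"experiment": "distillation-v1"},  # hypothetical tag
)
print(completion.choices[0].message.content)
```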

File tree

6 files changed: +134 lines, -15 lines

.stats.yml
src/openai/resources/chat/completions.py
src/openai/types/chat/completion_create_params.py
src/openai/types/chat_model.py
src/openai/types/completion_usage.py
tests/api_resources/chat/test_completions.py

.stats.yml

Lines changed: 1 addition & 1 deletion
@@ -1,2 +1,2 @@
 configured_endpoints: 68
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8ad878332083dd506a478a293db78dc9e7b1b2124f2682e1d991225bc5bbcc3b.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-52b934aee6468039ec7f4ce046a282b5fbce114afc708e70f17121df654f71da.yml

src/openai/resources/chat/completions.py

Lines changed: 92 additions & 12 deletions
Large diffs are not rendered by default.

src/openai/types/chat/completion_create_params.py

Lines changed: 17 additions & 1 deletion
@@ -30,7 +30,11 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     messages: Required[Iterable[ChatCompletionMessageParam]]
     """A list of messages comprising the conversation so far.

-    [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).
+    Depending on the [model](https://platform.openai.com/docs/models) you use,
+    different message types (modalities) are supported, like
+    [text](https://platform.openai.com/docs/guides/text-generation),
+    [images](https://platform.openai.com/docs/guides/vision), and
+    [audio](https://platform.openai.com/docs/guides/audio).
     """

     model: Required[Union[str, ChatModel]]
@@ -105,6 +109,12 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     [o1 series models](https://platform.openai.com/docs/guides/reasoning).
     """

+    metadata: Optional[Dict[str, str]]
+    """
+    Developer-defined tags and values used for filtering completions in the
+    [dashboard](https://platform.openai.com/completions).
+    """
+
     n: Optional[int]
     """How many chat completion choices to generate for each input message.

@@ -183,6 +193,12 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     stop: Union[Optional[str], List[str]]
     """Up to 4 sequences where the API will stop generating further tokens."""

+    store: Optional[bool]
+    """
+    Whether or not to store the output of this completion request for traffic
+    logging in the [dashboard](https://platform.openai.com/completions).
+    """
+
     stream_options: Optional[ChatCompletionStreamOptionsParam]
     """Options for streaming response. Only set this when you set `stream: true`."""

src/openai/types/chat_model.py

Lines changed: 1 addition & 0 deletions
@@ -12,6 +12,7 @@
     "gpt-4o",
     "gpt-4o-2024-08-06",
     "gpt-4o-2024-05-13",
+    "gpt-4o-realtime-preview-2024-10-01",
     "chatgpt-4o-latest",
     "gpt-4o-mini",
     "gpt-4o-mini-2024-07-18",

src/openai/types/completion_usage.py

Lines changed: 15 additions & 1 deletion
@@ -4,14 +4,25 @@

 from .._models import BaseModel

-__all__ = ["CompletionUsage", "CompletionTokensDetails"]
+__all__ = ["CompletionUsage", "CompletionTokensDetails", "PromptTokensDetails"]


 class CompletionTokensDetails(BaseModel):
+    audio_tokens: Optional[int] = None
+    """Audio input tokens generated by the model."""
+
     reasoning_tokens: Optional[int] = None
     """Tokens generated by the model for reasoning."""


+class PromptTokensDetails(BaseModel):
+    audio_tokens: Optional[int] = None
+    """Audio input tokens present in the prompt."""
+
+    cached_tokens: Optional[int] = None
+    """Cached tokens present in the prompt."""
+
+
 class CompletionUsage(BaseModel):
     completion_tokens: int
     """Number of tokens in the generated completion."""
@@ -24,3 +35,6 @@ class CompletionUsage(BaseModel):

     completion_tokens_details: Optional[CompletionTokensDetails] = None
     """Breakdown of tokens used in a completion."""
+
+    prompt_tokens_details: Optional[PromptTokensDetails] = None
+    """Breakdown of tokens used in the prompt."""

tests/api_resources/chat/test_completions.py

Lines changed: 8 additions & 0 deletions
@@ -56,13 +56,15 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
             logprobs=True,
             max_completion_tokens=0,
             max_tokens=0,
+            metadata={"foo": "string"},
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
             response_format={"type": "text"},
             seed=-9007199254740991,
             service_tier="auto",
             stop="string",
+            store=True,
             stream=False,
             stream_options={"include_usage": True},
             temperature=1,
@@ -177,13 +179,15 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
             logprobs=True,
             max_completion_tokens=0,
             max_tokens=0,
+            metadata={"foo": "string"},
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
             response_format={"type": "text"},
             seed=-9007199254740991,
             service_tier="auto",
             stop="string",
+            store=True,
             stream_options={"include_usage": True},
             temperature=1,
             tool_choice="none",
@@ -300,13 +304,15 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
             logprobs=True,
             max_completion_tokens=0,
             max_tokens=0,
+            metadata={"foo": "string"},
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
             response_format={"type": "text"},
             seed=-9007199254740991,
             service_tier="auto",
             stop="string",
+            store=True,
             stream=False,
             stream_options={"include_usage": True},
             temperature=1,
@@ -421,13 +427,15 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             logprobs=True,
             max_completion_tokens=0,
             max_tokens=0,
+            metadata={"foo": "string"},
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
             response_format={"type": "text"},
             seed=-9007199254740991,
             service_tier="auto",
             stop="string",
+            store=True,
             stream_options={"include_usage": True},
             temperature=1,
             tool_choice="none",
