diff --git a/.stats.yml b/.stats.yml
index 6cc7757..ac652c9 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 68
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b04761ffd2adad3cc19a6dc6fc696ac445878219972f891881a967340fa9a6b0.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4097c2f86beb3f3bb021775cd1dfa240e960caf842aeefc2e08da4dc0851ea79.yml
diff --git a/aliases.go b/aliases.go
index b84f85f..26737f4 100644
--- a/aliases.go
+++ b/aliases.go
@@ -28,3 +28,39 @@ type FunctionDefinitionParam = shared.FunctionDefinitionParam
//
// This is an alias to an internal type.
type FunctionParameters = shared.FunctionParameters
+
+// This is an alias to an internal type.
+type ResponseFormatJSONObjectParam = shared.ResponseFormatJSONObjectParam
+
+// The type of response format being defined: `json_object`
+//
+// This is an alias to an internal type.
+type ResponseFormatJSONObjectType = shared.ResponseFormatJSONObjectType
+
+// This is an alias to an internal value.
+const ResponseFormatJSONObjectTypeJSONObject = shared.ResponseFormatJSONObjectTypeJSONObject
+
+// This is an alias to an internal type.
+type ResponseFormatJSONSchemaParam = shared.ResponseFormatJSONSchemaParam
+
+// This is an alias to an internal type.
+type ResponseFormatJSONSchemaJSONSchemaParam = shared.ResponseFormatJSONSchemaJSONSchemaParam
+
+// The type of response format being defined: `json_schema`
+//
+// This is an alias to an internal type.
+type ResponseFormatJSONSchemaType = shared.ResponseFormatJSONSchemaType
+
+// This is an alias to an internal value.
+const ResponseFormatJSONSchemaTypeJSONSchema = shared.ResponseFormatJSONSchemaTypeJSONSchema
+
+// This is an alias to an internal type.
+type ResponseFormatTextParam = shared.ResponseFormatTextParam
+
+// The type of response format being defined: `text`
+//
+// This is an alias to an internal type.
+type ResponseFormatTextType = shared.ResponseFormatTextType
+
+// This is an alias to an internal value.
+const ResponseFormatTextTypeText = shared.ResponseFormatTextTypeText
diff --git a/api.md b/api.md
index 6b26f16..ecec90f 100644
--- a/api.md
+++ b/api.md
@@ -2,6 +2,9 @@
- shared.FunctionDefinitionParam
- shared.FunctionParameters
+- shared.ResponseFormatJSONObjectParam
+- shared.ResponseFormatJSONSchemaParam
+- shared.ResponseFormatTextParam
# Shared Response Types
@@ -34,6 +37,7 @@ Params Types:
- openai.ChatCompletionAssistantMessageParam
- openai.ChatCompletionContentPartUnionParam
- openai.ChatCompletionContentPartImageParam
+- openai.ChatCompletionContentPartRefusalParam
- openai.ChatCompletionContentPartTextParam
- openai.ChatCompletionFunctionCallOptionParam
- openai.ChatCompletionFunctionMessageParam
@@ -270,16 +274,12 @@ Methods:
Params Types:
-- openai.AssistantResponseFormatParam
-- openai.AssistantResponseFormatOptionUnionParam
- openai.AssistantToolChoiceParam
- openai.AssistantToolChoiceFunctionParam
- openai.AssistantToolChoiceOptionUnionParam
Response Types:
-- openai.AssistantResponseFormat
-- openai.AssistantResponseFormatOptionUnion
- openai.AssistantToolChoice
- openai.AssistantToolChoiceFunction
- openai.AssistantToolChoiceOptionUnion
@@ -371,6 +371,8 @@ Response Types:
- openai.MessageDeleted
- openai.MessageDelta
- openai.MessageDeltaEvent
+- openai.RefusalContentBlock
+- openai.RefusalDeltaBlock
- openai.Text
- openai.TextContentBlock
- openai.TextDelta
diff --git a/batch.go b/batch.go
index 8b7da8f..14a2509 100644
--- a/batch.go
+++ b/batch.go
@@ -368,7 +368,7 @@ type BatchListParams struct {
// URLQuery serializes [BatchListParams]'s query parameters as `url.Values`.
func (r BatchListParams) URLQuery() (v url.Values) {
return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
- ArrayFormat: apiquery.ArrayQueryFormatComma,
+ ArrayFormat: apiquery.ArrayQueryFormatBrackets,
NestedFormat: apiquery.NestedQueryFormatBrackets,
})
}
diff --git a/betaassistant.go b/betaassistant.go
index 6ac3c1c..28ae56e 100644
--- a/betaassistant.go
+++ b/betaassistant.go
@@ -136,22 +136,6 @@ type Assistant struct {
// assistant. Tools can be of types `code_interpreter`, `file_search`, or
// `function`.
Tools []AssistantTool `json:"tools,required"`
- // Specifies the format that the model must output. Compatible with
- // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
- // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- //
- // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
- // message the model generates is valid JSON.
- //
- // **Important:** when using JSON mode, you **must** also instruct the model to
- // produce JSON yourself via a system or user message. Without this, the model may
- // generate an unending stream of whitespace until the generation reaches the token
- // limit, resulting in a long-running and seemingly "stuck" request. Also note that
- // the message content may be partially cut off if `finish_reason="length"`, which
- // indicates the generation exceeded `max_tokens` or the conversation exceeded the
- // max context length.
- ResponseFormat AssistantResponseFormatOptionUnion `json:"response_format,nullable"`
// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
// make the output more random, while lower values like 0.2 will make it more
// focused and deterministic.
@@ -172,21 +156,20 @@ type Assistant struct {
// assistantJSON contains the JSON metadata for the struct [Assistant]
type assistantJSON struct {
- ID apijson.Field
- CreatedAt apijson.Field
- Description apijson.Field
- Instructions apijson.Field
- Metadata apijson.Field
- Model apijson.Field
- Name apijson.Field
- Object apijson.Field
- Tools apijson.Field
- ResponseFormat apijson.Field
- Temperature apijson.Field
- ToolResources apijson.Field
- TopP apijson.Field
- raw string
- ExtraFields map[string]apijson.Field
+ ID apijson.Field
+ CreatedAt apijson.Field
+ Description apijson.Field
+ Instructions apijson.Field
+ Metadata apijson.Field
+ Model apijson.Field
+ Name apijson.Field
+ Object apijson.Field
+ Tools apijson.Field
+ Temperature apijson.Field
+ ToolResources apijson.Field
+ TopP apijson.Field
+ raw string
+ ExtraFields map[string]apijson.Field
}
func (r *Assistant) UnmarshalJSON(data []byte) (err error) {
@@ -1869,8 +1852,8 @@ func (r FileSearchToolType) IsKnown() bool {
// Overrides for the file search tool.
type FileSearchToolFileSearch struct {
// The maximum number of results the file search tool should output. The default is
- // 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1
- // and 50 inclusive.
+ // 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
+ // 1 and 50 inclusive.
//
// Note that the file search tool may output fewer than `max_num_results` results.
// See the
@@ -1914,8 +1897,8 @@ func (r FileSearchToolParam) implementsBetaThreadNewAndRunParamsToolUnion() {}
// Overrides for the file search tool.
type FileSearchToolFileSearchParam struct {
// The maximum number of results the file search tool should output. The default is
- // 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1
- // and 50 inclusive.
+ // 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
+ // 1 and 50 inclusive.
//
// Note that the file search tool may output fewer than `max_num_results` results.
// See the
@@ -2001,22 +1984,6 @@ type BetaAssistantNewParams struct {
Metadata param.Field[interface{}] `json:"metadata"`
// The name of the assistant. The maximum length is 256 characters.
Name param.Field[string] `json:"name"`
- // Specifies the format that the model must output. Compatible with
- // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
- // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- //
- // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
- // message the model generates is valid JSON.
- //
- // **Important:** when using JSON mode, you **must** also instruct the model to
- // produce JSON yourself via a system or user message. Without this, the model may
- // generate an unending stream of whitespace until the generation reaches the token
- // limit, resulting in a long-running and seemingly "stuck" request. Also note that
- // the message content may be partially cut off if `finish_reason="length"`, which
- // indicates the generation exceeded `max_tokens` or the conversation exceeded the
- // max context length.
- ResponseFormat param.Field[AssistantResponseFormatOptionUnionParam] `json:"response_format"`
// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
// make the output more random, while lower values like 0.2 will make it more
// focused and deterministic.
@@ -2234,22 +2201,6 @@ type BetaAssistantUpdateParams struct {
Model param.Field[string] `json:"model"`
// The name of the assistant. The maximum length is 256 characters.
Name param.Field[string] `json:"name"`
- // Specifies the format that the model must output. Compatible with
- // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
- // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- //
- // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
- // message the model generates is valid JSON.
- //
- // **Important:** when using JSON mode, you **must** also instruct the model to
- // produce JSON yourself via a system or user message. Without this, the model may
- // generate an unending stream of whitespace until the generation reaches the token
- // limit, resulting in a long-running and seemingly "stuck" request. Also note that
- // the message content may be partially cut off if `finish_reason="length"`, which
- // indicates the generation exceeded `max_tokens` or the conversation exceeded the
- // max context length.
- ResponseFormat param.Field[AssistantResponseFormatOptionUnionParam] `json:"response_format"`
// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
// make the output more random, while lower values like 0.2 will make it more
// focused and deterministic.
@@ -2335,7 +2286,7 @@ type BetaAssistantListParams struct {
// `url.Values`.
func (r BetaAssistantListParams) URLQuery() (v url.Values) {
return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
- ArrayFormat: apiquery.ArrayQueryFormatComma,
+ ArrayFormat: apiquery.ArrayQueryFormatBrackets,
NestedFormat: apiquery.NestedQueryFormatBrackets,
})
}
diff --git a/betaassistant_test.go b/betaassistant_test.go
index 3082757..bf60a21 100644
--- a/betaassistant_test.go
+++ b/betaassistant_test.go
@@ -26,13 +26,12 @@ func TestBetaAssistantNewWithOptionalParams(t *testing.T) {
option.WithAPIKey("My API Key"),
)
_, err := client.Beta.Assistants.New(context.TODO(), openai.BetaAssistantNewParams{
- Model: openai.F(openai.ChatModelGPT4o),
- Description: openai.F("description"),
- Instructions: openai.F("instructions"),
- Metadata: openai.F[any](map[string]interface{}{}),
- Name: openai.F("name"),
- ResponseFormat: openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)),
- Temperature: openai.F(1.000000),
+ Model: openai.F(openai.ChatModelGPT4o),
+ Description: openai.F("description"),
+ Instructions: openai.F("instructions"),
+ Metadata: openai.F[any](map[string]interface{}{}),
+ Name: openai.F("name"),
+ Temperature: openai.F(1.000000),
ToolResources: openai.F(openai.BetaAssistantNewParamsToolResources{
CodeInterpreter: openai.F(openai.BetaAssistantNewParamsToolResourcesCodeInterpreter{
FileIDs: openai.F([]string{"string", "string", "string"}),
@@ -104,13 +103,12 @@ func TestBetaAssistantUpdateWithOptionalParams(t *testing.T) {
context.TODO(),
"assistant_id",
openai.BetaAssistantUpdateParams{
- Description: openai.F("description"),
- Instructions: openai.F("instructions"),
- Metadata: openai.F[any](map[string]interface{}{}),
- Model: openai.F("model"),
- Name: openai.F("name"),
- ResponseFormat: openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)),
- Temperature: openai.F(1.000000),
+ Description: openai.F("description"),
+ Instructions: openai.F("instructions"),
+ Metadata: openai.F[any](map[string]interface{}{}),
+ Model: openai.F("model"),
+ Name: openai.F("name"),
+ Temperature: openai.F(1.000000),
ToolResources: openai.F(openai.BetaAssistantUpdateParamsToolResources{
CodeInterpreter: openai.F(openai.BetaAssistantUpdateParamsToolResourcesCodeInterpreter{
FileIDs: openai.F([]string{"string", "string", "string"}),
diff --git a/betathread.go b/betathread.go
index 70587ae..1e2ef68 100644
--- a/betathread.go
+++ b/betathread.go
@@ -106,142 +106,6 @@ func (r *BetaThreadService) NewAndRunStreaming(ctx context.Context, body BetaThr
return ssestream.NewStream[AssistantStreamEvent](ssestream.NewDecoder(raw), err)
}
-// An object describing the expected output of the model. If `json_object` only
-// `function` type `tools` are allowed to be passed to the Run. If `text` the model
-// can return text or any value needed.
-type AssistantResponseFormat struct {
- // Must be one of `text` or `json_object`.
- Type AssistantResponseFormatType `json:"type"`
- JSON assistantResponseFormatJSON `json:"-"`
-}
-
-// assistantResponseFormatJSON contains the JSON metadata for the struct
-// [AssistantResponseFormat]
-type assistantResponseFormatJSON struct {
- Type apijson.Field
- raw string
- ExtraFields map[string]apijson.Field
-}
-
-func (r *AssistantResponseFormat) UnmarshalJSON(data []byte) (err error) {
- return apijson.UnmarshalRoot(data, r)
-}
-
-func (r assistantResponseFormatJSON) RawJSON() string {
- return r.raw
-}
-
-func (r AssistantResponseFormat) implementsAssistantResponseFormatOptionUnion() {}
-
-// Must be one of `text` or `json_object`.
-type AssistantResponseFormatType string
-
-const (
- AssistantResponseFormatTypeText AssistantResponseFormatType = "text"
- AssistantResponseFormatTypeJSONObject AssistantResponseFormatType = "json_object"
-)
-
-func (r AssistantResponseFormatType) IsKnown() bool {
- switch r {
- case AssistantResponseFormatTypeText, AssistantResponseFormatTypeJSONObject:
- return true
- }
- return false
-}
-
-// An object describing the expected output of the model. If `json_object` only
-// `function` type `tools` are allowed to be passed to the Run. If `text` the model
-// can return text or any value needed.
-type AssistantResponseFormatParam struct {
- // Must be one of `text` or `json_object`.
- Type param.Field[AssistantResponseFormatType] `json:"type"`
-}
-
-func (r AssistantResponseFormatParam) MarshalJSON() (data []byte, err error) {
- return apijson.MarshalRoot(r)
-}
-
-func (r AssistantResponseFormatParam) implementsAssistantResponseFormatOptionUnionParam() {}
-
-// Specifies the format that the model must output. Compatible with
-// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
-// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
-// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-//
-// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
-// message the model generates is valid JSON.
-//
-// **Important:** when using JSON mode, you **must** also instruct the model to
-// produce JSON yourself via a system or user message. Without this, the model may
-// generate an unending stream of whitespace until the generation reaches the token
-// limit, resulting in a long-running and seemingly "stuck" request. Also note that
-// the message content may be partially cut off if `finish_reason="length"`, which
-// indicates the generation exceeded `max_tokens` or the conversation exceeded the
-// max context length.
-//
-// Union satisfied by [AssistantResponseFormatOptionString] or
-// [AssistantResponseFormat].
-type AssistantResponseFormatOptionUnion interface {
- implementsAssistantResponseFormatOptionUnion()
-}
-
-func init() {
- apijson.RegisterUnion(
- reflect.TypeOf((*AssistantResponseFormatOptionUnion)(nil)).Elem(),
- "",
- apijson.UnionVariant{
- TypeFilter: gjson.String,
- Type: reflect.TypeOf(AssistantResponseFormatOptionString("")),
- },
- apijson.UnionVariant{
- TypeFilter: gjson.JSON,
- Type: reflect.TypeOf(AssistantResponseFormat{}),
- },
- )
-}
-
-// `auto` is the default value
-type AssistantResponseFormatOptionString string
-
-const (
- AssistantResponseFormatOptionStringNone AssistantResponseFormatOptionString = "none"
- AssistantResponseFormatOptionStringAuto AssistantResponseFormatOptionString = "auto"
-)
-
-func (r AssistantResponseFormatOptionString) IsKnown() bool {
- switch r {
- case AssistantResponseFormatOptionStringNone, AssistantResponseFormatOptionStringAuto:
- return true
- }
- return false
-}
-
-func (r AssistantResponseFormatOptionString) implementsAssistantResponseFormatOptionUnion() {}
-
-func (r AssistantResponseFormatOptionString) implementsAssistantResponseFormatOptionUnionParam() {}
-
-// Specifies the format that the model must output. Compatible with
-// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
-// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
-// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-//
-// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
-// message the model generates is valid JSON.
-//
-// **Important:** when using JSON mode, you **must** also instruct the model to
-// produce JSON yourself via a system or user message. Without this, the model may
-// generate an unending stream of whitespace until the generation reaches the token
-// limit, resulting in a long-running and seemingly "stuck" request. Also note that
-// the message content may be partially cut off if `finish_reason="length"`, which
-// indicates the generation exceeded `max_tokens` or the conversation exceeded the
-// max context length.
-//
-// Satisfied by [AssistantResponseFormatOptionString],
-// [AssistantResponseFormatParam].
-type AssistantResponseFormatOptionUnionParam interface {
- implementsAssistantResponseFormatOptionUnionParam()
-}
-
// Specifies a tool the model should use. Use to force the model to call a specific
// tool.
type AssistantToolChoice struct {
@@ -977,22 +841,6 @@ type BetaThreadNewAndRunParams struct {
// [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
// during tool use.
ParallelToolCalls param.Field[bool] `json:"parallel_tool_calls"`
- // Specifies the format that the model must output. Compatible with
- // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
- // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- //
- // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
- // message the model generates is valid JSON.
- //
- // **Important:** when using JSON mode, you **must** also instruct the model to
- // produce JSON yourself via a system or user message. Without this, the model may
- // generate an unending stream of whitespace until the generation reaches the token
- // limit, resulting in a long-running and seemingly "stuck" request. Also note that
- // the message content may be partially cut off if `finish_reason="length"`, which
- // indicates the generation exceeded `max_tokens` or the conversation exceeded the
- // max context length.
- ResponseFormat param.Field[AssistantResponseFormatOptionUnionParam] `json:"response_format"`
// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
// make the output more random, while lower values like 0.2 will make it more
// focused and deterministic.
diff --git a/betathread_test.go b/betathread_test.go
index 7a5baed..413ecce 100644
--- a/betathread_test.go
+++ b/betathread_test.go
@@ -250,7 +250,6 @@ func TestBetaThreadNewAndRunWithOptionalParams(t *testing.T) {
Metadata: openai.F[any](map[string]interface{}{}),
Model: openai.F(openai.ChatModelGPT4o),
ParallelToolCalls: openai.F(true),
- ResponseFormat: openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)),
Temperature: openai.F(1.000000),
Thread: openai.F(openai.BetaThreadNewAndRunParamsThread{
Messages: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessage{{
diff --git a/betathreadmessage.go b/betathreadmessage.go
index e093d8e..093bfd9 100644
--- a/betathreadmessage.go
+++ b/betathreadmessage.go
@@ -1333,6 +1333,7 @@ type MessageContent struct {
ImageFile ImageFile `json:"image_file"`
ImageURL ImageURL `json:"image_url"`
Text Text `json:"text"`
+ Refusal string `json:"refusal"`
JSON messageContentJSON `json:"-"`
union MessageContentUnion
}
@@ -1343,6 +1344,7 @@ type messageContentJSON struct {
ImageFile apijson.Field
ImageURL apijson.Field
Text apijson.Field
+ Refusal apijson.Field
raw string
ExtraFields map[string]apijson.Field
}
@@ -1364,7 +1366,7 @@ func (r *MessageContent) UnmarshalJSON(data []byte) (err error) {
// specific types for more type safety.
//
// Possible runtime types of the union are [ImageFileContentBlock],
-// [ImageURLContentBlock], [TextContentBlock].
+// [ImageURLContentBlock], [TextContentBlock], [RefusalContentBlock].
func (r MessageContent) AsUnion() MessageContentUnion {
return r.union
}
@@ -1372,8 +1374,8 @@ func (r MessageContent) AsUnion() MessageContentUnion {
// References an image [File](https://platform.openai.com/docs/api-reference/files)
// in the content of a message.
//
-// Union satisfied by [ImageFileContentBlock], [ImageURLContentBlock] or
-// [TextContentBlock].
+// Union satisfied by [ImageFileContentBlock], [ImageURLContentBlock],
+// [TextContentBlock] or [RefusalContentBlock].
type MessageContentUnion interface {
implementsMessageContent()
}
@@ -1397,6 +1399,11 @@ func init() {
Type: reflect.TypeOf(TextContentBlock{}),
DiscriminatorValue: "text",
},
+ apijson.UnionVariant{
+ TypeFilter: gjson.JSON,
+ Type: reflect.TypeOf(RefusalContentBlock{}),
+ DiscriminatorValue: "refusal",
+ },
)
}
@@ -1407,11 +1414,12 @@ const (
MessageContentTypeImageFile MessageContentType = "image_file"
MessageContentTypeImageURL MessageContentType = "image_url"
MessageContentTypeText MessageContentType = "text"
+ MessageContentTypeRefusal MessageContentType = "refusal"
)
func (r MessageContentType) IsKnown() bool {
switch r {
- case MessageContentTypeImageFile, MessageContentTypeImageURL, MessageContentTypeText:
+ case MessageContentTypeImageFile, MessageContentTypeImageURL, MessageContentTypeText, MessageContentTypeRefusal:
return true
}
return false
@@ -1426,6 +1434,7 @@ type MessageContentDelta struct {
Type MessageContentDeltaType `json:"type,required"`
ImageFile ImageFileDelta `json:"image_file"`
Text TextDelta `json:"text"`
+ Refusal string `json:"refusal"`
ImageURL ImageURLDelta `json:"image_url"`
JSON messageContentDeltaJSON `json:"-"`
union MessageContentDeltaUnion
@@ -1438,6 +1447,7 @@ type messageContentDeltaJSON struct {
Type apijson.Field
ImageFile apijson.Field
Text apijson.Field
+ Refusal apijson.Field
ImageURL apijson.Field
raw string
ExtraFields map[string]apijson.Field
@@ -1460,7 +1470,7 @@ func (r *MessageContentDelta) UnmarshalJSON(data []byte) (err error) {
// specific types for more type safety.
//
// Possible runtime types of the union are [ImageFileDeltaBlock], [TextDeltaBlock],
-// [ImageURLDeltaBlock].
+// [RefusalDeltaBlock], [ImageURLDeltaBlock].
func (r MessageContentDelta) AsUnion() MessageContentDeltaUnion {
return r.union
}
@@ -1468,8 +1478,8 @@ func (r MessageContentDelta) AsUnion() MessageContentDeltaUnion {
// References an image [File](https://platform.openai.com/docs/api-reference/files)
// in the content of a message.
//
-// Union satisfied by [ImageFileDeltaBlock], [TextDeltaBlock] or
-// [ImageURLDeltaBlock].
+// Union satisfied by [ImageFileDeltaBlock], [TextDeltaBlock], [RefusalDeltaBlock]
+// or [ImageURLDeltaBlock].
type MessageContentDeltaUnion interface {
implementsMessageContentDelta()
}
@@ -1488,6 +1498,11 @@ func init() {
Type: reflect.TypeOf(TextDeltaBlock{}),
DiscriminatorValue: "text",
},
+ apijson.UnionVariant{
+ TypeFilter: gjson.JSON,
+ Type: reflect.TypeOf(RefusalDeltaBlock{}),
+ DiscriminatorValue: "refusal",
+ },
apijson.UnionVariant{
TypeFilter: gjson.JSON,
Type: reflect.TypeOf(ImageURLDeltaBlock{}),
@@ -1502,12 +1517,13 @@ type MessageContentDeltaType string
const (
MessageContentDeltaTypeImageFile MessageContentDeltaType = "image_file"
MessageContentDeltaTypeText MessageContentDeltaType = "text"
+ MessageContentDeltaTypeRefusal MessageContentDeltaType = "refusal"
MessageContentDeltaTypeImageURL MessageContentDeltaType = "image_url"
)
func (r MessageContentDeltaType) IsKnown() bool {
switch r {
- case MessageContentDeltaTypeImageFile, MessageContentDeltaTypeText, MessageContentDeltaTypeImageURL:
+ case MessageContentDeltaTypeImageFile, MessageContentDeltaTypeText, MessageContentDeltaTypeRefusal, MessageContentDeltaTypeImageURL:
return true
}
return false
@@ -1680,6 +1696,93 @@ func (r MessageDeltaEventObject) IsKnown() bool {
return false
}
+// The refusal content generated by the assistant.
+type RefusalContentBlock struct {
+ Refusal string `json:"refusal,required"`
+ // Always `refusal`.
+ Type RefusalContentBlockType `json:"type,required"`
+ JSON refusalContentBlockJSON `json:"-"`
+}
+
+// refusalContentBlockJSON contains the JSON metadata for the struct
+// [RefusalContentBlock]
+type refusalContentBlockJSON struct {
+ Refusal apijson.Field
+ Type apijson.Field
+ raw string
+ ExtraFields map[string]apijson.Field
+}
+
+func (r *RefusalContentBlock) UnmarshalJSON(data []byte) (err error) {
+ return apijson.UnmarshalRoot(data, r)
+}
+
+func (r refusalContentBlockJSON) RawJSON() string {
+ return r.raw
+}
+
+func (r RefusalContentBlock) implementsMessageContent() {}
+
+// Always `refusal`.
+type RefusalContentBlockType string
+
+const (
+ RefusalContentBlockTypeRefusal RefusalContentBlockType = "refusal"
+)
+
+func (r RefusalContentBlockType) IsKnown() bool {
+ switch r {
+ case RefusalContentBlockTypeRefusal:
+ return true
+ }
+ return false
+}
+
+// The refusal content that is part of a message.
+type RefusalDeltaBlock struct {
+ // The index of the refusal part in the message.
+ Index int64 `json:"index,required"`
+ // Always `refusal`.
+ Type RefusalDeltaBlockType `json:"type,required"`
+ Refusal string `json:"refusal"`
+ JSON refusalDeltaBlockJSON `json:"-"`
+}
+
+// refusalDeltaBlockJSON contains the JSON metadata for the struct
+// [RefusalDeltaBlock]
+type refusalDeltaBlockJSON struct {
+ Index apijson.Field
+ Type apijson.Field
+ Refusal apijson.Field
+ raw string
+ ExtraFields map[string]apijson.Field
+}
+
+func (r *RefusalDeltaBlock) UnmarshalJSON(data []byte) (err error) {
+ return apijson.UnmarshalRoot(data, r)
+}
+
+func (r refusalDeltaBlockJSON) RawJSON() string {
+ return r.raw
+}
+
+func (r RefusalDeltaBlock) implementsMessageContentDelta() {}
+
+// Always `refusal`.
+type RefusalDeltaBlockType string
+
+const (
+ RefusalDeltaBlockTypeRefusal RefusalDeltaBlockType = "refusal"
+)
+
+func (r RefusalDeltaBlockType) IsKnown() bool {
+ switch r {
+ case RefusalDeltaBlockTypeRefusal:
+ return true
+ }
+ return false
+}
+
type Text struct {
Annotations []Annotation `json:"annotations,required"`
// The data that makes up the text.
@@ -2008,7 +2111,7 @@ type BetaThreadMessageListParams struct {
// `url.Values`.
func (r BetaThreadMessageListParams) URLQuery() (v url.Values) {
return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
- ArrayFormat: apiquery.ArrayQueryFormatComma,
+ ArrayFormat: apiquery.ArrayQueryFormatBrackets,
NestedFormat: apiquery.NestedQueryFormatBrackets,
})
}
diff --git a/betathreadrun.go b/betathreadrun.go
index 2d08951..da4833c 100644
--- a/betathreadrun.go
+++ b/betathreadrun.go
@@ -313,22 +313,6 @@ type Run struct {
// Details on the action required to continue the run. Will be `null` if no action
// is required.
RequiredAction RunRequiredAction `json:"required_action,required,nullable"`
- // Specifies the format that the model must output. Compatible with
- // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
- // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- //
- // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
- // message the model generates is valid JSON.
- //
- // **Important:** when using JSON mode, you **must** also instruct the model to
- // produce JSON yourself via a system or user message. Without this, the model may
- // generate an unending stream of whitespace until the generation reaches the token
- // limit, resulting in a long-running and seemingly "stuck" request. Also note that
- // the message content may be partially cut off if `finish_reason="length"`, which
- // indicates the generation exceeded `max_tokens` or the conversation exceeded the
- // max context length.
- ResponseFormat AssistantResponseFormatOptionUnion `json:"response_format,required,nullable"`
// The Unix timestamp (in seconds) for when the run was started.
StartedAt int64 `json:"started_at,required,nullable"`
// The status of the run, which can be either `queued`, `in_progress`,
@@ -382,7 +366,6 @@ type runJSON struct {
Object apijson.Field
ParallelToolCalls apijson.Field
RequiredAction apijson.Field
- ResponseFormat apijson.Field
StartedAt apijson.Field
Status apijson.Field
ThreadID apijson.Field
@@ -713,22 +696,6 @@ type BetaThreadRunNewParams struct {
// [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
// during tool use.
ParallelToolCalls param.Field[bool] `json:"parallel_tool_calls"`
- // Specifies the format that the model must output. Compatible with
- // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
- // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- //
- // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
- // message the model generates is valid JSON.
- //
- // **Important:** when using JSON mode, you **must** also instruct the model to
- // produce JSON yourself via a system or user message. Without this, the model may
- // generate an unending stream of whitespace until the generation reaches the token
- // limit, resulting in a long-running and seemingly "stuck" request. Also note that
- // the message content may be partially cut off if `finish_reason="length"`, which
- // indicates the generation exceeded `max_tokens` or the conversation exceeded the
- // max context length.
- ResponseFormat param.Field[AssistantResponseFormatOptionUnionParam] `json:"response_format"`
// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
// make the output more random, while lower values like 0.2 will make it more
// focused and deterministic.
@@ -960,7 +927,7 @@ type BetaThreadRunListParams struct {
// `url.Values`.
func (r BetaThreadRunListParams) URLQuery() (v url.Values) {
return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
- ArrayFormat: apiquery.ArrayQueryFormatComma,
+ ArrayFormat: apiquery.ArrayQueryFormatBrackets,
NestedFormat: apiquery.NestedQueryFormatBrackets,
})
}
diff --git a/betathreadrun_test.go b/betathreadrun_test.go
index e2976e8..636ded8 100644
--- a/betathreadrun_test.go
+++ b/betathreadrun_test.go
@@ -135,7 +135,6 @@ func TestBetaThreadRunNewWithOptionalParams(t *testing.T) {
Metadata: openai.F[any](map[string]interface{}{}),
Model: openai.F(openai.ChatModelGPT4o),
ParallelToolCalls: openai.F(true),
- ResponseFormat: openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)),
Temperature: openai.F(1.000000),
ToolChoice: openai.F[openai.AssistantToolChoiceOptionUnionParam](openai.AssistantToolChoiceOptionString(openai.AssistantToolChoiceOptionStringNone)),
Tools: openai.F([]openai.AssistantToolUnionParam{openai.CodeInterpreterToolParam{
diff --git a/betathreadrunstep.go b/betathreadrunstep.go
index e383e4e..59779b5 100644
--- a/betathreadrunstep.go
+++ b/betathreadrunstep.go
@@ -1777,7 +1777,7 @@ type BetaThreadRunStepListParams struct {
// `url.Values`.
func (r BetaThreadRunStepListParams) URLQuery() (v url.Values) {
return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
- ArrayFormat: apiquery.ArrayQueryFormatComma,
+ ArrayFormat: apiquery.ArrayQueryFormatBrackets,
NestedFormat: apiquery.NestedQueryFormatBrackets,
})
}
diff --git a/betavectorstore.go b/betavectorstore.go
index be6dcad..391387f 100644
--- a/betavectorstore.go
+++ b/betavectorstore.go
@@ -546,7 +546,7 @@ type BetaVectorStoreListParams struct {
// `url.Values`.
func (r BetaVectorStoreListParams) URLQuery() (v url.Values) {
return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
- ArrayFormat: apiquery.ArrayQueryFormatComma,
+ ArrayFormat: apiquery.ArrayQueryFormatBrackets,
NestedFormat: apiquery.NestedQueryFormatBrackets,
})
}
diff --git a/betavectorstorefile.go b/betavectorstorefile.go
index b04e15b..1b46000 100644
--- a/betavectorstorefile.go
+++ b/betavectorstorefile.go
@@ -195,15 +195,14 @@ func (r vectorStoreFileLastErrorJSON) RawJSON() string {
type VectorStoreFileLastErrorCode string
const (
- VectorStoreFileLastErrorCodeInternalError VectorStoreFileLastErrorCode = "internal_error"
- VectorStoreFileLastErrorCodeFileNotFound VectorStoreFileLastErrorCode = "file_not_found"
- VectorStoreFileLastErrorCodeParsingError VectorStoreFileLastErrorCode = "parsing_error"
- VectorStoreFileLastErrorCodeUnhandledMimeType VectorStoreFileLastErrorCode = "unhandled_mime_type"
+ VectorStoreFileLastErrorCodeServerError VectorStoreFileLastErrorCode = "server_error"
+ VectorStoreFileLastErrorCodeUnsupportedFile VectorStoreFileLastErrorCode = "unsupported_file"
+ VectorStoreFileLastErrorCodeInvalidFile VectorStoreFileLastErrorCode = "invalid_file"
)
func (r VectorStoreFileLastErrorCode) IsKnown() bool {
switch r {
- case VectorStoreFileLastErrorCodeInternalError, VectorStoreFileLastErrorCodeFileNotFound, VectorStoreFileLastErrorCodeParsingError, VectorStoreFileLastErrorCodeUnhandledMimeType:
+ case VectorStoreFileLastErrorCodeServerError, VectorStoreFileLastErrorCodeUnsupportedFile, VectorStoreFileLastErrorCodeInvalidFile:
return true
}
return false
@@ -629,7 +628,7 @@ type BetaVectorStoreFileListParams struct {
// `url.Values`.
func (r BetaVectorStoreFileListParams) URLQuery() (v url.Values) {
return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
- ArrayFormat: apiquery.ArrayQueryFormatComma,
+ ArrayFormat: apiquery.ArrayQueryFormatBrackets,
NestedFormat: apiquery.NestedQueryFormatBrackets,
})
}
diff --git a/betavectorstorefilebatch.go b/betavectorstorefilebatch.go
index 12047a4..6545ffd 100644
--- a/betavectorstorefilebatch.go
+++ b/betavectorstorefilebatch.go
@@ -374,7 +374,7 @@ type BetaVectorStoreFileBatchListFilesParams struct {
// as `url.Values`.
func (r BetaVectorStoreFileBatchListFilesParams) URLQuery() (v url.Values) {
return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
- ArrayFormat: apiquery.ArrayQueryFormatComma,
+ ArrayFormat: apiquery.ArrayQueryFormatBrackets,
NestedFormat: apiquery.NestedQueryFormatBrackets,
})
}
diff --git a/chat.go b/chat.go
index 404be7a..e20be1e 100644
--- a/chat.go
+++ b/chat.go
@@ -31,6 +31,7 @@ type ChatModel = string
const (
ChatModelGPT4o ChatModel = "gpt-4o"
+ ChatModelGPT4o2024_08_06 ChatModel = "gpt-4o-2024-08-06"
ChatModelGPT4o2024_05_13 ChatModel = "gpt-4o-2024-05-13"
ChatModelGPT4oMini ChatModel = "gpt-4o-mini"
ChatModelGPT4oMini2024_07_18 ChatModel = "gpt-4o-mini-2024-07-18"
diff --git a/chatcompletion.go b/chatcompletion.go
index 4dcd393..664eae4 100644
--- a/chatcompletion.go
+++ b/chatcompletion.go
@@ -166,7 +166,9 @@ func (r ChatCompletionChoicesFinishReason) IsKnown() bool {
// Log probability information for the choice.
type ChatCompletionChoicesLogprobs struct {
// A list of message content tokens with log probability information.
- Content []ChatCompletionTokenLogprob `json:"content,required,nullable"`
+ Content []ChatCompletionTokenLogprob `json:"content,required,nullable"`
+ // A list of message refusal tokens with log probability information.
+ Refusal []ChatCompletionTokenLogprob `json:"refusal,required,nullable"`
JSON chatCompletionChoicesLogprobsJSON `json:"-"`
}
@@ -174,6 +176,7 @@ type ChatCompletionChoicesLogprobs struct {
// [ChatCompletionChoicesLogprobs]
type chatCompletionChoicesLogprobsJSON struct {
Content apijson.Field
+ Refusal apijson.Field
raw string
ExtraFields map[string]apijson.Field
}
@@ -223,13 +226,15 @@ type ChatCompletionAssistantMessageParam struct {
Role param.Field[ChatCompletionAssistantMessageParamRole] `json:"role,required"`
// The contents of the assistant message. Required unless `tool_calls` or
// `function_call` is specified.
- Content param.Field[string] `json:"content"`
+ Content param.Field[ChatCompletionAssistantMessageParamContentUnion] `json:"content"`
// Deprecated and replaced by `tool_calls`. The name and arguments of a function
// that should be called, as generated by the model.
FunctionCall param.Field[ChatCompletionAssistantMessageParamFunctionCall] `json:"function_call"`
// An optional name for the participant. Provides the model information to
// differentiate between participants of the same role.
Name param.Field[string] `json:"name"`
+ // The refusal message by the assistant.
+ Refusal param.Field[string] `json:"refusal"`
// The tool calls generated by the model, such as function calls.
ToolCalls param.Field[[]ChatCompletionMessageToolCallParam] `json:"tool_calls"`
}
@@ -255,6 +260,59 @@ func (r ChatCompletionAssistantMessageParamRole) IsKnown() bool {
return false
}
+// The contents of the assistant message. Required unless `tool_calls` or
+// `function_call` is specified.
+//
+// Satisfied by [shared.UnionString],
+// [ChatCompletionAssistantMessageParamContentArrayOfContentParts].
+type ChatCompletionAssistantMessageParamContentUnion interface {
+ ImplementsChatCompletionAssistantMessageParamContentUnion()
+}
+
+type ChatCompletionAssistantMessageParamContentArrayOfContentParts []ChatCompletionAssistantMessageParamContentArrayOfContentPartsUnionItem
+
+func (r ChatCompletionAssistantMessageParamContentArrayOfContentParts) ImplementsChatCompletionAssistantMessageParamContentUnion() {
+}
+
+type ChatCompletionAssistantMessageParamContentArrayOfContentPart struct {
+ // The type of the content part.
+ Type param.Field[ChatCompletionAssistantMessageParamContentArrayOfContentPartsType] `json:"type,required"`
+ // The text content.
+ Text param.Field[string] `json:"text"`
+ // The refusal message generated by the model.
+ Refusal param.Field[string] `json:"refusal"`
+}
+
+func (r ChatCompletionAssistantMessageParamContentArrayOfContentPart) MarshalJSON() (data []byte, err error) {
+ return apijson.MarshalRoot(r)
+}
+
+func (r ChatCompletionAssistantMessageParamContentArrayOfContentPart) implementsChatCompletionAssistantMessageParamContentArrayOfContentPartsUnionItem() {
+}
+
+// Satisfied by [ChatCompletionContentPartTextParam],
+// [ChatCompletionContentPartRefusalParam],
+// [ChatCompletionAssistantMessageParamContentArrayOfContentPart].
+type ChatCompletionAssistantMessageParamContentArrayOfContentPartsUnionItem interface {
+ implementsChatCompletionAssistantMessageParamContentArrayOfContentPartsUnionItem()
+}
+
+// The type of the content part.
+type ChatCompletionAssistantMessageParamContentArrayOfContentPartsType string
+
+const (
+ ChatCompletionAssistantMessageParamContentArrayOfContentPartsTypeText ChatCompletionAssistantMessageParamContentArrayOfContentPartsType = "text"
+ ChatCompletionAssistantMessageParamContentArrayOfContentPartsTypeRefusal ChatCompletionAssistantMessageParamContentArrayOfContentPartsType = "refusal"
+)
+
+func (r ChatCompletionAssistantMessageParamContentArrayOfContentPartsType) IsKnown() bool {
+ switch r {
+ case ChatCompletionAssistantMessageParamContentArrayOfContentPartsTypeText, ChatCompletionAssistantMessageParamContentArrayOfContentPartsTypeRefusal:
+ return true
+ }
+ return false
+}
+
// Deprecated and replaced by `tool_calls`. The name and arguments of a function
// that should be called, as generated by the model.
type ChatCompletionAssistantMessageParamFunctionCall struct {
@@ -368,6 +426,8 @@ type ChatCompletionChunkChoicesDelta struct {
// Deprecated and replaced by `tool_calls`. The name and arguments of a function
// that should be called, as generated by the model.
FunctionCall ChatCompletionChunkChoicesDeltaFunctionCall `json:"function_call"`
+ // The refusal message generated by the model.
+ Refusal string `json:"refusal,nullable"`
// The role of the author of this message.
Role ChatCompletionChunkChoicesDeltaRole `json:"role"`
ToolCalls []ChatCompletionChunkChoicesDeltaToolCall `json:"tool_calls"`
@@ -379,6 +439,7 @@ type ChatCompletionChunkChoicesDelta struct {
type chatCompletionChunkChoicesDeltaJSON struct {
Content apijson.Field
FunctionCall apijson.Field
+ Refusal apijson.Field
Role apijson.Field
ToolCalls apijson.Field
raw string
@@ -540,7 +601,9 @@ func (r ChatCompletionChunkChoicesFinishReason) IsKnown() bool {
// Log probability information for the choice.
type ChatCompletionChunkChoicesLogprobs struct {
// A list of message content tokens with log probability information.
- Content []ChatCompletionTokenLogprob `json:"content,required,nullable"`
+ Content []ChatCompletionTokenLogprob `json:"content,required,nullable"`
+ // A list of message refusal tokens with log probability information.
+ Refusal []ChatCompletionTokenLogprob `json:"refusal,required,nullable"`
JSON chatCompletionChunkChoicesLogprobsJSON `json:"-"`
}
@@ -548,6 +611,7 @@ type ChatCompletionChunkChoicesLogprobs struct {
// [ChatCompletionChunkChoicesLogprobs]
type chatCompletionChunkChoicesLogprobsJSON struct {
Content apijson.Field
+ Refusal apijson.Field
raw string
ExtraFields map[string]apijson.Field
}
@@ -685,6 +749,38 @@ func (r ChatCompletionContentPartImageType) IsKnown() bool {
return false
}
+type ChatCompletionContentPartRefusalParam struct {
+ // The refusal message generated by the model.
+ Refusal param.Field[string] `json:"refusal,required"`
+ // The type of the content part.
+ Type param.Field[ChatCompletionContentPartRefusalType] `json:"type,required"`
+}
+
+func (r ChatCompletionContentPartRefusalParam) MarshalJSON() (data []byte, err error) {
+ return apijson.MarshalRoot(r)
+}
+
+func (r ChatCompletionContentPartRefusalParam) implementsChatCompletionAssistantMessageParamContentArrayOfContentPartsUnionItem() {
+}
+
+func (r ChatCompletionContentPartRefusalParam) implementsChatCompletionAssistantMessageParamContentArrayOfContentPartsUnion() {
+}
+
+// The type of the content part.
+type ChatCompletionContentPartRefusalType string
+
+const (
+ ChatCompletionContentPartRefusalTypeRefusal ChatCompletionContentPartRefusalType = "refusal"
+)
+
+func (r ChatCompletionContentPartRefusalType) IsKnown() bool {
+ switch r {
+ case ChatCompletionContentPartRefusalTypeRefusal:
+ return true
+ }
+ return false
+}
+
type ChatCompletionContentPartTextParam struct {
// The text content.
Text param.Field[string] `json:"text,required"`
@@ -696,8 +792,14 @@ func (r ChatCompletionContentPartTextParam) MarshalJSON() (data []byte, err erro
return apijson.MarshalRoot(r)
}
+func (r ChatCompletionContentPartTextParam) implementsChatCompletionAssistantMessageParamContentArrayOfContentPartsUnionItem() {
+}
+
func (r ChatCompletionContentPartTextParam) implementsChatCompletionContentPartUnionParam() {}
+func (r ChatCompletionContentPartTextParam) implementsChatCompletionAssistantMessageParamContentArrayOfContentPartsUnion() {
+}
+
// The type of the content part.
type ChatCompletionContentPartTextType string
@@ -760,6 +862,8 @@ func (r ChatCompletionFunctionMessageParamRole) IsKnown() bool {
type ChatCompletionMessage struct {
// The contents of the message.
Content string `json:"content,required,nullable"`
+ // The refusal message generated by the model.
+ Refusal string `json:"refusal,required,nullable"`
// The role of the author of this message.
Role ChatCompletionMessageRole `json:"role,required"`
// Deprecated and replaced by `tool_calls`. The name and arguments of a function
@@ -774,6 +878,7 @@ type ChatCompletionMessage struct {
// [ChatCompletionMessage]
type chatCompletionMessageJSON struct {
Content apijson.Field
+ Refusal apijson.Field
Role apijson.Field
FunctionCall apijson.Field
ToolCalls apijson.Field
@@ -840,7 +945,9 @@ type ChatCompletionMessageParam struct {
Role param.Field[ChatCompletionMessageParamRole] `json:"role,required"`
// An optional name for the participant. Provides the model information to
// differentiate between participants of the same role.
- Name param.Field[string] `json:"name"`
+ Name param.Field[string] `json:"name"`
+ // The refusal message by the assistant.
+ Refusal param.Field[string] `json:"refusal"`
ToolCalls param.Field[interface{}] `json:"tool_calls,required"`
FunctionCall param.Field[interface{}] `json:"function_call,required"`
// Tool call that this message is responding to.
@@ -1033,7 +1140,7 @@ func (r ChatCompletionStreamOptionsParam) MarshalJSON() (data []byte, err error)
type ChatCompletionSystemMessageParam struct {
// The contents of the system message.
- Content param.Field[string] `json:"content,required"`
+ Content param.Field[ChatCompletionSystemMessageParamContentUnion] `json:"content,required"`
// The role of the messages author, in this case `system`.
Role param.Field[ChatCompletionSystemMessageParamRole] `json:"role,required"`
// An optional name for the participant. Provides the model information to
@@ -1047,6 +1154,19 @@ func (r ChatCompletionSystemMessageParam) MarshalJSON() (data []byte, err error)
func (r ChatCompletionSystemMessageParam) implementsChatCompletionMessageParamUnion() {}
+// The contents of the system message.
+//
+// Satisfied by [shared.UnionString],
+// [ChatCompletionSystemMessageParamContentArrayOfContentParts].
+type ChatCompletionSystemMessageParamContentUnion interface {
+ ImplementsChatCompletionSystemMessageParamContentUnion()
+}
+
+type ChatCompletionSystemMessageParamContentArrayOfContentParts []ChatCompletionContentPartTextParam
+
+func (r ChatCompletionSystemMessageParamContentArrayOfContentParts) ImplementsChatCompletionSystemMessageParamContentUnion() {
+}
+
// The role of the messages author, in this case `system`.
type ChatCompletionSystemMessageParamRole string
@@ -1197,7 +1317,7 @@ func (r ChatCompletionToolChoiceOptionString) implementsChatCompletionToolChoice
type ChatCompletionToolMessageParam struct {
// The contents of the tool message.
- Content param.Field[string] `json:"content,required"`
+ Content param.Field[ChatCompletionToolMessageParamContentUnion] `json:"content,required"`
// The role of the messages author, in this case `tool`.
Role param.Field[ChatCompletionToolMessageParamRole] `json:"role,required"`
// Tool call that this message is responding to.
@@ -1210,6 +1330,19 @@ func (r ChatCompletionToolMessageParam) MarshalJSON() (data []byte, err error) {
func (r ChatCompletionToolMessageParam) implementsChatCompletionMessageParamUnion() {}
+// The contents of the tool message.
+//
+// Satisfied by [shared.UnionString],
+// [ChatCompletionToolMessageParamContentArrayOfContentParts].
+type ChatCompletionToolMessageParamContentUnion interface {
+ ImplementsChatCompletionToolMessageParamContentUnion()
+}
+
+type ChatCompletionToolMessageParamContentArrayOfContentParts []ChatCompletionContentPartTextParam
+
+func (r ChatCompletionToolMessageParamContentArrayOfContentParts) ImplementsChatCompletionToolMessageParamContentUnion() {
+}
+
// The role of the messages author, in this case `tool`.
type ChatCompletionToolMessageParamRole string
@@ -1334,6 +1467,8 @@ type ChatCompletionNewParams struct {
// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
PresencePenalty param.Field[float64] `json:"presence_penalty"`
// An object specifying the format that the model must output. Compatible with
+ // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ // [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
//
@@ -1347,7 +1482,7 @@ type ChatCompletionNewParams struct {
// the message content may be partially cut off if `finish_reason="length"`, which
// indicates the generation exceeded `max_tokens` or the conversation exceeded the
// max context length.
- ResponseFormat param.Field[ChatCompletionNewParamsResponseFormat] `json:"response_format"`
+ ResponseFormat param.Field[ChatCompletionNewParamsResponseFormatUnion] `json:"response_format"`
// This feature is in Beta. If specified, our system will make a best effort to
// sample deterministically, such that repeated requests with the same `seed` and
// parameters should return the same result. Determinism is not guaranteed, and you
@@ -1470,6 +1605,8 @@ func (r ChatCompletionNewParamsFunction) MarshalJSON() (data []byte, err error)
}
// An object specifying the format that the model must output. Compatible with
+// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
//
@@ -1484,25 +1621,54 @@ func (r ChatCompletionNewParamsFunction) MarshalJSON() (data []byte, err error)
// indicates the generation exceeded `max_tokens` or the conversation exceeded the
// max context length.
type ChatCompletionNewParamsResponseFormat struct {
- // Must be one of `text` or `json_object`.
- Type param.Field[ChatCompletionNewParamsResponseFormatType] `json:"type"`
+ // The type of response format being defined: `text`
+ Type param.Field[ChatCompletionNewParamsResponseFormatType] `json:"type,required"`
+ JSONSchema param.Field[interface{}] `json:"json_schema,required"`
}
func (r ChatCompletionNewParamsResponseFormat) MarshalJSON() (data []byte, err error) {
return apijson.MarshalRoot(r)
}
-// Must be one of `text` or `json_object`.
+func (r ChatCompletionNewParamsResponseFormat) ImplementsChatCompletionNewParamsResponseFormatUnion() {
+}
+
+// An object specifying the format that the model must output. Compatible with
+// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
+// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+//
+// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+// message the model generates is valid JSON.
+//
+// **Important:** when using JSON mode, you **must** also instruct the model to
+// produce JSON yourself via a system or user message. Without this, the model may
+// generate an unending stream of whitespace until the generation reaches the token
+// limit, resulting in a long-running and seemingly "stuck" request. Also note that
+// the message content may be partially cut off if `finish_reason="length"`, which
+// indicates the generation exceeded `max_tokens` or the conversation exceeded the
+// max context length.
+//
+// Satisfied by [shared.ResponseFormatTextParam],
+// [shared.ResponseFormatJSONObjectParam], [shared.ResponseFormatJSONSchemaParam],
+// [ChatCompletionNewParamsResponseFormat].
+type ChatCompletionNewParamsResponseFormatUnion interface {
+ ImplementsChatCompletionNewParamsResponseFormatUnion()
+}
+
+// The type of response format being defined: `text`, `json_object`, or `json_schema`
type ChatCompletionNewParamsResponseFormatType string
const (
ChatCompletionNewParamsResponseFormatTypeText ChatCompletionNewParamsResponseFormatType = "text"
ChatCompletionNewParamsResponseFormatTypeJSONObject ChatCompletionNewParamsResponseFormatType = "json_object"
+ ChatCompletionNewParamsResponseFormatTypeJSONSchema ChatCompletionNewParamsResponseFormatType = "json_schema"
)
func (r ChatCompletionNewParamsResponseFormatType) IsKnown() bool {
switch r {
- case ChatCompletionNewParamsResponseFormatTypeText, ChatCompletionNewParamsResponseFormatTypeJSONObject:
+ case ChatCompletionNewParamsResponseFormatTypeText, ChatCompletionNewParamsResponseFormatTypeJSONObject, ChatCompletionNewParamsResponseFormatTypeJSONSchema:
return true
}
return false
diff --git a/chatcompletion_test.go b/chatcompletion_test.go
index e93623d..6c0be39 100644
--- a/chatcompletion_test.go
+++ b/chatcompletion_test.go
@@ -28,7 +28,7 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) {
)
_, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionSystemMessageParam{
- Content: openai.F("content"),
+ Content: openai.F[openai.ChatCompletionSystemMessageParamContentUnion](shared.UnionString("string")),
Role: openai.F(openai.ChatCompletionSystemMessageParamRoleSystem),
Name: openai.F("name"),
}}),
@@ -50,8 +50,8 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) {
N: openai.F(int64(1)),
ParallelToolCalls: openai.F(true),
PresencePenalty: openai.F(-2.000000),
- ResponseFormat: openai.F(openai.ChatCompletionNewParamsResponseFormat{
- Type: openai.F(openai.ChatCompletionNewParamsResponseFormatTypeJSONObject),
+ ResponseFormat: openai.F[openai.ChatCompletionNewParamsResponseFormatUnion](shared.ResponseFormatTextParam{
+ Type: openai.F(shared.ResponseFormatTextTypeText),
}),
Seed: openai.F(int64(-9007199254740991)),
ServiceTier: openai.F(openai.ChatCompletionNewParamsServiceTierAuto),
@@ -69,6 +69,7 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) {
Parameters: openai.F(shared.FunctionParameters{
"foo": "bar",
}),
+ Strict: openai.F(true),
}),
}, {
Type: openai.F(openai.ChatCompletionToolTypeFunction),
@@ -78,6 +79,7 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) {
Parameters: openai.F(shared.FunctionParameters{
"foo": "bar",
}),
+ Strict: openai.F(true),
}),
}, {
Type: openai.F(openai.ChatCompletionToolTypeFunction),
@@ -87,6 +89,7 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) {
Parameters: openai.F(shared.FunctionParameters{
"foo": "bar",
}),
+ Strict: openai.F(true),
}),
}}),
TopLogprobs: openai.F(int64(0)),
diff --git a/file.go b/file.go
index 76fda1e..25d90bb 100644
--- a/file.go
+++ b/file.go
@@ -331,7 +331,7 @@ type FileListParams struct {
// URLQuery serializes [FileListParams]'s query parameters as `url.Values`.
func (r FileListParams) URLQuery() (v url.Values) {
return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
- ArrayFormat: apiquery.ArrayQueryFormatComma,
+ ArrayFormat: apiquery.ArrayQueryFormatBrackets,
NestedFormat: apiquery.NestedQueryFormatBrackets,
})
}
diff --git a/finetuningjob.go b/finetuningjob.go
index fe162ed..be03054 100644
--- a/finetuningjob.go
+++ b/finetuningjob.go
@@ -498,7 +498,7 @@ func (r fineTuningJobWandbIntegrationJSON) RawJSON() string {
type FineTuningJobNewParams struct {
// The name of the model to fine-tune. You can select one of the
- // [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
+ // [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
Model param.Field[FineTuningJobNewParamsModel] `json:"model,required"`
// The ID of an uploaded file that contains training data.
//
@@ -528,7 +528,7 @@ type FineTuningJobNewParams struct {
// name.
//
// For example, a `suffix` of "custom-model-name" would produce a model name like
- // `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
+ // `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
Suffix param.Field[string] `json:"suffix"`
// The ID of an uploaded file that contains validation data.
//
@@ -555,11 +555,12 @@ const (
FineTuningJobNewParamsModelBabbage002 FineTuningJobNewParamsModel = "babbage-002"
FineTuningJobNewParamsModelDavinci002 FineTuningJobNewParamsModel = "davinci-002"
FineTuningJobNewParamsModelGPT3_5Turbo FineTuningJobNewParamsModel = "gpt-3.5-turbo"
+ FineTuningJobNewParamsModelGPT4oMini FineTuningJobNewParamsModel = "gpt-4o-mini"
)
func (r FineTuningJobNewParamsModel) IsKnown() bool {
switch r {
- case FineTuningJobNewParamsModelBabbage002, FineTuningJobNewParamsModelDavinci002, FineTuningJobNewParamsModelGPT3_5Turbo:
+ case FineTuningJobNewParamsModelBabbage002, FineTuningJobNewParamsModelDavinci002, FineTuningJobNewParamsModelGPT3_5Turbo, FineTuningJobNewParamsModelGPT4oMini:
return true
}
return false
@@ -727,7 +728,7 @@ type FineTuningJobListParams struct {
// `url.Values`.
func (r FineTuningJobListParams) URLQuery() (v url.Values) {
return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
- ArrayFormat: apiquery.ArrayQueryFormatComma,
+ ArrayFormat: apiquery.ArrayQueryFormatBrackets,
NestedFormat: apiquery.NestedQueryFormatBrackets,
})
}
@@ -743,7 +744,7 @@ type FineTuningJobListEventsParams struct {
// `url.Values`.
func (r FineTuningJobListEventsParams) URLQuery() (v url.Values) {
return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
- ArrayFormat: apiquery.ArrayQueryFormatComma,
+ ArrayFormat: apiquery.ArrayQueryFormatBrackets,
NestedFormat: apiquery.NestedQueryFormatBrackets,
})
}
diff --git a/finetuningjobcheckpoint.go b/finetuningjobcheckpoint.go
index 6279e24..4ca6383 100644
--- a/finetuningjobcheckpoint.go
+++ b/finetuningjobcheckpoint.go
@@ -165,7 +165,7 @@ type FineTuningJobCheckpointListParams struct {
// `url.Values`.
func (r FineTuningJobCheckpointListParams) URLQuery() (v url.Values) {
return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
- ArrayFormat: apiquery.ArrayQueryFormatComma,
+ ArrayFormat: apiquery.ArrayQueryFormatBrackets,
NestedFormat: apiquery.NestedQueryFormatBrackets,
})
}
diff --git a/model_test.go b/model_test.go
index 205b5d0..194b16f 100644
--- a/model_test.go
+++ b/model_test.go
@@ -25,7 +25,7 @@ func TestModelGet(t *testing.T) {
option.WithBaseURL(baseURL),
option.WithAPIKey("My API Key"),
)
- _, err := client.Models.Get(context.TODO(), "gpt-3.5-turbo")
+ _, err := client.Models.Get(context.TODO(), "gpt-4o-mini")
if err != nil {
var apierr *openai.Error
if errors.As(err, &apierr) {
@@ -69,7 +69,7 @@ func TestModelDelete(t *testing.T) {
option.WithBaseURL(baseURL),
option.WithAPIKey("My API Key"),
)
- _, err := client.Models.Delete(context.TODO(), "ft:gpt-3.5-turbo:acemeco:suffix:abc123")
+ _, err := client.Models.Delete(context.TODO(), "ft:gpt-4o-mini:acemeco:suffix:abc123")
if err != nil {
var apierr *openai.Error
if errors.As(err, &apierr) {
diff --git a/shared/shared.go b/shared/shared.go
index 4c0f3ca..18331e2 100644
--- a/shared/shared.go
+++ b/shared/shared.go
@@ -47,8 +47,14 @@ type FunctionDefinition struct {
// documentation about the format.
//
// Omitting `parameters` defines a function with an empty parameter list.
- Parameters FunctionParameters `json:"parameters"`
- JSON functionDefinitionJSON `json:"-"`
+ Parameters FunctionParameters `json:"parameters"`
+ // Whether to enable strict schema adherence when generating the function call. If
+ // set to true, the model will follow the exact schema defined in the `parameters`
+ // field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
+ // more about Structured Outputs in the
+	// [function calling guide](https://platform.openai.com/docs/guides/function-calling).
+ Strict bool `json:"strict,nullable"`
+ JSON functionDefinitionJSON `json:"-"`
}
// functionDefinitionJSON contains the JSON metadata for the struct
@@ -57,6 +63,7 @@ type functionDefinitionJSON struct {
Name apijson.Field
Description apijson.Field
Parameters apijson.Field
+ Strict apijson.Field
raw string
ExtraFields map[string]apijson.Field
}
@@ -84,6 +91,12 @@ type FunctionDefinitionParam struct {
//
// Omitting `parameters` defines a function with an empty parameter list.
Parameters param.Field[FunctionParameters] `json:"parameters"`
+ // Whether to enable strict schema adherence when generating the function call. If
+ // set to true, the model will follow the exact schema defined in the `parameters`
+ // field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
+ // more about Structured Outputs in the
+	// [function calling guide](https://platform.openai.com/docs/guides/function-calling).
+ Strict param.Field[bool] `json:"strict"`
}
func (r FunctionDefinitionParam) MarshalJSON() (data []byte, err error) {
@@ -91,3 +104,103 @@ func (r FunctionDefinitionParam) MarshalJSON() (data []byte, err error) {
}
type FunctionParameters map[string]interface{}
+
+type ResponseFormatJSONObjectParam struct {
+ // The type of response format being defined: `json_object`
+ Type param.Field[ResponseFormatJSONObjectType] `json:"type,required"`
+}
+
+func (r ResponseFormatJSONObjectParam) MarshalJSON() (data []byte, err error) {
+ return apijson.MarshalRoot(r)
+}
+
+func (r ResponseFormatJSONObjectParam) ImplementsChatCompletionNewParamsResponseFormatUnion() {}
+
+// The type of response format being defined: `json_object`
+type ResponseFormatJSONObjectType string
+
+const (
+ ResponseFormatJSONObjectTypeJSONObject ResponseFormatJSONObjectType = "json_object"
+)
+
+func (r ResponseFormatJSONObjectType) IsKnown() bool {
+ switch r {
+ case ResponseFormatJSONObjectTypeJSONObject:
+ return true
+ }
+ return false
+}
+
+type ResponseFormatJSONSchemaParam struct {
+ JSONSchema param.Field[ResponseFormatJSONSchemaJSONSchemaParam] `json:"json_schema,required"`
+ // The type of response format being defined: `json_schema`
+ Type param.Field[ResponseFormatJSONSchemaType] `json:"type,required"`
+}
+
+func (r ResponseFormatJSONSchemaParam) MarshalJSON() (data []byte, err error) {
+ return apijson.MarshalRoot(r)
+}
+
+func (r ResponseFormatJSONSchemaParam) ImplementsChatCompletionNewParamsResponseFormatUnion() {}
+
+type ResponseFormatJSONSchemaJSONSchemaParam struct {
+ // The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores
+ // and dashes, with a maximum length of 64.
+ Name param.Field[string] `json:"name,required"`
+ // A description of what the response format is for, used by the model to determine
+ // how to respond in the format.
+ Description param.Field[string] `json:"description"`
+ // The schema for the response format, described as a JSON Schema object.
+ Schema param.Field[map[string]interface{}] `json:"schema"`
+ // Whether to enable strict schema adherence when generating the output. If set to
+ // true, the model will always follow the exact schema defined in the `schema`
+ // field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+ // learn more, read the
+ // [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ Strict param.Field[bool] `json:"strict"`
+}
+
+func (r ResponseFormatJSONSchemaJSONSchemaParam) MarshalJSON() (data []byte, err error) {
+ return apijson.MarshalRoot(r)
+}
+
+// The type of response format being defined: `json_schema`
+type ResponseFormatJSONSchemaType string
+
+const (
+ ResponseFormatJSONSchemaTypeJSONSchema ResponseFormatJSONSchemaType = "json_schema"
+)
+
+func (r ResponseFormatJSONSchemaType) IsKnown() bool {
+ switch r {
+ case ResponseFormatJSONSchemaTypeJSONSchema:
+ return true
+ }
+ return false
+}
+
+type ResponseFormatTextParam struct {
+ // The type of response format being defined: `text`
+ Type param.Field[ResponseFormatTextType] `json:"type,required"`
+}
+
+func (r ResponseFormatTextParam) MarshalJSON() (data []byte, err error) {
+ return apijson.MarshalRoot(r)
+}
+
+func (r ResponseFormatTextParam) ImplementsChatCompletionNewParamsResponseFormatUnion() {}
+
+// The type of response format being defined: `text`
+type ResponseFormatTextType string
+
+const (
+ ResponseFormatTextTypeText ResponseFormatTextType = "text"
+)
+
+func (r ResponseFormatTextType) IsKnown() bool {
+ switch r {
+ case ResponseFormatTextTypeText:
+ return true
+ }
+ return false
+}
diff --git a/shared/union.go b/shared/union.go
index cea8002..7bab9fa 100644
--- a/shared/union.go
+++ b/shared/union.go
@@ -6,7 +6,10 @@ type UnionString string
func (UnionString) ImplementsCompletionNewParamsPromptUnion() {}
func (UnionString) ImplementsCompletionNewParamsStopUnion() {}
+func (UnionString) ImplementsChatCompletionAssistantMessageParamContentUnion() {}
+func (UnionString) ImplementsChatCompletionSystemMessageParamContentUnion() {}
func (UnionString) ImplementsChatCompletionUserMessageParamContentUnion() {}
+func (UnionString) ImplementsChatCompletionToolMessageParamContentUnion() {}
func (UnionString) ImplementsChatCompletionNewParamsStopUnion() {}
func (UnionString) ImplementsEmbeddingNewParamsInputUnion() {}
func (UnionString) ImplementsModerationNewParamsInputUnion() {}