diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index c476280..ba6c348 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "0.0.1-alpha.0"
+  ".": "0.1.0-alpha.1"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 4e4cb55..ac652c9 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 68
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-77cfff37114bc9f141c7e6107eb5f1b38d8cc99bc3d4ce03a066db2b6b649c69.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4097c2f86beb3f3bb021775cd1dfa240e960caf842aeefc2e08da4dc0851ea79.yml
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..e5b7a13
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,18 @@
+# Changelog
+
+## 0.1.0-alpha.1 (2024-08-06)
+
+Full Changelog: [v0.0.1-alpha.0...v0.1.0-alpha.1](https://github.com/openai/openai-go/compare/v0.0.1-alpha.0...v0.1.0-alpha.1)
+
+### Features
+
+* add azure, examples, and message constructors ([fb2df0f](https://github.com/openai/openai-go/commit/fb2df0fe22002f1826bfaa1cb008c45db375885c))
+* **api:** updates ([#5](https://github.com/openai/openai-go/issues/5)) ([9f525e8](https://github.com/openai/openai-go/commit/9f525e85d8fe13cce2a18a1a48179bc5a6d1f094))
+* extract out `ImageModel`, `AudioModel`, `SpeechModel` ([#3](https://github.com/openai/openai-go/issues/3)) ([f085893](https://github.com/openai/openai-go/commit/f085893d109a9e841d1df13df4c71cae06018758))
+* make enums not nominal ([#4](https://github.com/openai/openai-go/issues/4)) ([9f77005](https://github.com/openai/openai-go/commit/9f77005474b8a38cbfc09f22ec3b81d1de62d3c3))
+* publish ([c329601](https://github.com/openai/openai-go/commit/c329601324226e28ff18d6ccecfdde41cedd3b5a))
+
+
+### Chores
+
+* **internal:** updates ([#2](https://github.com/openai/openai-go/issues/2)) ([5976d8d](https://github.com/openai/openai-go/commit/5976d8d8b9a94cd78e4d86f704137f4b43224a08))
diff --git a/README.md b/README.md
index 0397a06..d3ca018 100644
--- a/README.md
+++ b/README.md
@@ -22,7 +22,7 @@ Or to pin the version:
 
 ```sh
-go get -u 'github.com/openai/openai-go@v0.0.1-alpha.0'
+go get -u 'github.com/openai/openai-go@v0.1.0-alpha.1'
 ```
 
@@ -51,9 +51,10 @@ func main() {
 		option.WithAPIKey("My API Key"), // defaults to os.LookupEnv("OPENAI_API_KEY")
 	)
 	chatCompletion, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
-		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
-			openai.UserMessage("Say this is a test"),
-		}),
+		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{
+			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
+			Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Say this is a test")),
+		}}),
 		Model: openai.F(openai.ChatModelGPT4o),
 	})
 	if err != nil {
@@ -236,9 +237,10 @@ defer cancel()
 client.Chat.Completions.New(
 	ctx,
 	openai.ChatCompletionNewParams{
-		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
-			openai.UserMessage("Say this is a test"),
-		}),
+		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{
+			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
+			Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("How can I list all files in a directory using Python?")),
+		}}),
 		Model: openai.F(openai.ChatModelGPT4o),
 	},
 	// This sets the per-retry timeout
@@ -298,9 +300,10 @@ client := openai.NewClient(
 client.Chat.Completions.New(
 	context.TODO(),
 	openai.ChatCompletionNewParams{
-		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
-			openai.UserMessage("Say this is a test"),
-		}),
+		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{
+			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
+			Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("How can I get the name of the current day in Node.js?")),
+		}}),
 		Model: openai.F(openai.ChatModelGPT4o),
 	},
 	option.WithMaxRetries(5),
@@ -393,44 +396,6 @@ You may also replace the default `http.Client` with
 accepted (this overwrites any previous client) and receives requests after any
 middleware has been applied.
 
-## Microsoft Azure OpenAI
-
-To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the option.RequestOption functions in the `azure` package.
-
-```go
-package main
-
-import (
-	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
-	"github.com/openai/openai-go"
-	"github.com/openai/openai-go/azure"
-	"github.com/openai/openai-go/option"
-)
-
-func main() {
-	const azureOpenAIEndpoint = "https://.openai.azure.com"
-
-	// The latest API versions, including previews, can be found here:
-	// https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
-	const azureOpenAIAPIVersion = "2024-06-01"
-
-	tokenCredential, err := azidentity.NewDefaultAzureCredential(nil)
-
-	if err != nil {
-		fmt.Printf("Failed to create the DefaultAzureCredential: %s", err)
-		os.Exit(1)
-	}
-
-	client := openai.NewClient(
-		azure.WithEndpoint(azureOpenAIEndpoint, azureOpenAIAPIVersion),
-
-		// Choose between authenticating using a TokenCredential or an API Key
-		azure.WithTokenCredential(tokenCredential),
-		// or azure.WithAPIKey(azureOpenAIAPIKey),
-	)
-}
-```
-
 ## Semantic versioning
 
 This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions:
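Put together, the new README pattern reads as follows as a complete program - a minimal sketch mirroring the updated example above (the printed field is illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/shared"
)

func main() {
	client := openai.NewClient(
		option.WithAPIKey("My API Key"), // defaults to os.LookupEnv("OPENAI_API_KEY")
	)
	// The user message is built explicitly as a union member, as in the
	// updated README examples, rather than via the openai.UserMessage helper.
	chatCompletion, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{
			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
			Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Say this is a test")),
		}}),
		Model: openai.F(openai.ChatModelGPT4o),
	})
	if err != nil {
		panic(err.Error())
	}
	fmt.Println(chatCompletion.Choices[0].Message.Content)
}
```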
diff --git a/aliases.go b/aliases.go
index b84f85f..26737f4 100644
--- a/aliases.go
+++ b/aliases.go
@@ -28,3 +28,39 @@ type FunctionDefinitionParam = shared.FunctionDefinitionParam
 //
 // This is an alias to an internal type.
 type FunctionParameters = shared.FunctionParameters
+
+// This is an alias to an internal type.
+type ResponseFormatJSONObjectParam = shared.ResponseFormatJSONObjectParam
+
+// The type of response format being defined: `json_object`
+//
+// This is an alias to an internal type.
+type ResponseFormatJSONObjectType = shared.ResponseFormatJSONObjectType
+
+// This is an alias to an internal value.
+const ResponseFormatJSONObjectTypeJSONObject = shared.ResponseFormatJSONObjectTypeJSONObject
+
+// This is an alias to an internal type.
+type ResponseFormatJSONSchemaParam = shared.ResponseFormatJSONSchemaParam
+
+// This is an alias to an internal type.
+type ResponseFormatJSONSchemaJSONSchemaParam = shared.ResponseFormatJSONSchemaJSONSchemaParam
+
+// The type of response format being defined: `json_schema`
+//
+// This is an alias to an internal type.
+type ResponseFormatJSONSchemaType = shared.ResponseFormatJSONSchemaType
+
+// This is an alias to an internal value.
+const ResponseFormatJSONSchemaTypeJSONSchema = shared.ResponseFormatJSONSchemaTypeJSONSchema
+
+// This is an alias to an internal type.
+type ResponseFormatTextParam = shared.ResponseFormatTextParam
+
+// The type of response format being defined: `text`
+//
+// This is an alias to an internal type.
+type ResponseFormatTextType = shared.ResponseFormatTextType
+
+// This is an alias to an internal value.
+const ResponseFormatTextTypeText = shared.ResponseFormatTextTypeText
diff --git a/api.md b/api.md
index 6ee3cc5..ecec90f 100644
--- a/api.md
+++ b/api.md
@@ -2,6 +2,9 @@
 
 - shared.FunctionDefinitionParam
 - shared.FunctionParameters
+- shared.ResponseFormatJSONObjectParam
+- shared.ResponseFormatJSONSchemaParam
+- shared.ResponseFormatTextParam
 
 # Shared Response Types
 
@@ -34,6 +37,7 @@ Params Types:
 - openai.ChatCompletionAssistantMessageParam
 - openai.ChatCompletionContentPartUnionParam
 - openai.ChatCompletionContentPartImageParam
+- openai.ChatCompletionContentPartRefusalParam
 - openai.ChatCompletionContentPartTextParam
 - openai.ChatCompletionFunctionCallOptionParam
 - openai.ChatCompletionFunctionMessageParam
@@ -87,6 +91,10 @@ Methods:
 
 # Images
 
+Params Types:
+
+- openai.ImageModel
+
 Response Types:
 
 - openai.Image
@@ -100,6 +108,10 @@ Methods:
 
 # Audio
 
+Params Types:
+
+- openai.AudioModel
+
 ## Transcriptions
 
 Response Types:
@@ -122,12 +134,20 @@ Methods:
 
 ## Speech
 
+Params Types:
+
+- openai.SpeechModel
+
 Methods:
 
 - client.Audio.Speech.New(ctx context.Context, body openai.AudioSpeechNewParams) (http.Response, error)
 
 # Moderations
 
+Params Types:
+
+- openai.ModerationModel
+
 Response Types:
 
 - openai.Moderation
@@ -254,16 +274,12 @@ Methods:
 
 Params Types:
 
-- openai.AssistantResponseFormatParam
-- openai.AssistantResponseFormatOptionUnionParam
 - openai.AssistantToolChoiceParam
 - openai.AssistantToolChoiceFunctionParam
 - openai.AssistantToolChoiceOptionUnionParam
 
 Response Types:
 
-- openai.AssistantResponseFormat
-- openai.AssistantResponseFormatOptionUnion
 - openai.AssistantToolChoice
 - openai.AssistantToolChoiceFunction
 - openai.AssistantToolChoiceOptionUnion
@@ -355,6 +371,8 @@ Response Types:
 - openai.MessageDeleted
 - openai.MessageDelta
 - openai.MessageDeltaEvent
+- openai.RefusalContentBlock
+- openai.RefusalDeltaBlock
 - openai.Text
 - openai.TextContentBlock
 - openai.TextDelta
diff --git a/audio.go b/audio.go
index 9ea966d..1d40eff 100644
--- a/audio.go
+++ b/audio.go
@@ -30,3 +30,9 @@ func NewAudioService(opts ...option.RequestOption) (r *AudioService) {
 	r.Speech = NewAudioSpeechService(opts...)
 	return
 }
+
+type AudioModel = string
+
+const (
+	AudioModelWhisper1 AudioModel = "whisper-1"
+)
diff --git a/audiospeech.go b/audiospeech.go
index 1412ea1..780d957 100644
--- a/audiospeech.go
+++ b/audiospeech.go
@@ -40,12 +40,19 @@ func (r *AudioSpeechService) New(ctx context.Context, body AudioSpeechNewParams,
 	return
 }
 
+type SpeechModel = string
+
+const (
+	SpeechModelTTS1   SpeechModel = "tts-1"
+	SpeechModelTTS1HD SpeechModel = "tts-1-hd"
+)
+
 type AudioSpeechNewParams struct {
 	// The text to generate audio for. The maximum length is 4096 characters.
 	Input param.Field[string] `json:"input,required"`
 	// One of the available [TTS models](https://platform.openai.com/docs/models/tts):
 	// `tts-1` or `tts-1-hd`
-	Model param.Field[AudioSpeechNewParamsModel] `json:"model,required"`
+	Model param.Field[SpeechModel] `json:"model,required"`
 	// The voice to use when generating the audio. Supported voices are `alloy`,
 	// `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
 	// available in the
@@ -63,21 +70,6 @@ func (r AudioSpeechNewParams) MarshalJSON() (data []byte, err error) {
 	return apijson.MarshalRoot(r)
 }
 
-type AudioSpeechNewParamsModel string
-
-const (
-	AudioSpeechNewParamsModelTTS1   AudioSpeechNewParamsModel = "tts-1"
-	AudioSpeechNewParamsModelTTS1HD AudioSpeechNewParamsModel = "tts-1-hd"
-)
-
-func (r AudioSpeechNewParamsModel) IsKnown() bool {
-	switch r {
-	case AudioSpeechNewParamsModelTTS1, AudioSpeechNewParamsModelTTS1HD:
-		return true
-	}
-	return false
-}
-
 // The voice to use when generating the audio. Supported voices are `alloy`,
 // `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
 // available in the
diff --git a/audiospeech_test.go b/audiospeech_test.go
index 14f334a..4672869 100644
--- a/audiospeech_test.go
+++ b/audiospeech_test.go
@@ -28,7 +28,7 @@ func TestAudioSpeechNewWithOptionalParams(t *testing.T) {
 	)
 	resp, err := client.Audio.Speech.New(context.TODO(), openai.AudioSpeechNewParams{
 		Input:          openai.F("input"),
-		Model:          openai.F(openai.AudioSpeechNewParamsModelTTS1),
+		Model:          openai.F(openai.SpeechModelTTS1),
 		Voice:          openai.F(openai.AudioSpeechNewParamsVoiceAlloy),
 		ResponseFormat: openai.F(openai.AudioSpeechNewParamsResponseFormatMP3),
 		Speed:          openai.F(0.250000),
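Note that the endpoint-specific `AudioSpeechNewParamsModel` enum above is replaced by the shared `SpeechModel` alias. A minimal sketch of the call site after this change, assuming a configured `client` as in the README example (the input text is illustrative):

```go
// Text-to-speech with the shared SpeechModel alias
// (previously openai.AudioSpeechNewParamsModelTTS1).
resp, err := client.Audio.Speech.New(context.TODO(), openai.AudioSpeechNewParams{
	Input: openai.F("Hello, world!"), // illustrative input
	Model: openai.F(openai.SpeechModelTTS1),
	Voice: openai.F(openai.AudioSpeechNewParamsVoiceAlloy),
})
if err != nil {
	panic(err.Error())
}
defer resp.Body.Close() // the response body carries the audio bytes
```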
diff --git a/audiotranscription.go b/audiotranscription.go
index df5f27e..3acabe7 100644
--- a/audiotranscription.go
+++ b/audiotranscription.go
@@ -72,7 +72,7 @@ type AudioTranscriptionNewParams struct {
 	File param.Field[io.Reader] `json:"file,required" format:"binary"`
 	// ID of the model to use. Only `whisper-1` (which is powered by our open source
 	// Whisper V2 model) is currently available.
-	Model param.Field[AudioTranscriptionNewParamsModel] `json:"model,required"`
+	Model param.Field[AudioModel] `json:"model,required"`
 	// The language of the input audio. Supplying the input language in
 	// [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
 	// improve accuracy and latency.
@@ -114,20 +114,6 @@ func (r AudioTranscriptionNewParams) MarshalMultipart() (data []byte, contentTyp
 	return buf.Bytes(), writer.FormDataContentType(), nil
 }
 
-type AudioTranscriptionNewParamsModel string
-
-const (
-	AudioTranscriptionNewParamsModelWhisper1 AudioTranscriptionNewParamsModel = "whisper-1"
-)
-
-func (r AudioTranscriptionNewParamsModel) IsKnown() bool {
-	switch r {
-	case AudioTranscriptionNewParamsModelWhisper1:
-		return true
-	}
-	return false
-}
-
 // The format of the transcript output, in one of these options: `json`, `text`,
 // `srt`, `verbose_json`, or `vtt`.
 type AudioTranscriptionNewParamsResponseFormat string
diff --git a/audiotranscription_test.go b/audiotranscription_test.go
index 55ebbc5..9411fdc 100644
--- a/audiotranscription_test.go
+++ b/audiotranscription_test.go
@@ -29,7 +29,7 @@ func TestAudioTranscriptionNewWithOptionalParams(t *testing.T) {
 	)
 	_, err := client.Audio.Transcriptions.New(context.TODO(), openai.AudioTranscriptionNewParams{
 		File:           openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))),
-		Model:          openai.F(openai.AudioTranscriptionNewParamsModelWhisper1),
+		Model:          openai.F(openai.AudioModelWhisper1),
 		Language:       openai.F("language"),
 		Prompt:         openai.F("prompt"),
 		ResponseFormat: openai.F(openai.AudioTranscriptionNewParamsResponseFormatJSON),
diff --git a/audiotranslation.go b/audiotranslation.go
index b50733c..4c81eb1 100644
--- a/audiotranslation.go
+++ b/audiotranslation.go
@@ -69,7 +69,7 @@ type AudioTranslationNewParams struct {
 	File param.Field[io.Reader] `json:"file,required" format:"binary"`
 	// ID of the model to use. Only `whisper-1` (which is powered by our open source
 	// Whisper V2 model) is currently available.
-	Model param.Field[AudioTranslationNewParamsModel] `json:"model,required"`
+	Model param.Field[AudioModel] `json:"model,required"`
 	// An optional text to guide the model's style or continue a previous audio
 	// segment. The
 	// [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
@@ -100,17 +100,3 @@ func (r AudioTranslationNewParams) MarshalMultipart() (data []byte, contentType
 	}
 	return buf.Bytes(), writer.FormDataContentType(), nil
 }
-
-type AudioTranslationNewParamsModel string
-
-const (
-	AudioTranslationNewParamsModelWhisper1 AudioTranslationNewParamsModel = "whisper-1"
-)
-
-func (r AudioTranslationNewParamsModel) IsKnown() bool {
-	switch r {
-	case AudioTranslationNewParamsModelWhisper1:
-		return true
-	}
-	return false
-}
diff --git a/audiotranslation_test.go b/audiotranslation_test.go
index e5634aa..db9681d 100644
--- a/audiotranslation_test.go
+++ b/audiotranslation_test.go
@@ -29,7 +29,7 @@ func TestAudioTranslationNewWithOptionalParams(t *testing.T) {
 	)
 	_, err := client.Audio.Translations.New(context.TODO(), openai.AudioTranslationNewParams{
 		File:           openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))),
-		Model:          openai.F(openai.AudioTranslationNewParamsModelWhisper1),
+		Model:          openai.F(openai.AudioModelWhisper1),
 		Prompt:         openai.F("prompt"),
 		ResponseFormat: openai.F("response_format"),
 		Temperature:    openai.F(0.000000),
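Transcriptions and translations get the same treatment: both now take the shared `AudioModel` alias. A sketch assuming a configured `client`; the file path is illustrative:

```go
// Transcribe a local audio file with the shared AudioModel alias
// (previously openai.AudioTranscriptionNewParamsModelWhisper1).
file, err := os.Open("speech.mp3") // illustrative path
if err != nil {
	panic(err.Error())
}
defer file.Close()

transcription, err := client.Audio.Transcriptions.New(context.TODO(), openai.AudioTranscriptionNewParams{
	File:  openai.F[io.Reader](file),
	Model: openai.F(openai.AudioModelWhisper1),
})
if err != nil {
	panic(err.Error())
}
fmt.Println(transcription.Text)
```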
diff --git a/azure/azure.go b/azure/azure.go
deleted file mode 100644
index 5d3156f..0000000
--- a/azure/azure.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Package azure provides configuration options so you can connect and use Azure OpenAI using the [openai.Client].
-//
-// Typical usage of this package will look like this:
-//
-//	client := openai.NewClient(
-//		azure.WithEndpoint(azureOpenAIEndpoint, azureOpenAIAPIVersion),
-//		azure.WithTokenCredential(azureIdentityTokenCredential),
-//		// or azure.WithAPIKey(azureOpenAIAPIKey),
-//	)
-//
-// Or, if you want to construct a specific service:
-//
-//	client := openai.NewChatCompletionService(
-//		azure.WithEndpoint(azureOpenAIEndpoint, azureOpenAIAPIVersion),
-//		azure.WithTokenCredential(azureIdentityTokenCredential),
-//		// or azure.WithAPIKey(azureOpenAIAPIKey),
-//	)
-package azure
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"io"
-	"mime"
-	"mime/multipart"
-	"net/http"
-	"net/url"
-	"strings"
-
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
-	"github.com/openai/openai-go/internal/requestconfig"
-	"github.com/openai/openai-go/option"
-)
-
-// WithEndpoint configures this client to connect to an Azure OpenAI endpoint.
-//
-//   - endpoint - the Azure OpenAI endpoint to connect to. Ex: https://.openai.azure.com
-//   - apiVersion - the Azure OpenAI API version to target (ex: 2024-06-01). See [Azure OpenAI apiversions] for current API versions. This value cannot be empty.
-//
-// This function should be paired with a call to authenticate, like [azure.WithAPIKey] or [azure.WithTokenCredential], similar to this:
-//
-//	client := openai.NewClient(
-//		azure.WithEndpoint(azureOpenAIEndpoint, azureOpenAIAPIVersion),
-//		azure.WithTokenCredential(azureIdentityTokenCredential),
-//		// or azure.WithAPIKey(azureOpenAIAPIKey),
-//	)
-//
-// [Azure OpenAI apiversions]: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
-func WithEndpoint(endpoint string, apiVersion string) option.RequestOption {
-	if !strings.HasSuffix(endpoint, "/") {
-		endpoint += "/"
-	}
-
-	endpoint += "openai/"
-
-	withQueryAdd := option.WithQueryAdd("api-version", apiVersion)
-	withEndpoint := option.WithBaseURL(endpoint)
-
-	withModelMiddleware := option.WithMiddleware(func(r *http.Request, mn option.MiddlewareNext) (*http.Response, error) {
-		replacementPath, err := getReplacementPathWithDeployment(r)
-
-		if err != nil {
-			return nil, err
-		}
-
-		r.URL.Path = replacementPath
-		return mn(r)
-	})
-
-	return func(rc *requestconfig.RequestConfig) error {
-		if apiVersion == "" {
-			return errors.New("apiVersion is an empty string, but needs to be set. See https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning for details.")
-		}
-
-		if err := withQueryAdd(rc); err != nil {
-			return err
-		}
-
-		if err := withEndpoint(rc); err != nil {
-			return err
-		}
-
-		if err := withModelMiddleware(rc); err != nil {
-			return err
-		}
-
-		return nil
-	}
-}
-
-// WithTokenCredential configures this client to authenticate using an [Azure Identity] TokenCredential.
-// This function should be paired with a call to [WithEndpoint] to point to your Azure OpenAI instance.
-//
-// [Azure Identity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity
-func WithTokenCredential(tokenCredential azcore.TokenCredential) option.RequestOption {
-	bearerTokenPolicy := runtime.NewBearerTokenPolicy(tokenCredential, []string{"https://cognitiveservices.azure.com/.default"}, nil)
-
-	// add in a middleware that uses the bearer token generated from the token credential
-	return option.WithMiddleware(func(req *http.Request, next option.MiddlewareNext) (*http.Response, error) {
-		pipeline := runtime.NewPipeline("azopenai-extensions", version, runtime.PipelineOptions{}, &policy.ClientOptions{
-			InsecureAllowCredentialWithHTTP: true, // allow for plain HTTP proxies, etc..
-			PerRetryPolicies: []policy.Policy{
-				bearerTokenPolicy,
-				policyAdapter(next),
-			},
-		})
-
-		req2, err := runtime.NewRequestFromRequest(req)
-
-		if err != nil {
-			return nil, err
-		}
-
-		return pipeline.Do(req2)
-	})
-}
-
-// WithAPIKey configures this client to authenticate using an API key.
-// This function should be paired with a call to [WithEndpoint] to point to your Azure OpenAI instance.
-func WithAPIKey(apiKey string) option.RequestOption {
-	// NOTE: there is an option.WithApiKey(), but that adds the value into
-	// the Authorization header instead so we're doing this instead.
-	return option.WithHeader("Api-Key", apiKey)
-}
-
-// jsonRoutes have JSON payloads - we'll deserialize looking for a .model field in there
-// so we won't have to worry about individual types for completions vs embeddings, etc...
-var jsonRoutes = map[string]bool{
-	"/openai/completions":        true,
-	"/openai/chat/completions":   true,
-	"/openai/embeddings":         true,
-	"/openai/audio/speech":       true,
-	"/openai/images/generations": true,
-}
-
-// audioMultipartRoutes have mime/multipart payloads. These are less generic - we're very much
-// expecting a transcription or translation payload for these.
-var audioMultipartRoutes = map[string]bool{
-	"/openai/audio/transcriptions": true,
-	"/openai/audio/translations":   true,
-}
-
-// getReplacementPathWithDeployment parses the request body to extract out the Model parameter (or equivalent)
-// (note, the req.Body is fully read as part of this, and is replaced with a bytes.Reader)
-func getReplacementPathWithDeployment(req *http.Request) (string, error) {
-	if jsonRoutes[req.URL.Path] {
-		return getJSONRoute(req)
-	}
-
-	if audioMultipartRoutes[req.URL.Path] {
-		return getAudioMultipartRoute(req)
-	}
-
-	// No need to relocate the path. We've already tacked on /openai when we setup the endpoint.
-	return req.URL.Path, nil
-}
-
-func getJSONRoute(req *http.Request) (string, error) {
-	// we need to deserialize the body, partly, in order to read out the model field.
-	jsonBytes, err := io.ReadAll(req.Body)
-
-	if err != nil {
-		return "", err
-	}
-
-	// make sure we restore the body so it can be used in later middlewares.
-	req.Body = io.NopCloser(bytes.NewReader(jsonBytes))
-
-	var v *struct {
-		Model string `json:"model"`
-	}
-
-	if err := json.Unmarshal(jsonBytes, &v); err != nil {
-		return "", err
-	}
-
-	escapedDeployment := url.PathEscape(v.Model)
-	return strings.Replace(req.URL.Path, "/openai/", "/openai/deployments/"+escapedDeployment+"/", 1), nil
-}
-
-func getAudioMultipartRoute(req *http.Request) (string, error) {
-	// body is a multipart/mime body type instead.
-	mimeBytes, err := io.ReadAll(req.Body)
-
-	if err != nil {
-		return "", err
-	}
-
-	// make sure we restore the body so it can be used in later middlewares.
-	req.Body = io.NopCloser(bytes.NewReader(mimeBytes))
-
-	_, mimeParams, err := mime.ParseMediaType(req.Header.Get("Content-Type"))
-
-	if err != nil {
-		return "", err
-	}
-
-	mimeReader := multipart.NewReader(
-		io.NopCloser(bytes.NewReader(mimeBytes)),
-		mimeParams["boundary"])
-
-	for {
-		mp, err := mimeReader.NextPart()
-
-		if err != nil {
-			if errors.Is(err, io.EOF) {
-				return "", errors.New("unable to find the model part in multipart body")
-			}
-
-			return "", err
-		}
-
-		defer mp.Close()
-
-		if mp.FormName() == "model" {
-			modelBytes, err := io.ReadAll(mp)
-
-			if err != nil {
-				return "", err
-			}
-
-			escapedDeployment := url.PathEscape(string(modelBytes))
-			return strings.Replace(req.URL.Path, "/openai/", "/openai/deployments/"+escapedDeployment+"/", 1), nil
-		}
-	}
-}
-
-type policyAdapter option.MiddlewareNext
-
-func (mp policyAdapter) Do(req *policy.Request) (*http.Response, error) {
-	return (option.MiddlewareNext)(mp)(req.Raw())
-}
-
-const version = "v.0.1.0"
diff --git a/azure/azure_test.go b/azure/azure_test.go
deleted file mode 100644
index 00f5733..0000000
--- a/azure/azure_test.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package azure
-
-import (
-	"bytes"
-	"mime/multipart"
-	"net/http"
-	"testing"
-
-	"github.com/openai/openai-go"
-	"github.com/openai/openai-go/internal/apijson"
-	"github.com/openai/openai-go/shared"
-)
-
-func TestJSONRoute(t *testing.T) {
-	chatCompletionParams := openai.ChatCompletionNewParams{
-		Model: openai.F(openai.ChatModel("arbitraryDeployment")),
-		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
-			openai.ChatCompletionAssistantMessageParam{
-				Role:    openai.F(openai.ChatCompletionAssistantMessageParamRoleAssistant),
-				Content: openai.String("You are a helpful assistant"),
-			},
-			openai.ChatCompletionUserMessageParam{
-				Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
-				Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Can you tell me another word for the universe?")),
-			},
-		}),
-	}
-
-	serializedBytes, err := apijson.MarshalRoot(chatCompletionParams)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	req, err := http.NewRequest("POST", "/openai/chat/completions", bytes.NewReader(serializedBytes))
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	replacementPath, err := getReplacementPathWithDeployment(req)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if replacementPath != "/openai/deployments/arbitraryDeployment/chat/completions" {
-		t.Fatalf("replacementpath didn't match: %s", replacementPath)
-	}
-}
-
-func TestGetAudioMultipartRoute(t *testing.T) {
-	buff := &bytes.Buffer{}
-	mw := multipart.NewWriter(buff)
-	defer mw.Close()
-
-	fw, err := mw.CreateFormFile("file", "test.mp3")
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if _, err = fw.Write([]byte("ignore me")); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := mw.WriteField("model", "arbitraryDeployment"); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := mw.Close(); err != nil {
-		t.Fatal(err)
-	}
-
-	req, err := http.NewRequest("POST", "/openai/audio/transcriptions", bytes.NewReader(buff.Bytes()))
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	req.Header.Set("Content-Type", mw.FormDataContentType())
-
-	replacementPath, err := getReplacementPathWithDeployment(req)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if replacementPath != "/openai/deployments/arbitraryDeployment/audio/transcriptions" {
-		t.Fatalf("replacementpath didn't match: %s", replacementPath)
-	}
-}
-
-func TestNoRouteChangeNeeded(t *testing.T) {
-	chatCompletionParams := openai.ChatCompletionNewParams{
-		Model: openai.F(openai.ChatModel("arbitraryDeployment")),
-		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
-			openai.ChatCompletionAssistantMessageParam{
-				Role:    openai.F(openai.ChatCompletionAssistantMessageParamRoleAssistant),
-				Content: openai.String("You are a helpful assistant"),
-			},
-			openai.ChatCompletionUserMessageParam{
-				Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
-				Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Can you tell me another word for the universe?")),
-			},
-		}),
-	}
-
-	serializedBytes, err := apijson.MarshalRoot(chatCompletionParams)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	req, err := http.NewRequest("POST", "/openai/does/not/need/a/deployment", bytes.NewReader(serializedBytes))
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	replacementPath, err := getReplacementPathWithDeployment(req)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if replacementPath != "/openai/does/not/need/a/deployment" {
-		t.Fatalf("replacementpath didn't match: %s", replacementPath)
-	}
-}
diff --git a/azure/example_auth_test.go b/azure/example_auth_test.go
deleted file mode 100644
index 3a8ef21..0000000
--- a/azure/example_auth_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package azure_test
-
-import (
-	"fmt"
-
-	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
-	"github.com/openai/openai-go"
-	"github.com/openai/openai-go/azure"
-)
-
-func Example_authentication() {
-	// There are two ways to authenticate - using a TokenCredential (via the azidentity
-	// package), or using an API Key.
-	const azureOpenAIEndpoint = "https://.openai.azure.com"
-	const azureOpenAIAPIVersion = ""
-
-	// Using a TokenCredential
-	{
-		// For a full list of credential types look at the documentation for the Azure Identity
-		// package: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity
-		tokenCredential, err := azidentity.NewDefaultAzureCredential(nil)
-
-		if err != nil {
-			fmt.Printf("Failed to create TokenCredential: %s\n", err)
-			return
-		}
-
-		client := openai.NewClient(
-			azure.WithEndpoint(azureOpenAIEndpoint, azureOpenAIAPIVersion),
-			azure.WithTokenCredential(tokenCredential),
-		)
-
-		_ = client
-	}
-
-	// Using an API Key
-	{
-		const azureOpenAIAPIKey = ""
-
-		client := openai.NewClient(
-			azure.WithEndpoint(azureOpenAIEndpoint, azureOpenAIAPIVersion),
-			azure.WithAPIKey(azureOpenAIAPIKey),
-		)
-
-		_ = client
-	}
-}
diff --git a/batch.go b/batch.go
index 8b7da8f..14a2509 100644
--- a/batch.go
+++ b/batch.go
@@ -368,7 +368,7 @@ type BatchListParams struct {
 // URLQuery serializes [BatchListParams]'s query parameters as `url.Values`.
 func (r BatchListParams) URLQuery() (v url.Values) {
 	return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
-		ArrayFormat:  apiquery.ArrayQueryFormatComma,
+		ArrayFormat:  apiquery.ArrayQueryFormatBrackets,
 		NestedFormat: apiquery.NestedQueryFormatBrackets,
 	})
 }
diff --git a/betaassistant.go b/betaassistant.go
index f6a4b76..28ae56e 100644
--- a/betaassistant.go
+++ b/betaassistant.go
@@ -136,22 +136,6 @@ type Assistant struct {
 	// assistant. Tools can be of types `code_interpreter`, `file_search`, or
 	// `function`.
 	Tools []AssistantTool `json:"tools,required"`
-	// Specifies the format that the model must output. Compatible with
-	// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
-	// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
-	// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-	//
-	// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
-	// message the model generates is valid JSON.
-	//
-	// **Important:** when using JSON mode, you **must** also instruct the model to
-	// produce JSON yourself via a system or user message. Without this, the model may
-	// generate an unending stream of whitespace until the generation reaches the token
-	// limit, resulting in a long-running and seemingly "stuck" request. Also note that
-	// the message content may be partially cut off if `finish_reason="length"`, which
-	// indicates the generation exceeded `max_tokens` or the conversation exceeded the
-	// max context length.
-	ResponseFormat AssistantResponseFormatOptionUnion `json:"response_format,nullable"`
 	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
 	// make the output more random, while lower values like 0.2 will make it more
 	// focused and deterministic.
@@ -172,21 +156,20 @@
 
 // assistantJSON contains the JSON metadata for the struct [Assistant]
 type assistantJSON struct {
-	ID             apijson.Field
-	CreatedAt      apijson.Field
-	Description    apijson.Field
-	Instructions   apijson.Field
-	Metadata       apijson.Field
-	Model          apijson.Field
-	Name           apijson.Field
-	Object         apijson.Field
-	Tools          apijson.Field
-	ResponseFormat apijson.Field
-	Temperature    apijson.Field
-	ToolResources  apijson.Field
-	TopP           apijson.Field
-	raw            string
-	ExtraFields    map[string]apijson.Field
+	ID            apijson.Field
+	CreatedAt     apijson.Field
+	Description   apijson.Field
+	Instructions  apijson.Field
+	Metadata      apijson.Field
+	Model         apijson.Field
+	Name          apijson.Field
+	Object        apijson.Field
+	Tools         apijson.Field
+	Temperature   apijson.Field
+	ToolResources apijson.Field
+	TopP          apijson.Field
+	raw           string
+	ExtraFields   map[string]apijson.Field
 }
 
 func (r *Assistant) UnmarshalJSON(data []byte) (err error) {
@@ -1869,8 +1852,8 @@ func (r FileSearchToolType) IsKnown() bool {
 // Overrides for the file search tool.
 type FileSearchToolFileSearch struct {
 	// The maximum number of results the file search tool should output. The default is
-	// 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1
-	// and 50 inclusive.
+	// 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
+	// 1 and 50 inclusive.
 	//
 	// Note that the file search tool may output fewer than `max_num_results` results.
 	// See the
@@ -1914,8 +1897,8 @@ func (r FileSearchToolParam) implementsBetaThreadNewAndRunParamsToolUnion() {}
 
 // Overrides for the file search tool.
 type FileSearchToolFileSearchParam struct {
 	// The maximum number of results the file search tool should output. The default is
-	// 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1
-	// and 50 inclusive.
+	// 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
+	// 1 and 50 inclusive.
 	//
 	// Note that the file search tool may output fewer than `max_num_results` results.
 	// See the
@@ -1988,7 +1971,7 @@ type BetaAssistantNewParams struct {
 	// see all of your available models, or see our
 	// [Model overview](https://platform.openai.com/docs/models/overview) for
 	// descriptions of them.
-	Model param.Field[BetaAssistantNewParamsModel] `json:"model,required"`
+	Model param.Field[ChatModel] `json:"model,required"`
 	// The description of the assistant. The maximum length is 512 characters.
 	Description param.Field[string] `json:"description"`
 	// The system instructions that the assistant uses. The maximum length is 256,000
@@ -2001,22 +1984,6 @@ type BetaAssistantNewParams struct {
 	Metadata param.Field[interface{}] `json:"metadata"`
 	// The name of the assistant. The maximum length is 256 characters.
 	Name param.Field[string] `json:"name"`
-	// Specifies the format that the model must output. Compatible with
-	// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
-	// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
-	// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-	//
-	// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
-	// message the model generates is valid JSON.
-	//
-	// **Important:** when using JSON mode, you **must** also instruct the model to
-	// produce JSON yourself via a system or user message. Without this, the model may
-	// generate an unending stream of whitespace until the generation reaches the token
-	// limit, resulting in a long-running and seemingly "stuck" request. Also note that
-	// the message content may be partially cut off if `finish_reason="length"`, which
-	// indicates the generation exceeded `max_tokens` or the conversation exceeded the
-	// max context length.
-	ResponseFormat param.Field[AssistantResponseFormatOptionUnionParam] `json:"response_format"`
 	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
 	// make the output more random, while lower values like 0.2 will make it more
 	// focused and deterministic.
@@ -2042,41 +2009,6 @@ func (r BetaAssistantNewParams) MarshalJSON() (data []byte, err error) {
 	return apijson.MarshalRoot(r)
 }
 
-type BetaAssistantNewParamsModel string
-
-const (
-	BetaAssistantNewParamsModelGPT4o               BetaAssistantNewParamsModel = "gpt-4o"
-	BetaAssistantNewParamsModelGPT4o2024_05_13     BetaAssistantNewParamsModel = "gpt-4o-2024-05-13"
-	BetaAssistantNewParamsModelGPT4oMini           BetaAssistantNewParamsModel = "gpt-4o-mini"
-	BetaAssistantNewParamsModelGPT4oMini2024_07_18 BetaAssistantNewParamsModel = "gpt-4o-mini-2024-07-18"
-	BetaAssistantNewParamsModelGPT4Turbo           BetaAssistantNewParamsModel = "gpt-4-turbo"
-	BetaAssistantNewParamsModelGPT4Turbo2024_04_09 BetaAssistantNewParamsModel = "gpt-4-turbo-2024-04-09"
-	BetaAssistantNewParamsModelGPT4_0125Preview    BetaAssistantNewParamsModel = "gpt-4-0125-preview"
-	BetaAssistantNewParamsModelGPT4TurboPreview    BetaAssistantNewParamsModel = "gpt-4-turbo-preview"
-	BetaAssistantNewParamsModelGPT4_1106Preview    BetaAssistantNewParamsModel = "gpt-4-1106-preview"
-	BetaAssistantNewParamsModelGPT4VisionPreview   BetaAssistantNewParamsModel = "gpt-4-vision-preview"
-	BetaAssistantNewParamsModelGPT4                BetaAssistantNewParamsModel = "gpt-4"
-	BetaAssistantNewParamsModelGPT4_0314           BetaAssistantNewParamsModel = "gpt-4-0314"
-	BetaAssistantNewParamsModelGPT4_0613           BetaAssistantNewParamsModel = "gpt-4-0613"
-	BetaAssistantNewParamsModelGPT4_32k            BetaAssistantNewParamsModel = "gpt-4-32k"
-	BetaAssistantNewParamsModelGPT4_32k0314        BetaAssistantNewParamsModel = "gpt-4-32k-0314"
-	BetaAssistantNewParamsModelGPT4_32k0613        BetaAssistantNewParamsModel = "gpt-4-32k-0613"
-	BetaAssistantNewParamsModelGPT3_5Turbo         BetaAssistantNewParamsModel = "gpt-3.5-turbo"
-	BetaAssistantNewParamsModelGPT3_5Turbo16k      BetaAssistantNewParamsModel = "gpt-3.5-turbo-16k"
-	BetaAssistantNewParamsModelGPT3_5Turbo0613     BetaAssistantNewParamsModel = "gpt-3.5-turbo-0613"
-	BetaAssistantNewParamsModelGPT3_5Turbo1106     BetaAssistantNewParamsModel = "gpt-3.5-turbo-1106"
-	BetaAssistantNewParamsModelGPT3_5Turbo0125     BetaAssistantNewParamsModel = "gpt-3.5-turbo-0125"
-	BetaAssistantNewParamsModelGPT3_5Turbo16k0613  BetaAssistantNewParamsModel = "gpt-3.5-turbo-16k-0613"
-)
-
-func (r BetaAssistantNewParamsModel) IsKnown() bool {
-	switch r {
-	case BetaAssistantNewParamsModelGPT4o, BetaAssistantNewParamsModelGPT4o2024_05_13, BetaAssistantNewParamsModelGPT4oMini, BetaAssistantNewParamsModelGPT4oMini2024_07_18, BetaAssistantNewParamsModelGPT4Turbo, BetaAssistantNewParamsModelGPT4Turbo2024_04_09, BetaAssistantNewParamsModelGPT4_0125Preview, BetaAssistantNewParamsModelGPT4TurboPreview, BetaAssistantNewParamsModelGPT4_1106Preview, BetaAssistantNewParamsModelGPT4VisionPreview, BetaAssistantNewParamsModelGPT4, BetaAssistantNewParamsModelGPT4_0314, BetaAssistantNewParamsModelGPT4_0613, BetaAssistantNewParamsModelGPT4_32k, BetaAssistantNewParamsModelGPT4_32k0314, BetaAssistantNewParamsModelGPT4_32k0613, BetaAssistantNewParamsModelGPT3_5Turbo, BetaAssistantNewParamsModelGPT3_5Turbo16k, BetaAssistantNewParamsModelGPT3_5Turbo0613, BetaAssistantNewParamsModelGPT3_5Turbo1106, BetaAssistantNewParamsModelGPT3_5Turbo0125, BetaAssistantNewParamsModelGPT3_5Turbo16k0613:
-		return true
-	}
-	return false
-}
-
 // A set of resources that are used by the assistant's tools. The resources are
 // specific to the type of tool. For example, the `code_interpreter` tool requires
 // a list of file IDs, while the `file_search` tool requires a list of vector store
@@ -2269,22 +2201,6 @@ type BetaAssistantUpdateParams struct {
 	Model param.Field[string] `json:"model"`
 	// The name of the assistant. The maximum length is 256 characters.
 	Name param.Field[string] `json:"name"`
-	// Specifies the format that the model must output. Compatible with
-	// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
-	// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
-	// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-	//
-	// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
-	// message the model generates is valid JSON.
-	//
-	// **Important:** when using JSON mode, you **must** also instruct the model to
-	// produce JSON yourself via a system or user message. Without this, the model may
-	// generate an unending stream of whitespace until the generation reaches the token
-	// limit, resulting in a long-running and seemingly "stuck" request. Also note that
-	// the message content may be partially cut off if `finish_reason="length"`, which
-	// indicates the generation exceeded `max_tokens` or the conversation exceeded the
-	// max context length.
-	ResponseFormat param.Field[AssistantResponseFormatOptionUnionParam] `json:"response_format"`
 	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
 	// make the output more random, while lower values like 0.2 will make it more
 	// focused and deterministic.
@@ -2370,7 +2286,7 @@ type BetaAssistantListParams struct {
 // `url.Values`.
 func (r BetaAssistantListParams) URLQuery() (v url.Values) {
 	return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
-		ArrayFormat:  apiquery.ArrayQueryFormatComma,
+		ArrayFormat:  apiquery.ArrayQueryFormatBrackets,
 		NestedFormat: apiquery.NestedQueryFormatBrackets,
 	})
 }
diff --git a/betaassistant_test.go b/betaassistant_test.go
index 1c6f29f..bf60a21 100644
--- a/betaassistant_test.go
+++ b/betaassistant_test.go
@@ -26,13 +26,12 @@ func TestBetaAssistantNewWithOptionalParams(t *testing.T) {
 		option.WithAPIKey("My API Key"),
 	)
 	_, err := client.Beta.Assistants.New(context.TODO(), openai.BetaAssistantNewParams{
-		Model:          openai.F(openai.BetaAssistantNewParamsModelGPT4o),
-		Description:    openai.F("description"),
-		Instructions:   openai.F("instructions"),
-		Metadata:       openai.F[any](map[string]interface{}{}),
-		Name:           openai.F("name"),
-		ResponseFormat: openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)),
-		Temperature:    openai.F(1.000000),
+		Model:        openai.F(openai.ChatModelGPT4o),
+		Description:  openai.F("description"),
+		Instructions: openai.F("instructions"),
+		Metadata:     openai.F[any](map[string]interface{}{}),
+		Name:         openai.F("name"),
+		Temperature:  openai.F(1.000000),
 		ToolResources: openai.F(openai.BetaAssistantNewParamsToolResources{
 			CodeInterpreter: openai.F(openai.BetaAssistantNewParamsToolResourcesCodeInterpreter{
 				FileIDs: openai.F([]string{"string", "string", "string"}),
@@ -104,13 +103,12 @@ func TestBetaAssistantUpdateWithOptionalParams(t *testing.T) {
 		context.TODO(),
 		"assistant_id",
 		openai.BetaAssistantUpdateParams{
-			Description:    openai.F("description"),
-			Instructions:   openai.F("instructions"),
-			Metadata:       openai.F[any](map[string]interface{}{}),
-			Model:          openai.F("model"),
-			Name:           openai.F("name"),
-			ResponseFormat: openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)),
-			Temperature:    openai.F(1.000000),
+			Description:  openai.F("description"),
+			Instructions: openai.F("instructions"),
+			Metadata:     openai.F[any](map[string]interface{}{}),
+			Model:        openai.F("model"),
+			Name:         openai.F("name"),
+			Temperature:  openai.F(1.000000),
 			ToolResources: openai.F(openai.BetaAssistantUpdateParamsToolResources{
 				CodeInterpreter: openai.F(openai.BetaAssistantUpdateParamsToolResourcesCodeInterpreter{
 					FileIDs: openai.F([]string{"string", "string", "string"}),
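Assistant creation now takes the shared `ChatModel` constants directly, as the updated test shows. A minimal sketch assuming a configured `client` (the name and instructions are illustrative):

```go
// Create an assistant with the shared ChatModel enum
// (previously openai.BetaAssistantNewParamsModelGPT4o).
assistant, err := client.Beta.Assistants.New(context.TODO(), openai.BetaAssistantNewParams{
	Model:        openai.F(openai.ChatModelGPT4o),
	Name:         openai.F("Math Tutor"),                           // illustrative
	Instructions: openai.F("You answer math questions concisely."), // illustrative
})
if err != nil {
	panic(err.Error())
}
fmt.Println(assistant.ID)
```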
diff --git a/betathread.go b/betathread.go
index 0c5f281..1e2ef68 100644
--- a/betathread.go
+++ b/betathread.go
@@ -106,142 +106,6 @@ func (r *BetaThreadService) NewAndRunStreaming(ctx context.Context, body BetaThr
 	return ssestream.NewStream[AssistantStreamEvent](ssestream.NewDecoder(raw), err)
 }
 
-// An object describing the expected output of the model. If `json_object` only
-// `function` type `tools` are allowed to be passed to the Run. If `text` the model
-// can return text or any value needed.
-type AssistantResponseFormat struct {
-	// Must be one of `text` or `json_object`.
-	Type AssistantResponseFormatType `json:"type"`
-	JSON assistantResponseFormatJSON `json:"-"`
-}
-
-// assistantResponseFormatJSON contains the JSON metadata for the struct
-// [AssistantResponseFormat]
-type assistantResponseFormatJSON struct {
-	Type        apijson.Field
-	raw         string
-	ExtraFields map[string]apijson.Field
-}
-
-func (r *AssistantResponseFormat) UnmarshalJSON(data []byte) (err error) {
-	return apijson.UnmarshalRoot(data, r)
-}
-
-func (r assistantResponseFormatJSON) RawJSON() string {
-	return r.raw
-}
-
-func (r AssistantResponseFormat) implementsAssistantResponseFormatOptionUnion() {}
-
-// Must be one of `text` or `json_object`.
-type AssistantResponseFormatType string
-
-const (
-	AssistantResponseFormatTypeText       AssistantResponseFormatType = "text"
-	AssistantResponseFormatTypeJSONObject AssistantResponseFormatType = "json_object"
-)
-
-func (r AssistantResponseFormatType) IsKnown() bool {
-	switch r {
-	case AssistantResponseFormatTypeText, AssistantResponseFormatTypeJSONObject:
-		return true
-	}
-	return false
-}
-
-// An object describing the expected output of the model. If `json_object` only
-// `function` type `tools` are allowed to be passed to the Run. If `text` the model
-// can return text or any value needed.
-type AssistantResponseFormatParam struct {
-	// Must be one of `text` or `json_object`.
-	Type param.Field[AssistantResponseFormatType] `json:"type"`
-}
-
-func (r AssistantResponseFormatParam) MarshalJSON() (data []byte, err error) {
-	return apijson.MarshalRoot(r)
-}
-
-func (r AssistantResponseFormatParam) implementsAssistantResponseFormatOptionUnionParam() {}
-
-// Specifies the format that the model must output. Compatible with
-// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
-// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
-// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-//
-// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
-// message the model generates is valid JSON.
-//
-// **Important:** when using JSON mode, you **must** also instruct the model to
-// produce JSON yourself via a system or user message. Without this, the model may
-// generate an unending stream of whitespace until the generation reaches the token
-// limit, resulting in a long-running and seemingly "stuck" request. Also note that
-// the message content may be partially cut off if `finish_reason="length"`, which
-// indicates the generation exceeded `max_tokens` or the conversation exceeded the
-// max context length.
-//
-// Union satisfied by [AssistantResponseFormatOptionString] or
-// [AssistantResponseFormat].
-type AssistantResponseFormatOptionUnion interface {
-	implementsAssistantResponseFormatOptionUnion()
-}
-
-func init() {
-	apijson.RegisterUnion(
-		reflect.TypeOf((*AssistantResponseFormatOptionUnion)(nil)).Elem(),
-		"",
-		apijson.UnionVariant{
-			TypeFilter: gjson.String,
-			Type:       reflect.TypeOf(AssistantResponseFormatOptionString("")),
-		},
-		apijson.UnionVariant{
-			TypeFilter: gjson.JSON,
-			Type:       reflect.TypeOf(AssistantResponseFormat{}),
-		},
-	)
-}
-
-// `auto` is the default value
-type AssistantResponseFormatOptionString string
-
-const (
-	AssistantResponseFormatOptionStringNone AssistantResponseFormatOptionString = "none"
-	AssistantResponseFormatOptionStringAuto AssistantResponseFormatOptionString = "auto"
-)
-
-func (r AssistantResponseFormatOptionString) IsKnown() bool {
-	switch r {
-	case AssistantResponseFormatOptionStringNone, AssistantResponseFormatOptionStringAuto:
-		return true
-	}
-	return false
-}
-
-func (r AssistantResponseFormatOptionString) implementsAssistantResponseFormatOptionUnion() {}
-
-func (r AssistantResponseFormatOptionString) implementsAssistantResponseFormatOptionUnionParam() {}
-
-// Specifies the format that the model must output. Compatible with
-// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
-// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
-// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-//
-// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
-// message the model generates is valid JSON.
-//
-// **Important:** when using JSON mode, you **must** also instruct the model to
-// produce JSON yourself via a system or user message. Without this, the model may
-// generate an unending stream of whitespace until the generation reaches the token
-// limit, resulting in a long-running and seemingly "stuck" request. Also note that
-// the message content may be partially cut off if `finish_reason="length"`, which
-// indicates the generation exceeded `max_tokens` or the conversation exceeded the
-// max context length.
-//
-// Satisfied by [AssistantResponseFormatOptionString],
-// [AssistantResponseFormatParam].
-type AssistantResponseFormatOptionUnionParam interface {
-	implementsAssistantResponseFormatOptionUnionParam()
-}
-
 // Specifies a tool the model should use. Use to force the model to call a specific
 // tool.
 type AssistantToolChoice struct {
@@ -972,27 +836,11 @@ type BetaThreadNewAndRunParams struct {
 	// be used to execute this run. If a value is provided here, it will override the
 	// model associated with the assistant. If not, the model associated with the
 	// assistant will be used.
-	Model param.Field[BetaThreadNewAndRunParamsModel] `json:"model"`
+	Model param.Field[ChatModel] `json:"model"`
 	// Whether to enable
 	// [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
 	// during tool use.
 	ParallelToolCalls param.Field[bool] `json:"parallel_tool_calls"`
-	// Specifies the format that the model must output. Compatible with
-	// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
-	// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
-	// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-	//
-	// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
-	// message the model generates is valid JSON.
-	//
-	// **Important:** when using JSON mode, you **must** also instruct the model to
-	// produce JSON yourself via a system or user message. Without this, the model may
-	// generate an unending stream of whitespace until the generation reaches the token
-	// limit, resulting in a long-running and seemingly "stuck" request. Also note that
-	// the message content may be partially cut off if `finish_reason="length"`, which
-	// indicates the generation exceeded `max_tokens` or the conversation exceeded the
-	// max context length.
-	ResponseFormat param.Field[AssistantResponseFormatOptionUnionParam] `json:"response_format"`
 	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
 	// make the output more random, while lower values like 0.2 will make it more
 	// focused and deterministic.
@@ -1030,41 +878,6 @@ func (r BetaThreadNewAndRunParams) MarshalJSON() (data []byte, err error) {
 	return apijson.MarshalRoot(r)
 }
 
-type BetaThreadNewAndRunParamsModel string
-
-const (
-	BetaThreadNewAndRunParamsModelGPT4o               BetaThreadNewAndRunParamsModel = "gpt-4o"
-	BetaThreadNewAndRunParamsModelGPT4o2024_05_13     BetaThreadNewAndRunParamsModel = "gpt-4o-2024-05-13"
-	BetaThreadNewAndRunParamsModelGPT4oMini           BetaThreadNewAndRunParamsModel = "gpt-4o-mini"
-	BetaThreadNewAndRunParamsModelGPT4oMini2024_07_18 BetaThreadNewAndRunParamsModel = "gpt-4o-mini-2024-07-18"
-	BetaThreadNewAndRunParamsModelGPT4Turbo           BetaThreadNewAndRunParamsModel = "gpt-4-turbo"
-	BetaThreadNewAndRunParamsModelGPT4Turbo2024_04_09 BetaThreadNewAndRunParamsModel = "gpt-4-turbo-2024-04-09"
-	BetaThreadNewAndRunParamsModelGPT4_0125Preview    BetaThreadNewAndRunParamsModel = "gpt-4-0125-preview"
-	BetaThreadNewAndRunParamsModelGPT4TurboPreview    BetaThreadNewAndRunParamsModel = "gpt-4-turbo-preview"
-	BetaThreadNewAndRunParamsModelGPT4_1106Preview    BetaThreadNewAndRunParamsModel = "gpt-4-1106-preview"
-	BetaThreadNewAndRunParamsModelGPT4VisionPreview   BetaThreadNewAndRunParamsModel = "gpt-4-vision-preview"
-	BetaThreadNewAndRunParamsModelGPT4                BetaThreadNewAndRunParamsModel = "gpt-4"
-	BetaThreadNewAndRunParamsModelGPT4_0314           BetaThreadNewAndRunParamsModel = "gpt-4-0314"
-	BetaThreadNewAndRunParamsModelGPT4_0613           BetaThreadNewAndRunParamsModel = "gpt-4-0613"
-	BetaThreadNewAndRunParamsModelGPT4_32k            BetaThreadNewAndRunParamsModel = "gpt-4-32k"
-	BetaThreadNewAndRunParamsModelGPT4_32k0314        BetaThreadNewAndRunParamsModel = "gpt-4-32k-0314"
-	BetaThreadNewAndRunParamsModelGPT4_32k0613        BetaThreadNewAndRunParamsModel = "gpt-4-32k-0613"
-	BetaThreadNewAndRunParamsModelGPT3_5Turbo         BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo"
-	BetaThreadNewAndRunParamsModelGPT3_5Turbo16k      BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-16k"
-	BetaThreadNewAndRunParamsModelGPT3_5Turbo0613     BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-0613"
-	BetaThreadNewAndRunParamsModelGPT3_5Turbo1106     BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-1106"
-	BetaThreadNewAndRunParamsModelGPT3_5Turbo0125     BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-0125"
-	BetaThreadNewAndRunParamsModelGPT3_5Turbo16k0613  BetaThreadNewAndRunParamsModel = "gpt-3.5-turbo-16k-0613"
-)
-
-func (r BetaThreadNewAndRunParamsModel) IsKnown() bool {
-	switch r {
-	case BetaThreadNewAndRunParamsModelGPT4o, BetaThreadNewAndRunParamsModelGPT4o2024_05_13, BetaThreadNewAndRunParamsModelGPT4oMini, BetaThreadNewAndRunParamsModelGPT4oMini2024_07_18, BetaThreadNewAndRunParamsModelGPT4Turbo, BetaThreadNewAndRunParamsModelGPT4Turbo2024_04_09, BetaThreadNewAndRunParamsModelGPT4_0125Preview, BetaThreadNewAndRunParamsModelGPT4TurboPreview, BetaThreadNewAndRunParamsModelGPT4_1106Preview, BetaThreadNewAndRunParamsModelGPT4VisionPreview, BetaThreadNewAndRunParamsModelGPT4, BetaThreadNewAndRunParamsModelGPT4_0314, BetaThreadNewAndRunParamsModelGPT4_0613, BetaThreadNewAndRunParamsModelGPT4_32k, BetaThreadNewAndRunParamsModelGPT4_32k0314, BetaThreadNewAndRunParamsModelGPT4_32k0613, BetaThreadNewAndRunParamsModelGPT3_5Turbo, BetaThreadNewAndRunParamsModelGPT3_5Turbo16k, BetaThreadNewAndRunParamsModelGPT3_5Turbo0613, BetaThreadNewAndRunParamsModelGPT3_5Turbo1106, BetaThreadNewAndRunParamsModelGPT3_5Turbo0125, BetaThreadNewAndRunParamsModelGPT3_5Turbo16k0613:
-		return true
-	}
-	return false
-}
-
 // If no thread is provided, an empty thread will be created.
 type BetaThreadNewAndRunParamsThread struct {
 	// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
diff --git a/betathread_test.go b/betathread_test.go
index e1acb0e..413ecce 100644
--- a/betathread_test.go
+++ b/betathread_test.go
@@ -248,9 +248,8 @@ func TestBetaThreadNewAndRunWithOptionalParams(t *testing.T) {
 		MaxCompletionTokens: openai.F(int64(256)),
 		MaxPromptTokens:     openai.F(int64(256)),
 		Metadata:            openai.F[any](map[string]interface{}{}),
-		Model:               openai.F(openai.BetaThreadNewAndRunParamsModelGPT4o),
+		Model:               openai.F(openai.ChatModelGPT4o),
 		ParallelToolCalls:   openai.F(true),
-		ResponseFormat:      openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)),
 		Temperature:         openai.F(1.000000),
 		Thread: openai.F(openai.BetaThreadNewAndRunParamsThread{
 			Messages: openai.F([]openai.BetaThreadNewAndRunParamsThreadMessage{{
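The same substitution applies when kicking off a run. A sketch assuming a configured `client`, an existing assistant (the ID is illustrative), and the non-streaming `NewAndRun` counterpart of the `NewAndRunStreaming` method shown above:

```go
// Start a thread-and-run with the shared ChatModel enum
// (previously openai.BetaThreadNewAndRunParamsModelGPT4o).
run, err := client.Beta.Threads.NewAndRun(context.TODO(), openai.BetaThreadNewAndRunParams{
	AssistantID: openai.F("asst_abc123"), // illustrative ID
	Model:       openai.F(openai.ChatModelGPT4o),
})
if err != nil {
	panic(err.Error())
}
fmt.Println(run.Status)
```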
type MessageContentUnion interface { implementsMessageContent() } @@ -1397,6 +1399,11 @@ func init() { Type: reflect.TypeOf(TextContentBlock{}), DiscriminatorValue: "text", }, + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(RefusalContentBlock{}), + DiscriminatorValue: "refusal", + }, ) } @@ -1407,11 +1414,12 @@ const ( MessageContentTypeImageFile MessageContentType = "image_file" MessageContentTypeImageURL MessageContentType = "image_url" MessageContentTypeText MessageContentType = "text" + MessageContentTypeRefusal MessageContentType = "refusal" ) func (r MessageContentType) IsKnown() bool { switch r { - case MessageContentTypeImageFile, MessageContentTypeImageURL, MessageContentTypeText: + case MessageContentTypeImageFile, MessageContentTypeImageURL, MessageContentTypeText, MessageContentTypeRefusal: return true } return false @@ -1426,6 +1434,7 @@ type MessageContentDelta struct { Type MessageContentDeltaType `json:"type,required"` ImageFile ImageFileDelta `json:"image_file"` Text TextDelta `json:"text"` + Refusal string `json:"refusal"` ImageURL ImageURLDelta `json:"image_url"` JSON messageContentDeltaJSON `json:"-"` union MessageContentDeltaUnion @@ -1438,6 +1447,7 @@ type messageContentDeltaJSON struct { Type apijson.Field ImageFile apijson.Field Text apijson.Field + Refusal apijson.Field ImageURL apijson.Field raw string ExtraFields map[string]apijson.Field @@ -1460,7 +1470,7 @@ func (r *MessageContentDelta) UnmarshalJSON(data []byte) (err error) { // specific types for more type safety. // // Possible runtime types of the union are [ImageFileDeltaBlock], [TextDeltaBlock], -// [ImageURLDeltaBlock]. +// [RefusalDeltaBlock], [ImageURLDeltaBlock]. func (r MessageContentDelta) AsUnion() MessageContentDeltaUnion { return r.union } @@ -1468,8 +1478,8 @@ func (r MessageContentDelta) AsUnion() MessageContentDeltaUnion { // References an image [File](https://platform.openai.com/docs/api-reference/files) // in the content of a message. // -// Union satisfied by [ImageFileDeltaBlock], [TextDeltaBlock] or -// [ImageURLDeltaBlock]. +// Union satisfied by [ImageFileDeltaBlock], [TextDeltaBlock], [RefusalDeltaBlock] +// or [ImageURLDeltaBlock]. type MessageContentDeltaUnion interface { implementsMessageContentDelta() } @@ -1488,6 +1498,11 @@ func init() { Type: reflect.TypeOf(TextDeltaBlock{}), DiscriminatorValue: "text", }, + apijson.UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(RefusalDeltaBlock{}), + DiscriminatorValue: "refusal", + }, apijson.UnionVariant{ TypeFilter: gjson.JSON, Type: reflect.TypeOf(ImageURLDeltaBlock{}), @@ -1502,12 +1517,13 @@ type MessageContentDeltaType string const ( MessageContentDeltaTypeImageFile MessageContentDeltaType = "image_file" MessageContentDeltaTypeText MessageContentDeltaType = "text" + MessageContentDeltaTypeRefusal MessageContentDeltaType = "refusal" MessageContentDeltaTypeImageURL MessageContentDeltaType = "image_url" ) func (r MessageContentDeltaType) IsKnown() bool { switch r { - case MessageContentDeltaTypeImageFile, MessageContentDeltaTypeText, MessageContentDeltaTypeImageURL: + case MessageContentDeltaTypeImageFile, MessageContentDeltaTypeText, MessageContentDeltaTypeRefusal, MessageContentDeltaTypeImageURL: return true } return false @@ -1680,6 +1696,93 @@ func (r MessageDeltaEventObject) IsKnown() bool { return false } +// The refusal content generated by the assistant. +type RefusalContentBlock struct { + Refusal string `json:"refusal,required"` + // Always `refusal`. 
+ Type RefusalContentBlockType `json:"type,required"` + JSON refusalContentBlockJSON `json:"-"` +} + +// refusalContentBlockJSON contains the JSON metadata for the struct +// [RefusalContentBlock] +type refusalContentBlockJSON struct { + Refusal apijson.Field + Type apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RefusalContentBlock) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r refusalContentBlockJSON) RawJSON() string { + return r.raw +} + +func (r RefusalContentBlock) implementsMessageContent() {} + +// Always `refusal`. +type RefusalContentBlockType string + +const ( + RefusalContentBlockTypeRefusal RefusalContentBlockType = "refusal" +) + +func (r RefusalContentBlockType) IsKnown() bool { + switch r { + case RefusalContentBlockTypeRefusal: + return true + } + return false +} + +// The refusal content that is part of a message. +type RefusalDeltaBlock struct { + // The index of the refusal part in the message. + Index int64 `json:"index,required"` + // Always `refusal`. + Type RefusalDeltaBlockType `json:"type,required"` + Refusal string `json:"refusal"` + JSON refusalDeltaBlockJSON `json:"-"` +} + +// refusalDeltaBlockJSON contains the JSON metadata for the struct +// [RefusalDeltaBlock] +type refusalDeltaBlockJSON struct { + Index apijson.Field + Type apijson.Field + Refusal apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *RefusalDeltaBlock) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r refusalDeltaBlockJSON) RawJSON() string { + return r.raw +} + +func (r RefusalDeltaBlock) implementsMessageContentDelta() {} + +// Always `refusal`. +type RefusalDeltaBlockType string + +const ( + RefusalDeltaBlockTypeRefusal RefusalDeltaBlockType = "refusal" +) + +func (r RefusalDeltaBlockType) IsKnown() bool { + switch r { + case RefusalDeltaBlockTypeRefusal: + return true + } + return false +} + type Text struct { Annotations []Annotation `json:"annotations,required"` // The data that makes up the text. @@ -2008,7 +2111,7 @@ type BetaThreadMessageListParams struct { // `url.Values`. func (r BetaThreadMessageListParams) URLQuery() (v url.Values) { return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ - ArrayFormat: apiquery.ArrayQueryFormatComma, + ArrayFormat: apiquery.ArrayQueryFormatBrackets, NestedFormat: apiquery.NestedQueryFormatBrackets, }) } diff --git a/betathreadrun.go b/betathreadrun.go index 507dd30..da4833c 100644 --- a/betathreadrun.go +++ b/betathreadrun.go @@ -313,22 +313,6 @@ type Run struct { // Details on the action required to continue the run. Will be `null` if no action // is required. RequiredAction RunRequiredAction `json:"required_action,required,nullable"` - // Specifies the format that the model must output. Compatible with - // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), - // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - // - // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the - // message the model generates is valid JSON. - // - // **Important:** when using JSON mode, you **must** also instruct the model to - // produce JSON yourself via a system or user message. Without this, the model may - // generate an unending stream of whitespace until the generation reaches the token - // limit, resulting in a long-running and seemingly "stuck" request. 
Also note that - // the message content may be partially cut off if `finish_reason="length"`, which - // indicates the generation exceeded `max_tokens` or the conversation exceeded the - // max context length. - ResponseFormat AssistantResponseFormatOptionUnion `json:"response_format,required,nullable"` // The Unix timestamp (in seconds) for when the run was started. StartedAt int64 `json:"started_at,required,nullable"` // The status of the run, which can be either `queued`, `in_progress`, @@ -382,7 +366,6 @@ type runJSON struct { Object apijson.Field ParallelToolCalls apijson.Field RequiredAction apijson.Field - ResponseFormat apijson.Field StartedAt apijson.Field Status apijson.Field ThreadID apijson.Field @@ -708,27 +691,11 @@ type BetaThreadRunNewParams struct { // be used to execute this run. If a value is provided here, it will override the // model associated with the assistant. If not, the model associated with the // assistant will be used. - Model param.Field[BetaThreadRunNewParamsModel] `json:"model"` + Model param.Field[ChatModel] `json:"model"` // Whether to enable // [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) // during tool use. ParallelToolCalls param.Field[bool] `json:"parallel_tool_calls"` - // Specifies the format that the model must output. Compatible with - // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), - // and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - // - // Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the - // message the model generates is valid JSON. - // - // **Important:** when using JSON mode, you **must** also instruct the model to - // produce JSON yourself via a system or user message. Without this, the model may - // generate an unending stream of whitespace until the generation reaches the token - // limit, resulting in a long-running and seemingly "stuck" request. Also note that - // the message content may be partially cut off if `finish_reason="length"`, which - // indicates the generation exceeded `max_tokens` or the conversation exceeded the - // max context length. - ResponseFormat param.Field[AssistantResponseFormatOptionUnionParam] `json:"response_format"` // What sampling temperature to use, between 0 and 2. Higher values like 0.8 will // make the output more random, while lower values like 0.2 will make it more // focused and deterministic. 
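// NOTE (editor): illustrative sketch, not part of this diff. It shows how the new
// "refusal" variants added above surface when reading Assistants message content.
// Assumes the surrounding Message type carries Content []MessageContent, as in this
// package; msg is a placeholder fetched elsewhere.
package example

import (
	"fmt"

	"github.com/openai/openai-go"
)

func printMessageContent(msg openai.Message) {
	for _, part := range msg.Content {
		// AsUnion now returns ImageFileContentBlock, ImageURLContentBlock,
		// TextContentBlock, or the newly added RefusalContentBlock.
		switch block := part.AsUnion().(type) {
		case openai.TextContentBlock:
			fmt.Println("text:", block.Text.Value)
		case openai.RefusalContentBlock:
			fmt.Println("model refused:", block.Refusal)
		}
	}
}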
@@ -889,41 +856,6 @@ func (r BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsType) IsKnown() return false } -type BetaThreadRunNewParamsModel string - -const ( - BetaThreadRunNewParamsModelGPT4o BetaThreadRunNewParamsModel = "gpt-4o" - BetaThreadRunNewParamsModelGPT4o2024_05_13 BetaThreadRunNewParamsModel = "gpt-4o-2024-05-13" - BetaThreadRunNewParamsModelGPT4oMini BetaThreadRunNewParamsModel = "gpt-4o-mini" - BetaThreadRunNewParamsModelGPT4oMini2024_07_18 BetaThreadRunNewParamsModel = "gpt-4o-mini-2024-07-18" - BetaThreadRunNewParamsModelGPT4Turbo BetaThreadRunNewParamsModel = "gpt-4-turbo" - BetaThreadRunNewParamsModelGPT4Turbo2024_04_09 BetaThreadRunNewParamsModel = "gpt-4-turbo-2024-04-09" - BetaThreadRunNewParamsModelGPT4_0125Preview BetaThreadRunNewParamsModel = "gpt-4-0125-preview" - BetaThreadRunNewParamsModelGPT4TurboPreview BetaThreadRunNewParamsModel = "gpt-4-turbo-preview" - BetaThreadRunNewParamsModelGPT4_1106Preview BetaThreadRunNewParamsModel = "gpt-4-1106-preview" - BetaThreadRunNewParamsModelGPT4VisionPreview BetaThreadRunNewParamsModel = "gpt-4-vision-preview" - BetaThreadRunNewParamsModelGPT4 BetaThreadRunNewParamsModel = "gpt-4" - BetaThreadRunNewParamsModelGPT4_0314 BetaThreadRunNewParamsModel = "gpt-4-0314" - BetaThreadRunNewParamsModelGPT4_0613 BetaThreadRunNewParamsModel = "gpt-4-0613" - BetaThreadRunNewParamsModelGPT4_32k BetaThreadRunNewParamsModel = "gpt-4-32k" - BetaThreadRunNewParamsModelGPT4_32k0314 BetaThreadRunNewParamsModel = "gpt-4-32k-0314" - BetaThreadRunNewParamsModelGPT4_32k0613 BetaThreadRunNewParamsModel = "gpt-4-32k-0613" - BetaThreadRunNewParamsModelGPT3_5Turbo BetaThreadRunNewParamsModel = "gpt-3.5-turbo" - BetaThreadRunNewParamsModelGPT3_5Turbo16k BetaThreadRunNewParamsModel = "gpt-3.5-turbo-16k" - BetaThreadRunNewParamsModelGPT3_5Turbo0613 BetaThreadRunNewParamsModel = "gpt-3.5-turbo-0613" - BetaThreadRunNewParamsModelGPT3_5Turbo1106 BetaThreadRunNewParamsModel = "gpt-3.5-turbo-1106" - BetaThreadRunNewParamsModelGPT3_5Turbo0125 BetaThreadRunNewParamsModel = "gpt-3.5-turbo-0125" - BetaThreadRunNewParamsModelGPT3_5Turbo16k0613 BetaThreadRunNewParamsModel = "gpt-3.5-turbo-16k-0613" -) - -func (r BetaThreadRunNewParamsModel) IsKnown() bool { - switch r { - case BetaThreadRunNewParamsModelGPT4o, BetaThreadRunNewParamsModelGPT4o2024_05_13, BetaThreadRunNewParamsModelGPT4oMini, BetaThreadRunNewParamsModelGPT4oMini2024_07_18, BetaThreadRunNewParamsModelGPT4Turbo, BetaThreadRunNewParamsModelGPT4Turbo2024_04_09, BetaThreadRunNewParamsModelGPT4_0125Preview, BetaThreadRunNewParamsModelGPT4TurboPreview, BetaThreadRunNewParamsModelGPT4_1106Preview, BetaThreadRunNewParamsModelGPT4VisionPreview, BetaThreadRunNewParamsModelGPT4, BetaThreadRunNewParamsModelGPT4_0314, BetaThreadRunNewParamsModelGPT4_0613, BetaThreadRunNewParamsModelGPT4_32k, BetaThreadRunNewParamsModelGPT4_32k0314, BetaThreadRunNewParamsModelGPT4_32k0613, BetaThreadRunNewParamsModelGPT3_5Turbo, BetaThreadRunNewParamsModelGPT3_5Turbo16k, BetaThreadRunNewParamsModelGPT3_5Turbo0613, BetaThreadRunNewParamsModelGPT3_5Turbo1106, BetaThreadRunNewParamsModelGPT3_5Turbo0125, BetaThreadRunNewParamsModelGPT3_5Turbo16k0613: - return true - } - return false -} - // Controls for how a thread will be truncated prior to the run. Use this to // control the initial context window of the run. type BetaThreadRunNewParamsTruncationStrategy struct { @@ -995,7 +927,7 @@ type BetaThreadRunListParams struct { // `url.Values`.
func (r BetaThreadRunListParams) URLQuery() (v url.Values) { return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ - ArrayFormat: apiquery.ArrayQueryFormatComma, + ArrayFormat: apiquery.ArrayQueryFormatBrackets, NestedFormat: apiquery.NestedQueryFormatBrackets, }) } diff --git a/betathreadrun_test.go b/betathreadrun_test.go index 791e429..636ded8 100644 --- a/betathreadrun_test.go +++ b/betathreadrun_test.go @@ -133,9 +133,8 @@ func TestBetaThreadRunNewWithOptionalParams(t *testing.T) { MaxCompletionTokens: openai.F(int64(256)), MaxPromptTokens: openai.F(int64(256)), Metadata: openai.F[any](map[string]interface{}{}), - Model: openai.F(openai.BetaThreadRunNewParamsModelGPT4o), + Model: openai.F(openai.ChatModelGPT4o), ParallelToolCalls: openai.F(true), - ResponseFormat: openai.F[openai.AssistantResponseFormatOptionUnionParam](openai.AssistantResponseFormatOptionString(openai.AssistantResponseFormatOptionStringNone)), Temperature: openai.F(1.000000), ToolChoice: openai.F[openai.AssistantToolChoiceOptionUnionParam](openai.AssistantToolChoiceOptionString(openai.AssistantToolChoiceOptionStringNone)), Tools: openai.F([]openai.AssistantToolUnionParam{openai.CodeInterpreterToolParam{ diff --git a/betathreadrunstep.go b/betathreadrunstep.go index e383e4e..59779b5 100644 --- a/betathreadrunstep.go +++ b/betathreadrunstep.go @@ -1777,7 +1777,7 @@ type BetaThreadRunStepListParams struct { // `url.Values`. func (r BetaThreadRunStepListParams) URLQuery() (v url.Values) { return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ - ArrayFormat: apiquery.ArrayQueryFormatComma, + ArrayFormat: apiquery.ArrayQueryFormatBrackets, NestedFormat: apiquery.NestedQueryFormatBrackets, }) } diff --git a/betavectorstore.go b/betavectorstore.go index be6dcad..391387f 100644 --- a/betavectorstore.go +++ b/betavectorstore.go @@ -546,7 +546,7 @@ type BetaVectorStoreListParams struct { // `url.Values`. 
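// NOTE (editor): illustrative sketch, not part of this diff. With the
// endpoint-specific model enums removed, run-creation params take the shared
// ChatModel, which is now a plain string alias, so new model names can be passed
// straight through. The IDs below are placeholders; usage mirrors the
// betathreadrun_test.go hunk above.
package example

import (
	"context"

	"github.com/openai/openai-go"
)

func newRun(ctx context.Context, client *openai.Client) error {
	_, err := client.Beta.Threads.Runs.New(ctx, "thread_abc123", openai.BetaThreadRunNewParams{
		AssistantID: openai.F("asst_abc123"),
		// Any model string works; the ChatModel constants are conveniences.
		Model: openai.F(openai.ChatModelGPT4o2024_08_06),
	})
	return err
}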
func (r BetaVectorStoreListParams) URLQuery() (v url.Values) { return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ - ArrayFormat: apiquery.ArrayQueryFormatComma, + ArrayFormat: apiquery.ArrayQueryFormatBrackets, NestedFormat: apiquery.NestedQueryFormatBrackets, }) } diff --git a/betavectorstorefile.go b/betavectorstorefile.go index b04e15b..1b46000 100644 --- a/betavectorstorefile.go +++ b/betavectorstorefile.go @@ -195,15 +195,14 @@ func (r vectorStoreFileLastErrorJSON) RawJSON() string { type VectorStoreFileLastErrorCode string const ( - VectorStoreFileLastErrorCodeInternalError VectorStoreFileLastErrorCode = "internal_error" - VectorStoreFileLastErrorCodeFileNotFound VectorStoreFileLastErrorCode = "file_not_found" - VectorStoreFileLastErrorCodeParsingError VectorStoreFileLastErrorCode = "parsing_error" - VectorStoreFileLastErrorCodeUnhandledMimeType VectorStoreFileLastErrorCode = "unhandled_mime_type" + VectorStoreFileLastErrorCodeServerError VectorStoreFileLastErrorCode = "server_error" + VectorStoreFileLastErrorCodeUnsupportedFile VectorStoreFileLastErrorCode = "unsupported_file" + VectorStoreFileLastErrorCodeInvalidFile VectorStoreFileLastErrorCode = "invalid_file" ) func (r VectorStoreFileLastErrorCode) IsKnown() bool { switch r { - case VectorStoreFileLastErrorCodeInternalError, VectorStoreFileLastErrorCodeFileNotFound, VectorStoreFileLastErrorCodeParsingError, VectorStoreFileLastErrorCodeUnhandledMimeType: + case VectorStoreFileLastErrorCodeServerError, VectorStoreFileLastErrorCodeUnsupportedFile, VectorStoreFileLastErrorCodeInvalidFile: return true } return false @@ -629,7 +628,7 @@ type BetaVectorStoreFileListParams struct { // `url.Values`. func (r BetaVectorStoreFileListParams) URLQuery() (v url.Values) { return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ - ArrayFormat: apiquery.ArrayQueryFormatComma, + ArrayFormat: apiquery.ArrayQueryFormatBrackets, NestedFormat: apiquery.NestedQueryFormatBrackets, }) } diff --git a/betavectorstorefilebatch.go b/betavectorstorefilebatch.go index 12047a4..6545ffd 100644 --- a/betavectorstorefilebatch.go +++ b/betavectorstorefilebatch.go @@ -374,7 +374,7 @@ type BetaVectorStoreFileBatchListFilesParams struct { // as `url.Values`. 
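// NOTE (editor): illustrative sketch, not part of this diff. Callers matching the
// old last-error codes (internal_error, file_not_found, parsing_error,
// unhandled_mime_type) should switch on the three new values. Separately, the
// list endpoints in this diff now serialize array query params in bracket form
// (roughly key[]=a&key[]=b) instead of comma-joined values.
package example

import "github.com/openai/openai-go"

func shouldRetry(file openai.VectorStoreFile) bool {
	switch file.LastError.Code {
	case openai.VectorStoreFileLastErrorCodeServerError:
		return true // likely transient; re-attach the file
	case openai.VectorStoreFileLastErrorCodeUnsupportedFile,
		openai.VectorStoreFileLastErrorCodeInvalidFile:
		return false // the file itself is the problem
	}
	return false
}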
func (r BetaVectorStoreFileBatchListFilesParams) URLQuery() (v url.Values) { return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ - ArrayFormat: apiquery.ArrayQueryFormatComma, + ArrayFormat: apiquery.ArrayQueryFormatBrackets, NestedFormat: apiquery.NestedQueryFormatBrackets, }) } diff --git a/chat.go b/chat.go index 999df42..e20be1e 100644 --- a/chat.go +++ b/chat.go @@ -27,10 +27,11 @@ func NewChatService(opts ...option.RequestOption) (r *ChatService) { return } -type ChatModel string +type ChatModel = string const ( ChatModelGPT4o ChatModel = "gpt-4o" + ChatModelGPT4o2024_08_06 ChatModel = "gpt-4o-2024-08-06" ChatModelGPT4o2024_05_13 ChatModel = "gpt-4o-2024-05-13" ChatModelGPT4oMini ChatModel = "gpt-4o-mini" ChatModelGPT4oMini2024_07_18 ChatModel = "gpt-4o-mini-2024-07-18" @@ -54,11 +55,3 @@ const ( ChatModelGPT3_5Turbo0125 ChatModel = "gpt-3.5-turbo-0125" ChatModelGPT3_5Turbo16k0613 ChatModel = "gpt-3.5-turbo-16k-0613" ) - -func (r ChatModel) IsKnown() bool { - switch r { - case ChatModelGPT4o, ChatModelGPT4o2024_05_13, ChatModelGPT4oMini, ChatModelGPT4oMini2024_07_18, ChatModelGPT4Turbo, ChatModelGPT4Turbo2024_04_09, ChatModelGPT4_0125Preview, ChatModelGPT4TurboPreview, ChatModelGPT4_1106Preview, ChatModelGPT4VisionPreview, ChatModelGPT4, ChatModelGPT4_0314, ChatModelGPT4_0613, ChatModelGPT4_32k, ChatModelGPT4_32k0314, ChatModelGPT4_32k0613, ChatModelGPT3_5Turbo, ChatModelGPT3_5Turbo16k, ChatModelGPT3_5Turbo0301, ChatModelGPT3_5Turbo0613, ChatModelGPT3_5Turbo1106, ChatModelGPT3_5Turbo0125, ChatModelGPT3_5Turbo16k0613: - return true - } - return false -} diff --git a/chatcompletion.go b/chatcompletion.go index f6331cb..664eae4 100644 --- a/chatcompletion.go +++ b/chatcompletion.go @@ -12,73 +12,8 @@ import ( "github.com/openai/openai-go/option" "github.com/openai/openai-go/packages/ssestream" "github.com/openai/openai-go/shared" - "github.com/tidwall/sjson" ) -func UserMessage(content string) ChatCompletionMessageParamUnion { - return ChatCompletionUserMessageParam{ - Role: F(ChatCompletionUserMessageParamRoleUser), - Content: F[ChatCompletionUserMessageParamContentUnion]( - shared.UnionString(content), - ), - } -} - -func UserMessageBlocks(blocks ...ChatCompletionContentPartUnionParam) ChatCompletionMessageParamUnion { - return ChatCompletionUserMessageParam{ - Role: F(ChatCompletionUserMessageParamRoleUser), - Content: F[ChatCompletionUserMessageParamContentUnion]( - ChatCompletionUserMessageParamContentArrayOfContentParts(blocks), - ), - } -} - -func UserMessageTextBlock(content string) ChatCompletionContentPartUnionParam { - return ChatCompletionContentPartTextParam{ - Type: F(ChatCompletionContentPartTextTypeText), - Text: F(content), - } -} - -func UserMessageImageBlock(url string) ChatCompletionContentPartUnionParam { - return ChatCompletionContentPartImageParam{ - Type: F(ChatCompletionContentPartImageTypeImageURL), - ImageURL: F(ChatCompletionContentPartImageImageURLParam{ - URL: F(url), - }), - } -} - -func AssistantMessage(content string) ChatCompletionMessageParamUnion { - return ChatCompletionAssistantMessageParam{ - Role: F(ChatCompletionAssistantMessageParamRoleAssistant), - Content: F(content), - } -} - -func ToolMessage(toolCallID, content string) ChatCompletionMessageParamUnion { - return ChatCompletionToolMessageParam{ - Role: F(ChatCompletionToolMessageParamRoleTool), - ToolCallID: F(toolCallID), - Content: F(content), - } -} - -func SystemMessage(content string) ChatCompletionMessageParamUnion { - return ChatCompletionSystemMessageParam{ - Role: 
F(ChatCompletionSystemMessageParamRoleSystem), - Content: F(content), - } -} - -func FunctionMessage(name, content string) ChatCompletionMessageParamUnion { - return ChatCompletionFunctionMessageParam{ - Role: F(ChatCompletionFunctionMessageParamRoleFunction), - Name: F(name), - Content: F(content), - } -} - // ChatCompletionService contains methods and other services that help with // interacting with the openai API. // @@ -231,7 +166,9 @@ func (r ChatCompletionChoicesFinishReason) IsKnown() bool { // Log probability information for the choice. type ChatCompletionChoicesLogprobs struct { // A list of message content tokens with log probability information. - Content []ChatCompletionTokenLogprob `json:"content,required,nullable"` + Content []ChatCompletionTokenLogprob `json:"content,required,nullable"` + // A list of message refusal tokens with log probability information. + Refusal []ChatCompletionTokenLogprob `json:"refusal,required,nullable"` JSON chatCompletionChoicesLogprobsJSON `json:"-"` } @@ -239,6 +176,7 @@ type ChatCompletionChoicesLogprobs struct { // [ChatCompletionChoicesLogprobs] type chatCompletionChoicesLogprobsJSON struct { Content apijson.Field + Refusal apijson.Field raw string ExtraFields map[string]apijson.Field } @@ -288,13 +226,15 @@ type ChatCompletionAssistantMessageParam struct { Role param.Field[ChatCompletionAssistantMessageParamRole] `json:"role,required"` // The contents of the assistant message. Required unless `tool_calls` or // `function_call` is specified. - Content param.Field[string] `json:"content"` + Content param.Field[ChatCompletionAssistantMessageParamContentUnion] `json:"content"` // Deprecated and replaced by `tool_calls`. The name and arguments of a function // that should be called, as generated by the model. FunctionCall param.Field[ChatCompletionAssistantMessageParamFunctionCall] `json:"function_call"` // An optional name for the participant. Provides the model information to // differentiate between participants of the same role. Name param.Field[string] `json:"name"` + // The refusal message by the assistant. + Refusal param.Field[string] `json:"refusal"` // The tool calls generated by the model, such as function calls. ToolCalls param.Field[[]ChatCompletionMessageToolCallParam] `json:"tool_calls"` } @@ -320,6 +260,59 @@ func (r ChatCompletionAssistantMessageParamRole) IsKnown() bool { return false } +// The contents of the assistant message. Required unless `tool_calls` or +// `function_call` is specified. +// +// Satisfied by [shared.UnionString], +// [ChatCompletionAssistantMessageParamContentArrayOfContentParts]. +type ChatCompletionAssistantMessageParamContentUnion interface { + ImplementsChatCompletionAssistantMessageParamContentUnion() +} + +type ChatCompletionAssistantMessageParamContentArrayOfContentParts []ChatCompletionAssistantMessageParamContentArrayOfContentPartsUnionItem + +func (r ChatCompletionAssistantMessageParamContentArrayOfContentParts) ImplementsChatCompletionAssistantMessageParamContentUnion() { +} + +type ChatCompletionAssistantMessageParamContentArrayOfContentPart struct { + // The type of the content part. + Type param.Field[ChatCompletionAssistantMessageParamContentArrayOfContentPartsType] `json:"type,required"` + // The text content. + Text param.Field[string] `json:"text"` + // The refusal message generated by the model. 
+ Refusal param.Field[string] `json:"refusal"` +} + +func (r ChatCompletionAssistantMessageParamContentArrayOfContentPart) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ChatCompletionAssistantMessageParamContentArrayOfContentPart) implementsChatCompletionAssistantMessageParamContentArrayOfContentPartsUnionItem() { +} + +// Satisfied by [ChatCompletionContentPartTextParam], +// [ChatCompletionContentPartRefusalParam], +// [ChatCompletionAssistantMessageParamContentArrayOfContentPart]. +type ChatCompletionAssistantMessageParamContentArrayOfContentPartsUnionItem interface { + implementsChatCompletionAssistantMessageParamContentArrayOfContentPartsUnionItem() +} + +// The type of the content part. +type ChatCompletionAssistantMessageParamContentArrayOfContentPartsType string + +const ( + ChatCompletionAssistantMessageParamContentArrayOfContentPartsTypeText ChatCompletionAssistantMessageParamContentArrayOfContentPartsType = "text" + ChatCompletionAssistantMessageParamContentArrayOfContentPartsTypeRefusal ChatCompletionAssistantMessageParamContentArrayOfContentPartsType = "refusal" +) + +func (r ChatCompletionAssistantMessageParamContentArrayOfContentPartsType) IsKnown() bool { + switch r { + case ChatCompletionAssistantMessageParamContentArrayOfContentPartsTypeText, ChatCompletionAssistantMessageParamContentArrayOfContentPartsTypeRefusal: + return true + } + return false +} + // Deprecated and replaced by `tool_calls`. The name and arguments of a function // that should be called, as generated by the model. type ChatCompletionAssistantMessageParamFunctionCall struct { @@ -433,6 +426,8 @@ type ChatCompletionChunkChoicesDelta struct { // Deprecated and replaced by `tool_calls`. The name and arguments of a function // that should be called, as generated by the model. FunctionCall ChatCompletionChunkChoicesDeltaFunctionCall `json:"function_call"` + // The refusal message generated by the model. + Refusal string `json:"refusal,nullable"` // The role of the author of this message. Role ChatCompletionChunkChoicesDeltaRole `json:"role"` ToolCalls []ChatCompletionChunkChoicesDeltaToolCall `json:"tool_calls"` @@ -444,6 +439,7 @@ type ChatCompletionChunkChoicesDelta struct { type chatCompletionChunkChoicesDeltaJSON struct { Content apijson.Field FunctionCall apijson.Field + Refusal apijson.Field Role apijson.Field ToolCalls apijson.Field raw string @@ -605,7 +601,9 @@ func (r ChatCompletionChunkChoicesFinishReason) IsKnown() bool { // Log probability information for the choice. type ChatCompletionChunkChoicesLogprobs struct { // A list of message content tokens with log probability information. - Content []ChatCompletionTokenLogprob `json:"content,required,nullable"` + Content []ChatCompletionTokenLogprob `json:"content,required,nullable"` + // A list of message refusal tokens with log probability information. + Refusal []ChatCompletionTokenLogprob `json:"refusal,required,nullable"` JSON chatCompletionChunkChoicesLogprobsJSON `json:"-"` } @@ -613,6 +611,7 @@ type ChatCompletionChunkChoicesLogprobs struct { // [ChatCompletionChunkChoicesLogprobs] type chatCompletionChunkChoicesLogprobsJSON struct { Content apijson.Field + Refusal apijson.Field raw string ExtraFields map[string]apijson.Field } @@ -750,6 +749,38 @@ func (r ChatCompletionContentPartImageType) IsKnown() bool { return false } +type ChatCompletionContentPartRefusalParam struct { + // The refusal message generated by the model. 
+ Refusal param.Field[string] `json:"refusal,required"` + // The type of the content part. + Type param.Field[ChatCompletionContentPartRefusalType] `json:"type,required"` +} + +func (r ChatCompletionContentPartRefusalParam) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +func (r ChatCompletionContentPartRefusalParam) implementsChatCompletionAssistantMessageParamContentArrayOfContentPartsUnionItem() { +} + +func (r ChatCompletionContentPartRefusalParam) implementsChatCompletionAssistantMessageParamContentArrayOfContentPartsUnion() { +} + +// The type of the content part. +type ChatCompletionContentPartRefusalType string + +const ( + ChatCompletionContentPartRefusalTypeRefusal ChatCompletionContentPartRefusalType = "refusal" +) + +func (r ChatCompletionContentPartRefusalType) IsKnown() bool { + switch r { + case ChatCompletionContentPartRefusalTypeRefusal: + return true + } + return false +} + type ChatCompletionContentPartTextParam struct { // The text content. Text param.Field[string] `json:"text,required"` @@ -761,8 +792,14 @@ func (r ChatCompletionContentPartTextParam) MarshalJSON() (data []byte, err erro return apijson.MarshalRoot(r) } +func (r ChatCompletionContentPartTextParam) implementsChatCompletionAssistantMessageParamContentArrayOfContentPartsUnionItem() { +} + func (r ChatCompletionContentPartTextParam) implementsChatCompletionContentPartUnionParam() {} +func (r ChatCompletionContentPartTextParam) implementsChatCompletionAssistantMessageParamContentArrayOfContentPartsUnion() { +} + // The type of the content part. type ChatCompletionContentPartTextType string @@ -825,6 +862,8 @@ func (r ChatCompletionFunctionMessageParamRole) IsKnown() bool { type ChatCompletionMessage struct { // The contents of the message. Content string `json:"content,required,nullable"` + // The refusal message generated by the model. + Refusal string `json:"refusal,required,nullable"` // The role of the author of this message. Role ChatCompletionMessageRole `json:"role,required"` // Deprecated and replaced by `tool_calls`. The name and arguments of a function @@ -839,6 +878,7 @@ type ChatCompletionMessage struct { // [ChatCompletionMessage] type chatCompletionMessageJSON struct { Content apijson.Field + Refusal apijson.Field Role apijson.Field FunctionCall apijson.Field ToolCalls apijson.Field @@ -850,35 +890,10 @@ func (r *ChatCompletionMessage) UnmarshalJSON(data []byte) (err error) { return apijson.UnmarshalRoot(data, r) } -func (r ChatCompletionMessage) MarshalJSON() (data []byte, err error) { - s := "" - s, _ = sjson.Set(s, "role", r.Role) - - if r.FunctionCall.Name != "" { - b, err := apijson.Marshal(r.FunctionCall) - if err != nil { - return nil, err - } - s, _ = sjson.SetRaw(s, "function_call", string(b)) - } else if len(r.ToolCalls) > 0 { - b, err := apijson.Marshal(r.ToolCalls) - if err != nil { - return nil, err - } - s, _ = sjson.SetRaw(s, "tool_calls", string(b)) - } else { - s, _ = sjson.Set(s, "content", r.Content) - } - - return []byte(s), nil -} - func (r chatCompletionMessageJSON) RawJSON() string { return r.raw } -func (r ChatCompletionMessage) implementsChatCompletionMessageParamUnion() {} - // The role of the author of this message. type ChatCompletionMessageRole string @@ -930,7 +945,9 @@ type ChatCompletionMessageParam struct { Role param.Field[ChatCompletionMessageParamRole] `json:"role,required"` // An optional name for the participant. Provides the model information to // differentiate between participants of the same role. 
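// NOTE (editor): illustrative sketch, not part of this diff. Assistant message
// content is now a union: either a plain string via shared.UnionString, or an
// array of the text/refusal content parts defined above. The text is a made-up
// example.
package example

import (
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/shared"
)

func assistantTurn() openai.ChatCompletionMessageParamUnion {
	return openai.ChatCompletionAssistantMessageParam{
		Role: openai.F(openai.ChatCompletionAssistantMessageParamRoleAssistant),
		Content: openai.F[openai.ChatCompletionAssistantMessageParamContentUnion](
			shared.UnionString("The capital of France is Paris."),
		),
	}
}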
- Name param.Field[string] `json:"name"` + Name param.Field[string] `json:"name"` + // The refusal message by the assistant. + Refusal param.Field[string] `json:"refusal"` ToolCalls param.Field[interface{}] `json:"tool_calls,required"` FunctionCall param.Field[interface{}] `json:"function_call,required"` // Tool call that this message is responding to. @@ -947,8 +964,6 @@ func (r ChatCompletionMessageParam) implementsChatCompletionMessageParamUnion() // [ChatCompletionUserMessageParam], [ChatCompletionAssistantMessageParam], // [ChatCompletionToolMessageParam], [ChatCompletionFunctionMessageParam], // [ChatCompletionMessageParam]. -// -// This union is additionally satisfied by the return types [ChatCompletionMessage] type ChatCompletionMessageParamUnion interface { implementsChatCompletionMessageParamUnion() } @@ -1125,7 +1140,7 @@ func (r ChatCompletionStreamOptionsParam) MarshalJSON() (data []byte, err error) type ChatCompletionSystemMessageParam struct { // The contents of the system message. - Content param.Field[string] `json:"content,required"` + Content param.Field[ChatCompletionSystemMessageParamContentUnion] `json:"content,required"` // The role of the messages author, in this case `system`. Role param.Field[ChatCompletionSystemMessageParamRole] `json:"role,required"` // An optional name for the participant. Provides the model information to @@ -1139,6 +1154,19 @@ func (r ChatCompletionSystemMessageParam) MarshalJSON() (data []byte, err error) func (r ChatCompletionSystemMessageParam) implementsChatCompletionMessageParamUnion() {} +// The contents of the system message. +// +// Satisfied by [shared.UnionString], +// [ChatCompletionSystemMessageParamContentArrayOfContentParts]. +type ChatCompletionSystemMessageParamContentUnion interface { + ImplementsChatCompletionSystemMessageParamContentUnion() +} + +type ChatCompletionSystemMessageParamContentArrayOfContentParts []ChatCompletionContentPartTextParam + +func (r ChatCompletionSystemMessageParamContentArrayOfContentParts) ImplementsChatCompletionSystemMessageParamContentUnion() { +} + // The role of the messages author, in this case `system`. type ChatCompletionSystemMessageParamRole string @@ -1289,7 +1317,7 @@ func (r ChatCompletionToolChoiceOptionString) implementsChatCompletionToolChoice type ChatCompletionToolMessageParam struct { // The contents of the tool message. - Content param.Field[string] `json:"content,required"` + Content param.Field[ChatCompletionToolMessageParamContentUnion] `json:"content,required"` // The role of the messages author, in this case `tool`. Role param.Field[ChatCompletionToolMessageParamRole] `json:"role,required"` // Tool call that this message is responding to. @@ -1302,6 +1330,19 @@ func (r ChatCompletionToolMessageParam) MarshalJSON() (data []byte, err error) { func (r ChatCompletionToolMessageParam) implementsChatCompletionMessageParamUnion() {} +// The contents of the tool message. +// +// Satisfied by [shared.UnionString], +// [ChatCompletionToolMessageParamContentArrayOfContentParts]. +type ChatCompletionToolMessageParamContentUnion interface { + ImplementsChatCompletionToolMessageParamContentUnion() +} + +type ChatCompletionToolMessageParamContentArrayOfContentParts []ChatCompletionContentPartTextParam + +func (r ChatCompletionToolMessageParamContentArrayOfContentParts) ImplementsChatCompletionToolMessageParamContentUnion() { +} + // The role of the messages author, in this case `tool`. 
type ChatCompletionToolMessageParamRole string @@ -1426,6 +1467,8 @@ type ChatCompletionNewParams struct { // [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) PresencePenalty param.Field[float64] `json:"presence_penalty"` // An object specifying the format that the model must output. Compatible with + // [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + // [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and // all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. // @@ -1439,7 +1482,7 @@ type ChatCompletionNewParams struct { // the message content may be partially cut off if `finish_reason="length"`, which // indicates the generation exceeded `max_tokens` or the conversation exceeded the // max context length. - ResponseFormat param.Field[ChatCompletionNewParamsResponseFormat] `json:"response_format"` + ResponseFormat param.Field[ChatCompletionNewParamsResponseFormatUnion] `json:"response_format"` // This feature is in Beta. If specified, our system will make a best effort to // sample deterministically, such that repeated requests with the same `seed` and // parameters should return the same result. Determinism is not guaranteed, and you @@ -1562,6 +1605,8 @@ func (r ChatCompletionNewParamsFunction) MarshalJSON() (data []byte, err error) } // An object specifying the format that the model must output. Compatible with +// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), // [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and // all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. // @@ -1576,25 +1621,54 @@ func (r ChatCompletionNewParamsFunction) MarshalJSON() (data []byte, err error) // indicates the generation exceeded `max_tokens` or the conversation exceeded the // max context length. type ChatCompletionNewParamsResponseFormat struct { - // Must be one of `text` or `json_object`. - Type param.Field[ChatCompletionNewParamsResponseFormatType] `json:"type"` + // The type of response format being defined: `text` + Type param.Field[ChatCompletionNewParamsResponseFormatType] `json:"type,required"` + JSONSchema param.Field[interface{}] `json:"json_schema,required"` } func (r ChatCompletionNewParamsResponseFormat) MarshalJSON() (data []byte, err error) { return apijson.MarshalRoot(r) } -// Must be one of `text` or `json_object`. +func (r ChatCompletionNewParamsResponseFormat) ImplementsChatCompletionNewParamsResponseFormatUnion() { +} + +// An object specifying the format that the model must output. Compatible with +// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), +// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), +// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and +// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. +// +// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the +// message the model generates is valid JSON. +// +// **Important:** when using JSON mode, you **must** also instruct the model to +// produce JSON yourself via a system or user message. Without this, the model may +// generate an unending stream of whitespace until the generation reaches the token +// limit, resulting in a long-running and seemingly "stuck" request. 
Also note that +// the message content may be partially cut off if `finish_reason="length"`, which +// indicates the generation exceeded `max_tokens` or the conversation exceeded the +// max context length. +// +// Satisfied by [shared.ResponseFormatTextParam], +// [shared.ResponseFormatJSONObjectParam], [shared.ResponseFormatJSONSchemaParam], +// [ChatCompletionNewParamsResponseFormat]. +type ChatCompletionNewParamsResponseFormatUnion interface { + ImplementsChatCompletionNewParamsResponseFormatUnion() +} + +// The type of response format being defined: `text` type ChatCompletionNewParamsResponseFormatType string const ( ChatCompletionNewParamsResponseFormatTypeText ChatCompletionNewParamsResponseFormatType = "text" ChatCompletionNewParamsResponseFormatTypeJSONObject ChatCompletionNewParamsResponseFormatType = "json_object" + ChatCompletionNewParamsResponseFormatTypeJSONSchema ChatCompletionNewParamsResponseFormatType = "json_schema" ) func (r ChatCompletionNewParamsResponseFormatType) IsKnown() bool { switch r { - case ChatCompletionNewParamsResponseFormatTypeText, ChatCompletionNewParamsResponseFormatTypeJSONObject: + case ChatCompletionNewParamsResponseFormatTypeText, ChatCompletionNewParamsResponseFormatTypeJSONObject, ChatCompletionNewParamsResponseFormatTypeJSONSchema: return true } return false diff --git a/chatcompletion_test.go b/chatcompletion_test.go index e93623d..6c0be39 100644 --- a/chatcompletion_test.go +++ b/chatcompletion_test.go @@ -28,7 +28,7 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) { ) _, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{ Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionSystemMessageParam{ - Content: openai.F("content"), + Content: openai.F[openai.ChatCompletionSystemMessageParamContentUnion](shared.UnionString("string")), Role: openai.F(openai.ChatCompletionSystemMessageParamRoleSystem), Name: openai.F("name"), }}), @@ -50,8 +50,8 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) { N: openai.F(int64(1)), ParallelToolCalls: openai.F(true), PresencePenalty: openai.F(-2.000000), - ResponseFormat: openai.F(openai.ChatCompletionNewParamsResponseFormat{ - Type: openai.F(openai.ChatCompletionNewParamsResponseFormatTypeJSONObject), + ResponseFormat: openai.F[openai.ChatCompletionNewParamsResponseFormatUnion](shared.ResponseFormatTextParam{ + Type: openai.F(shared.ResponseFormatTextTypeText), }), Seed: openai.F(int64(-9007199254740991)), ServiceTier: openai.F(openai.ChatCompletionNewParamsServiceTierAuto), @@ -69,6 +69,7 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) { Parameters: openai.F(shared.FunctionParameters{ "foo": "bar", }), + Strict: openai.F(true), }), }, { Type: openai.F(openai.ChatCompletionToolTypeFunction), @@ -78,6 +79,7 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) { Parameters: openai.F(shared.FunctionParameters{ "foo": "bar", }), + Strict: openai.F(true), }), }, { Type: openai.F(openai.ChatCompletionToolTypeFunction), @@ -87,6 +89,7 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) { Parameters: openai.F(shared.FunctionParameters{ "foo": "bar", }), + Strict: openai.F(true), }), }}), TopLogprobs: openai.F(int64(0)), diff --git a/file.go b/file.go index 76fda1e..25d90bb 100644 --- a/file.go +++ b/file.go @@ -331,7 +331,7 @@ type FileListParams struct { // URLQuery serializes [FileListParams]'s query parameters as `url.Values`. 
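// NOTE (editor): illustrative sketch, not part of this diff, combining two changes
// above: system-message content as a union and the new response_format union with
// a json_schema variant. The prompt and schema are made-up examples; the Strict
// field mirrors the strict-schema docs in shared/shared.go later in this diff.
package example

import (
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/shared"
)

func structuredParams() openai.ChatCompletionNewParams {
	return openai.ChatCompletionNewParams{
		Model: openai.F(openai.ChatModelGPT4o2024_08_06),
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
			openai.ChatCompletionSystemMessageParam{
				Role:    openai.F(openai.ChatCompletionSystemMessageParamRoleSystem),
				Content: openai.F[openai.ChatCompletionSystemMessageParamContentUnion](shared.UnionString("Answer in JSON.")),
			},
		}),
		ResponseFormat: openai.F[openai.ChatCompletionNewParamsResponseFormatUnion](
			shared.ResponseFormatJSONSchemaParam{
				Type: openai.F(shared.ResponseFormatJSONSchemaTypeJSONSchema),
				JSONSchema: openai.F(shared.ResponseFormatJSONSchemaJSONSchemaParam{
					Name:   openai.F("answer"),
					Schema: openai.F(map[string]interface{}{"type": "object"}),
					Strict: openai.F(true),
				}),
			},
		),
	}
}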
func (r FileListParams) URLQuery() (v url.Values) { return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ - ArrayFormat: apiquery.ArrayQueryFormatComma, + ArrayFormat: apiquery.ArrayQueryFormatBrackets, NestedFormat: apiquery.NestedQueryFormatBrackets, }) } diff --git a/finetuningjob.go b/finetuningjob.go index fe162ed..be03054 100644 --- a/finetuningjob.go +++ b/finetuningjob.go @@ -498,7 +498,7 @@ func (r fineTuningJobWandbIntegrationJSON) RawJSON() string { type FineTuningJobNewParams struct { // The name of the model to fine-tune. You can select one of the - // [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + // [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). Model param.Field[FineTuningJobNewParamsModel] `json:"model,required"` // The ID of an uploaded file that contains training data. // @@ -528,7 +528,7 @@ type FineTuningJobNewParams struct { // name. // // For example, a `suffix` of "custom-model-name" would produce a model name like - // `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + // `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. Suffix param.Field[string] `json:"suffix"` // The ID of an uploaded file that contains validation data. // @@ -555,11 +555,12 @@ const ( FineTuningJobNewParamsModelBabbage002 FineTuningJobNewParamsModel = "babbage-002" FineTuningJobNewParamsModelDavinci002 FineTuningJobNewParamsModel = "davinci-002" FineTuningJobNewParamsModelGPT3_5Turbo FineTuningJobNewParamsModel = "gpt-3.5-turbo" + FineTuningJobNewParamsModelGPT4oMini FineTuningJobNewParamsModel = "gpt-4o-mini" ) func (r FineTuningJobNewParamsModel) IsKnown() bool { switch r { - case FineTuningJobNewParamsModelBabbage002, FineTuningJobNewParamsModelDavinci002, FineTuningJobNewParamsModelGPT3_5Turbo: + case FineTuningJobNewParamsModelBabbage002, FineTuningJobNewParamsModelDavinci002, FineTuningJobNewParamsModelGPT3_5Turbo, FineTuningJobNewParamsModelGPT4oMini: return true } return false @@ -727,7 +728,7 @@ type FineTuningJobListParams struct { // `url.Values`. func (r FineTuningJobListParams) URLQuery() (v url.Values) { return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ - ArrayFormat: apiquery.ArrayQueryFormatComma, + ArrayFormat: apiquery.ArrayQueryFormatBrackets, NestedFormat: apiquery.NestedQueryFormatBrackets, }) } @@ -743,7 +744,7 @@ type FineTuningJobListEventsParams struct { // `url.Values`. func (r FineTuningJobListEventsParams) URLQuery() (v url.Values) { return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ - ArrayFormat: apiquery.ArrayQueryFormatComma, + ArrayFormat: apiquery.ArrayQueryFormatBrackets, NestedFormat: apiquery.NestedQueryFormatBrackets, }) } diff --git a/finetuningjobcheckpoint.go b/finetuningjobcheckpoint.go index 6279e24..4ca6383 100644 --- a/finetuningjobcheckpoint.go +++ b/finetuningjobcheckpoint.go @@ -165,7 +165,7 @@ type FineTuningJobCheckpointListParams struct { // `url.Values`. 
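// NOTE (editor): illustrative sketch, not part of this diff. gpt-4o-mini joins
// the accepted fine-tuning base models above; the file ID and suffix are
// placeholders, and per the updated docs the suffix yields a name like
// ft:gpt-4o-mini:openai:custom-model-name:7p4lURel.
package example

import (
	"context"

	"github.com/openai/openai-go"
)

func newFineTune(ctx context.Context, client *openai.Client) error {
	_, err := client.FineTuning.Jobs.New(ctx, openai.FineTuningJobNewParams{
		Model:        openai.F(openai.FineTuningJobNewParamsModelGPT4oMini),
		TrainingFile: openai.F("file-abc123"),
		Suffix:       openai.F("custom-model-name"),
	})
	return err
}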
func (r FineTuningJobCheckpointListParams) URLQuery() (v url.Values) { return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ - ArrayFormat: apiquery.ArrayQueryFormatComma, + ArrayFormat: apiquery.ArrayQueryFormatBrackets, NestedFormat: apiquery.NestedQueryFormatBrackets, }) } diff --git a/go.mod b/go.mod index a487ea3..1e064e6 100644 --- a/go.mod +++ b/go.mod @@ -3,27 +3,9 @@ module github.com/openai/openai-go go 1.19 require ( - github.com/tidwall/gjson v1.14.4 - github.com/tidwall/sjson v1.2.5 -) - -require ( - github.com/google/uuid v1.6.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/tidwall/gjson v1.14.4 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect -) - -require ( - // NOTE: these dependencies are only used for the `azure` subpackage. - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 - github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + github.com/tidwall/sjson v1.2.5 // indirect ) diff --git a/go.sum b/go.sum index 240415f..569e555 100644 --- a/go.sum +++ b/go.sum @@ -1,22 +1,5 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= -github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= 
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= @@ -27,13 +10,3 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/image.go b/image.go index 89af593..7a17c05 100644 --- a/image.go +++ b/image.go @@ -89,6 +89,13 @@ func (r imageJSON) RawJSON() string { return r.raw } +type ImageModel = string + +const ( + ImageModelDallE2 ImageModel = "dall-e-2" + ImageModelDallE3 ImageModel = "dall-e-3" +) + type ImagesResponse struct { Created int64 `json:"created,required"` Data []Image `json:"data,required"` @@ -117,7 +124,7 @@ type ImageNewVariationParams struct { Image param.Field[io.Reader] `json:"image,required" format:"binary"` // The model to use for image generation. Only `dall-e-2` is supported at this // time. - Model param.Field[ImageNewVariationParamsModel] `json:"model"` + Model param.Field[ImageModel] `json:"model"` // The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only // `n=1` is supported. N param.Field[int64] `json:"n"` @@ -149,20 +156,6 @@ func (r ImageNewVariationParams) MarshalMultipart() (data []byte, contentType st return buf.Bytes(), writer.FormDataContentType(), nil } -type ImageNewVariationParamsModel string - -const ( - ImageNewVariationParamsModelDallE2 ImageNewVariationParamsModel = "dall-e-2" -) - -func (r ImageNewVariationParamsModel) IsKnown() bool { - switch r { - case ImageNewVariationParamsModelDallE2: - return true - } - return false -} - // The format in which the generated images are returned. Must be one of `url` or // `b64_json`. URLs are only valid for 60 minutes after the image has been // generated. @@ -212,7 +205,7 @@ type ImageEditParams struct { Mask param.Field[io.Reader] `json:"mask" format:"binary"` // The model to use for image generation. Only `dall-e-2` is supported at this // time. - Model param.Field[ImageEditParamsModel] `json:"model"` + Model param.Field[ImageModel] `json:"model"` // The number of images to generate. Must be between 1 and 10. N param.Field[int64] `json:"n"` // The format in which the generated images are returned. 
Must be one of `url` or @@ -243,20 +236,6 @@ func (r ImageEditParams) MarshalMultipart() (data []byte, contentType string, er return buf.Bytes(), writer.FormDataContentType(), nil } -type ImageEditParamsModel string - -const ( - ImageEditParamsModelDallE2 ImageEditParamsModel = "dall-e-2" -) - -func (r ImageEditParamsModel) IsKnown() bool { - switch r { - case ImageEditParamsModelDallE2: - return true - } - return false -} - // The format in which the generated images are returned. Must be one of `url` or // `b64_json`. URLs are only valid for 60 minutes after the image has been // generated. @@ -298,7 +277,7 @@ type ImageGenerateParams struct { // characters for `dall-e-2` and 4000 characters for `dall-e-3`. Prompt param.Field[string] `json:"prompt,required"` // The model to use for image generation. - Model param.Field[ImageGenerateParamsModel] `json:"model"` + Model param.Field[ImageModel] `json:"model"` // The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only // `n=1` is supported. N param.Field[int64] `json:"n"` @@ -329,21 +308,6 @@ func (r ImageGenerateParams) MarshalJSON() (data []byte, err error) { return apijson.MarshalRoot(r) } -type ImageGenerateParamsModel string - -const ( - ImageGenerateParamsModelDallE2 ImageGenerateParamsModel = "dall-e-2" - ImageGenerateParamsModelDallE3 ImageGenerateParamsModel = "dall-e-3" -) - -func (r ImageGenerateParamsModel) IsKnown() bool { - switch r { - case ImageGenerateParamsModelDallE2, ImageGenerateParamsModelDallE3: - return true - } - return false -} - // The quality of the image that will be generated. `hd` creates images with finer // details and greater consistency across the image. This param is only supported // for `dall-e-3`. diff --git a/image_test.go b/image_test.go index 755003d..308a93d 100644 --- a/image_test.go +++ b/image_test.go @@ -29,7 +29,7 @@ func TestImageNewVariationWithOptionalParams(t *testing.T) { ) _, err := client.Images.NewVariation(context.TODO(), openai.ImageNewVariationParams{ Image: openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))), - Model: openai.F(openai.ImageNewVariationParamsModelDallE2), + Model: openai.F(openai.ImageModelDallE2), N: openai.F(int64(1)), ResponseFormat: openai.F(openai.ImageNewVariationParamsResponseFormatURL), Size: openai.F(openai.ImageNewVariationParamsSize1024x1024), @@ -60,7 +60,7 @@ func TestImageEditWithOptionalParams(t *testing.T) { Image: openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))), Prompt: openai.F("A cute baby sea otter wearing a beret"), Mask: openai.F(io.Reader(bytes.NewBuffer([]byte("some file contents")))), - Model: openai.F(openai.ImageEditParamsModelDallE2), + Model: openai.F(openai.ImageModelDallE2), N: openai.F(int64(1)), ResponseFormat: openai.F(openai.ImageEditParamsResponseFormatURL), Size: openai.F(openai.ImageEditParamsSize1024x1024), @@ -89,7 +89,7 @@ func TestImageGenerateWithOptionalParams(t *testing.T) { ) _, err := client.Images.Generate(context.TODO(), openai.ImageGenerateParams{ Prompt: openai.F("A cute baby sea otter"), - Model: openai.F(openai.ImageGenerateParamsModelDallE2), + Model: openai.F(openai.ImageModelDallE2), N: openai.F(int64(1)), Quality: openai.F(openai.ImageGenerateParamsQualityStandard), ResponseFormat: openai.F(openai.ImageGenerateParamsResponseFormatURL), diff --git a/internal/apijson/decoder.go b/internal/apijson/decoder.go index deb0bac..e1b21b7 100644 --- a/internal/apijson/decoder.go +++ b/internal/apijson/decoder.go @@ -214,15 +214,29 @@ func (d *decoderBuilder) 
newUnionDecoder(t reflect.Type) decoderFunc { decoders = append(decoders, decoder) } return func(n gjson.Result, v reflect.Value, state *decoderState) error { - // Set bestExactness to worse than loose - bestExactness := loose - 1 - + // If there is a discriminator match, circumvent the exactness logic entirely for idx, variant := range unionEntry.variants { decoder := decoders[idx] if variant.TypeFilter != n.Type { continue } - if len(unionEntry.discriminatorKey) != 0 && n.Get(unionEntry.discriminatorKey).Value() != variant.DiscriminatorValue { + + if len(unionEntry.discriminatorKey) != 0 { + discriminatorValue := n.Get(unionEntry.discriminatorKey).Value() + if discriminatorValue == variant.DiscriminatorValue { + inner := reflect.New(variant.Type).Elem() + err := decoder(n, inner, state) + v.Set(inner) + return err + } + } + } + + // Set bestExactness to worse than loose + bestExactness := loose - 1 + for idx, variant := range unionEntry.variants { + decoder := decoders[idx] + if variant.TypeFilter != n.Type { continue } sub := decoderState{strict: state.strict, exactness: exact} @@ -325,62 +339,58 @@ func (d *decoderBuilder) newArrayTypeDecoder(t reflect.Type) decoderFunc { func (d *decoderBuilder) newStructTypeDecoder(t reflect.Type) decoderFunc { // map of json field name to struct field decoders decoderFields := map[string]decoderField{} + anonymousDecoders := []decoderField{} extraDecoder := (*decoderField)(nil) inlineDecoder := (*decoderField)(nil) - // This helper allows us to recursively collect field encoders into a flat - // array. The parameter `index` keeps track of the access patterns necessary - // to get to some field. - var collectFieldDecoders func(r reflect.Type, index []int) - collectFieldDecoders = func(r reflect.Type, index []int) { - for i := 0; i < r.NumField(); i++ { - idx := append(index, i) - field := t.FieldByIndex(idx) - if !field.IsExported() { - continue - } - // If this is an embedded struct, traverse one level deeper to extract - // the fields and get their encoders as well. - if field.Anonymous { - collectFieldDecoders(field.Type, idx) - continue - } - // If json tag is not present, then we skip, which is intentionally - // different behavior from the stdlib. - ptag, ok := parseJSONStructTag(field) - if !ok { - continue - } - // We only want to support unexported fields if they're tagged with - // `extras` because that field shouldn't be part of the public API. We - // also want to only keep the top level extras - if ptag.extras && len(index) == 0 { - extraDecoder = &decoderField{ptag, d.typeDecoder(field.Type.Elem()), idx, field.Name} - continue - } - if ptag.inline && len(index) == 0 { - inlineDecoder = &decoderField{ptag, d.typeDecoder(field.Type), idx, field.Name} - continue - } - if ptag.metadata { - continue - } + for i := 0; i < t.NumField(); i++ { + idx := []int{i} + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + // If this is an embedded struct, traverse one level deeper to extract + // the fields and get their encoders as well. + if field.Anonymous { + anonymousDecoders = append(anonymousDecoders, decoderField{ + fn: d.typeDecoder(field.Type), + idx: idx[:], + }) + continue + } + // If json tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. + ptag, ok := parseJSONStructTag(field) + if !ok { + continue + } + // We only want to support unexported fields if they're tagged with + // `extras` because that field shouldn't be part of the public API. 
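// NOTE (editor): illustrative sketch, not part of this diff, of the registration
// pattern this fast path serves (see the init() blocks in betathreadmessage.go
// above). When the payload's discriminator key matches a variant's
// DiscriminatorValue, the decoder now decodes that variant directly instead of
// trying every variant and ranking results by exactness. The apijson package is
// internal, so this only compiles inside this module's own packages.
package example

import (
	"reflect"

	"github.com/openai/openai-go"
	"github.com/openai/openai-go/internal/apijson"
	"github.com/tidwall/gjson"
)

func init() {
	apijson.RegisterUnion(
		reflect.TypeOf((*openai.MessageContentUnion)(nil)).Elem(),
		"type", // discriminator key inspected before any exactness scoring
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(openai.RefusalContentBlock{}),
			DiscriminatorValue: "refusal",
		},
	)
}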
+ if ptag.extras { + extraDecoder = &decoderField{ptag, d.typeDecoder(field.Type.Elem()), idx, field.Name} + continue + } + if ptag.inline { + inlineDecoder = &decoderField{ptag, d.typeDecoder(field.Type), idx, field.Name} + continue + } + if ptag.metadata { + continue + } - oldFormat := d.dateFormat - dateFormat, ok := parseFormatStructTag(field) - if ok { - switch dateFormat { - case "date-time": - d.dateFormat = time.RFC3339 - case "date": - d.dateFormat = "2006-01-02" - } + oldFormat := d.dateFormat + dateFormat, ok := parseFormatStructTag(field) + if ok { + switch dateFormat { + case "date-time": + d.dateFormat = time.RFC3339 + case "date": + d.dateFormat = "2006-01-02" } - decoderFields[ptag.name] = decoderField{ptag, d.typeDecoder(field.Type), idx, field.Name} - d.dateFormat = oldFormat } + decoderFields[ptag.name] = decoderField{ptag, d.typeDecoder(field.Type), idx, field.Name} + d.dateFormat = oldFormat } - collectFieldDecoders(t, []int{}) return func(node gjson.Result, value reflect.Value, state *decoderState) (err error) { if field := value.FieldByName("JSON"); field.IsValid() { @@ -389,6 +399,11 @@ func (d *decoderBuilder) newStructTypeDecoder(t reflect.Type) decoderFunc { } } + for _, decoder := range anonymousDecoders { + // ignore errors + decoder.fn(node, value.FieldByIndex(decoder.idx), state) + } + if inlineDecoder != nil { var meta Field dest := value.FieldByIndex(inlineDecoder.idx) diff --git a/internal/apijson/json_test.go b/internal/apijson/json_test.go index 43cea30..72bc4c2 100644 --- a/internal/apijson/json_test.go +++ b/internal/apijson/json_test.go @@ -48,10 +48,32 @@ type TypedAdditionalProperties struct { ExtraFields map[string]int `json:"-,extras"` } +type EmbeddedStruct struct { + A bool `json:"a"` + B string `json:"b"` + + JSON EmbeddedStructJSON +} + +type EmbeddedStructJSON struct { + A Field + B Field + ExtraFields map[string]Field + raw string +} + type EmbeddedStructs struct { - AdditionalProperties - A *int `json:"number2"` + EmbeddedStruct + A *int `json:"a"` ExtraFields map[string]interface{} `json:"-,extras"` + + JSON EmbeddedStructsJSON +} + +type EmbeddedStructsJSON struct { + A Field + ExtraFields map[string]Field + raw string } type Recursive struct { @@ -332,9 +354,34 @@ var tests = map[string]struct { }, }, + "embedded_struct": { + `{"a":1,"b":"bar"}`, + EmbeddedStructs{ + EmbeddedStruct: EmbeddedStruct{ + A: true, + B: "bar", + JSON: EmbeddedStructJSON{ + A: Field{raw: `1`, status: valid}, + B: Field{raw: `"bar"`, status: valid}, + raw: `{"a":1,"b":"bar"}`, + }, + }, + A: P(1), + ExtraFields: map[string]interface{}{"b": "bar"}, + JSON: EmbeddedStructsJSON{ + A: Field{raw: `1`, status: valid}, + ExtraFields: map[string]Field{ + "b": {raw: `"bar"`, status: valid}, + }, + raw: `{"a":1,"b":"bar"}`, + }, + }, + }, + "recursive_struct": { `{"child":{"name":"Alex"},"name":"Robert"}`, - Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, }, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + }, "metadata_coerce": { `{"a":"12","b":"12","c":null,"extra_typed":12,"extra_untyped":{"foo":"bar"}}`, diff --git a/internal/version.go b/internal/version.go index 4ff68e4..5ed22d7 100644 --- a/internal/version.go +++ b/internal/version.go @@ -2,4 +2,4 @@ package internal -const PackageVersion = "0.0.1-alpha.0" // x-release-please-version +const PackageVersion = "0.1.0-alpha.1" // x-release-please-version diff --git a/model_test.go b/model_test.go index 205b5d0..194b16f 100644 --- a/model_test.go +++ b/model_test.go @@ -25,7 +25,7 @@ func 
diff --git a/model_test.go b/model_test.go
index 205b5d0..194b16f 100644
--- a/model_test.go
+++ b/model_test.go
@@ -25,7 +25,7 @@ func TestModelGet(t *testing.T) {
 		option.WithBaseURL(baseURL),
 		option.WithAPIKey("My API Key"),
 	)
-	_, err := client.Models.Get(context.TODO(), "gpt-3.5-turbo")
+	_, err := client.Models.Get(context.TODO(), "gpt-4o-mini")
 	if err != nil {
 		var apierr *openai.Error
 		if errors.As(err, &apierr) {
@@ -69,7 +69,7 @@ func TestModelDelete(t *testing.T) {
 		option.WithBaseURL(baseURL),
 		option.WithAPIKey("My API Key"),
 	)
-	_, err := client.Models.Delete(context.TODO(), "ft:gpt-3.5-turbo:acemeco:suffix:abc123")
+	_, err := client.Models.Delete(context.TODO(), "ft:gpt-4o-mini:acemeco:suffix:abc123")
 	if err != nil {
 		var apierr *openai.Error
 		if errors.As(err, &apierr) {
diff --git a/moderation.go b/moderation.go
index 3870493..7cf711d 100644
--- a/moderation.go
+++ b/moderation.go
@@ -185,6 +185,13 @@ func (r moderationCategoryScoresJSON) RawJSON() string {
 	return r.raw
 }
 
+type ModerationModel = string
+
+const (
+	ModerationModelTextModerationLatest ModerationModel = "text-moderation-latest"
+	ModerationModelTextModerationStable ModerationModel = "text-moderation-stable"
+)
+
 // Represents if a given text input is potentially harmful.
 type ModerationNewResponse struct {
 	// The unique identifier for the moderation request.
@@ -225,7 +232,7 @@ type ModerationNewParams struct {
 	// `text-moderation-stable`, we will provide advanced notice before updating the
 	// model. Accuracy of `text-moderation-stable` may be slightly lower than for
 	// `text-moderation-latest`.
-	Model param.Field[ModerationNewParamsModel] `json:"model"`
+	Model param.Field[ModerationModel] `json:"model"`
 }
 
 func (r ModerationNewParams) MarshalJSON() (data []byte, err error) {
@@ -242,18 +249,3 @@ type ModerationNewParamsInputUnion interface {
 
 type ModerationNewParamsInputArray []string
 
 func (r ModerationNewParamsInputArray) ImplementsModerationNewParamsInputUnion() {}
-
-type ModerationNewParamsModel string
-
-const (
-	ModerationNewParamsModelTextModerationLatest ModerationNewParamsModel = "text-moderation-latest"
-	ModerationNewParamsModelTextModerationStable ModerationNewParamsModel = "text-moderation-stable"
-)
-
-func (r ModerationNewParamsModel) IsKnown() bool {
-	switch r {
-	case ModerationNewParamsModelTextModerationLatest, ModerationNewParamsModelTextModerationStable:
-		return true
-	}
-	return false
-}
diff --git a/moderation_test.go b/moderation_test.go
index 80000e2..9d0b8f2 100644
--- a/moderation_test.go
+++ b/moderation_test.go
@@ -28,7 +28,7 @@ func TestModerationNewWithOptionalParams(t *testing.T) {
 	)
 	_, err := client.Moderations.New(context.TODO(), openai.ModerationNewParams{
 		Input: openai.F[openai.ModerationNewParamsInputUnion](shared.UnionString("I want to kill them.")),
-		Model: openai.F(openai.ModerationNewParamsModelTextModerationLatest),
+		Model: openai.F(openai.ModerationModelTextModerationLatest),
 	})
 	if err != nil {
 		var apierr *openai.Error
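Since `ModerationModel` is now a plain `string` alias rather than a nominal enum, the old `IsKnown` check goes away and any model string passes through unchanged. A minimal sketch of a call with the renamed constant, assuming default client setup from the environment:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/openai/openai-go"
	"github.com/openai/openai-go/shared"
)

func main() {
	client := openai.NewClient() // reads OPENAI_API_KEY from the environment
	resp, err := client.Moderations.New(context.TODO(), openai.ModerationNewParams{
		Input: openai.F[openai.ModerationNewParamsInputUnion](shared.UnionString("some user text")),
		// ModerationModel is a string alias, so custom or newer model names
		// also pass through without an IsKnown() gate.
		Model: openai.F(openai.ModerationModelTextModerationLatest),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Results[0].Flagged)
}
```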
diff --git a/shared/shared.go b/shared/shared.go
index 4c0f3ca..18331e2 100644
--- a/shared/shared.go
+++ b/shared/shared.go
@@ -47,8 +47,14 @@ type FunctionDefinition struct {
 	// documentation about the format.
 	//
 	// Omitting `parameters` defines a function with an empty parameter list.
-	Parameters FunctionParameters     `json:"parameters"`
-	JSON       functionDefinitionJSON `json:"-"`
+	Parameters FunctionParameters `json:"parameters"`
+	// Whether to enable strict schema adherence when generating the function call. If
+	// set to true, the model will follow the exact schema defined in the `parameters`
+	// field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
+	// more about Structured Outputs in the
+	// [function calling guide](docs/guides/function-calling).
+	Strict bool                   `json:"strict,nullable"`
+	JSON   functionDefinitionJSON `json:"-"`
 }
 
 // functionDefinitionJSON contains the JSON metadata for the struct
@@ -57,6 +63,7 @@ type functionDefinitionJSON struct {
 	Name        apijson.Field
 	Description apijson.Field
 	Parameters  apijson.Field
+	Strict      apijson.Field
 	raw         string
 	ExtraFields map[string]apijson.Field
 }
@@ -84,6 +91,12 @@ type FunctionDefinitionParam struct {
 	//
 	// Omitting `parameters` defines a function with an empty parameter list.
 	Parameters param.Field[FunctionParameters] `json:"parameters"`
+	// Whether to enable strict schema adherence when generating the function call. If
+	// set to true, the model will follow the exact schema defined in the `parameters`
+	// field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
+	// more about Structured Outputs in the
+	// [function calling guide](docs/guides/function-calling).
+	Strict param.Field[bool] `json:"strict"`
 }
 
 func (r FunctionDefinitionParam) MarshalJSON() (data []byte, err error) {
@@ -91,3 +104,103 @@ func (r FunctionDefinitionParam) MarshalJSON() (data []byte, err error) {
 }
 
 type FunctionParameters map[string]interface{}
+
+type ResponseFormatJSONObjectParam struct {
+	// The type of response format being defined: `json_object`
+	Type param.Field[ResponseFormatJSONObjectType] `json:"type,required"`
+}
+
+func (r ResponseFormatJSONObjectParam) MarshalJSON() (data []byte, err error) {
+	return apijson.MarshalRoot(r)
+}
+
+func (r ResponseFormatJSONObjectParam) ImplementsChatCompletionNewParamsResponseFormatUnion() {}
+
+// The type of response format being defined: `json_object`
+type ResponseFormatJSONObjectType string
+
+const (
+	ResponseFormatJSONObjectTypeJSONObject ResponseFormatJSONObjectType = "json_object"
+)
+
+func (r ResponseFormatJSONObjectType) IsKnown() bool {
+	switch r {
+	case ResponseFormatJSONObjectTypeJSONObject:
+		return true
+	}
+	return false
+}
+
+type ResponseFormatJSONSchemaParam struct {
+	JSONSchema param.Field[ResponseFormatJSONSchemaJSONSchemaParam] `json:"json_schema,required"`
+	// The type of response format being defined: `json_schema`
+	Type param.Field[ResponseFormatJSONSchemaType] `json:"type,required"`
+}
+
+func (r ResponseFormatJSONSchemaParam) MarshalJSON() (data []byte, err error) {
+	return apijson.MarshalRoot(r)
+}
+
+func (r ResponseFormatJSONSchemaParam) ImplementsChatCompletionNewParamsResponseFormatUnion() {}
+
+type ResponseFormatJSONSchemaJSONSchemaParam struct {
+	// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores
+	// and dashes, with a maximum length of 64.
+	Name param.Field[string] `json:"name,required"`
+	// A description of what the response format is for, used by the model to
+	// determine how to respond in the format.
+	Description param.Field[string] `json:"description"`
+	// The schema for the response format, described as a JSON Schema object.
+	Schema param.Field[map[string]interface{}] `json:"schema"`
+	// Whether to enable strict schema adherence when generating the output. If set to
+	// true, the model will always follow the exact schema defined in the `schema`
+	// field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+	// learn more, read the
+	// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+	Strict param.Field[bool] `json:"strict"`
+}
+
+func (r ResponseFormatJSONSchemaJSONSchemaParam) MarshalJSON() (data []byte, err error) {
+	return apijson.MarshalRoot(r)
+}
+
+// The type of response format being defined: `json_schema`
+type ResponseFormatJSONSchemaType string
+
+const (
+	ResponseFormatJSONSchemaTypeJSONSchema ResponseFormatJSONSchemaType = "json_schema"
+)
+
+func (r ResponseFormatJSONSchemaType) IsKnown() bool {
+	switch r {
+	case ResponseFormatJSONSchemaTypeJSONSchema:
+		return true
+	}
+	return false
+}
+
+type ResponseFormatTextParam struct {
+	// The type of response format being defined: `text`
+	Type param.Field[ResponseFormatTextType] `json:"type,required"`
+}
+
+func (r ResponseFormatTextParam) MarshalJSON() (data []byte, err error) {
+	return apijson.MarshalRoot(r)
+}
+
+func (r ResponseFormatTextParam) ImplementsChatCompletionNewParamsResponseFormatUnion() {}
+
+// The type of response format being defined: `text`
+type ResponseFormatTextType string
+
+const (
+	ResponseFormatTextTypeText ResponseFormatTextType = "text"
+)
+
+func (r ResponseFormatTextType) IsKnown() bool {
+	switch r {
+	case ResponseFormatTextTypeText:
+		return true
+	}
+	return false
+}
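The new `ResponseFormatJSONSchemaParam` plugs into chat completions through `ImplementsChatCompletionNewParamsResponseFormatUnion`. A hedged sketch of requesting structured output with `Strict` enabled; the `ResponseFormat` field name is inferred from the union name above, and the schema and response-format name are invented for illustration:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/openai/openai-go"
	"github.com/openai/openai-go/shared"
)

func main() {
	client := openai.NewClient() // reads OPENAI_API_KEY from the environment

	// Illustrative schema; strict mode only supports a subset of JSON Schema.
	schema := map[string]interface{}{
		"type": "object",
		"properties": map[string]interface{}{
			"answer": map[string]interface{}{"type": "string"},
		},
		"required":             []string{"answer"},
		"additionalProperties": false,
	}

	completion, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{
			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
			Content: openai.F[openai.ChatCompletionUserMessageParamContentUnion](shared.UnionString("Answer with JSON only.")),
		}}),
		Model: openai.F(openai.ChatModelGPT4o),
		// ResponseFormat is the assumed field name for the union that the
		// shared response-format params implement.
		ResponseFormat: openai.F[openai.ChatCompletionNewParamsResponseFormatUnion](openai.ResponseFormatJSONSchemaParam{
			Type: openai.F(openai.ResponseFormatJSONSchemaTypeJSONSchema),
			JSONSchema: openai.F(openai.ResponseFormatJSONSchemaJSONSchemaParam{
				Name:   openai.F("answer_payload"), // hypothetical name
				Schema: openai.F(schema),
				Strict: openai.F(true),
			}),
		}),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(completion.Choices[0].Message.Content)
}
```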
diff --git a/shared/union.go b/shared/union.go
index cea8002..7bab9fa 100644
--- a/shared/union.go
+++ b/shared/union.go
@@ -6,7 +6,10 @@ type UnionString string
 
 func (UnionString) ImplementsCompletionNewParamsPromptUnion() {}
 func (UnionString) ImplementsCompletionNewParamsStopUnion()   {}
+func (UnionString) ImplementsChatCompletionAssistantMessageParamContentUnion() {}
+func (UnionString) ImplementsChatCompletionSystemMessageParamContentUnion()    {}
 func (UnionString) ImplementsChatCompletionUserMessageParamContentUnion() {}
+func (UnionString) ImplementsChatCompletionToolMessageParamContentUnion() {}
 func (UnionString) ImplementsChatCompletionNewParamsStopUnion() {}
 func (UnionString) ImplementsEmbeddingNewParamsInputUnion()     {}
 func (UnionString) ImplementsModerationNewParamsInputUnion()    {}
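With these extra `UnionString` implementations, system, assistant, and tool message content can be passed as bare strings, matching what was already possible for user messages. An illustrative snippet, assuming the generated system- and assistant-message params mirror the usual `Role`/`Content` shape:

```go
package main

import (
	"fmt"

	"github.com/openai/openai-go"
	"github.com/openai/openai-go/shared"
)

func main() {
	// System and assistant content as bare strings via shared.UnionString;
	// the tool-message variant now accepts the same shortcut.
	messages := []openai.ChatCompletionMessageParamUnion{
		openai.ChatCompletionSystemMessageParam{
			Role:    openai.F(openai.ChatCompletionSystemMessageParamRoleSystem),
			Content: openai.F[openai.ChatCompletionSystemMessageParamContentUnion](shared.UnionString("You are a terse assistant.")),
		},
		openai.ChatCompletionAssistantMessageParam{
			Role:    openai.F(openai.ChatCompletionAssistantMessageParamRoleAssistant),
			Content: openai.F[openai.ChatCompletionAssistantMessageParamContentUnion](shared.UnionString("Understood.")),
		},
	}
	fmt.Println(len(messages), "messages ready for ChatCompletionNewParams")
}
```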