diff --git a/.changeset/pretty-jeans-sit.md b/.changeset/pretty-jeans-sit.md new file mode 100644 index 000000000000..98924cc849d3 --- /dev/null +++ b/.changeset/pretty-jeans-sit.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +feat (@ai-sdk/openai): add parallelToolCalls setting diff --git a/content/providers/01-ai-sdk-providers/01-openai.mdx b/content/providers/01-ai-sdk-providers/01-openai.mdx index 520322e2d8af..9ce616c36484 100644 --- a/content/providers/01-ai-sdk-providers/01-openai.mdx +++ b/content/providers/01-ai-sdk-providers/01-openai.mdx @@ -171,6 +171,10 @@ The following optional settings are available for OpenAI chat models: Setting to a number will return the log probabilities of the top n tokens that were generated. +- **parallelToolCalls** _boolean_ + + Whether to enable parallel function calling during tool use. Defaults to true. + - **user** _string_ A unique identifier representing your end-user, which can help OpenAI to diff --git a/content/providers/01-ai-sdk-providers/02-azure.mdx b/content/providers/01-ai-sdk-providers/02-azure.mdx index f76080333e59..3ed924c167d2 100644 --- a/content/providers/01-ai-sdk-providers/02-azure.mdx +++ b/content/providers/01-ai-sdk-providers/02-azure.mdx @@ -133,6 +133,10 @@ The following optional settings are available for OpenAI chat models: Setting to a number will return the log probabilities of the top n tokens that were generated. +- **parallelToolCalls** _boolean_ + + Whether to enable parallel function calling during tool use. Defaults to true. 
+ - **user** _string_ A unique identifier representing your end-user, which can help OpenAI to diff --git a/packages/openai/src/openai-chat-language-model.test.ts b/packages/openai/src/openai-chat-language-model.test.ts index b24838e3773a..4b9315fe9afb 100644 --- a/packages/openai/src/openai-chat-language-model.test.ts +++ b/packages/openai/src/openai-chat-language-model.test.ts @@ -146,7 +146,7 @@ describe('doGenerate', () => { | null; } | null; finish_reason?: string; - }) { + } = {}) { server.responseBodyJson = { id: 'chatcmpl-95ZTZkhr0mHNKqerQfiwkuox3PHAd', object: 'chat.completion', @@ -283,6 +283,33 @@ describe('doGenerate', () => { }); }); + it('should pass settings', async () => { + prepareJsonResponse(); + + await provider + .chat('gpt-3.5-turbo', { + logitBias: { 50256: -100 }, + logprobs: 2, + parallelToolCalls: false, + user: 'test-user-id', + }) + .doGenerate({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: TEST_PROMPT, + }); + + expect(await server.getRequestBodyJson()).toStrictEqual({ + model: 'gpt-3.5-turbo', + messages: [{ role: 'user', content: 'Hello' }], + logprobs: true, + top_logprobs: 2, + logit_bias: { 50256: -100 }, + parallel_tool_calls: false, + user: 'test-user-id', + }); + }); + it('should pass tools and toolChoice', async () => { prepareJsonResponse({ content: '' }); diff --git a/packages/openai/src/openai-chat-language-model.ts b/packages/openai/src/openai-chat-language-model.ts index 426a6b2d6dd2..725bc016035b 100644 --- a/packages/openai/src/openai-chat-language-model.ts +++ b/packages/openai/src/openai-chat-language-model.ts @@ -84,6 +84,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 { : undefined : undefined, user: this.settings.user, + parallel_tool_calls: this.settings.parallelToolCalls, // standardized settings: max_tokens: maxTokens, diff --git a/packages/openai/src/openai-chat-settings.ts b/packages/openai/src/openai-chat-settings.ts index b28348375d7f..3e9d557f8dd8 100644 --- 
a/packages/openai/src/openai-chat-settings.ts +++ b/packages/openai/src/openai-chat-settings.ts @@ -50,6 +50,11 @@ tokens that were generated. */ logprobs?: boolean | number; + /** +Whether to enable parallel function calling during tool use. Defaults to true. + */ + parallelToolCalls?: boolean; + /** A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.