diff --git a/bun.lock b/bun.lock index 12e5eb6e03a..c225d055d1b 100644 --- a/bun.lock +++ b/bun.lock @@ -224,6 +224,8 @@ "@ai-sdk/mcp": "0.0.8", "@ai-sdk/openai": "2.0.71", "@ai-sdk/openai-compatible": "1.0.27", + "@ai-sdk/provider": "2.0.0", + "@ai-sdk/provider-utils": "3.0.18", "@clack/prompts": "1.0.0-alpha.1", "@hono/standard-validator": "0.1.5", "@hono/zod-validator": "catalog:", @@ -503,7 +505,7 @@ "@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="], - "@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="], + "@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.18", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ypv1xXMsgGcNKUP+hglKqtdDuMg68nWHucPPAhIENrbFAI+xCHiqPVN8Zllxyv1TNZwGWUghPxJXU+Mqps0YRQ=="], "@alloc/quick-lru": ["@alloc/quick-lru@5.2.0", "", {}, "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw=="], @@ -3737,20 +3739,22 @@ "@ai-sdk/amazon-bedrock/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="], + "@ai-sdk/anthropic/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="], + "@ai-sdk/azure/@ai-sdk/openai": ["@ai-sdk/openai@2.0.71", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.17" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-tg+gj+R0z/On9P4V7hy7/7o04cQPjKGayMCL3gzWD/aNGjAKkhEnaocuNDidSnghizt8g2zJn16cAuAolnW+qQ=="], "@ai-sdk/azure/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="], "@ai-sdk/gateway/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="], - "@ai-sdk/google/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.18", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, 
"sha512-ypv1xXMsgGcNKUP+hglKqtdDuMg68nWHucPPAhIENrbFAI+xCHiqPVN8Zllxyv1TNZwGWUghPxJXU+Mqps0YRQ=="], - "@ai-sdk/google-vertex/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.50", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.18" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-21PaHfoLmouOXXNINTsZJsMw+wE5oLR2He/1kq/sKokTVKyq7ObGT1LDk6ahwxaz/GoaNaGankMh+EgVcdv2Cw=="], - "@ai-sdk/google-vertex/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.18", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ypv1xXMsgGcNKUP+hglKqtdDuMg68nWHucPPAhIENrbFAI+xCHiqPVN8Zllxyv1TNZwGWUghPxJXU+Mqps0YRQ=="], - "@ai-sdk/mcp/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="], + "@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="], + + "@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="], + "@astrojs/cloudflare/vite": ["vite@6.4.1", "", { "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.4.4", "picomatch": "^4.0.2", "postcss": "^8.5.3", "rollup": "^4.34.9", "tinyglobby": "^0.2.13" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "peerDependencies": { "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", "jiti": ">=1.21.0", "less": "*", "lightningcss": "^1.21.0", "sass": "*", "sass-embedded": "*", "stylus": "*", "sugarss": "*", "terser": "^5.16.0", "tsx": "^4.8.1", "yaml": "^2.4.2" }, "optionalPeers": ["@types/node", "jiti", "less", "lightningcss", "sass", "sass-embedded", "stylus", "sugarss", "terser", "tsx", "yaml"], "bin": { "vite": "bin/vite.js" } }, "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g=="], "@astrojs/markdown-remark/@astrojs/internal-helpers": ["@astrojs/internal-helpers@0.6.1", "", {}, "sha512-l5Pqf6uZu31aG+3Lv8nl/3s4DbUzdlxTWDof4pEpto6GUJNhhCbelVi9dEyurOVyqaelwmS9oSyOWOENSfgo9A=="], @@ -4619,8 +4623,6 @@ "jsonwebtoken/jws/jwa": ["jwa@1.4.2", "", { "dependencies": { "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", "safe-buffer": "^5.0.1" } }, "sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw=="], - "opencode/@ai-sdk/anthropic/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.18", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ypv1xXMsgGcNKUP+hglKqtdDuMg68nWHucPPAhIENrbFAI+xCHiqPVN8Zllxyv1TNZwGWUghPxJXU+Mqps0YRQ=="], - 
"opencode/@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="], "opencode/@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="], diff --git a/flake.lock b/flake.lock index f35c345f0ba..211be53aa99 100644 --- a/flake.lock +++ b/flake.lock @@ -2,11 +2,11 @@ "nodes": { "nixpkgs": { "locked": { - "lastModified": 1764384123, - "narHash": "sha256-UoliURDJFaOolycBZYrjzd9Cc66zULEyHqGFH3QHEq0=", + "lastModified": 1764445028, + "narHash": "sha256-ik6H/0Zl+qHYDKTXFPpzuVHSZE+uvVz2XQuQd1IVXzo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "59b6c96beacc898566c9be1052ae806f3835f87d", + "rev": "a09378c0108815dbf3961a0e085936f4146ec415", "type": "github" }, "original": { diff --git a/nix/hashes.json b/nix/hashes.json index 8f73ecceaba..3ca8d4f9144 100644 --- a/nix/hashes.json +++ b/nix/hashes.json @@ -1,3 +1,3 @@ { - "nodeModules": "sha256-+PJZG5jNxBGkxblpnNa4lvfBi9YEvHaGQRE0+avNwHY=" + "nodeModules": "sha256-jLrT8GVq0Fh34tN1MPgJpPKd9SGhOauaBl8f1oZ/XgI=" } diff --git a/packages/opencode/package.json b/packages/opencode/package.json index 8a9b0ce712f..494ec828ce4 100644 --- a/packages/opencode/package.json +++ b/packages/opencode/package.json @@ -50,6 +50,8 @@ "@ai-sdk/mcp": "0.0.8", "@ai-sdk/openai": "2.0.71", "@ai-sdk/openai-compatible": "1.0.27", + "@ai-sdk/provider": "2.0.0", + "@ai-sdk/provider-utils": "3.0.18", "@clack/prompts": "1.0.0-alpha.1", "@hono/standard-validator": "0.1.5", "@hono/zod-validator": "catalog:", diff --git a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx index b61176553fb..b4dc26168a0 100644 --- a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx +++ b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx @@ -843,6 +843,7 @@ export function Prompt(props: PromptProps) { justifyContent={status().type === "retry" ? 
"space-between" : "flex-start"} > + {/* @ts-ignore // SpinnerOptions doesn't support marginLeft */} {(() => { diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts index ded0b9b1913..9da4ca161fb 100644 --- a/packages/opencode/src/provider/provider.ts +++ b/packages/opencode/src/provider/provider.ts @@ -23,6 +23,7 @@ import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic" import { createOpenAI } from "@ai-sdk/openai" import { createOpenAICompatible } from "@ai-sdk/openai-compatible" import { createOpenRouter } from "@openrouter/ai-sdk-provider" +import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/openai-compatible/src" export namespace Provider { const log = Log.create({ service: "provider" }) @@ -37,6 +38,8 @@ export namespace Provider { "@ai-sdk/openai": createOpenAI, "@ai-sdk/openai-compatible": createOpenAICompatible, "@openrouter/ai-sdk-provider": createOpenRouter, + // @ts-ignore (TODO: kill this code so we dont have to maintain it) + "@ai-sdk/github-copilot": createGitHubCopilotOpenAICompatible, } type CustomLoader = (provider: ModelsDev.Provider) => Promise<{ @@ -87,6 +90,30 @@ export namespace Provider { options: {}, } }, + "github-copilot": async () => { + return { + autoload: false, + async getModel(sdk: any, modelID: string, _options?: Record) { + if (modelID.includes("gpt-5")) { + return sdk.responses(modelID) + } + return sdk.chat(modelID) + }, + options: {}, + } + }, + "github-copilot-enterprise": async () => { + return { + autoload: false, + async getModel(sdk: any, modelID: string, _options?: Record) { + if (modelID.includes("gpt-5")) { + return sdk.responses(modelID) + } + return sdk.chat(modelID) + }, + options: {}, + } + }, azure: async () => { return { autoload: false, @@ -428,15 +455,6 @@ export namespace Provider { } } - // load custom - for (const [providerID, fn] of Object.entries(CUSTOM_LOADERS)) { - if (disabled.has(providerID)) continue - const result = await fn(database[providerID]) - if (result && (result.autoload || providers[providerID])) { - mergeProvider(providerID, result.options ?? {}, "custom", result.getModel) - } - } - for (const plugin of await Plugin.list()) { if (!plugin.auth) continue const providerID = plugin.auth.provider @@ -478,6 +496,14 @@ export namespace Provider { } } + for (const [providerID, fn] of Object.entries(CUSTOM_LOADERS)) { + if (disabled.has(providerID)) continue + const result = await fn(database[providerID]) + if (result && (result.autoload || providers[providerID])) { + mergeProvider(providerID, result.options ?? {}, "custom", result.getModel) + } + } + // load config for (const [providerID, provider] of configProviders) { mergeProvider(providerID, provider.options ?? {}, "config") @@ -489,6 +515,10 @@ export namespace Provider { continue } + if (providerID === "github-copilot") { + provider.info.npm = "@ai-sdk/github-copilot" + } + const configProvider = config.provider?.[providerID] const filteredModels = Object.fromEntries( Object.entries(provider.info.models) diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/README.md b/packages/opencode/src/provider/sdk/openai-compatible/src/README.md new file mode 100644 index 00000000000..593e644538f --- /dev/null +++ b/packages/opencode/src/provider/sdk/openai-compatible/src/README.md @@ -0,0 +1,5 @@ +This is a temporary package used primarily for github copilot compatibility. + +Avoid making changes to these files unless you want to only affect Copilot provider. 
+ +Also, this should ONLY be used for the Copilot provider. diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/index.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/index.ts new file mode 100644 index 00000000000..a3435c53e1c --- /dev/null +++ b/packages/opencode/src/provider/sdk/openai-compatible/src/index.ts @@ -0,0 +1,2 @@ +export { createOpenaiCompatible, openaiCompatible } from "./openai-compatible-provider" +export type { OpenaiCompatibleProvider, OpenaiCompatibleProviderSettings } from "./openai-compatible-provider" diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/openai-compatible-provider.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/openai-compatible-provider.ts new file mode 100644 index 00000000000..e71658c2fa0 --- /dev/null +++ b/packages/opencode/src/provider/sdk/openai-compatible/src/openai-compatible-provider.ts @@ -0,0 +1,100 @@ +import type { LanguageModelV2 } from "@ai-sdk/provider" +import { OpenAICompatibleChatLanguageModel } from "@ai-sdk/openai-compatible" +import { type FetchFunction, withoutTrailingSlash, withUserAgentSuffix } from "@ai-sdk/provider-utils" +import { OpenAIResponsesLanguageModel } from "./responses/openai-responses-language-model" + +// Version reported in the user-agent suffix for this vendored copy +const VERSION = "0.1.0" + +export type OpenaiCompatibleModelId = string + +export interface OpenaiCompatibleProviderSettings { + /** + * API key for authenticating requests. + */ + apiKey?: string + + /** + * Base URL for the OpenAI Compatible API calls. + */ + baseURL?: string + + /** + * Name of the provider. + */ + name?: string + + /** + * Custom headers to include in the requests. + */ + headers?: Record<string, string> + + /** + * Custom fetch implementation. + */ + fetch?: FetchFunction +} + +export interface OpenaiCompatibleProvider { + (modelId: OpenaiCompatibleModelId): LanguageModelV2 + chat(modelId: OpenaiCompatibleModelId): LanguageModelV2 + responses(modelId: OpenaiCompatibleModelId): LanguageModelV2 + languageModel(modelId: OpenaiCompatibleModelId): LanguageModelV2 + + // embeddingModel(modelId: any): EmbeddingModelV2 + + // imageModel(modelId: any): ImageModelV2 +} + +/** + * Create an OpenAI Compatible provider instance. + */ +export function createOpenaiCompatible(options: OpenaiCompatibleProviderSettings = {}): OpenaiCompatibleProvider { + const baseURL = withoutTrailingSlash(options.baseURL ?? "https://api.openai.com/v1") + + if (!baseURL) { + throw new Error("baseURL is required") + } + + // Merge headers: defaults first, then user overrides + const headers = { + // Default OpenAI Compatible headers (can be overridden by user) + ...(options.apiKey && { Authorization: `Bearer ${options.apiKey}` }), + ...options.headers, + } + + const getHeaders = () => withUserAgentSuffix(headers, `ai-sdk/openai-compatible/${VERSION}`) + + const createChatModel = (modelId: OpenaiCompatibleModelId) => { + return new OpenAICompatibleChatLanguageModel(modelId, { + provider: `${options.name ?? "openai-compatible"}.chat`, + headers: getHeaders, + url: ({ path }) => `${baseURL}${path}`, + fetch: options.fetch, + }) + } + + const createResponsesModel = (modelId: OpenaiCompatibleModelId) => { + return new OpenAIResponsesLanguageModel(modelId, { + provider: `${options.name ??
"openai-compatible"}.responses`, + headers: getHeaders, + url: ({ path }) => `${baseURL}${path}`, + fetch: options.fetch, + }) + } + + const createLanguageModel = (modelId: OpenaiCompatibleModelId) => createChatModel(modelId) + + const provider = function (modelId: OpenaiCompatibleModelId) { + return createChatModel(modelId) + } + + provider.languageModel = createLanguageModel + provider.chat = createChatModel + provider.responses = createResponsesModel + + return provider as OpenaiCompatibleProvider +} + +// Default OpenAI Compatible provider instance +export const openaiCompatible = createOpenaiCompatible() diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/convert-to-openai-responses-input.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/convert-to-openai-responses-input.ts new file mode 100644 index 00000000000..b53da112164 --- /dev/null +++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/convert-to-openai-responses-input.ts @@ -0,0 +1,303 @@ +import { + type LanguageModelV2CallWarning, + type LanguageModelV2Prompt, + type LanguageModelV2ToolCallPart, + UnsupportedFunctionalityError, +} from "@ai-sdk/provider" +import { convertToBase64, parseProviderOptions } from "@ai-sdk/provider-utils" +import { z } from "zod/v4" +import type { OpenAIResponsesInput, OpenAIResponsesReasoning } from "./openai-responses-api-types" +import { localShellInputSchema, localShellOutputSchema } from "./tool/local-shell" + +/** + * Check if a string is a file ID based on the given prefixes + * Returns false if prefixes is undefined (disables file ID detection) + */ +function isFileId(data: string, prefixes?: readonly string[]): boolean { + if (!prefixes) return false + return prefixes.some((prefix) => data.startsWith(prefix)) +} + +export async function convertToOpenAIResponsesInput({ + prompt, + systemMessageMode, + fileIdPrefixes, + store, + hasLocalShellTool = false, +}: { + prompt: LanguageModelV2Prompt + systemMessageMode: "system" | "developer" | "remove" + fileIdPrefixes?: readonly string[] + store: boolean + hasLocalShellTool?: boolean +}): Promise<{ + input: OpenAIResponsesInput + warnings: Array +}> { + const input: OpenAIResponsesInput = [] + const warnings: Array = [] + + for (const { role, content } of prompt) { + switch (role) { + case "system": { + switch (systemMessageMode) { + case "system": { + input.push({ role: "system", content }) + break + } + case "developer": { + input.push({ role: "developer", content }) + break + } + case "remove": { + warnings.push({ + type: "other", + message: "system messages are removed for this model", + }) + break + } + default: { + const _exhaustiveCheck: never = systemMessageMode + throw new Error(`Unsupported system message mode: ${_exhaustiveCheck}`) + } + } + break + } + + case "user": { + input.push({ + role: "user", + content: content.map((part, index) => { + switch (part.type) { + case "text": { + return { type: "input_text", text: part.text } + } + case "file": { + if (part.mediaType.startsWith("image/")) { + const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType + + return { + type: "input_image", + ...(part.data instanceof URL + ? { image_url: part.data.toString() } + : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) + ? 
{ file_id: part.data } + : { + image_url: `data:${mediaType};base64,${convertToBase64(part.data)}`, + }), + detail: part.providerOptions?.openai?.imageDetail, + } + } else if (part.mediaType === "application/pdf") { + if (part.data instanceof URL) { + return { + type: "input_file", + file_url: part.data.toString(), + } + } + return { + type: "input_file", + ...(typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) + ? { file_id: part.data } + : { + filename: part.filename ?? `part-${index}.pdf`, + file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`, + }), + } + } else { + throw new UnsupportedFunctionalityError({ + functionality: `file part media type ${part.mediaType}`, + }) + } + } + } + }), + }) + + break + } + + case "assistant": { + const reasoningMessages: Record<string, OpenAIResponsesReasoning> = {} + const toolCallParts: Record<string, LanguageModelV2ToolCallPart> = {} + + for (const part of content) { + switch (part.type) { + case "text": { + input.push({ + role: "assistant", + content: [{ type: "output_text", text: part.text }], + id: (part.providerOptions?.openai?.itemId as string) ?? undefined, + }) + break + } + case "tool-call": { + toolCallParts[part.toolCallId] = part + + if (part.providerExecuted) { + break + } + + if (hasLocalShellTool && part.toolName === "local_shell") { + const parsedInput = localShellInputSchema.parse(part.input) + input.push({ + type: "local_shell_call", + call_id: part.toolCallId, + id: (part.providerOptions?.openai?.itemId as string) ?? undefined, + action: { + type: "exec", + command: parsedInput.action.command, + timeout_ms: parsedInput.action.timeoutMs, + user: parsedInput.action.user, + working_directory: parsedInput.action.workingDirectory, + env: parsedInput.action.env, + }, + }) + + break + } + + input.push({ + type: "function_call", + call_id: part.toolCallId, + name: part.toolName, + arguments: JSON.stringify(part.input), + id: (part.providerOptions?.openai?.itemId as string) ?? undefined, + }) + break + } + + // assistant tool result parts are from provider-executed tools: + case "tool-result": { + if (store) { + // use item references to refer to tool results from built-in tools + input.push({ type: "item_reference", id: part.toolCallId }) + } else { + warnings.push({ + type: "other", + message: `Results for OpenAI tool ${part.toolName} are not sent to the API when store is false`, + }) + } + + break + } + + case "reasoning": { + const providerOptions = await parseProviderOptions({ + provider: "openai", + providerOptions: part.providerOptions, + schema: openaiResponsesReasoningProviderOptionsSchema, + }) + + const reasoningId = providerOptions?.itemId + + if (reasoningId != null) { + const reasoningMessage = reasoningMessages[reasoningId] + + if (store) { + if (reasoningMessage === undefined) { + // use item references to refer to reasoning (single reference) + input.push({ type: "item_reference", id: reasoningId }) + + // store unused reasoning message to mark id as used + reasoningMessages[reasoningId] = { + type: "reasoning", + id: reasoningId, + summary: [], + } + } + } else { + const summaryParts: Array<{ + type: "summary_text" + text: string + }> = [] + + if (part.text.length > 0) { + summaryParts.push({ + type: "summary_text", + text: part.text, + }) + } else if (reasoningMessage !== undefined) { + warnings.push({ + type: "other", + message: `Cannot append empty reasoning part to existing reasoning sequence.
Skipping reasoning part: ${JSON.stringify(part)}.`, + }) + } + + if (reasoningMessage === undefined) { + reasoningMessages[reasoningId] = { + type: "reasoning", + id: reasoningId, + encrypted_content: providerOptions?.reasoningEncryptedContent, + summary: summaryParts, + } + input.push(reasoningMessages[reasoningId]) + } else { + reasoningMessage.summary.push(...summaryParts) + } + } + } else { + warnings.push({ + type: "other", + message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`, + }) + } + break + } + } + } + + break + } + + case "tool": { + for (const part of content) { + const output = part.output + + if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") { + input.push({ + type: "local_shell_call_output", + call_id: part.toolCallId, + output: localShellOutputSchema.parse(output.value).output, + }) + break + } + + let contentValue: string + switch (output.type) { + case "text": + case "error-text": + contentValue = output.value + break + case "content": + case "json": + case "error-json": + contentValue = JSON.stringify(output.value) + break + } + + input.push({ + type: "function_call_output", + call_id: part.toolCallId, + output: contentValue, + }) + } + + break + } + + default: { + const _exhaustiveCheck: never = role + throw new Error(`Unsupported role: ${_exhaustiveCheck}`) + } + } + } + + return { input, warnings } +} + +const openaiResponsesReasoningProviderOptionsSchema = z.object({ + itemId: z.string().nullish(), + reasoningEncryptedContent: z.string().nullish(), +}) + +export type OpenAIResponsesReasoningProviderOptions = z.infer<typeof openaiResponsesReasoningProviderOptionsSchema> diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/map-openai-responses-finish-reason.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/map-openai-responses-finish-reason.ts new file mode 100644 index 00000000000..54bb9056d79 --- /dev/null +++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/map-openai-responses-finish-reason.ts @@ -0,0 +1,22 @@ +import type { LanguageModelV2FinishReason } from "@ai-sdk/provider" + +export function mapOpenAIResponseFinishReason({ + finishReason, + hasFunctionCall, +}: { + finishReason: string | null | undefined + // flag that checks if there have been client-side tool calls (not executed by openai) + hasFunctionCall: boolean +}): LanguageModelV2FinishReason { + switch (finishReason) { + case undefined: + case null: + return hasFunctionCall ? "tool-calls" : "stop" + case "max_output_tokens": + return "length" + case "content_filter": + return "content-filter" + default: + return hasFunctionCall ? "tool-calls" : "unknown" + } +} diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-config.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-config.ts new file mode 100644 index 00000000000..2241dbb5249 --- /dev/null +++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-config.ts @@ -0,0 +1,18 @@ +import type { FetchFunction } from "@ai-sdk/provider-utils" + +export type OpenAIConfig = { + provider: string + url: (options: { modelId: string; path: string }) => string + headers: () => Record<string, string | undefined> + fetch?: FetchFunction + generateId?: () => string + /** + * File ID prefixes used to identify file IDs in Responses API. + * When undefined, all file data is treated as base64 content.
+  * + * Examples: + * - OpenAI: ['file-'] for IDs like 'file-abc123' + * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123' + */ + fileIdPrefixes?: readonly string[] +} diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-error.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-error.ts new file mode 100644 index 00000000000..e78824d3641 --- /dev/null +++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-error.ts @@ -0,0 +1,22 @@ +import { z } from "zod/v4" +import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils" + +export const openaiErrorDataSchema = z.object({ + error: z.object({ + message: z.string(), + + // The additional information below is handled loosely to support + // OpenAI-compatible providers that have slightly different error + // responses: + type: z.string().nullish(), + param: z.any().nullish(), + code: z.union([z.string(), z.number()]).nullish(), + }), +}) + +export type OpenAIErrorData = z.infer<typeof openaiErrorDataSchema> + +export const openaiFailedResponseHandler: any = createJsonErrorResponseHandler({ + errorSchema: openaiErrorDataSchema, + errorToMessage: (data) => data.error.message, +}) diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-api-types.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-api-types.ts new file mode 100644 index 00000000000..cf1a3ba2fbf --- /dev/null +++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-api-types.ts @@ -0,0 +1,208 @@ +import type { JSONSchema7 } from "@ai-sdk/provider" + +export type OpenAIResponsesInput = Array<OpenAIResponsesInputItem> + +export type OpenAIResponsesInputItem = + | OpenAIResponsesSystemMessage + | OpenAIResponsesUserMessage + | OpenAIResponsesAssistantMessage + | OpenAIResponsesFunctionCall + | OpenAIResponsesFunctionCallOutput + | OpenAIResponsesComputerCall + | OpenAIResponsesLocalShellCall + | OpenAIResponsesLocalShellCallOutput + | OpenAIResponsesReasoning + | OpenAIResponsesItemReference + +export type OpenAIResponsesIncludeValue = + | "web_search_call.action.sources" + | "code_interpreter_call.outputs" + | "computer_call_output.output.image_url" + | "file_search_call.results" + | "message.input_image.image_url" + | "message.output_text.logprobs" + | "reasoning.encrypted_content" + +export type OpenAIResponsesIncludeOptions = Array<OpenAIResponsesIncludeValue> | undefined | null + +export type OpenAIResponsesSystemMessage = { + role: "system" | "developer" + content: string +} + +export type OpenAIResponsesUserMessage = { + role: "user" + content: Array< + | { type: "input_text"; text: string } + | { type: "input_image"; image_url: string } + | { type: "input_image"; file_id: string } + | { type: "input_file"; file_url: string } + | { type: "input_file"; filename: string; file_data: string } + | { type: "input_file"; file_id: string } + > +} + +export type OpenAIResponsesAssistantMessage = { + role: "assistant" + content: Array<{ type: "output_text"; text: string }> + id?: string +} + +export type OpenAIResponsesFunctionCall = { + type: "function_call" + call_id: string + name: string + arguments: string + id?: string +} + +export type OpenAIResponsesFunctionCallOutput = { + type: "function_call_output" + call_id: string + output: string +} + +export type OpenAIResponsesComputerCall = { + type: "computer_call" + id: string + status?: string +} + +export type OpenAIResponsesLocalShellCall = { + type: "local_shell_call" + id: string + call_id: string +
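/** Shell command invocation requested by the model (exec action). */ +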
action: { + type: "exec" + command: string[] + timeout_ms?: number + user?: string + working_directory?: string + env?: Record<string, string> + } +} + +export type OpenAIResponsesLocalShellCallOutput = { + type: "local_shell_call_output" + call_id: string + output: string +} + +export type OpenAIResponsesItemReference = { + type: "item_reference" + id: string +} + +/** + * A filter used to compare a specified attribute key to a given value using a defined comparison operation. + */ +export type OpenAIResponsesFileSearchToolComparisonFilter = { + /** + * The key to compare against the value. + */ + key: string + + /** + * Specifies the comparison operator: eq, ne, gt, gte, lt, lte. + */ + type: "eq" | "ne" | "gt" | "gte" | "lt" | "lte" + + /** + * The value to compare against the attribute key; supports string, number, or boolean types. + */ + value: string | number | boolean +} + +/** + * Combine multiple filters using and or or. + */ +export type OpenAIResponsesFileSearchToolCompoundFilter = { + /** + * Type of operation: and or or. + */ + type: "and" | "or" + + /** + * Array of filters to combine. Items can be ComparisonFilter or CompoundFilter. + */ + filters: Array<OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter> +} + +export type OpenAIResponsesTool = + | { + type: "function" + name: string + description: string | undefined + parameters: JSONSchema7 + strict: boolean | undefined + } + | { + type: "web_search" + filters: { allowed_domains: string[] | undefined } | undefined + search_context_size: "low" | "medium" | "high" | undefined + user_location: + | { + type: "approximate" + city?: string + country?: string + region?: string + timezone?: string + } + | undefined + } + | { + type: "web_search_preview" + search_context_size: "low" | "medium" | "high" | undefined + user_location: + | { + type: "approximate" + city?: string + country?: string + region?: string + timezone?: string + } + | undefined + } + | { + type: "code_interpreter" + container: string | { type: "auto"; file_ids: string[] | undefined } + } + | { + type: "file_search" + vector_store_ids: string[] + max_num_results: number | undefined + ranking_options: { ranker?: string; score_threshold?: number } | undefined + filters: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter | undefined + } + | { + type: "image_generation" + background: "auto" | "opaque" | "transparent" | undefined + input_fidelity: "low" | "high" | undefined + input_image_mask: + | { + file_id: string | undefined + image_url: string | undefined + } + | undefined + model: string | undefined + moderation: "auto" | undefined + output_compression: number | undefined + output_format: "png" | "jpeg" | "webp" | undefined + partial_images: number | undefined + quality: "auto" | "low" | "medium" | "high" | undefined + size: "auto" | "1024x1024" | "1024x1536" | "1536x1024" | undefined + } + | { + type: "local_shell" + } + +export type OpenAIResponsesReasoning = { + type: "reasoning" + id: string + encrypted_content?: string | null + summary: Array<{ + type: "summary_text" + text: string + }> +} diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-language-model.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-language-model.ts new file mode 100644 index 00000000000..94b0edaf3f4 --- /dev/null +++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-language-model.ts @@ -0,0 +1,1714 @@ +import { + APICallError, + type LanguageModelV2, + type LanguageModelV2CallWarning, +
type LanguageModelV2Content, + type LanguageModelV2FinishReason, + type LanguageModelV2ProviderDefinedTool, + type LanguageModelV2StreamPart, + type LanguageModelV2Usage, + type SharedV2ProviderMetadata, +} from "@ai-sdk/provider" +import { + combineHeaders, + createEventSourceResponseHandler, + createJsonResponseHandler, + generateId, + parseProviderOptions, + type ParseResult, + postJsonToApi, +} from "@ai-sdk/provider-utils" +import { z } from "zod/v4" +import type { OpenAIConfig } from "./openai-config" +import { openaiFailedResponseHandler } from "./openai-error" +import { codeInterpreterInputSchema, codeInterpreterOutputSchema } from "./tool/code-interpreter" +import { fileSearchOutputSchema } from "./tool/file-search" +import { imageGenerationOutputSchema } from "./tool/image-generation" +import { convertToOpenAIResponsesInput } from "./convert-to-openai-responses-input" +import { mapOpenAIResponseFinishReason } from "./map-openai-responses-finish-reason" +import type { OpenAIResponsesIncludeOptions, OpenAIResponsesIncludeValue } from "./openai-responses-api-types" +import { prepareResponsesTools } from "./openai-responses-prepare-tools" +import type { OpenAIResponsesModelId } from "./openai-responses-settings" +import { localShellInputSchema } from "./tool/local-shell" + +const webSearchCallItem = z.object({ + type: z.literal("web_search_call"), + id: z.string(), + status: z.string(), + action: z + .discriminatedUnion("type", [ + z.object({ + type: z.literal("search"), + query: z.string().nullish(), + }), + z.object({ + type: z.literal("open_page"), + url: z.string(), + }), + z.object({ + type: z.literal("find"), + url: z.string(), + pattern: z.string(), + }), + ]) + .nullish(), +}) + +const fileSearchCallItem = z.object({ + type: z.literal("file_search_call"), + id: z.string(), + queries: z.array(z.string()), + results: z + .array( + z.object({ + attributes: z.record(z.string(), z.unknown()), + file_id: z.string(), + filename: z.string(), + score: z.number(), + text: z.string(), + }), + ) + .nullish(), +}) + +const codeInterpreterCallItem = z.object({ + type: z.literal("code_interpreter_call"), + id: z.string(), + code: z.string().nullable(), + container_id: z.string(), + outputs: z + .array( + z.discriminatedUnion("type", [ + z.object({ type: z.literal("logs"), logs: z.string() }), + z.object({ type: z.literal("image"), url: z.string() }), + ]), + ) + .nullable(), +}) + +const localShellCallItem = z.object({ + type: z.literal("local_shell_call"), + id: z.string(), + call_id: z.string(), + action: z.object({ + type: z.literal("exec"), + command: z.array(z.string()), + timeout_ms: z.number().optional(), + user: z.string().optional(), + working_directory: z.string().optional(), + env: z.record(z.string(), z.string()).optional(), + }), +}) + +const imageGenerationCallItem = z.object({ + type: z.literal("image_generation_call"), + id: z.string(), + result: z.string(), +}) + +/** + * `top_logprobs` request body argument can be set to an integer between + * 0 and 20 specifying the number of most likely tokens to return at each + * token position, each with an associated log probability. 
+ * + * @see https://platform.openai.com/docs/api-reference/responses/create#responses_create-top_logprobs + */ +const TOP_LOGPROBS_MAX = 20 + +const LOGPROBS_SCHEMA = z.array( + z.object({ + token: z.string(), + logprob: z.number(), + top_logprobs: z.array( + z.object({ + token: z.string(), + logprob: z.number(), + }), + ), + }), +) + +export class OpenAIResponsesLanguageModel implements LanguageModelV2 { + readonly specificationVersion = "v2" + + readonly modelId: OpenAIResponsesModelId + + private readonly config: OpenAIConfig + + constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig) { + this.modelId = modelId + this.config = config + } + + readonly supportedUrls: Record = { + "image/*": [/^https?:\/\/.*$/], + "application/pdf": [/^https?:\/\/.*$/], + } + + get provider(): string { + return this.config.provider + } + + private async getArgs({ + maxOutputTokens, + temperature, + stopSequences, + topP, + topK, + presencePenalty, + frequencyPenalty, + seed, + prompt, + providerOptions, + tools, + toolChoice, + responseFormat, + }: Parameters[0]) { + const warnings: LanguageModelV2CallWarning[] = [] + const modelConfig = getResponsesModelConfig(this.modelId) + + if (topK != null) { + warnings.push({ type: "unsupported-setting", setting: "topK" }) + } + + if (seed != null) { + warnings.push({ type: "unsupported-setting", setting: "seed" }) + } + + if (presencePenalty != null) { + warnings.push({ + type: "unsupported-setting", + setting: "presencePenalty", + }) + } + + if (frequencyPenalty != null) { + warnings.push({ + type: "unsupported-setting", + setting: "frequencyPenalty", + }) + } + + if (stopSequences != null) { + warnings.push({ type: "unsupported-setting", setting: "stopSequences" }) + } + + const openaiOptions = await parseProviderOptions({ + provider: "openai", + providerOptions, + schema: openaiResponsesProviderOptionsSchema, + }) + + const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({ + prompt, + systemMessageMode: modelConfig.systemMessageMode, + fileIdPrefixes: this.config.fileIdPrefixes, + store: openaiOptions?.store ?? true, + hasLocalShellTool: hasOpenAITool("openai.local_shell"), + }) + + warnings.push(...inputWarnings) + + const strictJsonSchema = openaiOptions?.strictJsonSchema ?? false + + let include: OpenAIResponsesIncludeOptions = openaiOptions?.include + + function addInclude(key: OpenAIResponsesIncludeValue) { + include = include != null ? [...include, key] : [key] + } + + function hasOpenAITool(id: string) { + return tools?.find((tool) => tool.type === "provider-defined" && tool.id === id) != null + } + + // when logprobs are requested, automatically include them: + const topLogprobs = + typeof openaiOptions?.logprobs === "number" + ? openaiOptions?.logprobs + : openaiOptions?.logprobs === true + ? 
TOP_LOGPROBS_MAX + : undefined + + if (topLogprobs) { + addInclude("message.output_text.logprobs") + } + + // when a web search tool is present, automatically include the sources: + const webSearchToolName = ( + tools?.find( + (tool) => + tool.type === "provider-defined" && + (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview"), + ) as LanguageModelV2ProviderDefinedTool | undefined + )?.name + + if (webSearchToolName) { + addInclude("web_search_call.action.sources") + } + + // when a code interpreter tool is present, automatically include the outputs: + if (hasOpenAITool("openai.code_interpreter")) { + addInclude("code_interpreter_call.outputs") + } + + const baseArgs = { + model: this.modelId, + input, + temperature, + top_p: topP, + max_output_tokens: maxOutputTokens, + + ...((responseFormat?.type === "json" || openaiOptions?.textVerbosity) && { + text: { + ...(responseFormat?.type === "json" && { + format: + responseFormat.schema != null + ? { + type: "json_schema", + strict: strictJsonSchema, + name: responseFormat.name ?? "response", + description: responseFormat.description, + schema: responseFormat.schema, + } + : { type: "json_object" }, + }), + ...(openaiOptions?.textVerbosity && { + verbosity: openaiOptions.textVerbosity, + }), + }, + }), + + // provider options: + max_tool_calls: openaiOptions?.maxToolCalls, + metadata: openaiOptions?.metadata, + parallel_tool_calls: openaiOptions?.parallelToolCalls, + previous_response_id: openaiOptions?.previousResponseId, + store: openaiOptions?.store, + user: openaiOptions?.user, + instructions: openaiOptions?.instructions, + service_tier: openaiOptions?.serviceTier, + include, + prompt_cache_key: openaiOptions?.promptCacheKey, + safety_identifier: openaiOptions?.safetyIdentifier, + top_logprobs: topLogprobs, + + // model-specific settings: + ...(modelConfig.isReasoningModel && + (openaiOptions?.reasoningEffort != null || openaiOptions?.reasoningSummary != null) && { + reasoning: { + ...(openaiOptions?.reasoningEffort != null && { + effort: openaiOptions.reasoningEffort, + }), + ...(openaiOptions?.reasoningSummary != null && { + summary: openaiOptions.reasoningSummary, + }), + }, + }), + ...(modelConfig.requiredAutoTruncation && { + truncation: "auto", + }), + } + + if (modelConfig.isReasoningModel) { + // remove unsupported settings for reasoning models + // see https://platform.openai.com/docs/guides/reasoning#limitations + if (baseArgs.temperature != null) { + baseArgs.temperature = undefined + warnings.push({ + type: "unsupported-setting", + setting: "temperature", + details: "temperature is not supported for reasoning models", + }) + } + + if (baseArgs.top_p != null) { + baseArgs.top_p = undefined + warnings.push({ + type: "unsupported-setting", + setting: "topP", + details: "topP is not supported for reasoning models", + }) + } + } else { + if (openaiOptions?.reasoningEffort != null) { + warnings.push({ + type: "unsupported-setting", + setting: "reasoningEffort", + details: "reasoningEffort is not supported for non-reasoning models", + }) + } + + if (openaiOptions?.reasoningSummary != null) { + warnings.push({ + type: "unsupported-setting", + setting: "reasoningSummary", + details: "reasoningSummary is not supported for non-reasoning models", + }) + } + } + + // Validate flex processing support + if (openaiOptions?.serviceTier === "flex" && !modelConfig.supportsFlexProcessing) { + warnings.push({ + type: "unsupported-setting", + setting: "serviceTier", + details: "flex processing is only available for o3, 
o4-mini, and gpt-5 models", + }) + // Remove from args if not supported + delete (baseArgs as any).service_tier + } + + // Validate priority processing support + if (openaiOptions?.serviceTier === "priority" && !modelConfig.supportsPriorityProcessing) { + warnings.push({ + type: "unsupported-setting", + setting: "serviceTier", + details: + "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported", + }) + // Remove from args if not supported + delete (baseArgs as any).service_tier + } + + const { + tools: openaiTools, + toolChoice: openaiToolChoice, + toolWarnings, + } = prepareResponsesTools({ + tools, + toolChoice, + strictJsonSchema, + }) + + return { + webSearchToolName, + args: { + ...baseArgs, + tools: openaiTools, + tool_choice: openaiToolChoice, + }, + warnings: [...warnings, ...toolWarnings], + } + } + + async doGenerate( + options: Parameters[0], + ): Promise>> { + const { args: body, warnings, webSearchToolName } = await this.getArgs(options) + const url = this.config.url({ + path: "/responses", + modelId: this.modelId, + }) + + const { + responseHeaders, + value: response, + rawValue: rawResponse, + } = await postJsonToApi({ + url, + headers: combineHeaders(this.config.headers(), options.headers), + body, + failedResponseHandler: openaiFailedResponseHandler, + successfulResponseHandler: createJsonResponseHandler( + z.object({ + id: z.string(), + created_at: z.number(), + error: z + .object({ + code: z.string(), + message: z.string(), + }) + .nullish(), + model: z.string(), + output: z.array( + z.discriminatedUnion("type", [ + z.object({ + type: z.literal("message"), + role: z.literal("assistant"), + id: z.string(), + content: z.array( + z.object({ + type: z.literal("output_text"), + text: z.string(), + logprobs: LOGPROBS_SCHEMA.nullish(), + annotations: z.array( + z.discriminatedUnion("type", [ + z.object({ + type: z.literal("url_citation"), + start_index: z.number(), + end_index: z.number(), + url: z.string(), + title: z.string(), + }), + z.object({ + type: z.literal("file_citation"), + file_id: z.string(), + filename: z.string().nullish(), + index: z.number().nullish(), + start_index: z.number().nullish(), + end_index: z.number().nullish(), + quote: z.string().nullish(), + }), + z.object({ + type: z.literal("container_file_citation"), + }), + ]), + ), + }), + ), + }), + webSearchCallItem, + fileSearchCallItem, + codeInterpreterCallItem, + imageGenerationCallItem, + localShellCallItem, + z.object({ + type: z.literal("function_call"), + call_id: z.string(), + name: z.string(), + arguments: z.string(), + id: z.string(), + }), + z.object({ + type: z.literal("computer_call"), + id: z.string(), + status: z.string().optional(), + }), + z.object({ + type: z.literal("reasoning"), + id: z.string(), + encrypted_content: z.string().nullish(), + summary: z.array( + z.object({ + type: z.literal("summary_text"), + text: z.string(), + }), + ), + }), + ]), + ), + service_tier: z.string().nullish(), + incomplete_details: z.object({ reason: z.string() }).nullish(), + usage: usageSchema, + }), + ), + abortSignal: options.abortSignal, + fetch: this.config.fetch, + }) + + if (response.error) { + throw new APICallError({ + message: response.error.message, + url, + requestBodyValues: body, + statusCode: 400, + responseHeaders, + responseBody: rawResponse as string, + isRetryable: false, + }) + } + + const content: Array = [] + const logprobs: Array> = [] + + // flag that checks if there have been 
client-side tool calls (not executed by openai) + let hasFunctionCall = false + + // map response content to content array + for (const part of response.output) { + switch (part.type) { + case "reasoning": { + // when there are no summary parts, we need to add an empty reasoning part: + if (part.summary.length === 0) { + part.summary.push({ type: "summary_text", text: "" }) + } + + for (const summary of part.summary) { + content.push({ + type: "reasoning" as const, + text: summary.text, + providerMetadata: { + openai: { + itemId: part.id, + reasoningEncryptedContent: part.encrypted_content ?? null, + }, + }, + }) + } + break + } + + case "image_generation_call": { + content.push({ + type: "tool-call", + toolCallId: part.id, + toolName: "image_generation", + input: "{}", + providerExecuted: true, + }) + + content.push({ + type: "tool-result", + toolCallId: part.id, + toolName: "image_generation", + result: { + result: part.result, + } satisfies z.infer, + providerExecuted: true, + }) + + break + } + + case "local_shell_call": { + content.push({ + type: "tool-call", + toolCallId: part.call_id, + toolName: "local_shell", + input: JSON.stringify({ action: part.action } satisfies z.infer), + providerMetadata: { + openai: { + itemId: part.id, + }, + }, + }) + + break + } + + case "message": { + for (const contentPart of part.content) { + if (options.providerOptions?.openai?.logprobs && contentPart.logprobs) { + logprobs.push(contentPart.logprobs) + } + + content.push({ + type: "text", + text: contentPart.text, + providerMetadata: { + openai: { + itemId: part.id, + }, + }, + }) + + for (const annotation of contentPart.annotations) { + if (annotation.type === "url_citation") { + content.push({ + type: "source", + sourceType: "url", + id: this.config.generateId?.() ?? generateId(), + url: annotation.url, + title: annotation.title, + }) + } else if (annotation.type === "file_citation") { + content.push({ + type: "source", + sourceType: "document", + id: this.config.generateId?.() ?? generateId(), + mediaType: "text/plain", + title: annotation.quote ?? annotation.filename ?? "Document", + filename: annotation.filename ?? annotation.file_id, + }) + } + } + } + + break + } + + case "function_call": { + hasFunctionCall = true + + content.push({ + type: "tool-call", + toolCallId: part.call_id, + toolName: part.name, + input: part.arguments, + providerMetadata: { + openai: { + itemId: part.id, + }, + }, + }) + break + } + + case "web_search_call": { + content.push({ + type: "tool-call", + toolCallId: part.id, + toolName: webSearchToolName ?? "web_search", + input: JSON.stringify({ action: part.action }), + providerExecuted: true, + }) + + content.push({ + type: "tool-result", + toolCallId: part.id, + toolName: webSearchToolName ?? 
"web_search", + result: { status: part.status }, + providerExecuted: true, + }) + + break + } + + case "computer_call": { + content.push({ + type: "tool-call", + toolCallId: part.id, + toolName: "computer_use", + input: "", + providerExecuted: true, + }) + + content.push({ + type: "tool-result", + toolCallId: part.id, + toolName: "computer_use", + result: { + type: "computer_use_tool_result", + status: part.status || "completed", + }, + providerExecuted: true, + }) + break + } + + case "file_search_call": { + content.push({ + type: "tool-call", + toolCallId: part.id, + toolName: "file_search", + input: "{}", + providerExecuted: true, + }) + + content.push({ + type: "tool-result", + toolCallId: part.id, + toolName: "file_search", + result: { + queries: part.queries, + results: + part.results?.map((result) => ({ + attributes: result.attributes, + fileId: result.file_id, + filename: result.filename, + score: result.score, + text: result.text, + })) ?? null, + } satisfies z.infer, + providerExecuted: true, + }) + break + } + + case "code_interpreter_call": { + content.push({ + type: "tool-call", + toolCallId: part.id, + toolName: "code_interpreter", + input: JSON.stringify({ + code: part.code, + containerId: part.container_id, + } satisfies z.infer), + providerExecuted: true, + }) + + content.push({ + type: "tool-result", + toolCallId: part.id, + toolName: "code_interpreter", + result: { + outputs: part.outputs, + } satisfies z.infer, + providerExecuted: true, + }) + break + } + } + } + + const providerMetadata: SharedV2ProviderMetadata = { + openai: { responseId: response.id }, + } + + if (logprobs.length > 0) { + providerMetadata.openai.logprobs = logprobs + } + + if (typeof response.service_tier === "string") { + providerMetadata.openai.serviceTier = response.service_tier + } + + return { + content, + finishReason: mapOpenAIResponseFinishReason({ + finishReason: response.incomplete_details?.reason, + hasFunctionCall, + }), + usage: { + inputTokens: response.usage.input_tokens, + outputTokens: response.usage.output_tokens, + totalTokens: response.usage.input_tokens + response.usage.output_tokens, + reasoningTokens: response.usage.output_tokens_details?.reasoning_tokens ?? undefined, + cachedInputTokens: response.usage.input_tokens_details?.cached_tokens ?? 
undefined, + }, + request: { body }, + response: { + id: response.id, + timestamp: new Date(response.created_at * 1000), + modelId: response.model, + headers: responseHeaders, + body: rawResponse, + }, + providerMetadata, + warnings, + } + } + + async doStream( + options: Parameters[0], + ): Promise>> { + const { args: body, warnings, webSearchToolName } = await this.getArgs(options) + + const { responseHeaders, value: response } = await postJsonToApi({ + url: this.config.url({ + path: "/responses", + modelId: this.modelId, + }), + headers: combineHeaders(this.config.headers(), options.headers), + body: { + ...body, + stream: true, + }, + failedResponseHandler: openaiFailedResponseHandler, + successfulResponseHandler: createEventSourceResponseHandler(openaiResponsesChunkSchema), + abortSignal: options.abortSignal, + fetch: this.config.fetch, + }) + + const self = this + + let finishReason: LanguageModelV2FinishReason = "unknown" + const usage: LanguageModelV2Usage = { + inputTokens: undefined, + outputTokens: undefined, + totalTokens: undefined, + } + const logprobs: Array> = [] + let responseId: string | null = null + const ongoingToolCalls: Record< + number, + | { + toolName: string + toolCallId: string + codeInterpreter?: { + containerId: string + } + } + | undefined + > = {} + + // flag that checks if there have been client-side tool calls (not executed by openai) + let hasFunctionCall = false + + const activeReasoning: Record< + string, + { + encryptedContent?: string | null + summaryParts: number[] + } + > = {} + + // Track a stable text part id for the current assistant message. + // Copilot may change item_id across text deltas; normalize to one id. + let currentTextId: string | null = null + + let serviceTier: string | undefined + + return { + stream: response.pipeThrough( + new TransformStream>, LanguageModelV2StreamPart>({ + start(controller) { + controller.enqueue({ type: "stream-start", warnings }) + }, + + transform(chunk, controller) { + if (options.includeRawChunks) { + controller.enqueue({ type: "raw", rawValue: chunk.rawValue }) + } + + // handle failed chunk parsing / validation: + if (!chunk.success) { + finishReason = "error" + controller.enqueue({ type: "error", error: chunk.error }) + return + } + + const value = chunk.value + + if (isResponseOutputItemAddedChunk(value)) { + if (value.item.type === "function_call") { + ongoingToolCalls[value.output_index] = { + toolName: value.item.name, + toolCallId: value.item.call_id, + } + + controller.enqueue({ + type: "tool-input-start", + id: value.item.call_id, + toolName: value.item.name, + }) + } else if (value.item.type === "web_search_call") { + ongoingToolCalls[value.output_index] = { + toolName: webSearchToolName ?? "web_search", + toolCallId: value.item.id, + } + + controller.enqueue({ + type: "tool-input-start", + id: value.item.id, + toolName: webSearchToolName ?? 
"web_search", + }) + } else if (value.item.type === "computer_call") { + ongoingToolCalls[value.output_index] = { + toolName: "computer_use", + toolCallId: value.item.id, + } + + controller.enqueue({ + type: "tool-input-start", + id: value.item.id, + toolName: "computer_use", + }) + } else if (value.item.type === "code_interpreter_call") { + ongoingToolCalls[value.output_index] = { + toolName: "code_interpreter", + toolCallId: value.item.id, + codeInterpreter: { + containerId: value.item.container_id, + }, + } + + controller.enqueue({ + type: "tool-input-start", + id: value.item.id, + toolName: "code_interpreter", + }) + + controller.enqueue({ + type: "tool-input-delta", + id: value.item.id, + delta: `{"containerId":"${value.item.container_id}","code":"`, + }) + } else if (value.item.type === "file_search_call") { + controller.enqueue({ + type: "tool-call", + toolCallId: value.item.id, + toolName: "file_search", + input: "{}", + providerExecuted: true, + }) + } else if (value.item.type === "image_generation_call") { + controller.enqueue({ + type: "tool-call", + toolCallId: value.item.id, + toolName: "image_generation", + input: "{}", + providerExecuted: true, + }) + } else if (value.item.type === "message") { + // Start a stable text part for this assistant message + currentTextId = value.item.id + controller.enqueue({ + type: "text-start", + id: value.item.id, + providerMetadata: { + openai: { + itemId: value.item.id, + }, + }, + }) + } else if (isResponseOutputItemAddedReasoningChunk(value)) { + activeReasoning[value.item.id] = { + encryptedContent: value.item.encrypted_content, + summaryParts: [0], + } + + controller.enqueue({ + type: "reasoning-start", + id: `${value.item.id}:0`, + providerMetadata: { + openai: { + itemId: value.item.id, + reasoningEncryptedContent: value.item.encrypted_content ?? 
null, + }, + }, + }) + } + } else if (isResponseOutputItemDoneChunk(value)) { + if (value.item.type === "function_call") { + ongoingToolCalls[value.output_index] = undefined + hasFunctionCall = true + + controller.enqueue({ + type: "tool-input-end", + id: value.item.call_id, + }) + + controller.enqueue({ + type: "tool-call", + toolCallId: value.item.call_id, + toolName: value.item.name, + input: value.item.arguments, + providerMetadata: { + openai: { + itemId: value.item.id, + }, + }, + }) + } else if (value.item.type === "web_search_call") { + ongoingToolCalls[value.output_index] = undefined + + controller.enqueue({ + type: "tool-input-end", + id: value.item.id, + }) + + controller.enqueue({ + type: "tool-call", + toolCallId: value.item.id, + toolName: "web_search", + input: JSON.stringify({ action: value.item.action }), + providerExecuted: true, + }) + + controller.enqueue({ + type: "tool-result", + toolCallId: value.item.id, + toolName: "web_search", + result: { status: value.item.status }, + providerExecuted: true, + }) + } else if (value.item.type === "computer_call") { + ongoingToolCalls[value.output_index] = undefined + + controller.enqueue({ + type: "tool-input-end", + id: value.item.id, + }) + + controller.enqueue({ + type: "tool-call", + toolCallId: value.item.id, + toolName: "computer_use", + input: "", + providerExecuted: true, + }) + + controller.enqueue({ + type: "tool-result", + toolCallId: value.item.id, + toolName: "computer_use", + result: { + type: "computer_use_tool_result", + status: value.item.status || "completed", + }, + providerExecuted: true, + }) + } else if (value.item.type === "file_search_call") { + ongoingToolCalls[value.output_index] = undefined + + controller.enqueue({ + type: "tool-result", + toolCallId: value.item.id, + toolName: "file_search", + result: { + queries: value.item.queries, + results: + value.item.results?.map((result) => ({ + attributes: result.attributes, + fileId: result.file_id, + filename: result.filename, + score: result.score, + text: result.text, + })) ?? 
null, + } satisfies z.infer, + providerExecuted: true, + }) + } else if (value.item.type === "code_interpreter_call") { + ongoingToolCalls[value.output_index] = undefined + + controller.enqueue({ + type: "tool-result", + toolCallId: value.item.id, + toolName: "code_interpreter", + result: { + outputs: value.item.outputs, + } satisfies z.infer, + providerExecuted: true, + }) + } else if (value.item.type === "image_generation_call") { + controller.enqueue({ + type: "tool-result", + toolCallId: value.item.id, + toolName: "image_generation", + result: { + result: value.item.result, + } satisfies z.infer, + providerExecuted: true, + }) + } else if (value.item.type === "local_shell_call") { + ongoingToolCalls[value.output_index] = undefined + + controller.enqueue({ + type: "tool-call", + toolCallId: value.item.call_id, + toolName: "local_shell", + input: JSON.stringify({ + action: { + type: "exec", + command: value.item.action.command, + timeoutMs: value.item.action.timeout_ms, + user: value.item.action.user, + workingDirectory: value.item.action.working_directory, + env: value.item.action.env, + }, + } satisfies z.infer), + providerMetadata: { + openai: { itemId: value.item.id }, + }, + }) + } else if (value.item.type === "message") { + if (currentTextId) { + controller.enqueue({ + type: "text-end", + id: currentTextId, + }) + currentTextId = null + } + } else if (isResponseOutputItemDoneReasoningChunk(value)) { + const activeReasoningPart = activeReasoning[value.item.id] + if (activeReasoningPart) { + for (const summaryIndex of activeReasoningPart.summaryParts) { + controller.enqueue({ + type: "reasoning-end", + id: `${value.item.id}:${summaryIndex}`, + providerMetadata: { + openai: { + itemId: value.item.id, + reasoningEncryptedContent: value.item.encrypted_content ?? null, + }, + }, + }) + } + } + delete activeReasoning[value.item.id] + } + } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) { + const toolCall = ongoingToolCalls[value.output_index] + + if (toolCall != null) { + controller.enqueue({ + type: "tool-input-delta", + id: toolCall.toolCallId, + delta: value.delta, + }) + } + } else if (isResponseImageGenerationCallPartialImageChunk(value)) { + controller.enqueue({ + type: "tool-result", + toolCallId: value.item_id, + toolName: "image_generation", + result: { + result: value.partial_image_b64, + } satisfies z.infer, + providerExecuted: true, + }) + } else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) { + const toolCall = ongoingToolCalls[value.output_index] + + if (toolCall != null) { + controller.enqueue({ + type: "tool-input-delta", + id: toolCall.toolCallId, + // The delta is code, which is embedding in a JSON string. + // To escape it, we use JSON.stringify and slice to remove the outer quotes. 
+ delta: JSON.stringify(value.delta).slice(1, -1), + }) + } + } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) { + const toolCall = ongoingToolCalls[value.output_index] + + if (toolCall != null) { + controller.enqueue({ + type: "tool-input-delta", + id: toolCall.toolCallId, + delta: '"}', + }) + + controller.enqueue({ + type: "tool-input-end", + id: toolCall.toolCallId, + }) + + // immediately send the tool call after the input end: + controller.enqueue({ + type: "tool-call", + toolCallId: toolCall.toolCallId, + toolName: "code_interpreter", + input: JSON.stringify({ + code: value.code, + containerId: toolCall.codeInterpreter!.containerId, + } satisfies z.infer), + providerExecuted: true, + }) + } + } else if (isResponseCreatedChunk(value)) { + responseId = value.response.id + controller.enqueue({ + type: "response-metadata", + id: value.response.id, + timestamp: new Date(value.response.created_at * 1000), + modelId: value.response.model, + }) + } else if (isTextDeltaChunk(value)) { + // Ensure a text-start exists, and normalize deltas to a stable id + if (!currentTextId) { + currentTextId = value.item_id + controller.enqueue({ + type: "text-start", + id: currentTextId, + providerMetadata: { + openai: { itemId: value.item_id }, + }, + }) + } + + controller.enqueue({ + type: "text-delta", + id: currentTextId, + delta: value.delta, + }) + + if (options.providerOptions?.openai?.logprobs && value.logprobs) { + logprobs.push(value.logprobs) + } + } else if (isResponseReasoningSummaryPartAddedChunk(value)) { + // the first reasoning start is pushed in isResponseOutputItemAddedReasoningChunk. + if (value.summary_index > 0) { + activeReasoning[value.item_id]?.summaryParts.push(value.summary_index) + + controller.enqueue({ + type: "reasoning-start", + id: `${value.item_id}:${value.summary_index}`, + providerMetadata: { + openai: { + itemId: value.item_id, + reasoningEncryptedContent: activeReasoning[value.item_id]?.encryptedContent ?? null, + }, + }, + }) + } + } else if (isResponseReasoningSummaryTextDeltaChunk(value)) { + controller.enqueue({ + type: "reasoning-delta", + id: `${value.item_id}:${value.summary_index}`, + delta: value.delta, + providerMetadata: { + openai: { + itemId: value.item_id, + }, + }, + }) + } else if (isResponseFinishedChunk(value)) { + finishReason = mapOpenAIResponseFinishReason({ + finishReason: value.response.incomplete_details?.reason, + hasFunctionCall, + }) + usage.inputTokens = value.response.usage.input_tokens + usage.outputTokens = value.response.usage.output_tokens + usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens + usage.reasoningTokens = value.response.usage.output_tokens_details?.reasoning_tokens ?? undefined + usage.cachedInputTokens = value.response.usage.input_tokens_details?.cached_tokens ?? undefined + if (typeof value.response.service_tier === "string") { + serviceTier = value.response.service_tier + } + } else if (isResponseAnnotationAddedChunk(value)) { + if (value.annotation.type === "url_citation") { + controller.enqueue({ + type: "source", + sourceType: "url", + id: self.config.generateId?.() ?? generateId(), + url: value.annotation.url, + title: value.annotation.title, + }) + } else if (value.annotation.type === "file_citation") { + controller.enqueue({ + type: "source", + sourceType: "document", + id: self.config.generateId?.() ?? generateId(), + mediaType: "text/plain", + title: value.annotation.quote ?? value.annotation.filename ?? "Document", + filename: value.annotation.filename ?? 
+            })
+          }
+        } else if (isErrorChunk(value)) {
+          controller.enqueue({ type: "error", error: value })
+        }
+      },
+
+      flush(controller) {
+        // Close any dangling text part
+        if (currentTextId) {
+          controller.enqueue({ type: "text-end", id: currentTextId })
+          currentTextId = null
+        }
+
+        const providerMetadata: SharedV2ProviderMetadata = {
+          openai: {
+            responseId,
+          },
+        }
+
+        if (logprobs.length > 0) {
+          providerMetadata.openai.logprobs = logprobs
+        }
+
+        if (serviceTier !== undefined) {
+          providerMetadata.openai.serviceTier = serviceTier
+        }
+
+        controller.enqueue({
+          type: "finish",
+          finishReason,
+          usage,
+          providerMetadata,
+        })
+      },
+    }),
+    ),
+      request: { body },
+      response: { headers: responseHeaders },
+    }
+  }
+}
+
+const usageSchema = z.object({
+  input_tokens: z.number(),
+  input_tokens_details: z.object({ cached_tokens: z.number().nullish() }).nullish(),
+  output_tokens: z.number(),
+  output_tokens_details: z.object({ reasoning_tokens: z.number().nullish() }).nullish(),
+})
+
+const textDeltaChunkSchema = z.object({
+  type: z.literal("response.output_text.delta"),
+  item_id: z.string(),
+  delta: z.string(),
+  logprobs: LOGPROBS_SCHEMA.nullish(),
+})
+
+const errorChunkSchema = z.object({
+  type: z.literal("error"),
+  code: z.string(),
+  message: z.string(),
+  param: z.string().nullish(),
+  sequence_number: z.number(),
+})
+
+const responseFinishedChunkSchema = z.object({
+  type: z.enum(["response.completed", "response.incomplete"]),
+  response: z.object({
+    incomplete_details: z.object({ reason: z.string() }).nullish(),
+    usage: usageSchema,
+    service_tier: z.string().nullish(),
+  }),
+})
+
+const responseCreatedChunkSchema = z.object({
+  type: z.literal("response.created"),
+  response: z.object({
+    id: z.string(),
+    created_at: z.number(),
+    model: z.string(),
+    service_tier: z.string().nullish(),
+  }),
+})
+
+const responseOutputItemAddedSchema = z.object({
+  type: z.literal("response.output_item.added"),
+  output_index: z.number(),
+  item: z.discriminatedUnion("type", [
+    z.object({
+      type: z.literal("message"),
+      id: z.string(),
+    }),
+    z.object({
+      type: z.literal("reasoning"),
+      id: z.string(),
+      encrypted_content: z.string().nullish(),
+    }),
+    z.object({
+      type: z.literal("function_call"),
+      id: z.string(),
+      call_id: z.string(),
+      name: z.string(),
+      arguments: z.string(),
+    }),
+    z.object({
+      type: z.literal("web_search_call"),
+      id: z.string(),
+      status: z.string(),
+      action: z
+        .object({
+          type: z.literal("search"),
+          query: z.string().optional(),
+        })
+        .nullish(),
+    }),
+    z.object({
+      type: z.literal("computer_call"),
+      id: z.string(),
+      status: z.string(),
+    }),
+    z.object({
+      type: z.literal("file_search_call"),
+      id: z.string(),
+    }),
+    z.object({
+      type: z.literal("image_generation_call"),
+      id: z.string(),
+    }),
+    z.object({
+      type: z.literal("code_interpreter_call"),
+      id: z.string(),
+      container_id: z.string(),
+      code: z.string().nullable(),
+      outputs: z
+        .array(
+          z.discriminatedUnion("type", [
+            z.object({ type: z.literal("logs"), logs: z.string() }),
+            z.object({ type: z.literal("image"), url: z.string() }),
+          ]),
+        )
+        .nullable(),
+      status: z.string(),
+    }),
+  ]),
+})
+
+const responseOutputItemDoneSchema = z.object({
+  type: z.literal("response.output_item.done"),
+  output_index: z.number(),
+  item: z.discriminatedUnion("type", [
+    z.object({
+      type: z.literal("message"),
+      id: z.string(),
+    }),
+    z.object({
+      type: z.literal("reasoning"),
+      id: z.string(),
+      encrypted_content: z.string().nullish(),
+    }),
+    z.object({
+      type: z.literal("function_call"),
+      id: z.string(),
+      call_id: z.string(),
+      name: z.string(),
+      arguments: z.string(),
+      status: z.literal("completed"),
+    }),
+    codeInterpreterCallItem,
+    imageGenerationCallItem,
+    webSearchCallItem,
+    fileSearchCallItem,
+    localShellCallItem,
+    z.object({
+      type: z.literal("computer_call"),
+      id: z.string(),
+      status: z.literal("completed"),
+    }),
+  ]),
+})
+
+const responseFunctionCallArgumentsDeltaSchema = z.object({
+  type: z.literal("response.function_call_arguments.delta"),
+  item_id: z.string(),
+  output_index: z.number(),
+  delta: z.string(),
+})
+
+const responseImageGenerationCallPartialImageSchema = z.object({
+  type: z.literal("response.image_generation_call.partial_image"),
+  item_id: z.string(),
+  output_index: z.number(),
+  partial_image_b64: z.string(),
+})
+
+const responseCodeInterpreterCallCodeDeltaSchema = z.object({
+  type: z.literal("response.code_interpreter_call_code.delta"),
+  item_id: z.string(),
+  output_index: z.number(),
+  delta: z.string(),
+})
+
+const responseCodeInterpreterCallCodeDoneSchema = z.object({
+  type: z.literal("response.code_interpreter_call_code.done"),
+  item_id: z.string(),
+  output_index: z.number(),
+  code: z.string(),
+})
+
+const responseAnnotationAddedSchema = z.object({
+  type: z.literal("response.output_text.annotation.added"),
+  annotation: z.discriminatedUnion("type", [
+    z.object({
+      type: z.literal("url_citation"),
+      url: z.string(),
+      title: z.string(),
+    }),
+    z.object({
+      type: z.literal("file_citation"),
+      file_id: z.string(),
+      filename: z.string().nullish(),
+      index: z.number().nullish(),
+      start_index: z.number().nullish(),
+      end_index: z.number().nullish(),
+      quote: z.string().nullish(),
+    }),
+  ]),
+})
+
+const responseReasoningSummaryPartAddedSchema = z.object({
+  type: z.literal("response.reasoning_summary_part.added"),
+  item_id: z.string(),
+  summary_index: z.number(),
+})
+
+const responseReasoningSummaryTextDeltaSchema = z.object({
+  type: z.literal("response.reasoning_summary_text.delta"),
+  item_id: z.string(),
+  summary_index: z.number(),
+  delta: z.string(),
+})
+
+const openaiResponsesChunkSchema = z.union([
+  textDeltaChunkSchema,
+  responseFinishedChunkSchema,
+  responseCreatedChunkSchema,
+  responseOutputItemAddedSchema,
+  responseOutputItemDoneSchema,
+  responseFunctionCallArgumentsDeltaSchema,
+  responseImageGenerationCallPartialImageSchema,
+  responseCodeInterpreterCallCodeDeltaSchema,
+  responseCodeInterpreterCallCodeDoneSchema,
+  responseAnnotationAddedSchema,
+  responseReasoningSummaryPartAddedSchema,
+  responseReasoningSummaryTextDeltaSchema,
+  errorChunkSchema,
+  z.object({ type: z.string() }).loose(), // fallback for unknown chunks
+])
+
+type ExtractByType<T, K extends string> = T extends { type: K } ? T : never
+
+function isTextDeltaChunk(
+  chunk: z.infer<typeof openaiResponsesChunkSchema>,
+): chunk is z.infer<typeof textDeltaChunkSchema> {
+  return chunk.type === "response.output_text.delta"
+}
+
+function isResponseOutputItemDoneChunk(
+  chunk: z.infer<typeof openaiResponsesChunkSchema>,
+): chunk is z.infer<typeof responseOutputItemDoneSchema> {
+  return chunk.type === "response.output_item.done"
+}
+
+function isResponseOutputItemDoneReasoningChunk(chunk: z.infer<typeof openaiResponsesChunkSchema>): chunk is z.infer<
+  typeof responseOutputItemDoneSchema
+> & {
+  item: ExtractByType<z.infer<typeof responseOutputItemDoneSchema>["item"], "reasoning">
+} {
+  return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning"
+}
+
+function isResponseFinishedChunk(
+  chunk: z.infer<typeof openaiResponsesChunkSchema>,
+): chunk is z.infer<typeof responseFinishedChunkSchema> {
+  return chunk.type === "response.completed" || chunk.type === "response.incomplete"
+}
+
+function isResponseCreatedChunk(
+  chunk: z.infer<typeof openaiResponsesChunkSchema>,
+): chunk is z.infer<typeof responseCreatedChunkSchema> {
+  return chunk.type === "response.created"
+}
+
+function isResponseFunctionCallArgumentsDeltaChunk(
+  chunk: z.infer<typeof openaiResponsesChunkSchema>,
+): chunk is z.infer<typeof responseFunctionCallArgumentsDeltaSchema> {
+  return chunk.type === "response.function_call_arguments.delta"
+}
+
+function isResponseImageGenerationCallPartialImageChunk(
+  chunk: z.infer<typeof openaiResponsesChunkSchema>,
+): chunk is z.infer<typeof responseImageGenerationCallPartialImageSchema> {
+  return chunk.type === "response.image_generation_call.partial_image"
+}
+
+function isResponseCodeInterpreterCallCodeDeltaChunk(
+  chunk: z.infer<typeof openaiResponsesChunkSchema>,
+): chunk is z.infer<typeof responseCodeInterpreterCallCodeDeltaSchema> {
+  return chunk.type === "response.code_interpreter_call_code.delta"
+}
+
+function isResponseCodeInterpreterCallCodeDoneChunk(
+  chunk: z.infer<typeof openaiResponsesChunkSchema>,
+): chunk is z.infer<typeof responseCodeInterpreterCallCodeDoneSchema> {
+  return chunk.type === "response.code_interpreter_call_code.done"
+}
+
+function isResponseOutputItemAddedChunk(
+  chunk: z.infer<typeof openaiResponsesChunkSchema>,
+): chunk is z.infer<typeof responseOutputItemAddedSchema> {
+  return chunk.type === "response.output_item.added"
+}
+
+function isResponseOutputItemAddedReasoningChunk(chunk: z.infer<typeof openaiResponsesChunkSchema>): chunk is z.infer<
+  typeof responseOutputItemAddedSchema
+> & {
+  item: ExtractByType<z.infer<typeof responseOutputItemAddedSchema>["item"], "reasoning">
+} {
+  return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning"
+}
+
+function isResponseAnnotationAddedChunk(
+  chunk: z.infer<typeof openaiResponsesChunkSchema>,
+): chunk is z.infer<typeof responseAnnotationAddedSchema> {
+  return chunk.type === "response.output_text.annotation.added"
+}
+
+function isResponseReasoningSummaryPartAddedChunk(
+  chunk: z.infer<typeof openaiResponsesChunkSchema>,
+): chunk is z.infer<typeof responseReasoningSummaryPartAddedSchema> {
+  return chunk.type === "response.reasoning_summary_part.added"
+}
+
+function isResponseReasoningSummaryTextDeltaChunk(
+  chunk: z.infer<typeof openaiResponsesChunkSchema>,
+): chunk is z.infer<typeof responseReasoningSummaryTextDeltaSchema> {
+  return chunk.type === "response.reasoning_summary_text.delta"
+}
+
+function isErrorChunk(chunk: z.infer<typeof openaiResponsesChunkSchema>): chunk is z.infer<typeof errorChunkSchema> {
+  return chunk.type === "error"
+}
+
+type ResponsesModelConfig = {
+  isReasoningModel: boolean
+  systemMessageMode: "remove" | "system" | "developer"
+  requiredAutoTruncation: boolean
+  supportsFlexProcessing: boolean
+  supportsPriorityProcessing: boolean
+}
+
+function getResponsesModelConfig(modelId: string): ResponsesModelConfig {
+  const supportsFlexProcessing =
+    modelId.startsWith("o3") ||
+    modelId.startsWith("o4-mini") ||
+    (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"))
+  const supportsPriorityProcessing =
+    modelId.startsWith("gpt-4") ||
+    modelId.startsWith("gpt-5-mini") ||
+    (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat")) ||
+    modelId.startsWith("o3") ||
+    modelId.startsWith("o4-mini")
+  const defaults = {
+    requiredAutoTruncation: false,
+    systemMessageMode: "system" as const,
+    supportsFlexProcessing,
+    supportsPriorityProcessing,
+  }
+
+  // gpt-5-chat models are non-reasoning
+  if (modelId.startsWith("gpt-5-chat")) {
+    return {
+      ...defaults,
+      isReasoningModel: false,
+    }
+  }
+
+  // o series reasoning models:
+  if (
+    modelId.startsWith("o") ||
+    modelId.startsWith("gpt-5") ||
+    modelId.startsWith("codex-") ||
+    modelId.startsWith("computer-use")
+  ) {
+    if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
+      return {
+        ...defaults,
+        isReasoningModel: true,
+        systemMessageMode: "remove",
+      }
+    }
+
+    return {
+      ...defaults,
+      isReasoningModel: true,
+      systemMessageMode: "developer",
+    }
+  }
+
+  // gpt models:
+  return {
+    ...defaults,
+    isReasoningModel: false,
+  }
+}
+
+// TODO AI SDK 6: use optional here instead of nullish
+const openaiResponsesProviderOptionsSchema = z.object({
+  include: z
+    .array(z.enum(["reasoning.encrypted_content", "file_search_call.results", "message.output_text.logprobs"]))
+    .nullish(),
+  instructions: z.string().nullish(),
+
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   *
+   * @see https://platform.openai.com/docs/api-reference/responses/create
+   * @see https://cookbook.openai.com/examples/using_logprobs
+   */
+  logprobs: z.union([z.boolean(), z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
+
+  /**
+   * The maximum number of total calls to built-in tools that can be processed in a response.
+   * This maximum number applies across all built-in tool calls, not per individual tool.
+   * Any further attempts to call a tool by the model will be ignored.
+   */
+  maxToolCalls: z.number().nullish(),
+
+  metadata: z.any().nullish(),
+  parallelToolCalls: z.boolean().nullish(),
+  previousResponseId: z.string().nullish(),
+  promptCacheKey: z.string().nullish(),
+  reasoningEffort: z.string().nullish(),
+  reasoningSummary: z.string().nullish(),
+  safetyIdentifier: z.string().nullish(),
+  serviceTier: z.enum(["auto", "flex", "priority"]).nullish(),
+  store: z.boolean().nullish(),
+  strictJsonSchema: z.boolean().nullish(),
+  textVerbosity: z.enum(["low", "medium", "high"]).nullish(),
+  user: z.string().nullish(),
+})
+
+export type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>
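A minimal usage sketch of the per-call options validated by `openaiResponsesProviderOptionsSchema` above; the `openai.responses()` entry point mirrors the upstream AI SDK API and the model id is hypothetical, not part of this diff:

```ts
import { generateText } from "ai"
import { openai } from "@ai-sdk/openai"

const { text } = await generateText({
  model: openai.responses("gpt-5-mini"), // hypothetical model id
  prompt: "Summarize the build failures from the last run.",
  providerOptions: {
    openai: {
      reasoningEffort: "low",
      serviceTier: "flex", // validated against z.enum(["auto", "flex", "priority"])
      include: ["reasoning.encrypted_content"],
    },
  },
})
```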
"code_interpreter" } + | { type: "image_generation" } + toolWarnings: LanguageModelV2CallWarning[] +} { + // when the tools array is empty, change it to undefined to prevent errors: + tools = tools?.length ? tools : undefined + + const toolWarnings: LanguageModelV2CallWarning[] = [] + + if (tools == null) { + return { tools: undefined, toolChoice: undefined, toolWarnings } + } + + const openaiTools: Array = [] + + for (const tool of tools) { + switch (tool.type) { + case "function": + openaiTools.push({ + type: "function", + name: tool.name, + description: tool.description, + parameters: tool.inputSchema, + strict: strictJsonSchema, + }) + break + case "provider-defined": { + switch (tool.id) { + case "openai.file_search": { + const args = fileSearchArgsSchema.parse(tool.args) + + openaiTools.push({ + type: "file_search", + vector_store_ids: args.vectorStoreIds, + max_num_results: args.maxNumResults, + ranking_options: args.ranking + ? { + ranker: args.ranking.ranker, + score_threshold: args.ranking.scoreThreshold, + } + : undefined, + filters: args.filters, + }) + + break + } + case "openai.local_shell": { + openaiTools.push({ + type: "local_shell", + }) + break + } + case "openai.web_search_preview": { + const args = webSearchPreviewArgsSchema.parse(tool.args) + openaiTools.push({ + type: "web_search_preview", + search_context_size: args.searchContextSize, + user_location: args.userLocation, + }) + break + } + case "openai.web_search": { + const args = webSearchArgsSchema.parse(tool.args) + openaiTools.push({ + type: "web_search", + filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : undefined, + search_context_size: args.searchContextSize, + user_location: args.userLocation, + }) + break + } + case "openai.code_interpreter": { + const args = codeInterpreterArgsSchema.parse(tool.args) + openaiTools.push({ + type: "code_interpreter", + container: + args.container == null + ? { type: "auto", file_ids: undefined } + : typeof args.container === "string" + ? args.container + : { type: "auto", file_ids: args.container.fileIds }, + }) + break + } + case "openai.image_generation": { + const args = imageGenerationArgsSchema.parse(tool.args) + openaiTools.push({ + type: "image_generation", + background: args.background, + input_fidelity: args.inputFidelity, + input_image_mask: args.inputImageMask + ? { + file_id: args.inputImageMask.fileId, + image_url: args.inputImageMask.imageUrl, + } + : undefined, + model: args.model, + moderation: args.moderation, + partial_images: args.partialImages, + quality: args.quality, + output_compression: args.outputCompression, + output_format: args.outputFormat, + size: args.size, + }) + break + } + } + break + } + default: + toolWarnings.push({ type: "unsupported-tool", tool }) + break + } + } + + if (toolChoice == null) { + return { tools: openaiTools, toolChoice: undefined, toolWarnings } + } + + const type = toolChoice.type + + switch (type) { + case "auto": + case "none": + case "required": + return { tools: openaiTools, toolChoice: type, toolWarnings } + case "tool": + return { + tools: openaiTools, + toolChoice: + toolChoice.toolName === "code_interpreter" || + toolChoice.toolName === "file_search" || + toolChoice.toolName === "image_generation" || + toolChoice.toolName === "web_search_preview" || + toolChoice.toolName === "web_search" + ? 
{ type: toolChoice.toolName } + : { type: "function", name: toolChoice.toolName }, + toolWarnings, + } + default: { + const _exhaustiveCheck: never = type + throw new UnsupportedFunctionalityError({ + functionality: `tool choice type: ${_exhaustiveCheck}`, + }) + } + } +} diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-settings.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-settings.ts new file mode 100644 index 00000000000..76c97346fa2 --- /dev/null +++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-settings.ts @@ -0,0 +1 @@ +export type OpenAIResponsesModelId = string diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/code-interpreter.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/code-interpreter.ts new file mode 100644 index 00000000000..2bb4bce778d --- /dev/null +++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/code-interpreter.ts @@ -0,0 +1,88 @@ +import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils" +import { z } from "zod/v4" + +export const codeInterpreterInputSchema = z.object({ + code: z.string().nullish(), + containerId: z.string(), +}) + +export const codeInterpreterOutputSchema = z.object({ + outputs: z + .array( + z.discriminatedUnion("type", [ + z.object({ type: z.literal("logs"), logs: z.string() }), + z.object({ type: z.literal("image"), url: z.string() }), + ]), + ) + .nullish(), +}) + +export const codeInterpreterArgsSchema = z.object({ + container: z + .union([ + z.string(), + z.object({ + fileIds: z.array(z.string()).optional(), + }), + ]) + .optional(), +}) + +type CodeInterpreterArgs = { + /** + * The code interpreter container. + * Can be a container ID + * or an object that specifies uploaded file IDs to make available to your code. + */ + container?: string | { fileIds?: string[] } +} + +export const codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchema< + { + /** + * The code to run, or null if not available. + */ + code?: string | null + + /** + * The ID of the container used to run the code. + */ + containerId: string + }, + { + /** + * The outputs generated by the code interpreter, such as logs or images. + * Can be null if no outputs are available. + */ + outputs?: Array< + | { + type: "logs" + + /** + * The logs output from the code interpreter. + */ + logs: string + } + | { + type: "image" + + /** + * The URL of the image output from the code interpreter. 
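A sketch of what `prepareResponsesTools` produces for a plain function tool; the weather tool and its JSON schema are invented for illustration:

```ts
const { tools, toolChoice, toolWarnings } = prepareResponsesTools({
  tools: [
    {
      type: "function",
      name: "get_weather", // hypothetical tool
      description: "Look up the current weather for a city",
      inputSchema: {
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
      },
    },
  ],
  toolChoice: { type: "tool", toolName: "get_weather" },
  strictJsonSchema: true,
})
// tools      -> [{ type: "function", name: "get_weather", ..., strict: true }]
// toolChoice -> { type: "function", name: "get_weather" } (not a built-in tool name)
// toolWarnings stays empty, since "function" is a supported tool type
```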
diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/code-interpreter.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/code-interpreter.ts
new file mode 100644
index 00000000000..2bb4bce778d
--- /dev/null
+++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/code-interpreter.ts
@@ -0,0 +1,88 @@
+import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
+import { z } from "zod/v4"
+
+export const codeInterpreterInputSchema = z.object({
+  code: z.string().nullish(),
+  containerId: z.string(),
+})
+
+export const codeInterpreterOutputSchema = z.object({
+  outputs: z
+    .array(
+      z.discriminatedUnion("type", [
+        z.object({ type: z.literal("logs"), logs: z.string() }),
+        z.object({ type: z.literal("image"), url: z.string() }),
+      ]),
+    )
+    .nullish(),
+})
+
+export const codeInterpreterArgsSchema = z.object({
+  container: z
+    .union([
+      z.string(),
+      z.object({
+        fileIds: z.array(z.string()).optional(),
+      }),
+    ])
+    .optional(),
+})
+
+type CodeInterpreterArgs = {
+  /**
+   * The code interpreter container.
+   * Can be a container ID
+   * or an object that specifies uploaded file IDs to make available to your code.
+   */
+  container?: string | { fileIds?: string[] }
+}
+
+export const codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
+  {
+    /**
+     * The code to run, or null if not available.
+     */
+    code?: string | null
+
+    /**
+     * The ID of the container used to run the code.
+     */
+    containerId: string
+  },
+  {
+    /**
+     * The outputs generated by the code interpreter, such as logs or images.
+     * Can be null if no outputs are available.
+     */
+    outputs?: Array<
+      | {
+          type: "logs"
+
+          /**
+           * The logs output from the code interpreter.
+           */
+          logs: string
+        }
+      | {
+          type: "image"
+
+          /**
+           * The URL of the image output from the code interpreter.
+           */
+          url: string
+        }
+    > | null
+  },
+  CodeInterpreterArgs
+>({
+  id: "openai.code_interpreter",
+  name: "code_interpreter",
+  inputSchema: codeInterpreterInputSchema,
+  outputSchema: codeInterpreterOutputSchema,
+})
+
+export const codeInterpreter = (
+  args: CodeInterpreterArgs = {}, // default
+) => {
+  return codeInterpreterToolFactory(args)
+}
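The `container` argument accepts either an existing container id or an object naming files to attach, per `codeInterpreterArgsSchema`; a sketch with placeholder ids:

```ts
const reuseExisting = codeInterpreter({ container: "cntr_123" }) // placeholder container id
const autoWithFiles = codeInterpreter({ container: { fileIds: ["file_abc"] } }) // auto container with a file

// While streaming, the provider assembles the tool input incrementally as
// {"containerId":"...","code":"..."} — see the code_interpreter branches in the
// stream transform earlier in this diff.
```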
diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/file-search.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/file-search.ts
new file mode 100644
index 00000000000..1fccddaf63b
--- /dev/null
+++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/file-search.ts
@@ -0,0 +1,128 @@
+import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
+import type {
+  OpenAIResponsesFileSearchToolComparisonFilter,
+  OpenAIResponsesFileSearchToolCompoundFilter,
+} from "../openai-responses-api-types"
+import { z } from "zod/v4"
+
+const comparisonFilterSchema = z.object({
+  key: z.string(),
+  type: z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
+  value: z.union([z.string(), z.number(), z.boolean()]),
+})
+
+const compoundFilterSchema: z.ZodType<OpenAIResponsesFileSearchToolCompoundFilter> = z.object({
+  type: z.enum(["and", "or"]),
+  filters: z.array(z.union([comparisonFilterSchema, z.lazy(() => compoundFilterSchema)])),
+})
+
+export const fileSearchArgsSchema = z.object({
+  vectorStoreIds: z.array(z.string()),
+  maxNumResults: z.number().optional(),
+  ranking: z
+    .object({
+      ranker: z.string().optional(),
+      scoreThreshold: z.number().optional(),
+    })
+    .optional(),
+  filters: z.union([comparisonFilterSchema, compoundFilterSchema]).optional(),
+})
+
+export const fileSearchOutputSchema = z.object({
+  queries: z.array(z.string()),
+  results: z
+    .array(
+      z.object({
+        attributes: z.record(z.string(), z.unknown()),
+        fileId: z.string(),
+        filename: z.string(),
+        score: z.number(),
+        text: z.string(),
+      }),
+    )
+    .nullable(),
+})
+
+export const fileSearch = createProviderDefinedToolFactoryWithOutputSchema<
+  {},
+  {
+    /**
+     * The search query to execute.
+     */
+    queries: string[]
+
+    /**
+     * The results of the file search tool call.
+     */
+    results:
+      | null
+      | {
+          /**
+           * Set of 16 key-value pairs that can be attached to an object.
+           * This can be useful for storing additional information about the object
+           * in a structured format, and querying for objects via API or the dashboard.
+           * Keys are strings with a maximum length of 64 characters.
+           * Values are strings with a maximum length of 512 characters, booleans, or numbers.
+           */
+          attributes: Record<string, unknown>
+
+          /**
+           * The unique ID of the file.
+           */
+          fileId: string
+
+          /**
+           * The name of the file.
+           */
+          filename: string
+
+          /**
+           * The relevance score of the file - a value between 0 and 1.
+           */
+          score: number
+
+          /**
+           * The text that was retrieved from the file.
+           */
+          text: string
+        }[]
+  },
+  {
+    /**
+     * List of vector store IDs to search through.
+     */
+    vectorStoreIds: string[]
+
+    /**
+     * Maximum number of search results to return. Defaults to 10.
+     */
+    maxNumResults?: number
+
+    /**
+     * Ranking options for the search.
+     */
+    ranking?: {
+      /**
+       * The ranker to use for the file search.
+       */
+      ranker?: string
+
+      /**
+       * The score threshold for the file search, a number between 0 and 1.
+       * Numbers closer to 1 will attempt to return only the most relevant results,
+       * but may return fewer results.
+       */
+      scoreThreshold?: number
+    }
+
+    /**
+     * A filter to apply.
+     */
+    filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter
+  }
+>({
+  id: "openai.file_search",
+  name: "file_search",
+  inputSchema: z.object({}),
+  outputSchema: fileSearchOutputSchema,
+})
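A sketch of a `file_search` tool with a compound filter matching `fileSearchArgsSchema`; the vector store id and attribute keys are placeholders:

```ts
const searchDocs = fileSearch({
  vectorStoreIds: ["vs_123"], // placeholder vector store id
  maxNumResults: 5,
  ranking: { scoreThreshold: 0.5 },
  filters: {
    type: "and",
    filters: [
      { key: "status", type: "eq", value: "published" },
      { key: "year", type: "gte", value: 2024 },
    ],
  },
})
```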
diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/image-generation.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/image-generation.ts
new file mode 100644
index 00000000000..7367a4802b7
--- /dev/null
+++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/image-generation.ts
@@ -0,0 +1,115 @@
+import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
+import { z } from "zod/v4"
+
+export const imageGenerationArgsSchema = z
+  .object({
+    background: z.enum(["auto", "opaque", "transparent"]).optional(),
+    inputFidelity: z.enum(["low", "high"]).optional(),
+    inputImageMask: z
+      .object({
+        fileId: z.string().optional(),
+        imageUrl: z.string().optional(),
+      })
+      .optional(),
+    model: z.string().optional(),
+    moderation: z.enum(["auto"]).optional(),
+    outputCompression: z.number().int().min(0).max(100).optional(),
+    outputFormat: z.enum(["png", "jpeg", "webp"]).optional(),
+    partialImages: z.number().int().min(0).max(3).optional(),
+    quality: z.enum(["auto", "low", "medium", "high"]).optional(),
+    size: z.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional(),
+  })
+  .strict()
+
+export const imageGenerationOutputSchema = z.object({
+  result: z.string(),
+})
+
+type ImageGenerationArgs = {
+  /**
+   * Background type for the generated image. Default is 'auto'.
+   */
+  background?: "auto" | "opaque" | "transparent"
+
+  /**
+   * Input fidelity for the generated image. Default is 'low'.
+   */
+  inputFidelity?: "low" | "high"
+
+  /**
+   * Optional mask for inpainting.
+   * Contains image_url (string, optional) and file_id (string, optional).
+   */
+  inputImageMask?: {
+    /**
+     * File ID for the mask image.
+     */
+    fileId?: string
+
+    /**
+     * Base64-encoded mask image.
+     */
+    imageUrl?: string
+  }
+
+  /**
+   * The image generation model to use. Default: gpt-image-1.
+   */
+  model?: string
+
+  /**
+   * Moderation level for the generated image. Default: auto.
+   */
+  moderation?: "auto"
+
+  /**
+   * Compression level for the output image. Default: 100.
+   */
+  outputCompression?: number
+
+  /**
+   * The output format of the generated image. One of png, webp, or jpeg.
+   * Default: png
+   */
+  outputFormat?: "png" | "jpeg" | "webp"
+
+  /**
+   * Number of partial images to generate in streaming mode, from 0 (default value) to 3.
+   */
+  partialImages?: number
+
+  /**
+   * The quality of the generated image.
+   * One of low, medium, high, or auto. Default: auto.
+   */
+  quality?: "auto" | "low" | "medium" | "high"
+
+  /**
+   * The size of the generated image.
+   * One of 1024x1024, 1024x1536, 1536x1024, or auto.
+   * Default: auto.
+   */
+  size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024"
+}
+
+const imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
+  {},
+  {
+    /**
+     * The generated image encoded in base64.
+     */
+    result: string
+  },
+  ImageGenerationArgs
+>({
+  id: "openai.image_generation",
+  name: "image_generation",
+  inputSchema: z.object({}),
+  outputSchema: imageGenerationOutputSchema,
+})
+
+export const imageGeneration = (
+  args: ImageGenerationArgs = {}, // default
+) => {
+  return imageGenerationToolFactory(args)
+}
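A sketch of an image tool configured for partial-image streaming, staying within the bounds enforced by `imageGenerationArgsSchema` above:

```ts
const sketchImages = imageGeneration({
  size: "1024x1024",
  quality: "low",
  outputFormat: "webp",
  // 0-3; each partial arrives as a response.image_generation_call.partial_image chunk
  partialImages: 2,
})
```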
diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/local-shell.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/local-shell.ts
new file mode 100644
index 00000000000..4ceca0d6cd8
--- /dev/null
+++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/local-shell.ts
@@ -0,0 +1,65 @@
+import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
+import { z } from "zod/v4"
+
+export const localShellInputSchema = z.object({
+  action: z.object({
+    type: z.literal("exec"),
+    command: z.array(z.string()),
+    timeoutMs: z.number().optional(),
+    user: z.string().optional(),
+    workingDirectory: z.string().optional(),
+    env: z.record(z.string(), z.string()).optional(),
+  }),
+})
+
+export const localShellOutputSchema = z.object({
+  output: z.string(),
+})
+
+export const localShell = createProviderDefinedToolFactoryWithOutputSchema<
+  {
+    /**
+     * Execute a shell command on the server.
+     */
+    action: {
+      type: "exec"
+
+      /**
+       * The command to run.
+       */
+      command: string[]
+
+      /**
+       * Optional timeout in milliseconds for the command.
+       */
+      timeoutMs?: number
+
+      /**
+       * Optional user to run the command as.
+       */
+      user?: string
+
+      /**
+       * Optional working directory to run the command in.
+       */
+      workingDirectory?: string
+
+      /**
+       * Environment variables to set for the command.
+       */
+      env?: Record<string, string>
+    }
+  },
+  {
+    /**
+     * The output of the local shell tool call.
+     */
+    output: string
+  },
+  {}
+>({
+  id: "openai.local_shell",
+  name: "local_shell",
+  inputSchema: localShellInputSchema,
+  outputSchema: localShellOutputSchema,
+})
diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/web-search-preview.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/web-search-preview.ts
new file mode 100644
index 00000000000..69ea65ef0e5
--- /dev/null
+++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/web-search-preview.ts
@@ -0,0 +1,104 @@
+import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils"
+import { z } from "zod/v4"
+
+// Args validation schema
+export const webSearchPreviewArgsSchema = z.object({
+  /**
+   * Search context size to use for the web search.
+   * - high: Most comprehensive context, highest cost, slower response
+   * - medium: Balanced context, cost, and latency (default)
+   * - low: Least context, lowest cost, fastest response
+   */
+  searchContextSize: z.enum(["low", "medium", "high"]).optional(),
+
+  /**
+   * User location information to provide geographically relevant search results.
+   */
+  userLocation: z
+    .object({
+      /**
+       * Type of location (always 'approximate')
+       */
+      type: z.literal("approximate"),
+      /**
+       * Two-letter ISO country code (e.g., 'US', 'GB')
+       */
+      country: z.string().optional(),
+      /**
+       * City name (free text, e.g., 'Minneapolis')
+       */
+      city: z.string().optional(),
+      /**
+       * Region name (free text, e.g., 'Minnesota')
+       */
+      region: z.string().optional(),
+      /**
+       * IANA timezone (e.g., 'America/Chicago')
+       */
+      timezone: z.string().optional(),
+    })
+    .optional(),
+})
+
+export const webSearchPreview = createProviderDefinedToolFactory<
+  {
+    // Web search doesn't take input parameters - it's controlled by the prompt
+  },
+  {
+    /**
+     * Search context size to use for the web search.
+     * - high: Most comprehensive context, highest cost, slower response
+     * - medium: Balanced context, cost, and latency (default)
+     * - low: Least context, lowest cost, fastest response
+     */
+    searchContextSize?: "low" | "medium" | "high"
+
+    /**
+     * User location information to provide geographically relevant search results.
+     */
+    userLocation?: {
+      /**
+       * Type of location (always 'approximate')
+       */
+      type: "approximate"
+      /**
+       * Two-letter ISO country code (e.g., 'US', 'GB')
+       */
+      country?: string
+      /**
+       * City name (free text, e.g., 'Minneapolis')
+       */
+      city?: string
+      /**
+       * Region name (free text, e.g., 'Minnesota')
+       */
+      region?: string
+      /**
+       * IANA timezone (e.g., 'America/Chicago')
+       */
+      timezone?: string
+    }
+  }
+>({
+  id: "openai.web_search_preview",
+  name: "web_search_preview",
+  inputSchema: z.object({
+    action: z
+      .discriminatedUnion("type", [
+        z.object({
+          type: z.literal("search"),
+          query: z.string().nullish(),
+        }),
+        z.object({
+          type: z.literal("open_page"),
+          url: z.string(),
+        }),
+        z.object({
+          type: z.literal("find"),
+          url: z.string(),
+          pattern: z.string(),
+        }),
+      ])
+      .nullish(),
+  }),
+})
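A sketch of a geo-scoped preview search tool; the location values are the examples from the jsdoc in `webSearchPreviewArgsSchema` above:

```ts
const localizedSearch = webSearchPreview({
  searchContextSize: "medium",
  userLocation: {
    type: "approximate",
    country: "US",
    city: "Minneapolis",
    region: "Minnesota",
    timezone: "America/Chicago",
  },
})
```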
diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/web-search.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/web-search.ts
new file mode 100644
index 00000000000..89622ad3cea
--- /dev/null
+++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/tool/web-search.ts
@@ -0,0 +1,103 @@
+import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils"
+import { z } from "zod/v4"
+
+export const webSearchArgsSchema = z.object({
+  filters: z
+    .object({
+      allowedDomains: z.array(z.string()).optional(),
+    })
+    .optional(),
+
+  searchContextSize: z.enum(["low", "medium", "high"]).optional(),
+
+  userLocation: z
+    .object({
+      type: z.literal("approximate"),
+      country: z.string().optional(),
+      city: z.string().optional(),
+      region: z.string().optional(),
+      timezone: z.string().optional(),
+    })
+    .optional(),
+})
+
+export const webSearchToolFactory = createProviderDefinedToolFactory<
+  {
+    // Web search doesn't take input parameters - it's controlled by the prompt
+  },
+  {
+    /**
+     * Filters for the search.
+     */
+    filters?: {
+      /**
+       * Allowed domains for the search.
+       * If not provided, all domains are allowed.
+       * Subdomains of the provided domains are allowed as well.
+       */
+      allowedDomains?: string[]
+    }
+
+    /**
+     * Search context size to use for the web search.
+     * - high: Most comprehensive context, highest cost, slower response
+     * - medium: Balanced context, cost, and latency (default)
+     * - low: Least context, lowest cost, fastest response
+     */
+    searchContextSize?: "low" | "medium" | "high"
+
+    /**
+     * User location information to provide geographically relevant search results.
+     */
+    userLocation?: {
+      /**
+       * Type of location (always 'approximate')
+       */
+      type: "approximate"
+      /**
+       * Two-letter ISO country code (e.g., 'US', 'GB')
+       */
+      country?: string
+      /**
+       * City name (free text, e.g., 'Minneapolis')
+       */
+      city?: string
+      /**
+       * Region name (free text, e.g., 'Minnesota')
+       */
+      region?: string
+      /**
+       * IANA timezone (e.g., 'America/Chicago')
+       */
+      timezone?: string
+    }
+  }
+>({
+  id: "openai.web_search",
+  name: "web_search",
+  inputSchema: z.object({
+    action: z
+      .discriminatedUnion("type", [
+        z.object({
+          type: z.literal("search"),
+          query: z.string().nullish(),
+        }),
+        z.object({
+          type: z.literal("open_page"),
+          url: z.string(),
+        }),
+        z.object({
+          type: z.literal("find"),
+          url: z.string(),
+          pattern: z.string(),
+        }),
+      ])
+      .nullish(),
+  }),
+})
+
+export const webSearch = (
+  args: Parameters<typeof webSearchToolFactory>[0] = {}, // default
+) => {
+  return webSearchToolFactory(args)
+}
diff --git a/packages/plugin/package.json b/packages/plugin/package.json
index 409056da177..6a9f7c4b525 100644
--- a/packages/plugin/package.json
+++ b/packages/plugin/package.json
@@ -24,4 +24,4 @@
     "typescript": "catalog:",
     "@typescript/native-preview": "catalog:"
   }
-}
\ No newline at end of file
+}
diff --git a/packages/sdk/js/package.json b/packages/sdk/js/package.json
index bbc122db21a..18096746c5f 100644
--- a/packages/sdk/js/package.json
+++ b/packages/sdk/js/package.json
@@ -26,4 +26,4 @@
   "publishConfig": {
     "directory": "dist"
   }
-}
\ No newline at end of file
+}