diff --git a/packages/types/src/providers/gemini.ts b/packages/types/src/providers/gemini.ts
index 61048f50f0f..3f69dfdb59c 100644
--- a/packages/types/src/providers/gemini.ts
+++ b/packages/types/src/providers/gemini.ts
@@ -15,6 +15,7 @@ export const geminiModels = {
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["low", "high"],
 		reasoningEffort: "low",
+		includedTools: ["write_file", "edit_file"],
 		supportsTemperature: true,
 		defaultTemperature: 1,
 		inputPrice: 4.0,
@@ -41,6 +42,7 @@ export const geminiModels = {
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
 		reasoningEffort: "medium",
+		includedTools: ["write_file", "edit_file"],
 		supportsTemperature: true,
 		defaultTemperature: 1,
 		inputPrice: 0.3,
@@ -56,6 +58,7 @@ export const geminiModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
 		outputPrice: 15,
 		cacheReadsPrice: 0.625,
@@ -85,6 +88,7 @@ export const geminiModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
 		outputPrice: 15,
 		cacheReadsPrice: 0.625,
@@ -113,6 +117,7 @@ export const geminiModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
 		outputPrice: 15,
 		cacheReadsPrice: 0.625,
@@ -139,6 +144,7 @@ export const geminiModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
 		outputPrice: 15,
 		cacheReadsPrice: 0.625,
@@ -169,6 +175,7 @@ export const geminiModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.3,
 		outputPrice: 2.5,
 		cacheReadsPrice: 0.075,
@@ -183,6 +190,7 @@ export const geminiModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.3,
 		outputPrice: 2.5,
 		cacheReadsPrice: 0.075,
@@ -197,6 +205,7 @@ export const geminiModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.3,
 		outputPrice: 2.5,
 		cacheReadsPrice: 0.075,
@@ -213,6 +222,7 @@ export const geminiModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.1,
 		outputPrice: 0.4,
 		cacheReadsPrice: 0.025,
@@ -227,6 +237,7 @@ export const geminiModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.1,
 		outputPrice: 0.4,
 		cacheReadsPrice: 0.025,
diff --git a/packages/types/src/providers/vertex.ts b/packages/types/src/providers/vertex.ts
index 373d180cd6c..db010b6c682 100644
--- a/packages/types/src/providers/vertex.ts
+++ b/packages/types/src/providers/vertex.ts
@@ -15,6 +15,7 @@ export const vertexModels = {
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["low", "high"],
 		reasoningEffort: "low",
+		includedTools: ["write_file", "edit_file"],
 		supportsTemperature: true,
 		defaultTemperature: 1,
 		inputPrice: 4.0,
@@ -41,6 +42,7 @@ export const vertexModels = {
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
 		reasoningEffort: "medium",
+		includedTools: ["write_file", "edit_file"],
 		supportsTemperature: true,
 		defaultTemperature: 1,
 		inputPrice: 0.3,
@@ -55,6 +57,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.15,
 		outputPrice: 3.5,
 		maxThinkingTokens: 24_576,
@@ -68,6 +71,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.15,
 		outputPrice: 0.6,
 	},
@@ -78,6 +82,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.3,
 		outputPrice: 2.5,
 		cacheReadsPrice: 0.075,
@@ -92,6 +97,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: false,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.15,
 		outputPrice: 3.5,
 		maxThinkingTokens: 24_576,
@@ -105,6 +111,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: false,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.15,
 		outputPrice: 0.6,
 	},
@@ -115,6 +122,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 2.5,
 		outputPrice: 15,
 	},
@@ -125,6 +133,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 2.5,
 		outputPrice: 15,
 	},
@@ -135,6 +144,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 2.5,
 		outputPrice: 15,
 		maxThinkingTokens: 32_768,
@@ -147,6 +157,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 2.5,
 		outputPrice: 15,
 		maxThinkingTokens: 32_768,
@@ -174,6 +185,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: false,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0,
 		outputPrice: 0,
 	},
@@ -184,6 +196,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: false,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0,
 		outputPrice: 0,
 	},
@@ -194,6 +207,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.15,
 		outputPrice: 0.6,
 	},
@@ -204,6 +218,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: false,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.075,
 		outputPrice: 0.3,
 	},
@@ -214,6 +229,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: false,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0,
 		outputPrice: 0,
 	},
@@ -224,6 +240,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.075,
 		outputPrice: 0.3,
 	},
@@ -234,6 +251,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: false,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 1.25,
 		outputPrice: 5,
 	},
@@ -381,6 +399,7 @@ export const vertexModels = {
 		supportsNativeTools: true,
 		defaultToolProtocol: "native",
 		supportsPromptCache: true,
+		includedTools: ["write_file", "edit_file"],
 		inputPrice: 0.1,
 		outputPrice: 0.4,
 		cacheReadsPrice: 0.025,
diff --git a/src/api/providers/openrouter.ts b/src/api/providers/openrouter.ts
index 20a0f58b994..4f4cc6184c7 100644
--- a/src/api/providers/openrouter.ts
+++ b/src/api/providers/openrouter.ts
@@ -35,6 +35,7 @@ import { BaseProvider } from "./base-provider"
 import type { ApiHandlerCreateMessageMetadata, SingleCompletionHandler } from "../index"
 import { handleOpenAIError } from "./utils/openai-error-handler"
 import { generateImageWithProvider, ImageGenerationResult } from "./utils/image-generation"
+import { applyRouterToolPreferences } from "./utils/router-tool-preferences"

 // Add custom interface for OpenRouter params.
 type OpenRouterChatCompletionParams = OpenAI.Chat.ChatCompletionCreateParams & {
@@ -528,15 +529,8 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			info = this.endpoints[this.options.openRouterSpecificProvider]
 		}

-		// For OpenAI models via OpenRouter, exclude write_to_file and apply_diff, and include apply_patch
-		// This matches the behavior of the native OpenAI provider
-		if (id.startsWith("openai/")) {
-			info = {
-				...info,
-				excludedTools: [...new Set([...(info.excludedTools || []), "apply_diff", "write_to_file"])],
-				includedTools: [...new Set([...(info.includedTools || []), "apply_patch"])],
-			}
-		}
+		// Apply tool preferences for OpenAI and Gemini model families accessed through routers
+		info = applyRouterToolPreferences(id, info)

 		const isDeepSeekR1 = id.startsWith("deepseek/deepseek-r1") || id === "perplexity/sonar-reasoning"

diff --git a/src/api/providers/requesty.ts b/src/api/providers/requesty.ts
index 3668265669a..43b32793183 100644
--- a/src/api/providers/requesty.ts
+++ b/src/api/providers/requesty.ts
@@ -18,6 +18,7 @@ import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { toRequestyServiceUrl } from "../../shared/utils/requesty"
 import { handleOpenAIError } from "./utils/openai-error-handler"
+import { applyRouterToolPreferences } from "./utils/router-tool-preferences"

 // Requesty usage includes an extra field for Anthropic use cases.
 // Safely cast the prompt token details section to the appropriate structure.
@@ -78,7 +79,10 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan

 	override getModel() {
 		const id = this.options.requestyModelId ?? requestyDefaultModelId
-		const info = this.models[id] ?? requestyDefaultModelInfo
+		let info = this.models[id] ?? requestyDefaultModelInfo
+
+		// Apply tool preferences for OpenAI and Gemini model families accessed through routers
+		info = applyRouterToolPreferences(id, info)

 		const params = getModelParams({
 			format: "anthropic",
diff --git a/src/api/providers/utils/router-tool-preferences.ts b/src/api/providers/utils/router-tool-preferences.ts
new file mode 100644
index 00000000000..40f8518e3c7
--- /dev/null
+++ b/src/api/providers/utils/router-tool-preferences.ts
@@ -0,0 +1,40 @@
+import type { ModelInfo } from "@roo-code/types"
+
+/**
+ * Apply tool preferences for models accessed through dynamic routers (OpenRouter, Requesty).
+ *
+ * Different model families perform better with specific tools:
+ * - OpenAI models: Better results with apply_patch instead of apply_diff/write_to_file
+ * - Gemini models: Higher quality results with write_file and edit_file
+ *
+ * This function returns a new model info object with these preferences applied
+ * consistently across all dynamic router providers.
+ *
+ * @param modelId The model identifier (e.g., "openai/gpt-4", "google/gemini-2.5-pro")
+ * @param info The original model info object
+ * @returns A new model info object with tool preferences applied
+ */
+export function applyRouterToolPreferences(modelId: string, info: ModelInfo): ModelInfo {
+	let result = info
+
+	// For OpenAI models via routers, exclude write_to_file and apply_diff, and include apply_patch
+	// This matches the behavior of the native OpenAI provider
+	if (modelId.includes("openai")) {
+		result = {
+			...result,
+			excludedTools: [...new Set([...(result.excludedTools || []), "apply_diff", "write_to_file"])],
+			includedTools: [...new Set([...(result.includedTools || []), "apply_patch"])],
+		}
+	}
+
+	// For Gemini models via routers, include write_file and edit_file
+	// This matches the behavior of the native Gemini provider
+	if (modelId.includes("gemini")) {
+		result = {
+			...result,
+			includedTools: [...new Set([...(result.includedTools || []), "write_file", "edit_file"])],
+		}
+	}
+
+	return result
+}
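
Not part of the patch, but for reviewers: a minimal usage sketch of the new helper showing the merge semantics the diff defines. The model IDs and the abbreviated `baseInfo` object below are hypothetical, chosen only to illustrate each branch:

```typescript
import type { ModelInfo } from "@roo-code/types"
import { applyRouterToolPreferences } from "./router-tool-preferences"

// Hypothetical base info, abbreviated (fields elided via cast) — e.g. what a
// router's /models endpoint might return for a given model id.
const baseInfo = {
	contextWindow: 128_000,
	supportsPromptCache: false,
	includedTools: ["apply_patch"], // pre-existing entries are preserved
} as ModelInfo

// OpenAI-family id: apply_diff/write_to_file are excluded, apply_patch included.
const openAiInfo = applyRouterToolPreferences("openai/gpt-4", baseInfo)
// openAiInfo.excludedTools -> ["apply_diff", "write_to_file"]
// openAiInfo.includedTools -> ["apply_patch"] (the Set de-duplicates the repeat)

// Gemini-family id: write_file and edit_file are appended to includedTools.
const geminiInfo = applyRouterToolPreferences("google/gemini-2.5-pro", baseInfo)
// geminiInfo.includedTools -> ["apply_patch", "write_file", "edit_file"]

// Ids matching neither family come back unchanged (same object reference).
const otherInfo = applyRouterToolPreferences("anthropic/claude-sonnet-4", baseInfo)
// otherInfo === baseInfo
```

Note the substring checks (`includes` rather than the old `startsWith("openai/")`) mean Requesty-style ids without a `openai/` prefix still match; that widening is what lets one helper serve both routers.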
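A test sketch for the helper could pin down those branches. This assumes the repo's existing Vitest setup with globals enabled, and the spec path is hypothetical:

```typescript
// src/api/providers/utils/__tests__/router-tool-preferences.spec.ts (hypothetical path)
import type { ModelInfo } from "@roo-code/types"
import { applyRouterToolPreferences } from "../router-tool-preferences"

// Minimal base info with no tool preferences set (remaining fields elided via cast).
const base = { contextWindow: 128_000, supportsPromptCache: false } as ModelInfo

describe("applyRouterToolPreferences", () => {
	it("swaps OpenAI models to apply_patch", () => {
		const info = applyRouterToolPreferences("openai/gpt-4", base)
		expect(info.excludedTools).toEqual(["apply_diff", "write_to_file"])
		expect(info.includedTools).toEqual(["apply_patch"])
	})

	it("adds write_file and edit_file for Gemini models", () => {
		const info = applyRouterToolPreferences("google/gemini-2.5-pro", base)
		expect(info.includedTools).toEqual(["write_file", "edit_file"])
	})

	it("returns non-matching models unchanged", () => {
		expect(applyRouterToolPreferences("anthropic/claude-sonnet-4", base)).toBe(base)
	})
})
```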