Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions packages/types/src/providers/gemini.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ export const geminiModels = {
supportsPromptCache: true,
supportsReasoningEffort: ["low", "high"],
reasoningEffort: "low",
includedTools: ["write_file", "edit_file"],
supportsTemperature: true,
defaultTemperature: 1,
inputPrice: 4.0,
Expand All @@ -41,6 +42,7 @@ export const geminiModels = {
supportsPromptCache: true,
supportsReasoningEffort: ["minimal", "low", "medium", "high"],
reasoningEffort: "medium",
includedTools: ["write_file", "edit_file"],
supportsTemperature: true,
defaultTemperature: 1,
inputPrice: 0.3,
Expand All @@ -56,6 +58,7 @@ export const geminiModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
outputPrice: 15,
cacheReadsPrice: 0.625,
Expand Down Expand Up @@ -85,6 +88,7 @@ export const geminiModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
outputPrice: 15,
cacheReadsPrice: 0.625,
Expand Down Expand Up @@ -113,6 +117,7 @@ export const geminiModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
outputPrice: 15,
cacheReadsPrice: 0.625,
Expand All @@ -139,6 +144,7 @@ export const geminiModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
outputPrice: 15,
cacheReadsPrice: 0.625,
Expand Down Expand Up @@ -169,6 +175,7 @@ export const geminiModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.3,
outputPrice: 2.5,
cacheReadsPrice: 0.075,
Expand All @@ -183,6 +190,7 @@ export const geminiModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.3,
outputPrice: 2.5,
cacheReadsPrice: 0.075,
Expand All @@ -197,6 +205,7 @@ export const geminiModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.3,
outputPrice: 2.5,
cacheReadsPrice: 0.075,
Expand All @@ -213,6 +222,7 @@ export const geminiModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.1,
outputPrice: 0.4,
cacheReadsPrice: 0.025,
Expand All @@ -227,6 +237,7 @@ export const geminiModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.1,
outputPrice: 0.4,
cacheReadsPrice: 0.025,
Expand Down
19 changes: 19 additions & 0 deletions packages/types/src/providers/vertex.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ export const vertexModels = {
supportsPromptCache: true,
supportsReasoningEffort: ["low", "high"],
reasoningEffort: "low",
includedTools: ["write_file", "edit_file"],
supportsTemperature: true,
defaultTemperature: 1,
inputPrice: 4.0,
Expand All @@ -41,6 +42,7 @@ export const vertexModels = {
supportsPromptCache: true,
supportsReasoningEffort: ["minimal", "low", "medium", "high"],
reasoningEffort: "medium",
includedTools: ["write_file", "edit_file"],
supportsTemperature: true,
defaultTemperature: 1,
inputPrice: 0.3,
Expand All @@ -55,6 +57,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.15,
outputPrice: 3.5,
maxThinkingTokens: 24_576,
Expand All @@ -68,6 +71,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.15,
outputPrice: 0.6,
},
Expand All @@ -78,6 +82,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.3,
outputPrice: 2.5,
cacheReadsPrice: 0.075,
Expand All @@ -92,6 +97,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: false,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.15,
outputPrice: 3.5,
maxThinkingTokens: 24_576,
Expand All @@ -105,6 +111,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: false,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.15,
outputPrice: 0.6,
},
Expand All @@ -115,6 +122,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 2.5,
outputPrice: 15,
},
Expand All @@ -125,6 +133,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 2.5,
outputPrice: 15,
},
Expand All @@ -135,6 +144,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 2.5,
outputPrice: 15,
maxThinkingTokens: 32_768,
Expand All @@ -147,6 +157,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 2.5,
outputPrice: 15,
maxThinkingTokens: 32_768,
Expand Down Expand Up @@ -174,6 +185,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: false,
includedTools: ["write_file", "edit_file"],
inputPrice: 0,
outputPrice: 0,
},
Expand All @@ -184,6 +196,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: false,
includedTools: ["write_file", "edit_file"],
inputPrice: 0,
outputPrice: 0,
},
Expand All @@ -194,6 +207,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.15,
outputPrice: 0.6,
},
Expand All @@ -204,6 +218,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: false,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.075,
outputPrice: 0.3,
},
Expand All @@ -214,6 +229,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: false,
includedTools: ["write_file", "edit_file"],
inputPrice: 0,
outputPrice: 0,
},
Expand All @@ -224,6 +240,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.075,
outputPrice: 0.3,
},
Expand All @@ -234,6 +251,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: false,
includedTools: ["write_file", "edit_file"],
inputPrice: 1.25,
outputPrice: 5,
},
Expand Down Expand Up @@ -381,6 +399,7 @@ export const vertexModels = {
supportsNativeTools: true,
defaultToolProtocol: "native",
supportsPromptCache: true,
includedTools: ["write_file", "edit_file"],
inputPrice: 0.1,
outputPrice: 0.4,
cacheReadsPrice: 0.025,
Expand Down
12 changes: 3 additions & 9 deletions src/api/providers/openrouter.ts
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ import { BaseProvider } from "./base-provider"
import type { ApiHandlerCreateMessageMetadata, SingleCompletionHandler } from "../index"
import { handleOpenAIError } from "./utils/openai-error-handler"
import { generateImageWithProvider, ImageGenerationResult } from "./utils/image-generation"
import { applyRouterToolPreferences } from "./utils/router-tool-preferences"

// Add custom interface for OpenRouter params.
type OpenRouterChatCompletionParams = OpenAI.Chat.ChatCompletionCreateParams & {
Expand Down Expand Up @@ -528,15 +529,8 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
info = this.endpoints[this.options.openRouterSpecificProvider]
}

// For OpenAI models via OpenRouter, exclude write_to_file and apply_diff, and include apply_patch
// This matches the behavior of the native OpenAI provider
if (id.startsWith("openai/")) {
info = {
...info,
excludedTools: [...new Set([...(info.excludedTools || []), "apply_diff", "write_to_file"])],
includedTools: [...new Set([...(info.includedTools || []), "apply_patch"])],
}
}
// Apply tool preferences for models accessed through routers (OpenAI, Gemini)
info = applyRouterToolPreferences(id, info)

const isDeepSeekR1 = id.startsWith("deepseek/deepseek-r1") || id === "perplexity/sonar-reasoning"

Expand Down
6 changes: 5 additions & 1 deletion src/api/providers/requesty.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ import { BaseProvider } from "./base-provider"
import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
import { toRequestyServiceUrl } from "../../shared/utils/requesty"
import { handleOpenAIError } from "./utils/openai-error-handler"
import { applyRouterToolPreferences } from "./utils/router-tool-preferences"

// Requesty usage includes an extra field for Anthropic use cases.
// Safely cast the prompt token details section to the appropriate structure.
Expand Down Expand Up @@ -78,7 +79,10 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan

override getModel() {
const id = this.options.requestyModelId ?? requestyDefaultModelId
const info = this.models[id] ?? requestyDefaultModelInfo
let info = this.models[id] ?? requestyDefaultModelInfo

// Apply tool preferences for models accessed through routers (OpenAI, Gemini)
info = applyRouterToolPreferences(id, info)

const params = getModelParams({
format: "anthropic",
Expand Down
40 changes: 40 additions & 0 deletions src/api/providers/utils/router-tool-preferences.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
import type { ModelInfo } from "@roo-code/types"

/**
 * Adjust a model's tool lists when the model is reached through a dynamic
 * router (OpenRouter, Requesty) rather than its native provider.
 *
 * Tool preferences mirrored from the native providers:
 * - OpenAI-family models: prefer `apply_patch`; exclude `apply_diff` and
 *   `write_to_file`.
 * - Gemini-family models: additionally include `write_file` and `edit_file`.
 *
 * The input object is never mutated; when neither family matches, the very
 * same `info` reference is returned unchanged.
 *
 * @param modelId The router model identifier (e.g. "openai/gpt-4", "google/gemini-2.5-pro")
 * @param info The model info to adjust
 * @returns The original `info` if no preference applies, otherwise a new object with the preferences merged in
 */
export function applyRouterToolPreferences(modelId: string, info: ModelInfo): ModelInfo {
	// NOTE(review): substring matching is intentionally broad — router ids carry
	// varying prefixes ("openai/…", "google/gemini-…"), so a prefix check would miss them.
	const isOpenAiModel = modelId.includes("openai")
	const isGeminiModel = modelId.includes("gemini")

	// Fast path: nothing to adjust, hand back the caller's object untouched.
	if (!isOpenAiModel && !isGeminiModel) {
		return info
	}

	// Merge additions into an existing tool list, dropping duplicates while
	// keeping the original ordering (Set iterates in insertion order).
	const mergeTools = (current: string[] | undefined, additions: string[]): string[] => [
		...new Set([...(current ?? []), ...additions]),
	]

	let adjusted: ModelInfo = info

	if (isOpenAiModel) {
		// Matches the native OpenAI provider: swap diff/write tools for apply_patch.
		adjusted = {
			...adjusted,
			excludedTools: mergeTools(adjusted.excludedTools, ["apply_diff", "write_to_file"]),
			includedTools: mergeTools(adjusted.includedTools, ["apply_patch"]),
		}
	}

	if (isGeminiModel) {
		// Matches the native Gemini provider: make write_file/edit_file available.
		adjusted = {
			...adjusted,
			includedTools: mergeTools(adjusted.includedTools, ["write_file", "edit_file"]),
		}
	}

	return adjusted
}
Loading