diff --git a/packages/types/src/providers/bedrock.ts b/packages/types/src/providers/bedrock.ts index fb7abd2217..8dec37108a 100644 --- a/packages/types/src/providers/bedrock.ts +++ b/packages/types/src/providers/bedrock.ts @@ -20,6 +20,7 @@ export const bedrockModels = { supportsPromptCache: true, supportsReasoningBudget: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 3.0, outputPrice: 15.0, cacheWritesPrice: 3.75, @@ -104,6 +105,7 @@ export const bedrockModels = { supportsPromptCache: true, supportsReasoningBudget: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 3.0, outputPrice: 15.0, cacheWritesPrice: 3.75, @@ -119,6 +121,7 @@ export const bedrockModels = { supportsPromptCache: true, supportsReasoningBudget: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 15.0, outputPrice: 75.0, cacheWritesPrice: 18.75, @@ -134,6 +137,7 @@ export const bedrockModels = { supportsPromptCache: true, supportsReasoningBudget: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 5.0, outputPrice: 25.0, cacheWritesPrice: 6.25, @@ -149,6 +153,7 @@ export const bedrockModels = { supportsPromptCache: true, supportsReasoningBudget: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 15.0, outputPrice: 75.0, cacheWritesPrice: 18.75, @@ -164,6 +169,7 @@ export const bedrockModels = { supportsPromptCache: true, supportsReasoningBudget: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 3.0, outputPrice: 15.0, cacheWritesPrice: 3.75, @@ -178,6 +184,7 @@ export const bedrockModels = { supportsImages: true, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 3.0, outputPrice: 15.0, cacheWritesPrice: 3.75, @@ -192,6 +199,7 @@ export const bedrockModels = { supportsImages: false, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.8, outputPrice: 4.0, cacheWritesPrice: 1.0, @@ -207,6 +215,7 @@ export const bedrockModels = { supportsPromptCache: true, supportsReasoningBudget: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 1.0, outputPrice: 5.0, cacheWritesPrice: 1.25, // 5m cache writes @@ -221,6 +230,7 @@ export const bedrockModels = { supportsImages: true, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 3.0, outputPrice: 15.0, }, @@ -230,6 +240,7 @@ export const bedrockModels = { supportsImages: true, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 15.0, outputPrice: 75.0, }, @@ -239,6 +250,7 @@ export const bedrockModels = { supportsImages: true, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 3.0, outputPrice: 15.0, }, @@ -248,6 +260,7 @@ export const bedrockModels = { supportsImages: true, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.25, outputPrice: 1.25, }, @@ -257,6 +270,7 @@ export const bedrockModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 8.0, outputPrice: 24.0, description: "Claude 2.1", @@ -267,6 +281,7 @@ export const bedrockModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 8.0, outputPrice: 24.0, description: "Claude 2.0", @@ -277,6 +292,7 
@@ export const bedrockModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.8, outputPrice: 2.4, description: "Claude Instant", diff --git a/packages/types/src/providers/cerebras.ts b/packages/types/src/providers/cerebras.ts index 1ac8f63704..54b314b6db 100644 --- a/packages/types/src/providers/cerebras.ts +++ b/packages/types/src/providers/cerebras.ts @@ -12,6 +12,7 @@ export const cerebrasModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0, outputPrice: 0, description: "Highly intelligent general purpose model with up to 1,000 tokens/s", @@ -22,6 +23,7 @@ export const cerebrasModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0, outputPrice: 0, description: "Intelligent model with ~1400 tokens/s", @@ -32,6 +34,7 @@ export const cerebrasModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0, outputPrice: 0, description: "Powerful model with ~2600 tokens/s", @@ -42,6 +45,7 @@ export const cerebrasModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0, outputPrice: 0, description: "SOTA coding performance with ~2500 tokens/s", @@ -52,6 +56,7 @@ export const cerebrasModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0, outputPrice: 0, description: diff --git a/packages/types/src/providers/doubao.ts b/packages/types/src/providers/doubao.ts index c822d69f0b..c0187f7a75 100644 --- a/packages/types/src/providers/doubao.ts +++ b/packages/types/src/providers/doubao.ts @@ -9,6 +9,7 @@ export const doubaoModels = { supportsImages: true, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.0001, // $0.0001 per million tokens (cache miss) outputPrice: 0.0004, // $0.0004 per million tokens cacheWritesPrice: 0.0001, // $0.0001 per million tokens (cache miss) @@ -21,6 +22,7 @@ export const doubaoModels = { supportsImages: true, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.0002, // $0.0002 per million tokens outputPrice: 0.0008, // $0.0008 per million tokens cacheWritesPrice: 0.0002, // $0.0002 per million @@ -33,6 +35,7 @@ export const doubaoModels = { supportsImages: true, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.00015, // $0.00015 per million tokens outputPrice: 0.0006, // $0.0006 per million tokens cacheWritesPrice: 0.00015, // $0.00015 per million diff --git a/packages/types/src/providers/fireworks.ts b/packages/types/src/providers/fireworks.ts index 4e4f90dc72..1918826ca1 100644 --- a/packages/types/src/providers/fireworks.ts +++ b/packages/types/src/providers/fireworks.ts @@ -24,6 +24,7 @@ export const fireworksModels = { supportsImages: false, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.6, outputPrice: 2.5, cacheReadsPrice: 0.15, @@ -36,6 +37,7 @@ export const fireworksModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.6, outputPrice: 2.5, description: @@ -47,6 +49,7 @@ export const fireworksModels = { 
supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.3, outputPrice: 1.2, description: @@ -58,6 +61,7 @@ export const fireworksModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.22, outputPrice: 0.88, description: "Latest Qwen3 thinking model, competitive against the best closed source models in Jul 2025.", @@ -68,6 +72,7 @@ export const fireworksModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.45, outputPrice: 1.8, description: "Qwen3's most agentic code model to date.", @@ -78,6 +83,7 @@ export const fireworksModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 3, outputPrice: 8, description: @@ -89,6 +95,7 @@ export const fireworksModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.9, outputPrice: 0.9, description: @@ -100,6 +107,7 @@ export const fireworksModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.56, outputPrice: 1.68, description: @@ -111,6 +119,7 @@ export const fireworksModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.55, outputPrice: 2.19, description: @@ -122,6 +131,7 @@ export const fireworksModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.55, outputPrice: 2.19, description: @@ -133,6 +143,7 @@ export const fireworksModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.55, outputPrice: 2.19, description: @@ -144,6 +155,7 @@ export const fireworksModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.07, outputPrice: 0.3, description: @@ -155,6 +167,7 @@ export const fireworksModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.15, outputPrice: 0.6, description: diff --git a/packages/types/src/providers/gemini.ts b/packages/types/src/providers/gemini.ts index e7a73e6d0e..af1c4c70ee 100644 --- a/packages/types/src/providers/gemini.ts +++ b/packages/types/src/providers/gemini.ts @@ -11,6 +11,7 @@ export const geminiModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, supportsReasoningEffort: ["low", "high"], reasoningEffort: "low", @@ -37,6 +38,7 @@ export const geminiModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 2.5, // This is the pricing for prompts above 200k tokens. outputPrice: 15, @@ -65,6 +67,7 @@ export const geminiModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 2.5, // This is the pricing for prompts above 200k tokens. 
outputPrice: 15, @@ -92,6 +95,7 @@ export const geminiModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 2.5, // This is the pricing for prompts above 200k tokens. outputPrice: 15, @@ -117,6 +121,7 @@ export const geminiModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 2.5, // This is the pricing for prompts above 200k tokens. outputPrice: 15, @@ -146,6 +151,7 @@ export const geminiModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 0.3, outputPrice: 2.5, @@ -159,6 +165,7 @@ export const geminiModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 0.3, outputPrice: 2.5, @@ -172,6 +179,7 @@ export const geminiModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 0.3, outputPrice: 2.5, @@ -187,6 +195,7 @@ export const geminiModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 0.1, outputPrice: 0.4, @@ -200,6 +209,7 @@ export const geminiModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 0.1, outputPrice: 0.4, diff --git a/packages/types/src/providers/groq.ts b/packages/types/src/providers/groq.ts index 99b8ee427a..a8d8ca7c98 100644 --- a/packages/types/src/providers/groq.ts +++ b/packages/types/src/providers/groq.ts @@ -25,6 +25,7 @@ export const groqModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.05, outputPrice: 0.08, description: "Meta Llama 3.1 8B Instant model, 128K context.", @@ -35,6 +36,7 @@ export const groqModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.59, outputPrice: 0.79, description: "Meta Llama 3.3 70B Versatile model, 128K context.", @@ -45,6 +47,7 @@ export const groqModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.11, outputPrice: 0.34, description: "Meta Llama 4 Scout 17B Instruct model, 128K context.", @@ -82,6 +85,7 @@ export const groqModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.29, outputPrice: 0.59, description: "Alibaba Qwen 3 32B model, 128K context.", @@ -111,6 +115,7 @@ export const groqModels = { supportsImages: false, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.6, outputPrice: 2.5, cacheReadsPrice: 0.15, @@ -123,6 +128,7 @@ export const groqModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.15, outputPrice: 0.75, description: @@ -134,6 +140,7 @@ export const groqModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.1, outputPrice: 0.5, description: diff --git 
a/packages/types/src/providers/mistral.ts b/packages/types/src/providers/mistral.ts index 25546e5a42..4f12d288ee 100644 --- a/packages/types/src/providers/mistral.ts +++ b/packages/types/src/providers/mistral.ts @@ -12,6 +12,7 @@ export const mistralModels = { supportsImages: true, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 2.0, outputPrice: 5.0, }, @@ -21,6 +22,7 @@ export const mistralModels = { supportsImages: true, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.4, outputPrice: 2.0, }, @@ -30,6 +32,7 @@ export const mistralModels = { supportsImages: true, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.4, outputPrice: 2.0, }, @@ -39,6 +42,7 @@ export const mistralModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.3, outputPrice: 0.9, }, @@ -48,6 +52,7 @@ export const mistralModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 2.0, outputPrice: 6.0, }, @@ -57,6 +62,7 @@ export const mistralModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.1, outputPrice: 0.1, }, @@ -66,6 +72,7 @@ export const mistralModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.04, outputPrice: 0.04, }, @@ -75,6 +82,7 @@ export const mistralModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.2, outputPrice: 0.6, }, @@ -84,6 +92,7 @@ export const mistralModels = { supportsImages: true, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 2.0, outputPrice: 6.0, }, diff --git a/packages/types/src/providers/openai.ts b/packages/types/src/providers/openai.ts index 264c5aa40f..1e7b44a45a 100644 --- a/packages/types/src/providers/openai.ts +++ b/packages/types/src/providers/openai.ts @@ -10,6 +10,7 @@ export const openAiNativeModels = { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -29,6 +30,7 @@ export const openAiNativeModels = { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -51,6 +53,7 @@ export const openAiNativeModels = { maxTokens: 16_384, contextWindow: 128_000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -64,6 +67,7 @@ export const openAiNativeModels = { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -86,6 +90,7 @@ export const openAiNativeModels = { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -104,6 +109,7 @@ export const openAiNativeModels 
= { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -121,6 +127,7 @@ export const openAiNativeModels = { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -142,6 +149,7 @@ export const openAiNativeModels = { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -163,6 +171,7 @@ export const openAiNativeModels = { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -180,6 +189,7 @@ export const openAiNativeModels = { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -198,6 +208,7 @@ export const openAiNativeModels = { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -211,6 +222,7 @@ export const openAiNativeModels = { maxTokens: 32_768, contextWindow: 1_047_576, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -227,6 +239,7 @@ export const openAiNativeModels = { maxTokens: 32_768, contextWindow: 1_047_576, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -243,6 +256,7 @@ export const openAiNativeModels = { maxTokens: 32_768, contextWindow: 1_047_576, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -259,6 +273,7 @@ export const openAiNativeModels = { maxTokens: 100_000, contextWindow: 200_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: true, supportsPromptCache: true, inputPrice: 2.0, @@ -276,6 +291,7 @@ export const openAiNativeModels = { maxTokens: 100_000, contextWindow: 200_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: true, supportsPromptCache: true, inputPrice: 2.0, @@ -288,6 +304,7 @@ export const openAiNativeModels = { maxTokens: 100_000, contextWindow: 200_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: true, supportsPromptCache: true, inputPrice: 2.0, @@ -300,6 +317,7 @@ export const openAiNativeModels = { maxTokens: 100_000, contextWindow: 200_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: true, supportsPromptCache: true, inputPrice: 1.1, @@ -317,6 +335,7 @@ export const openAiNativeModels = { maxTokens: 100_000, contextWindow: 200_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: true, supportsPromptCache: true, inputPrice: 1.1, @@ -329,6 +348,7 @@ export const openAiNativeModels = { maxTokens: 100_000, contextWindow: 200_000, supportsNativeTools: true, 
+ defaultToolProtocol: "native", supportsImages: true, supportsPromptCache: true, inputPrice: 1.1, @@ -341,6 +361,7 @@ export const openAiNativeModels = { maxTokens: 100_000, contextWindow: 200_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: false, supportsPromptCache: true, inputPrice: 1.1, @@ -354,6 +375,7 @@ export const openAiNativeModels = { maxTokens: 100_000, contextWindow: 200_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: false, supportsPromptCache: true, inputPrice: 1.1, @@ -366,6 +388,7 @@ export const openAiNativeModels = { maxTokens: 100_000, contextWindow: 200_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: false, supportsPromptCache: true, inputPrice: 1.1, @@ -378,6 +401,7 @@ export const openAiNativeModels = { maxTokens: 100_000, contextWindow: 200_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: true, supportsPromptCache: true, inputPrice: 15, @@ -389,6 +413,7 @@ export const openAiNativeModels = { maxTokens: 32_768, contextWindow: 128_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: true, supportsPromptCache: true, inputPrice: 15, @@ -400,6 +425,7 @@ export const openAiNativeModels = { maxTokens: 65_536, contextWindow: 128_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: true, supportsPromptCache: true, inputPrice: 1.1, @@ -411,6 +437,7 @@ export const openAiNativeModels = { maxTokens: 16_384, contextWindow: 128_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: true, supportsPromptCache: true, inputPrice: 2.5, @@ -425,6 +452,7 @@ export const openAiNativeModels = { maxTokens: 16_384, contextWindow: 128_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: true, supportsPromptCache: true, inputPrice: 0.15, @@ -439,6 +467,7 @@ export const openAiNativeModels = { maxTokens: 16_384, contextWindow: 200_000, supportsNativeTools: true, + defaultToolProtocol: "native", supportsImages: false, supportsPromptCache: false, inputPrice: 1.5, @@ -453,6 +482,7 @@ export const openAiNativeModels = { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -474,6 +504,7 @@ export const openAiNativeModels = { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, @@ -495,6 +526,7 @@ export const openAiNativeModels = { maxTokens: 128000, contextWindow: 400000, supportsNativeTools: true, + defaultToolProtocol: "native", includedTools: ["apply_patch"], excludedTools: ["apply_diff", "write_to_file"], supportsImages: true, diff --git a/packages/types/src/providers/requesty.ts b/packages/types/src/providers/requesty.ts index d312adb397..3fd18c3139 100644 --- a/packages/types/src/providers/requesty.ts +++ b/packages/types/src/providers/requesty.ts @@ -9,6 +9,8 @@ export const requestyDefaultModelInfo: ModelInfo = { contextWindow: 200_000, supportsImages: true, supportsPromptCache: true, + supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 3.0, outputPrice: 15.0, cacheWritesPrice: 3.75, diff --git a/packages/types/src/providers/sambanova.ts b/packages/types/src/providers/sambanova.ts index 6ca04f48e3..7c04475c60 100644 --- 
a/packages/types/src/providers/sambanova.ts +++ b/packages/types/src/providers/sambanova.ts @@ -22,6 +22,7 @@ export const sambaNovaModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.1, outputPrice: 0.2, description: "Meta Llama 3.1 8B Instruct model with 16K context window.", @@ -32,6 +33,7 @@ export const sambaNovaModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.6, outputPrice: 1.2, description: "Meta Llama 3.3 70B Instruct model with 128K context window.", @@ -43,6 +45,7 @@ export const sambaNovaModels = { supportsPromptCache: false, supportsReasoningBudget: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 5.0, outputPrice: 7.0, description: "DeepSeek R1 reasoning model with 32K context window.", @@ -53,6 +56,7 @@ export const sambaNovaModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 3.0, outputPrice: 4.5, description: "DeepSeek V3 model with 32K context window.", @@ -63,6 +67,7 @@ export const sambaNovaModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 3.0, outputPrice: 4.5, description: "DeepSeek V3.1 model with 32K context window.", @@ -82,6 +87,7 @@ export const sambaNovaModels = { supportsImages: true, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.63, outputPrice: 1.8, description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window.", @@ -101,6 +107,7 @@ export const sambaNovaModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.4, outputPrice: 0.8, description: "Alibaba Qwen 3 32B model with 8K context window.", @@ -111,6 +118,7 @@ export const sambaNovaModels = { supportsImages: false, supportsPromptCache: false, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.22, outputPrice: 0.59, description: "OpenAI gpt oss 120b model with 128k context window.", diff --git a/packages/types/src/providers/vertex.ts b/packages/types/src/providers/vertex.ts index 53b67418cf..82f317a6a5 100644 --- a/packages/types/src/providers/vertex.ts +++ b/packages/types/src/providers/vertex.ts @@ -11,6 +11,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, supportsReasoningEffort: ["low", "high"], reasoningEffort: "low", @@ -36,6 +37,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 0.15, outputPrice: 3.5, @@ -48,6 +50,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 0.15, outputPrice: 0.6, @@ -57,6 +60,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 0.3, outputPrice: 2.5, @@ -70,6 +74,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: false, 
inputPrice: 0.15, outputPrice: 3.5, @@ -82,6 +87,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: false, inputPrice: 0.15, outputPrice: 0.6, @@ -91,6 +97,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 2.5, outputPrice: 15, @@ -100,6 +107,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 2.5, outputPrice: 15, @@ -109,6 +117,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 2.5, outputPrice: 15, @@ -120,6 +129,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 2.5, outputPrice: 15, @@ -146,6 +156,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: false, inputPrice: 0, outputPrice: 0, @@ -155,6 +166,7 @@ export const vertexModels = { contextWindow: 2_097_152, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: false, inputPrice: 0, outputPrice: 0, @@ -164,6 +176,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 0.15, outputPrice: 0.6, @@ -173,6 +186,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: false, inputPrice: 0.075, outputPrice: 0.3, @@ -182,6 +196,7 @@ export const vertexModels = { contextWindow: 32_768, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: false, inputPrice: 0, outputPrice: 0, @@ -191,6 +206,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 0.075, outputPrice: 0.3, @@ -200,6 +216,7 @@ export const vertexModels = { contextWindow: 2_097_152, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: false, inputPrice: 1.25, outputPrice: 5, @@ -346,6 +363,7 @@ export const vertexModels = { contextWindow: 1_048_576, supportsImages: true, supportsNativeTools: true, + defaultToolProtocol: "native", supportsPromptCache: true, inputPrice: 0.1, outputPrice: 0.4, diff --git a/packages/types/src/providers/xai.ts b/packages/types/src/providers/xai.ts index 3c4b1e0e53..23acb487aa 100644 --- a/packages/types/src/providers/xai.ts +++ b/packages/types/src/providers/xai.ts @@ -12,6 +12,7 @@ export const xaiModels = { supportsImages: true, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.2, outputPrice: 1.5, cacheWritesPrice: 0.02, @@ -26,6 +27,7 @@ export const xaiModels = { supportsImages: true, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.2, outputPrice: 0.5, cacheWritesPrice: 0.05, @@ -41,6 +43,7 @@ export const 
xaiModels = { supportsImages: true, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.2, outputPrice: 0.5, cacheWritesPrice: 0.05, @@ -56,6 +59,7 @@ export const xaiModels = { supportsImages: true, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.2, outputPrice: 0.5, cacheWritesPrice: 0.05, @@ -71,6 +75,7 @@ export const xaiModels = { supportsImages: true, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.2, outputPrice: 0.5, cacheWritesPrice: 0.05, @@ -86,6 +91,7 @@ export const xaiModels = { supportsImages: true, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 3.0, outputPrice: 15.0, cacheWritesPrice: 0.75, @@ -100,6 +106,7 @@ export const xaiModels = { supportsImages: true, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 0.3, outputPrice: 0.5, cacheWritesPrice: 0.07, @@ -116,6 +123,7 @@ export const xaiModels = { supportsImages: true, supportsPromptCache: true, supportsNativeTools: true, + defaultToolProtocol: "native", inputPrice: 3.0, outputPrice: 15.0, cacheWritesPrice: 0.75, diff --git a/src/api/providers/__tests__/bedrock-error-handling.spec.ts b/src/api/providers/__tests__/bedrock-error-handling.spec.ts index 4f9328c2f6..2041dde457 100644 --- a/src/api/providers/__tests__/bedrock-error-handling.spec.ts +++ b/src/api/providers/__tests__/bedrock-error-handling.spec.ts @@ -1,3 +1,13 @@ +// Mock TelemetryService - must come before other imports +const mockCaptureException = vi.hoisted(() => vi.fn()) +vi.mock("@roo-code/telemetry", () => ({ + TelemetryService: { + instance: { + captureException: mockCaptureException, + }, + }, +})) + // Mock BedrockRuntimeClient and commands const mockSend = vi.fn() @@ -27,6 +37,7 @@ describe("AwsBedrockHandler Error Handling", () => { beforeEach(() => { vi.clearAllMocks() + mockCaptureException.mockClear() handler = new AwsBedrockHandler({ apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0", awsAccessKey: "test-access-key", diff --git a/src/api/providers/__tests__/bedrock.spec.ts b/src/api/providers/__tests__/bedrock.spec.ts index dd6febcc89..d0b5771b5c 100644 --- a/src/api/providers/__tests__/bedrock.spec.ts +++ b/src/api/providers/__tests__/bedrock.spec.ts @@ -1,3 +1,14 @@ +// Mock TelemetryService before other imports +const mockCaptureException = vi.fn() + +vi.mock("@roo-code/telemetry", () => ({ + TelemetryService: { + instance: { + captureException: (...args: unknown[]) => mockCaptureException(...args), + }, + }, +})) + // Mock AWS SDK credential providers vi.mock("@aws-sdk/credential-providers", () => { const mockFromIni = vi.fn().mockReturnValue({ @@ -24,8 +35,8 @@ vi.mock("@aws-sdk/client-bedrock-runtime", () => { }) import { AwsBedrockHandler } from "../bedrock" -import { ConverseStreamCommand, BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime" -import { BEDROCK_1M_CONTEXT_MODEL_IDS, BEDROCK_SERVICE_TIER_MODEL_IDS, bedrockModels } from "@roo-code/types" +import { ConverseStreamCommand, BedrockRuntimeClient, ConverseCommand } from "@aws-sdk/client-bedrock-runtime" +import { BEDROCK_1M_CONTEXT_MODEL_IDS, BEDROCK_SERVICE_TIER_MODEL_IDS, bedrockModels, ApiProviderError } from "@roo-code/types" import type { Anthropic } from "@anthropic-ai/sdk" @@ -996,4 +1007,139 @@ describe("AwsBedrockHandler", () => { }) }) }) + + describe("error telemetry", () => { + let mockSend: ReturnType<typeof vi.fn> +
+ beforeEach(() => { + mockCaptureException.mockClear() + // Get access to the mock send function from the mocked client + mockSend = vi.mocked(BedrockRuntimeClient).mock.results[0]?.value?.send + }) + + it("should capture telemetry on createMessage error", async () => { + // Create a handler with a fresh mock + const errorHandler = new AwsBedrockHandler({ + apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0", + awsAccessKey: "test-access-key", + awsSecretKey: "test-secret-key", + awsRegion: "us-east-1", + }) + + // Get the mock send from the new handler instance + const clientInstance = + vi.mocked(BedrockRuntimeClient).mock.results[vi.mocked(BedrockRuntimeClient).mock.results.length - 1] + ?.value + const mockSendFn = clientInstance?.send as ReturnType<typeof vi.fn> + + // Mock the send to throw an error + mockSendFn.mockRejectedValueOnce(new Error("Bedrock API error")) + + const messages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: "Hello", + }, + ] + + const generator = errorHandler.createMessage("You are a helpful assistant", messages) + + // Consume the generator - it should throw + await expect(async () => { + for await (const _chunk of generator) { + // Should throw before or during iteration + } + }).rejects.toThrow() + + // Verify telemetry was captured + expect(mockCaptureException).toHaveBeenCalledTimes(1) + expect(mockCaptureException).toHaveBeenCalledWith( + expect.objectContaining({ + message: "Bedrock API error", + provider: "Bedrock", + modelId: "anthropic.claude-3-5-sonnet-20241022-v2:0", + operation: "createMessage", + }), + ) + + // Verify it's an ApiProviderError + const capturedError = mockCaptureException.mock.calls[0][0] + expect(capturedError).toBeInstanceOf(ApiProviderError) + }) + + it("should capture telemetry on completePrompt error", async () => { + // Create a handler with a fresh mock + const errorHandler = new AwsBedrockHandler({ + apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0", + awsAccessKey: "test-access-key", + awsSecretKey: "test-secret-key", + awsRegion: "us-east-1", + }) + + // Get the mock send from the new handler instance + const clientInstance = + vi.mocked(BedrockRuntimeClient).mock.results[vi.mocked(BedrockRuntimeClient).mock.results.length - 1] + ?.value + const mockSendFn = clientInstance?.send as ReturnType<typeof vi.fn> + + // Mock the send to throw an error for ConverseCommand + mockSendFn.mockRejectedValueOnce(new Error("Bedrock completion error")) + + // Call completePrompt - it should throw + await expect(errorHandler.completePrompt("Test prompt")).rejects.toThrow() + + // Verify telemetry was captured + expect(mockCaptureException).toHaveBeenCalledTimes(1) + expect(mockCaptureException).toHaveBeenCalledWith( + expect.objectContaining({ + message: "Bedrock completion error", + provider: "Bedrock", + modelId: "anthropic.claude-3-5-sonnet-20241022-v2:0", + operation: "completePrompt", + }), + ) + + // Verify it's an ApiProviderError + const capturedError = mockCaptureException.mock.calls[0][0] + expect(capturedError).toBeInstanceOf(ApiProviderError) + }) + + it("should still throw the error after capturing telemetry", async () => { + // Create a handler with a fresh mock + const errorHandler = new AwsBedrockHandler({ + apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0", + awsAccessKey: "test-access-key", + awsSecretKey: "test-secret-key", + awsRegion: "us-east-1", + }) + + // Get the mock send from the new handler instance + const clientInstance = +
vi.mocked(BedrockRuntimeClient).mock.results[vi.mocked(BedrockRuntimeClient).mock.results.length - 1] + ?.value + const mockSendFn = clientInstance?.send as ReturnType<typeof vi.fn> + + // Mock the send to throw an error + mockSendFn.mockRejectedValueOnce(new Error("Test error for throw verification")) + + const messages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: "Hello", + }, + ] + + const generator = errorHandler.createMessage("You are a helpful assistant", messages) + + // Verify the error is still thrown after telemetry capture + await expect(async () => { + for await (const _chunk of generator) { + // Should throw + } + }).rejects.toThrow() + + // Telemetry should have been captured before the error was thrown + expect(mockCaptureException).toHaveBeenCalled() + }) + }) }) diff --git a/src/api/providers/__tests__/gemini.spec.ts b/src/api/providers/__tests__/gemini.spec.ts index e778524c26..8c2ee87a78 100644 --- a/src/api/providers/__tests__/gemini.spec.ts +++ b/src/api/providers/__tests__/gemini.spec.ts @@ -1,8 +1,18 @@ // npx vitest run src/api/providers/__tests__/gemini.spec.ts +const mockCaptureException = vitest.fn() + +vitest.mock("@roo-code/telemetry", () => ({ + TelemetryService: { + instance: { + captureException: (...args: unknown[]) => mockCaptureException(...args), + }, + }, +})) + import { Anthropic } from "@anthropic-ai/sdk" -import { type ModelInfo, geminiDefaultModelId } from "@roo-code/types" +import { type ModelInfo, geminiDefaultModelId, ApiProviderError } from "@roo-code/types" import { t } from "i18next" import { GeminiHandler } from "../gemini" @@ -13,6 +23,9 @@ describe("GeminiHandler", () => { let handler: GeminiHandler beforeEach(() => { + // Reset mocks + mockCaptureException.mockClear() + // Create mock functions const mockGenerateContentStream = vitest.fn() const mockGenerateContent = vitest.fn() @@ -229,4 +242,82 @@ describe("GeminiHandler", () => { expect(cost).toBeUndefined() }) }) + + describe("error telemetry", () => { + const mockMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: "Hello", + }, + ] + + const systemPrompt = "You are a helpful assistant" + + it("should capture telemetry on createMessage error", async () => { + const mockError = new Error("Gemini API error") + ;(handler["client"].models.generateContentStream as any).mockRejectedValue(mockError) + + const stream = handler.createMessage(systemPrompt, mockMessages) + + await expect(async () => { + for await (const _chunk of stream) { + // Should throw before yielding any chunks + } + }).rejects.toThrow() + + // Verify telemetry was captured + expect(mockCaptureException).toHaveBeenCalledTimes(1) + expect(mockCaptureException).toHaveBeenCalledWith( + expect.objectContaining({ + message: "Gemini API error", + provider: "Gemini", + modelId: GEMINI_MODEL_NAME, + operation: "createMessage", + }), + ) + + // Verify it's an ApiProviderError + const capturedError = mockCaptureException.mock.calls[0][0] + expect(capturedError).toBeInstanceOf(ApiProviderError) + }) + + it("should capture telemetry on completePrompt error", async () => { + const mockError = new Error("Gemini completion error") + ;(handler["client"].models.generateContent as any).mockRejectedValue(mockError) + + await expect(handler.completePrompt("Test prompt")).rejects.toThrow() + + // Verify telemetry was captured + expect(mockCaptureException).toHaveBeenCalledTimes(1) + expect(mockCaptureException).toHaveBeenCalledWith( + expect.objectContaining({ + message: "Gemini completion error",
provider: "Gemini", + modelId: GEMINI_MODEL_NAME, + operation: "completePrompt", + }), + ) + + // Verify it's an ApiProviderError + const capturedError = mockCaptureException.mock.calls[0][0] + expect(capturedError).toBeInstanceOf(ApiProviderError) + }) + + it("should still throw the error after capturing telemetry", async () => { + const mockError = new Error("Gemini API error") + ;(handler["client"].models.generateContentStream as any).mockRejectedValue(mockError) + + const stream = handler.createMessage(systemPrompt, mockMessages) + + // Verify the error is still thrown + await expect(async () => { + for await (const _chunk of stream) { + // Should throw + } + }).rejects.toThrow() + + // Telemetry should have been captured before the error was thrown + expect(mockCaptureException).toHaveBeenCalled() + }) + }) }) diff --git a/src/api/providers/__tests__/mistral.spec.ts b/src/api/providers/__tests__/mistral.spec.ts index 9c1a26763c..845481fdf7 100644 --- a/src/api/providers/__tests__/mistral.spec.ts +++ b/src/api/providers/__tests__/mistral.spec.ts @@ -1,3 +1,13 @@ +// Mock TelemetryService - must come before other imports +const mockCaptureException = vi.hoisted(() => vi.fn()) +vi.mock("@roo-code/telemetry", () => ({ + TelemetryService: { + instance: { + captureException: mockCaptureException, + }, + }, +})) + // Mock Mistral client - must come before other imports const mockCreate = vi.fn() const mockComplete = vi.fn() @@ -59,6 +69,7 @@ describe("MistralHandler", () => { handler = new MistralHandler(mockOptions) mockCreate.mockClear() mockComplete.mockClear() + mockCaptureException.mockClear() }) describe("constructor", () => { @@ -251,11 +262,10 @@ describe("MistralHandler", () => { }, ] - it("should include tools in request when toolProtocol is native", async () => { + it("should include tools in request by default (native is default)", async () => { const metadata: ApiHandlerCreateMessageMetadata = { taskId: "test-task", tools: mockTools, - toolProtocol: "native", } const iterator = handler.createMessage(systemPrompt, messages, metadata) @@ -329,7 +339,6 @@ describe("MistralHandler", () => { const metadata: ApiHandlerCreateMessageMetadata = { taskId: "test-task", tools: mockTools, - toolProtocol: "native", } const iterator = handler.createMessage(systemPrompt, messages, metadata) @@ -393,7 +402,6 @@ describe("MistralHandler", () => { const metadata: ApiHandlerCreateMessageMetadata = { taskId: "test-task", tools: mockTools, - toolProtocol: "native", } const iterator = handler.createMessage(systemPrompt, messages, metadata) @@ -427,7 +435,6 @@ describe("MistralHandler", () => { const metadata: ApiHandlerCreateMessageMetadata = { taskId: "test-task", tools: mockTools, - toolProtocol: "native", tool_choice: "auto", // This should be ignored } diff --git a/src/api/providers/__tests__/openai-native.spec.ts b/src/api/providers/__tests__/openai-native.spec.ts index 3196316f63..2f9f0bb9d7 100644 --- a/src/api/providers/__tests__/openai-native.spec.ts +++ b/src/api/providers/__tests__/openai-native.spec.ts @@ -1,7 +1,19 @@ // npx vitest run api/providers/__tests__/openai-native.spec.ts +const mockCaptureException = vitest.fn() + +vitest.mock("@roo-code/telemetry", () => ({ + TelemetryService: { + instance: { + captureException: (...args: unknown[]) => mockCaptureException(...args), + }, + }, +})) + import { Anthropic } from "@anthropic-ai/sdk" +import { ApiProviderError } from "@roo-code/types" + import { OpenAiNativeHandler } from "../openai-native" import { ApiHandlerOptions } from 
"../../../shared/api" @@ -37,6 +49,7 @@ describe("OpenAiNativeHandler", () => { } handler = new OpenAiNativeHandler(mockOptions) mockResponsesCreate.mockClear() + mockCaptureException.mockClear() // Clear fetch mock if it exists if ((global as any).fetch) { delete (global as any).fetch @@ -208,6 +221,45 @@ describe("OpenAiNativeHandler", () => { expect(modelInfo.id).toBe("gpt-5.1-codex-max") // Default model expect(modelInfo.info).toBeDefined() }) + + it("should have defaultToolProtocol: native for all OpenAI Native models", () => { + // Test that all models have defaultToolProtocol: native + const testModels = [ + "gpt-5.1-codex-max", + "gpt-5.2", + "gpt-5.1", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-4.1", + "gpt-4.1-mini", + "gpt-4.1-nano", + "o3", + "o3-high", + "o3-low", + "o4-mini", + "o4-mini-high", + "o4-mini-low", + "o3-mini", + "o3-mini-high", + "o3-mini-low", + "o1", + "o1-preview", + "o1-mini", + "gpt-4o", + "gpt-4o-mini", + "codex-mini-latest", + ] + + for (const modelId of testModels) { + const testHandler = new OpenAiNativeHandler({ + openAiNativeApiKey: "test-api-key", + apiModelId: modelId, + }) + const modelInfo = testHandler.getModel() + expect(modelInfo.info.defaultToolProtocol).toBe("native") + } + }) }) describe("GPT-5 models", () => { @@ -897,6 +949,150 @@ describe("OpenAiNativeHandler", () => { } }) }) + + describe("error telemetry", () => { + const errorMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: "Hello", + }, + ] + + const errorSystemPrompt = "You are a helpful assistant" + + beforeEach(() => { + mockCaptureException.mockClear() + }) + + it("should capture telemetry on createMessage error", async () => { + // Mock fetch to return error + const mockFetch = vitest.fn().mockResolvedValue({ + ok: false, + status: 500, + text: async () => "Internal Server Error", + }) + global.fetch = mockFetch as any + + // Mock SDK to fail so it falls back to fetch + mockResponsesCreate.mockRejectedValue(new Error("SDK not available")) + + const stream = handler.createMessage(errorSystemPrompt, errorMessages) + + await expect(async () => { + for await (const _chunk of stream) { + // Should throw before yielding any chunks + } + }).rejects.toThrow() + + // Verify telemetry was captured + expect(mockCaptureException).toHaveBeenCalledTimes(1) + expect(mockCaptureException).toHaveBeenCalledWith( + expect.objectContaining({ + message: expect.stringContaining("OpenAI service error"), + provider: "OpenAI Native", + modelId: "gpt-4.1", + operation: "createMessage", + }), + ) + + // Verify it's an ApiProviderError + const capturedError = mockCaptureException.mock.calls[0][0] + expect(capturedError).toBeInstanceOf(ApiProviderError) + }) + + it("should capture telemetry on stream processing error", async () => { + // Mock fetch to return a stream with an error event + const mockFetch = vitest.fn().mockResolvedValue({ + ok: true, + body: new ReadableStream({ + start(controller) { + controller.enqueue( + new TextEncoder().encode( + 'data: {"type":"response.error","error":{"message":"Model overloaded"}}\n\n', + ), + ) + controller.close() + }, + }), + }) + global.fetch = mockFetch as any + + // Mock SDK to fail so it falls back to fetch + mockResponsesCreate.mockRejectedValue(new Error("SDK not available")) + + const stream = handler.createMessage(errorSystemPrompt, errorMessages) + + await expect(async () => { + for await (const _chunk of stream) { + // Should throw when encountering error event + } + }).rejects.toThrow() + + // Verify telemetry was 
captured (may be called multiple times due to error propagation) + expect(mockCaptureException).toHaveBeenCalled() + + // Find the call with the stream error message + const streamErrorCall = mockCaptureException.mock.calls.find((call: any[]) => + call[0]?.message?.includes("Model overloaded"), + ) + expect(streamErrorCall).toBeDefined() + expect(streamErrorCall![0]).toMatchObject({ + provider: "OpenAI Native", + modelId: "gpt-4.1", + operation: "createMessage", + }) + + // Verify it's an ApiProviderError + expect(streamErrorCall![0]).toBeInstanceOf(ApiProviderError) + }) + + it("should capture telemetry on completePrompt error", async () => { + // Mock SDK to throw an error + mockResponsesCreate.mockRejectedValue(new Error("API Error")) + + await expect(handler.completePrompt("Test prompt")).rejects.toThrow() + + // Verify telemetry was captured + expect(mockCaptureException).toHaveBeenCalledTimes(1) + expect(mockCaptureException).toHaveBeenCalledWith( + expect.objectContaining({ + message: "API Error", + provider: "OpenAI Native", + modelId: "gpt-4.1", + operation: "completePrompt", + }), + ) + + // Verify it's an ApiProviderError + const capturedError = mockCaptureException.mock.calls[0][0] + expect(capturedError).toBeInstanceOf(ApiProviderError) + }) + + it("should still throw the error after capturing telemetry", async () => { + // Mock fetch to return error + const mockFetch = vitest.fn().mockResolvedValue({ + ok: false, + status: 500, + text: async () => "Internal Server Error", + }) + global.fetch = mockFetch as any + + // Mock SDK to fail + mockResponsesCreate.mockRejectedValue(new Error("SDK not available")) + + const stream = handler.createMessage(errorSystemPrompt, errorMessages) + + // Verify the error is still thrown + await expect(async () => { + for await (const _chunk of stream) { + // Should throw + } + }).rejects.toThrow() + + // Telemetry should have been captured before the error was thrown + expect(mockCaptureException).toHaveBeenCalled() + }) + }) }) // Additional tests for GPT-5 streaming event coverage diff --git a/src/api/providers/__tests__/xai.spec.ts b/src/api/providers/__tests__/xai.spec.ts index 6bdf7fa044..119e869e6f 100644 --- a/src/api/providers/__tests__/xai.spec.ts +++ b/src/api/providers/__tests__/xai.spec.ts @@ -1,5 +1,15 @@ // npx vitest api/providers/__tests__/xai.spec.ts +// Mock TelemetryService - must come before other imports +const mockCaptureException = vitest.hoisted(() => vitest.fn()) +vitest.mock("@roo-code/telemetry", () => ({ + TelemetryService: { + instance: { + captureException: mockCaptureException, + }, + }, +})) + const mockCreate = vitest.fn() vitest.mock("openai", () => { @@ -25,6 +35,7 @@ describe("XAIHandler", () => { // Reset all mocks vi.clearAllMocks() mockCreate.mockClear() + mockCaptureException.mockClear() // Create handler with mock handler = new XAIHandler({}) @@ -299,7 +310,7 @@ describe("XAIHandler", () => { }, ] - it("should include tools in request when model supports native tools and tools are provided", async () => { + it("should include tools in request when model supports native tools and tools are provided (native is default)", async () => { const handlerWithTools = new XAIHandler({ apiModelId: "grok-3" }) mockCreate.mockImplementationOnce(() => { @@ -315,7 +326,6 @@ describe("XAIHandler", () => { const messageGenerator = handlerWithTools.createMessage("test prompt", [], { taskId: "test-task-id", tools: testTools, - toolProtocol: "native", }) await messageGenerator.next() @@ -350,7 +360,6 @@ describe("XAIHandler", 
() => { const messageGenerator = handlerWithTools.createMessage("test prompt", [], { taskId: "test-task-id", tools: testTools, - toolProtocol: "native", tool_choice: "auto", }) await messageGenerator.next() @@ -443,7 +452,6 @@ describe("XAIHandler", () => { const stream = handlerWithTools.createMessage("test prompt", [], { taskId: "test-task-id", tools: testTools, - toolProtocol: "native", }) const chunks = [] @@ -484,7 +492,6 @@ describe("XAIHandler", () => { const messageGenerator = handlerWithTools.createMessage("test prompt", [], { taskId: "test-task-id", tools: testTools, - toolProtocol: "native", parallelToolCalls: true, }) await messageGenerator.next() @@ -551,7 +558,6 @@ describe("XAIHandler", () => { const stream = handlerWithTools.createMessage("test prompt", [], { taskId: "test-task-id", tools: testTools, - toolProtocol: "native", }) const chunks = [] diff --git a/src/api/providers/bedrock.ts b/src/api/providers/bedrock.ts index 51793df218..27dce1bb9f 100644 --- a/src/api/providers/bedrock.ts +++ b/src/api/providers/bedrock.ts @@ -30,7 +30,9 @@ import { BEDROCK_GLOBAL_INFERENCE_MODEL_IDS, BEDROCK_SERVICE_TIER_MODEL_IDS, BEDROCK_SERVICE_TIER_PRICING, + ApiProviderError, } from "@roo-code/types" +import { TelemetryService } from "@roo-code/telemetry" import { ApiStream } from "../transform/stream" import { BaseProvider } from "./base-provider" @@ -197,6 +199,7 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH protected options: ProviderSettings private client: BedrockRuntimeClient private arnInfo: any + private readonly providerName = "Bedrock" constructor(options: ProviderSettings) { super() @@ -690,6 +693,11 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH // Clear timeout on error clearTimeout(timeoutId) + // Capture error in telemetry before processing + const errorMessage = error instanceof Error ? error.message : String(error) + const apiError = new ApiProviderError(errorMessage, this.providerName, modelConfig.id, "createMessage") + TelemetryService.instance.captureException(apiError) + // Check if this is a throttling error that should trigger retry logic const errorType = this.getErrorType(error) @@ -793,6 +801,12 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH } return "" } catch (error) { + // Capture error in telemetry + const model = this.getModel() + const telemetryErrorMessage = error instanceof Error ? 
error.message : String(error) + const apiError = new ApiProviderError(telemetryErrorMessage, this.providerName, model.id, "completePrompt") + TelemetryService.instance.captureException(apiError) + // Use the extracted error handling method for all errors const errorResult = this.handleBedrockError(error, false) // false for non-streaming context // Since we're in a non-streaming context, we know the result is a string diff --git a/src/api/providers/gemini.ts b/src/api/providers/gemini.ts index cb247abd47..2b576c0742 100644 --- a/src/api/providers/gemini.ts +++ b/src/api/providers/gemini.ts @@ -9,7 +9,14 @@ import { } from "@google/genai" import type { JWTInput } from "google-auth-library" -import { type ModelInfo, type GeminiModelId, geminiDefaultModelId, geminiModels } from "@roo-code/types" +import { + type ModelInfo, + type GeminiModelId, + geminiDefaultModelId, + geminiModels, + ApiProviderError, +} from "@roo-code/types" +import { TelemetryService } from "@roo-code/telemetry" import type { ApiHandlerOptions } from "../../shared/api" import { safeJsonParse } from "../../shared/safeJsonParse" @@ -32,6 +39,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl private client: GoogleGenAI private lastThoughtSignature?: string private lastResponseId?: string + private readonly providerName = "Gemini" constructor({ isVertex, ...options }: GeminiHandlerOptions) { super() @@ -338,6 +346,10 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl } } } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error) + const apiError = new ApiProviderError(errorMessage, this.providerName, model, "createMessage") + TelemetryService.instance.captureException(apiError) + if (error instanceof Error) { throw new Error(t("common:errors.gemini.generate_stream", { error: error.message })) } @@ -401,9 +413,9 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl } async completePrompt(prompt: string): Promise<string> { - try { - const { id: model, info } = this.getModel() + const { id: model, info } = this.getModel() + try { const tools: GenerateContentConfig["tools"] = [] if (this.options.enableUrlContext) { tools.push({ urlContext: {} }) } @@ -445,6 +457,10 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl return text } catch (error) { + const errorMessage = error instanceof Error ?
error.message : String(error) + const apiError = new ApiProviderError(errorMessage, this.providerName, model, "completePrompt") + TelemetryService.instance.captureException(apiError) + if (error instanceof Error) { throw new Error(t("common:errors.gemini.generate_complete_prompt", { error: error.message })) } diff --git a/src/api/providers/mistral.ts b/src/api/providers/mistral.ts index 96d2c33255..f5f5534db2 100644 --- a/src/api/providers/mistral.ts +++ b/src/api/providers/mistral.ts @@ -2,7 +2,14 @@ import { Anthropic } from "@anthropic-ai/sdk" import { Mistral } from "@mistralai/mistralai" import OpenAI from "openai" -import { type MistralModelId, mistralDefaultModelId, mistralModels, MISTRAL_DEFAULT_TEMPERATURE } from "@roo-code/types" +import { + type MistralModelId, + mistralDefaultModelId, + mistralModels, + MISTRAL_DEFAULT_TEMPERATURE, + ApiProviderError, +} from "@roo-code/types" +import { TelemetryService } from "@roo-code/telemetry" import { ApiHandlerOptions } from "../../shared/api" @@ -43,6 +50,7 @@ type MistralTool = { export class MistralHandler extends BaseProvider implements SingleCompletionHandler { protected options: ApiHandlerOptions private client: Mistral + private readonly providerName = "Mistral" constructor(options: ApiHandlerOptions) { super() @@ -96,7 +104,15 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand // Temporary debug log for QA // console.log("[MISTRAL DEBUG] Raw API request body:", requestOptions) - const response = await this.client.chat.stream(requestOptions) + let response + try { + response = await this.client.chat.stream(requestOptions) + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error) + const apiError = new ApiProviderError(errorMessage, this.providerName, model, "createMessage") + TelemetryService.instance.captureException(apiError) + throw new Error(`Mistral completion error: ${errorMessage}`) + } for await (const event of response) { const delta = event.data.choices[0]?.delta @@ -181,9 +197,9 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand } async completePrompt(prompt: string): Promise { - try { - const { id: model, temperature } = this.getModel() + const { id: model, temperature } = this.getModel() + try { const response = await this.client.chat.complete({ model, messages: [{ role: "user", content: prompt }], @@ -202,11 +218,10 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand return content || "" } catch (error) { - if (error instanceof Error) { - throw new Error(`Mistral completion error: ${error.message}`) - } - - throw error + const errorMessage = error instanceof Error ? 
error.message : String(error) + const apiError = new ApiProviderError(errorMessage, this.providerName, model, "completePrompt") + TelemetryService.instance.captureException(apiError) + throw new Error(`Mistral completion error: ${errorMessage}`) } } } diff --git a/src/api/providers/openai-native.ts b/src/api/providers/openai-native.ts index b5fb417ee3..762b81fc83 100644 --- a/src/api/providers/openai-native.ts +++ b/src/api/providers/openai-native.ts @@ -11,7 +11,9 @@ import { type VerbosityLevel, type ReasoningEffortExtended, type ServiceTier, + ApiProviderError, } from "@roo-code/types" +import { TelemetryService } from "@roo-code/telemetry" import type { ApiHandlerOptions } from "../../shared/api" @@ -28,6 +30,7 @@ export type OpenAiNativeModel = ReturnType export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler { protected options: ApiHandlerOptions private client: OpenAI + private readonly providerName = "OpenAI Native" // Resolved service tier from Responses API (actual tier used by OpenAI) private lastServiceTier: ServiceTier | undefined // Complete response output array (includes reasoning items with encrypted_content) @@ -536,6 +539,11 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio // Handle streaming response yield* this.handleStreamResponse(response.body, model) } catch (error) { + const model = this.getModel() + const errorMessage = error instanceof Error ? error.message : String(error) + const apiError = new ApiProviderError(errorMessage, this.providerName, model.id, "createMessage") + TelemetryService.instance.captureException(apiError) + if (error instanceof Error) { // Re-throw with the original error message if it's already formatted if (error.message.includes("Responses API")) { @@ -1013,6 +1021,10 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio // If we didn't get any content, don't throw - the API might have returned an empty response // This can happen in certain edge cases and shouldn't break the flow } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error) + const apiError = new ApiProviderError(errorMessage, this.providerName, model.id, "createMessage") + TelemetryService.instance.captureException(apiError) + if (error instanceof Error) { throw new Error(`Error processing response stream: ${error.message}`) } @@ -1339,6 +1351,11 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio return "" } catch (error) { + const errorModel = this.getModel() + const errorMessage = error instanceof Error ? 
error.message : String(error) + const apiError = new ApiProviderError(errorMessage, this.providerName, errorModel.id, "completePrompt") + TelemetryService.instance.captureException(apiError) + if (error instanceof Error) { throw new Error(`OpenAI Native completion error: ${error.message}`) } diff --git a/src/api/providers/xai.ts b/src/api/providers/xai.ts index 36c1ab17dc..a1377a1317 100644 --- a/src/api/providers/xai.ts +++ b/src/api/providers/xai.ts @@ -1,7 +1,8 @@ import { Anthropic } from "@anthropic-ai/sdk" import OpenAI from "openai" -import { type XAIModelId, xaiDefaultModelId, xaiModels } from "@roo-code/types" +import { type XAIModelId, xaiDefaultModelId, xaiModels, ApiProviderError } from "@roo-code/types" +import { TelemetryService } from "@roo-code/telemetry" import { NativeToolCallParser } from "../../core/assistant-message/NativeToolCallParser" import type { ApiHandlerOptions } from "../../shared/api" @@ -79,6 +80,9 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler try { stream = await this.client.chat.completions.create(requestOptions) } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error) + const apiError = new ApiProviderError(errorMessage, this.providerName, modelId, "createMessage") + TelemetryService.instance.captureException(apiError) throw handleOpenAIError(error, this.providerName) } @@ -158,6 +162,9 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler return response.choices[0]?.message.content || "" } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error) + const apiError = new ApiProviderError(errorMessage, this.providerName, modelId, "completePrompt") + TelemetryService.instance.captureException(apiError) throw handleOpenAIError(error, this.providerName) } }