diff --git a/packages/types/src/providers/chutes.ts b/packages/types/src/providers/chutes.ts
index c90e0445705..237cb76e804 100644
--- a/packages/types/src/providers/chutes.ts
+++ b/packages/types/src/providers/chutes.ts
@@ -34,6 +34,7 @@ export type ChutesModelId =
 	| "zai-org/GLM-4.5-FP8"
 	| "zai-org/GLM-4.5-turbo"
 	| "zai-org/GLM-4.6-FP8"
+	| "zai-org/GLM-4.6-turbo"
 	| "moonshotai/Kimi-K2-Instruct-75k"
 	| "moonshotai/Kimi-K2-Instruct-0905"
 	| "Qwen/Qwen3-235B-A22B-Thinking-2507"
@@ -326,6 +327,15 @@ export const chutesModels = {
 		description:
 			"GLM-4.6 introduces major upgrades over GLM-4.5, including a longer 200K-token context window for complex tasks, stronger coding performance in benchmarks and real-world tools (such as Claude Code, Cline, Roo Code, and Kilo Code), improved reasoning with tool use during inference, more capable and efficient agent integration, and refined writing that better matches human style, readability, and natural role-play scenarios.",
 	},
+	"zai-org/GLM-4.6-turbo": {
+		maxTokens: 202752, // From Chutes /v1/models: max_output_length
+		contextWindow: 202752,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 1.15,
+		outputPrice: 3.25,
+		description: "GLM-4.6-turbo model with 200K-token context window, optimized for fast inference.",
+	},
 	"Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8": {
 		maxTokens: 32768,
 		contextWindow: 262144,
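
The numbers in the new entry are taken from the Chutes `/v1/models` listing (per the inline comment on `maxTokens`). Below is a minimal TypeScript sketch, separate from the diff, of how those values could be cross-checked against that listing; the `https://llm.chutes.ai/v1/models` URL, the response fields (`data[].id`, `data[].max_output_length`), and the trimmed `ChutesModelInfo` interface are assumptions for illustration, not confirmed API details.

```ts
// Standalone sketch (not part of the diff above). It mirrors the new chutesModels
// entry and compares maxTokens with the value Chutes reports. Endpoint URL and
// response field names are assumptions inferred from the inline comment.
interface ChutesModelInfo {
	maxTokens: number
	contextWindow: number
	supportsImages: boolean
	supportsPromptCache: boolean
	inputPrice: number
	outputPrice: number
	description: string
}

const glm46Turbo: ChutesModelInfo = {
	maxTokens: 202752, // From Chutes /v1/models: max_output_length
	contextWindow: 202752,
	supportsImages: false,
	supportsPromptCache: false,
	inputPrice: 1.15,
	outputPrice: 3.25,
	description: "GLM-4.6-turbo model with 200K-token context window, optimized for fast inference.",
}

async function verifyAgainstChutes(modelId: string, local: ChutesModelInfo): Promise<void> {
	// Hypothetical check: fetch the model list and compare max_output_length with maxTokens.
	const res = await fetch("https://llm.chutes.ai/v1/models")
	const body = (await res.json()) as { data?: Array<{ id: string; max_output_length?: number }> }
	const remote = body.data?.find((m) => m.id === modelId)
	if (!remote) {
		console.warn(`${modelId} not found in /v1/models response`)
		return
	}
	if (remote.max_output_length !== undefined && remote.max_output_length !== local.maxTokens) {
		console.warn(`maxTokens mismatch for ${modelId}: local=${local.maxTokens} remote=${remote.max_output_length}`)
	} else {
		console.log(`${modelId}: maxTokens consistent with /v1/models (${local.maxTokens})`)
	}
}

verifyAgainstChutes("zai-org/GLM-4.6-turbo", glm46Turbo).catch(console.error)
```

Runs as-is on Node 18+ (global `fetch`); the same comparison could cover `contextWindow` and pricing if the listing exposes those fields.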