From 8cc93a096a14afe921e91a06010a2768e761250f Mon Sep 17 00:00:00 2001 From: Igor Warzocha Date: Wed, 31 Dec 2025 21:17:39 +0000 Subject: [PATCH 1/6] feat: support GLM models and ZAI token metadata - Add GLM system prompt (session/prompt/glm.txt) - Update session/system.ts to route GLM models to the new prompt - Update session/index.ts to extract token usage from metadata (fixes ZAI/Anthropic token counting) --- packages/opencode/src/session/index.ts | 29 +++++++--- packages/opencode/src/session/prompt/glm.txt | 57 ++++++++++++++++++++ packages/opencode/src/session/system.ts | 12 +++-- 3 files changed, 87 insertions(+), 11 deletions(-) create mode 100644 packages/opencode/src/session/prompt/glm.txt diff --git a/packages/opencode/src/session/index.ts b/packages/opencode/src/session/index.ts index 0776590d6a9..bcf43c74874 100644 --- a/packages/opencode/src/session/index.ts +++ b/packages/opencode/src/session/index.ts @@ -397,11 +397,27 @@ export namespace Session { metadata: z.custom().optional(), }), (input) => { - const cachedInputTokens = input.usage.cachedInputTokens ?? 0 + // Get raw anthropic usage from metadata (has correct values for streaming) + const anthropicRawUsage = input.metadata?.["anthropic"]?.["usage"] as + | { + input_tokens?: number + output_tokens?: number + cache_read_input_tokens?: number + cache_creation_input_tokens?: number + } + | undefined + + // Use raw anthropic input_tokens if SDK reports 0 (streaming bug with custom endpoints) + const rawInputTokens = + input.usage.inputTokens === 0 && anthropicRawUsage?.input_tokens + ? anthropicRawUsage.input_tokens + : (input.usage.inputTokens ?? 0) + + const cachedInputTokens = input.usage.cachedInputTokens ?? anthropicRawUsage?.cache_read_input_tokens ?? 0 + const excludesCachedTokens = !!(input.metadata?.["anthropic"] || input.metadata?.["bedrock"]) - const adjustedInputTokens = excludesCachedTokens - ? (input.usage.inputTokens ?? 0) - : (input.usage.inputTokens ?? 0) - cachedInputTokens + const adjustedInputTokens = excludesCachedTokens ? rawInputTokens : rawInputTokens - cachedInputTokens + const safe = (value: number) => { if (!Number.isFinite(value)) return 0 return value @@ -409,11 +425,12 @@ export namespace Session { const tokens = { input: safe(adjustedInputTokens), - output: safe(input.usage.outputTokens ?? 0), - reasoning: safe(input.usage?.reasoningTokens ?? 0), + output: safe((input.usage.outputTokens ?? anthropicRawUsage?.output_tokens ?? 0) as number), + reasoning: safe((input.usage?.reasoningTokens ?? 0) as number), cache: { write: safe( (input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ?? + anthropicRawUsage?.cache_creation_input_tokens ?? // @ts-expect-error input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ?? 0) as number, diff --git a/packages/opencode/src/session/prompt/glm.txt b/packages/opencode/src/session/prompt/glm.txt new file mode 100644 index 00000000000..62aab71e4cd --- /dev/null +++ b/packages/opencode/src/session/prompt/glm.txt @@ -0,0 +1,57 @@ +You are OpenCode, a powerful AI coding assistant optimized for software engineering tasks. + +Use the instructions below and the tools available to you to assist the user. + +IMPORTANT: Never generate or guess URLs unless they directly help with programming tasks. You may use URLs provided by the user. + +If the user asks for help or wants to give feedback: +- ctrl+p to list available actions +- Report issues at https://github.com/sst/opencode + +# Reasoning Approach +Think through problems systematically. 
Break complex tasks into logical steps before acting. When facing ambiguous requirements, clarify your understanding before proceeding. + +# Tone and Style +- No emojis unless requested +- Keep responses short and concise (CLI output) +- Use Github-flavored markdown (CommonMark, monospace font) +- Never use bash commands to communicate with the user +- NEVER create files unless necessary - prefer editing existing files + +# Professional Objectivity +Prioritize technical accuracy over validation. Focus on facts and problem-solving. Apply rigorous standards to all ideas and disagree when necessary. Objective guidance is more valuable than false agreement. + +# Security +Refuse to write or explain code that may be used maliciously. Analyze file/directory structure for purpose before working on code. + +# Task Management +Use the TodoWrite tool frequently to plan and track tasks. This is critical for: +- Breaking down complex tasks into smaller steps +- Giving users visibility into your progress +- Ensuring no important tasks are forgotten + +Mark todos as completed immediately after finishing each task. + +# Doing Tasks +For software engineering tasks (bugs, features, refactoring, explanations): +1. Understand the request and identify key components +2. Plan with TodoWrite for multi-step tasks +3. Research unfamiliar technologies with WebFetch when needed +4. Use the Task tool to explore codebase and gather context +5. Follow established patterns and conventions +6. Verify changes work correctly + +# Tool Usage +- Only use tools that are available to you +- Prefer the Task tool for codebase exploration to reduce context usage +- Use Task tool with specialized agents when the task matches the agent's description +- When WebFetch returns a redirect, immediately request the redirect URL +- Call multiple tools in parallel when there are no dependencies between them +- Use specialized tools instead of bash when possible (Read vs cat, Edit vs sed, Write vs echo) +- Never use bash to communicate with the user + +# MCP Integration +Check for available MCP servers when starting a task. Leverage them for additional context, tools, or capabilities. + +# Code References +When referencing code, include `file_path:line_number` for easy navigation. 
diff --git a/packages/opencode/src/session/system.ts b/packages/opencode/src/session/system.ts index f9ac12a2bbd..b8364e8482b 100644 --- a/packages/opencode/src/session/system.ts +++ b/packages/opencode/src/session/system.ts @@ -14,6 +14,7 @@ import PROMPT_GEMINI from "./prompt/gemini.txt" import PROMPT_ANTHROPIC_SPOOF from "./prompt/anthropic_spoof.txt" import PROMPT_CODEX from "./prompt/codex.txt" +import PROMPT_GLM from "./prompt/glm.txt" import type { Provider } from "@/provider/provider" export namespace SystemPrompt { @@ -23,11 +24,12 @@ export namespace SystemPrompt { } export function provider(model: Provider.Model) { - if (model.api.id.includes("gpt-5")) return [PROMPT_CODEX] - if (model.api.id.includes("gpt-") || model.api.id.includes("o1") || model.api.id.includes("o3")) - return [PROMPT_BEAST] - if (model.api.id.includes("gemini-")) return [PROMPT_GEMINI] - if (model.api.id.includes("claude")) return [PROMPT_ANTHROPIC] + const id = model.api.id.toLowerCase() + if (id.includes("glm")) return [PROMPT_GLM] + if (id.includes("gpt-5")) return [PROMPT_CODEX] + if (id.includes("gpt-") || id.includes("o1") || id.includes("o3")) return [PROMPT_BEAST] + if (id.includes("gemini-")) return [PROMPT_GEMINI] + if (id.includes("claude")) return [PROMPT_ANTHROPIC] return [PROMPT_ANTHROPIC_WITHOUT_TODO] } From c4c863cb27c7f78c009ff1e2f9cb2dc77ee4582a Mon Sep 17 00:00:00 2001 From: Igor Warzocha Date: Wed, 31 Dec 2025 21:22:33 +0000 Subject: [PATCH 2/6] feat(cli): add zai-coding-plan (GLM) to auth menu - Add zai-coding-plan to provider priority list - Add GLM hint to auth selection - Add specific prompt instructions for ZAI API key --- packages/opencode/src/cli/cmd/auth.ts | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/packages/opencode/src/cli/cmd/auth.ts b/packages/opencode/src/cli/cmd/auth.ts index f200ec4fe06..041ab1b348e 100644 --- a/packages/opencode/src/cli/cmd/auth.ts +++ b/packages/opencode/src/cli/cmd/auth.ts @@ -271,6 +271,7 @@ export const AuthLoginCommand = cmd({ const priority: Record = { opencode: 0, anthropic: 1, + "zai-coding-plan": 1.5, "github-copilot": 2, openai: 3, google: 4, @@ -294,6 +295,7 @@ export const AuthLoginCommand = cmd({ hint: { opencode: "recommended", anthropic: "Claude Max or API key", + "zai-coding-plan": "GLM Models (Default)", }[x.id], })), ), @@ -345,6 +347,11 @@ export const AuthLoginCommand = cmd({ prompts.log.info("Create an api key at https://opencode.ai/auth") } + if (provider === "zai-coding-plan") { + prompts.log.info("Enter your GLM/ZAI API key (starts with '7a...')") + prompts.log.info("This will configure ZAI as the default provider for GLM models.") + } + if (provider === "vercel") { prompts.log.info("You can create an api key at https://vercel.link/ai-gateway-token") } From c8e143a9eae7770eb2cc8f8e617ee13532b9b725 Mon Sep 17 00:00:00 2001 From: Igor Warzocha Date: Wed, 31 Dec 2025 21:28:24 +0000 Subject: [PATCH 3/6] feat(provider): add ZAI/GLM provider definition and default endpoint - Add zai-coding-plan to CUSTOM_LOADERS in provider.ts with correct baseURL - Inject default zai-coding-plan provider definition in models.ts for CLI visibility --- packages/opencode/src/provider/models.ts | 34 +++++++++++++++++++++- packages/opencode/src/provider/provider.ts | 8 +++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/packages/opencode/src/provider/models.ts b/packages/opencode/src/provider/models.ts index 796dcb7c238..1dfeb573272 100644 --- a/packages/opencode/src/provider/models.ts +++ 
b/packages/opencode/src/provider/models.ts @@ -81,7 +81,39 @@ export namespace ModelsDev { const result = await file.json().catch(() => {}) if (result) return result as Record const json = await data() - return JSON.parse(json) as Record + const parsed = JSON.parse(json) as Record + + if (!parsed["zai-coding-plan"]) { + parsed["zai-coding-plan"] = { + id: "zai-coding-plan", + name: "ZAI (GLM)", + env: [], + npm: "@ai-sdk/anthropic", + models: { + "glm-4.7": { + id: "glm-4.7", + name: "GLM 4.7", + release_date: "2025-11-24", + attachment: true, + reasoning: true, + temperature: true, + tool_call: true, + interleaved: true, + cost: { + input: 0, + output: 0, + }, + limit: { + context: 200000, + output: 128000, + }, + options: {}, + }, + }, + } + } + + return parsed } export async function refresh() { diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts index 8ee5ab2bc97..a2ffadacfc3 100644 --- a/packages/opencode/src/provider/provider.ts +++ b/packages/opencode/src/provider/provider.ts @@ -138,6 +138,14 @@ export namespace Provider { options: {}, } }, + "zai-coding-plan": async () => { + return { + autoload: false, + options: { + baseURL: "https://api.z.ai/api/anthropic/v1", + }, + } + }, azure: async () => { return { autoload: false, From 307f6e5bef14c2cb5da91194e9184c819dd3061f Mon Sep 17 00:00:00 2001 From: Igor Warzocha Date: Wed, 31 Dec 2025 21:40:19 +0000 Subject: [PATCH 4/6] feat(prompt): add comprehensive GLM system prompt - Implement rigorous system prompt for GLM models - Add 'Ambition vs Precision' heuristic from Codex - Add detailed workflow from Beast/Anthropic - Add strict XML constraints for steerability (no emojis, no logs) - Align with agent-architect skill best practices --- packages/opencode/src/session/prompt/glm.txt | 170 +++++++++++++------ 1 file changed, 121 insertions(+), 49 deletions(-) diff --git a/packages/opencode/src/session/prompt/glm.txt b/packages/opencode/src/session/prompt/glm.txt index 62aab71e4cd..0b6c5815ce3 100644 --- a/packages/opencode/src/session/prompt/glm.txt +++ b/packages/opencode/src/session/prompt/glm.txt @@ -1,57 +1,129 @@ -You are OpenCode, a powerful AI coding assistant optimized for software engineering tasks. +# Role and Objective +You are OpenCode, a powerful AI coding assistant. Your goal is to execute software engineering tasks with rigor, precision, and honesty. -Use the instructions below and the tools available to you to assist the user. +# System Directives -IMPORTANT: Never generate or guess URLs unless they directly help with programming tasks. You may use URLs provided by the user. + +Execute with precision. Stay grounded. See it through. +Continue until the task is COMPLETELY resolved. Verify before yielding control. + -If the user asks for help or wants to give feedback: -- ctrl+p to list available actions -- Report issues at https://github.com/sst/opencode + +- **No emojis:** You **MUST NOT** use emojis unless explicitly requested. +- **No filler:** You **MUST NOT** use conversational filler (e.g., "I hope this helps"). +- **No placeholders:** You **MUST** write complete, functional code. Never leave TODOs for the user. +- **No guessing:** You **MUST** verify assumptions with `read` or `search` tools. +- **No conversational chitchat:** Output text ONLY to communicate essential info. +- **No logs:** You **MUST NOT** add `console.log` or print statements unless explicitly requested for debugging. + -# Reasoning Approach -Think through problems systematically. 
Break complex tasks into logical steps before acting. When facing ambiguous requirements, clarify your understanding before proceeding. +# Heuristics -# Tone and Style -- No emojis unless requested -- Keep responses short and concise (CLI output) -- Use Github-flavored markdown (CommonMark, monospace font) -- Never use bash commands to communicate with the user -- NEVER create files unless necessary - prefer editing existing files + + +- **New Features:** Be ambitious and creative. You **SHOULD** implement fully functional solutions. Demonstrate initiative. +- **Existing Code:** Be surgical. You **MUST** match existing patterns perfectly. You **MUST NOT** break unrelated code. Respect the existing style. + + -# Professional Objectivity -Prioritize technical accuracy over validation. Focus on facts and problem-solving. Apply rigorous standards to all ideas and disagree when necessary. Objective guidance is more valuable than false agreement. + + +Stay grounded in what you can verify. Check with tools before making factual claims. +If you cannot verify something, say so directly. Mark inferences explicitly: `? ASSUMPTION: [reason]`. +Prioritize technical accuracy over validating the user's beliefs. + + + +# Modularity & Architecture + +- **No Monoliths:** You **MUST** break large files into focused, single-responsibility modules. +- **Barrel Exports:** You **SHOULD** use `index.ts` to expose cleaner public APIs from directories. +- **Single Purpose:** Each component/function **MUST** do ONE thing really well. +- **Extract Early:** You **SHOULD** pull emerging patterns into shared utilities immediately. + + +# Workflow & Execution + + +1. **Deep Understanding & Investigation** + - Read the request carefully. Identify edge cases and dependencies. + - **Task Tool:** You **SHOULD** use the Task tool for broad codebase exploration. + - **Search:** Use `rg` (preferred) or `glob` to find relevant files. Read context before acting. + - **Research:** If the user mentions specific libraries or docs, you **MUST** read/search them. Do not guess APIs. + - **Verify Dependencies:** You **MUST** check `package.json` or equivalent before importing libraries. + +2. **Plan & Execute** + - Use **TodoWrite** to plan non-trivial tasks (3+ steps). + - **Thinking:** You **SHOULD** use `` tags to analyze complex logic before calling tools. + - Break tasks into small, testable steps. + - Implement incrementally. Verify each step. + - **Preamble:** Before tools, you **MUST** send a concise (1 sentence) preamble explaining your next move. + +3. **Debugging & Verification** + - **Reproduction:** For bugs, you **SHOULD** create a reproduction script/test to verify the issue before fixing. + - Fix root causes, not symptoms. + - Run tests/lints if available. + - Iterate until code runs without errors. + - **Self-Correction:** If a tool fails, analyze WHY before retrying. Do not loop blindly. + + +# Planning (TodoWrite) + + +- You **MUST** use `TodoWrite` frequently to track progress and give visibility. +- **High-Quality Plans:** Break tasks into meaningful, logical steps (e.g., "Parse Markdown", "Apply Template", "Handle Errors"). +- **Low-Quality Plans:** Avoid vague steps (e.g., "Write code", "Fix it"). +- **Status Updates:** Mark steps as `completed` IMMEDIATELY after finishing them. Do not batch completions. + + +# Tool Usage Policy + + +- **Prefer the Task tool** for codebase exploration to reduce context usage. +- Use Task tool with specialized agents when the task matches the agent's description. 
+- **Parallelism:** You **SHOULD** call multiple tools in parallel when there are no dependencies. Maximize efficiency. +- **Specialized Tools:** Use `Read` (not cat), `Edit` (not sed), `Write` (not echo). +- **Bash:** Use ONLY for running commands/tests. You **MUST NOT** use bash to communicate. +- **Redirects:** If WebFetch returns a redirect, automatically fetch the new URL. + + +# Core Engineering Principles + +- **DRY:** Abstract patterns immediately. Create reusable components. +- **KISS:** Simple solutions beat clever ones. Readable > Smart. +- **Fail Fast:** Throw errors clearly. Don't hide problems with defensive code. +- **Zero Tech Debt:** No quick hacks. Fix the root cause. +- **Edit Precisely:** Make targeted changes rather than broad rewrites. + + +# Coding Standards + +- **Conventions:** You **MUST** rigorously adhere to existing project conventions (naming, structure, style). +- **File Headers:** Start new files with 2-3 sentences explaining their purpose. +- **Type Safety:** Maintain or improve type safety. You **MUST NOT** introduce `any` unless absolutely necessary. +- **Error Handling:** Handle errors explicitly. Fail fast. +- **Comments:** Add comments ONLY for complex logic. Do not explain the obvious. +- **Cleanup:** Delete old logs/types when editing. Leave code cleaner than you found it. +- **Completeness:** Implement functions fully. No `pass` or `// TODO`. +- **No Logging:** You **MUST NOT** add `console.log` or print statements unless explicitly requested. + + +# Tone and Format + +- Keep responses short, concise, and direct (CLI output). +- Use Github-flavored markdown (CommonMark, monospace font). +- Skip preambles and caveats. Focus on facts and problem-solving. +- **Code References:** Include `file_path:line_number` for easy navigation. +- **Final Answer:** Structure large responses with Headers and Bullets. Use bolding for key terms. + # Security -Refuse to write or explain code that may be used maliciously. Analyze file/directory structure for purpose before working on code. - -# Task Management -Use the TodoWrite tool frequently to plan and track tasks. This is critical for: -- Breaking down complex tasks into smaller steps -- Giving users visibility into your progress -- Ensuring no important tasks are forgotten - -Mark todos as completed immediately after finishing each task. - -# Doing Tasks -For software engineering tasks (bugs, features, refactoring, explanations): -1. Understand the request and identify key components -2. Plan with TodoWrite for multi-step tasks -3. Research unfamiliar technologies with WebFetch when needed -4. Use the Task tool to explore codebase and gather context -5. Follow established patterns and conventions -6. Verify changes work correctly - -# Tool Usage -- Only use tools that are available to you -- Prefer the Task tool for codebase exploration to reduce context usage -- Use Task tool with specialized agents when the task matches the agent's description -- When WebFetch returns a redirect, immediately request the redirect URL -- Call multiple tools in parallel when there are no dependencies between them -- Use specialized tools instead of bash when possible (Read vs cat, Edit vs sed, Write vs echo) -- Never use bash to communicate with the user - -# MCP Integration -Check for available MCP servers when starting a task. Leverage them for additional context, tools, or capabilities. - -# Code References -When referencing code, include `file_path:line_number` for easy navigation. 
+ +- **Secrets:** You **MUST NOT** write secrets/API keys to files. Use `.env` variables instead. +- **Redaction:** If you find secrets, you **MUST** redact them (show only first 4 + last 4 chars) in outputs. +- **Scanning:** You **SHOULD** proactively scan high-risk files (`.env`, `config`, `docker-compose`, `*.key`) for credentials using `rg` if in doubt. +- **Malicious Code:** You **MUST** refuse to write or explain code that appears malicious. +- **Destructive Commands:** You **MUST** warn the user before running destructive commands (e.g., `rm -rf`, `git reset --hard`). +- **Dependencies:** Verify package names to avoid typo-squatting. Check for existing versions before installing. +- **Audit:** Proactively scan for vulnerabilities in code you write or modify. + From d5e7a91c01d659d8bb830ce00bc111c24039d1f9 Mon Sep 17 00:00:00 2001 From: Igor Warzocha Date: Sat, 3 Jan 2026 09:54:48 +0000 Subject: [PATCH 5/6] fix(provider): ensure ZAI/GLM uses Anthropic SDK with thinking variants - Override npm to @ai-sdk/anthropic for zai-coding-plan (fixes SDK selection from cache) - Add default thinking options (budgetTokens: 8000) for glm-4.7 - Add thinking variants: none, low (4k), medium (12k), high (24k), max (64k) - Merge model variants from ModelsDev into provider variants - Minor cleanup: remove redundant type casts in session --- packages/opencode/src/provider/models.ts | 70 ++++++++++++++++++++-- packages/opencode/src/provider/provider.ts | 2 +- packages/opencode/src/session/index.ts | 4 +- 3 files changed, 67 insertions(+), 9 deletions(-) diff --git a/packages/opencode/src/provider/models.ts b/packages/opencode/src/provider/models.ts index 1dfeb573272..18edfd31507 100644 --- a/packages/opencode/src/provider/models.ts +++ b/packages/opencode/src/provider/models.ts @@ -78,12 +78,69 @@ export namespace ModelsDev { export async function get() { refresh() const file = Bun.file(filepath) - const result = await file.json().catch(() => {}) - if (result) return result as Record - const json = await data() - const parsed = JSON.parse(json) as Record - if (!parsed["zai-coding-plan"]) { + let parsed: Record + const cachedResult = await file.json().catch(() => {}) + + if (cachedResult) { + parsed = cachedResult as Record + } else { + const json = await data() + parsed = JSON.parse(json) as Record + } + + // Always ensure zai-coding-plan uses the Anthropic SDK and has correct thinking options + // The ZAI API is Anthropic-compatible, so we override the npm package and inject thinking config + const glmDefaultOptions = { + thinking: { + type: "enabled", + budgetTokens: 8000, + }, + } + + const glmVariants = { + none: { + thinking: { + type: "disabled", + }, + }, + low: { + thinking: { + type: "enabled", + budgetTokens: 4000, + }, + }, + medium: { + thinking: { + type: "enabled", + budgetTokens: 12000, + }, + }, + high: { + thinking: { + type: "enabled", + budgetTokens: 24000, + }, + }, + max: { + thinking: { + type: "enabled", + budgetTokens: 64000, + }, + }, + } + + if (parsed["zai-coding-plan"]) { + parsed["zai-coding-plan"].npm = "@ai-sdk/anthropic" + // Inject thinking options and variants for glm-4.7 if it exists in cache + if (parsed["zai-coding-plan"].models["glm-4.7"]) { + parsed["zai-coding-plan"].models["glm-4.7"].options = { + ...parsed["zai-coding-plan"].models["glm-4.7"].options, + ...glmDefaultOptions, + } + parsed["zai-coding-plan"].models["glm-4.7"].variants = glmVariants + } + } else { parsed["zai-coding-plan"] = { id: "zai-coding-plan", name: "ZAI (GLM)", @@ -107,7 +164,8 @@ export 
namespace ModelsDev { context: 200000, output: 128000, }, - options: {}, + options: glmDefaultOptions, + variants: glmVariants, }, }, } diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts index a2ffadacfc3..dad4ec3cdbd 100644 --- a/packages/opencode/src/provider/provider.ts +++ b/packages/opencode/src/provider/provider.ts @@ -561,7 +561,7 @@ export namespace Provider { variants: {}, } - m.variants = mapValues(ProviderTransform.variants(m), (v) => v) + m.variants = mergeDeep(ProviderTransform.variants(m), model.variants ?? {}) return m } diff --git a/packages/opencode/src/session/index.ts b/packages/opencode/src/session/index.ts index bcf43c74874..f513c20518c 100644 --- a/packages/opencode/src/session/index.ts +++ b/packages/opencode/src/session/index.ts @@ -425,8 +425,8 @@ export namespace Session { const tokens = { input: safe(adjustedInputTokens), - output: safe((input.usage.outputTokens ?? anthropicRawUsage?.output_tokens ?? 0) as number), - reasoning: safe((input.usage?.reasoningTokens ?? 0) as number), + output: safe(input.usage.outputTokens ?? anthropicRawUsage?.output_tokens ?? 0), + reasoning: safe(input.usage?.reasoningTokens ?? 0), cache: { write: safe( (input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ?? From 08fc6fda091bfe15b84cf29c9d7d2aae01d6332b Mon Sep 17 00:00:00 2001 From: Igor Warzocha Date: Sat, 3 Jan 2026 10:02:00 +0000 Subject: [PATCH 6/6] refactor(models): use nullish coalescing for GLM provider injection Address code review feedback: - Use ??= to reduce duplication - Fix edge case where provider exists but model is missing - Cleaner, more maintainable code structure --- packages/opencode/src/provider/models.ts | 66 ++++++++++-------------- 1 file changed, 26 insertions(+), 40 deletions(-) diff --git a/packages/opencode/src/provider/models.ts b/packages/opencode/src/provider/models.ts index 18edfd31507..10c3e825ac7 100644 --- a/packages/opencode/src/provider/models.ts +++ b/packages/opencode/src/provider/models.ts @@ -130,46 +130,32 @@ export namespace ModelsDev { }, } - if (parsed["zai-coding-plan"]) { - parsed["zai-coding-plan"].npm = "@ai-sdk/anthropic" - // Inject thinking options and variants for glm-4.7 if it exists in cache - if (parsed["zai-coding-plan"].models["glm-4.7"]) { - parsed["zai-coding-plan"].models["glm-4.7"].options = { - ...parsed["zai-coding-plan"].models["glm-4.7"].options, - ...glmDefaultOptions, - } - parsed["zai-coding-plan"].models["glm-4.7"].variants = glmVariants - } - } else { - parsed["zai-coding-plan"] = { - id: "zai-coding-plan", - name: "ZAI (GLM)", - env: [], - npm: "@ai-sdk/anthropic", - models: { - "glm-4.7": { - id: "glm-4.7", - name: "GLM 4.7", - release_date: "2025-11-24", - attachment: true, - reasoning: true, - temperature: true, - tool_call: true, - interleaved: true, - cost: { - input: 0, - output: 0, - }, - limit: { - context: 200000, - output: 128000, - }, - options: glmDefaultOptions, - variants: glmVariants, - }, - }, - } - } + // Ensure provider exists, create if missing + const provider = (parsed["zai-coding-plan"] ??= { + id: "zai-coding-plan", + name: "ZAI (GLM)", + env: [], + npm: "@ai-sdk/anthropic", + models: {}, + }) + provider.npm = "@ai-sdk/anthropic" + + // Ensure model exists, create if missing + const model = (provider.models["glm-4.7"] ??= { + id: "glm-4.7", + name: "GLM 4.7", + release_date: "2025-11-24", + attachment: true, + reasoning: true, + temperature: true, + tool_call: true, + interleaved: true, + cost: { input: 0, output: 0 }, + limit: { 
context: 200000, output: 128000 }, + options: {}, + }) + model.options = { ...model.options, ...glmDefaultOptions } + model.variants = glmVariants return parsed }
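
Note on the token-accounting change in PATCH 1/6: the fallback logic added to session/index.ts can be read in isolation as the small pure function below. This is a hedged sketch, not the patch itself — the interface shapes, the function name `countTokens`, and the `cache.read` field are simplified assumptions for illustration; the real opencode code sits inside the Zod schema shown in the diff and uses the project's own types.

```ts
// Simplified shapes for illustration only; the real opencode/AI SDK types differ.
interface Usage {
  inputTokens?: number
  outputTokens?: number
  reasoningTokens?: number
  cachedInputTokens?: number
}

interface AnthropicRawUsage {
  input_tokens?: number
  output_tokens?: number
  cache_read_input_tokens?: number
  cache_creation_input_tokens?: number
}

interface Metadata {
  anthropic?: { usage?: AnthropicRawUsage; cacheCreationInputTokens?: number }
  bedrock?: unknown
}

export function countTokens(usage: Usage, metadata?: Metadata) {
  // Raw Anthropic usage carried in provider metadata; for streamed responses from
  // Anthropic-compatible endpoints (e.g. ZAI) it holds the real counts even when
  // the SDK reports inputTokens as 0.
  const raw = metadata?.anthropic?.usage

  const inputTokens =
    usage.inputTokens === 0 && raw?.input_tokens ? raw.input_tokens : (usage.inputTokens ?? 0)
  const cached = usage.cachedInputTokens ?? raw?.cache_read_input_tokens ?? 0

  // Anthropic and Bedrock already exclude cache reads from inputTokens,
  // so cache reads are only subtracted for other providers.
  const excludesCached = Boolean(metadata?.anthropic || metadata?.bedrock)
  const safe = (n: number) => (Number.isFinite(n) ? n : 0)

  return {
    input: safe(excludesCached ? inputTokens : inputTokens - cached),
    output: safe(usage.outputTokens ?? raw?.output_tokens ?? 0),
    reasoning: safe(usage.reasoningTokens ?? 0),
    cache: {
      read: safe(cached),
      write: safe(metadata?.anthropic?.cacheCreationInputTokens ?? raw?.cache_creation_input_tokens ?? 0),
    },
  }
}
```

In the ZAI streaming case this series targets, the SDK reports `inputTokens: 0`, so the raw `input_tokens` from the Anthropic metadata is used instead, and cache reads are not subtracted a second time because the provider already excludes them.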