diff --git a/packages/opencode/src/cli/cmd/tui/component/dialog-agent.tsx b/packages/opencode/src/cli/cmd/tui/component/dialog-agent.tsx
index 365a22445b4..facceb25364 100644
--- a/packages/opencode/src/cli/cmd/tui/component/dialog-agent.tsx
+++ b/packages/opencode/src/cli/cmd/tui/component/dialog-agent.tsx
@@ -20,7 +20,7 @@ export function DialogAgent() {
   return (
     … {
       local.agent.set(option.value)
diff --git a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx
index 96b9e8ffd57..620cce50574 100644
--- a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx
+++ b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx
@@ -531,7 +531,7 @@ export function Prompt(props: PromptProps) {
       if (store.mode === "shell") {
         sdk.client.session.shell({
           sessionID,
-          agent: local.agent.current().name,
+          agent: local.agent.current()?.name ?? "build",
           model: {
             providerID: selectedModel.providerID,
             modelID: selectedModel.modelID,
@@ -552,7 +552,7 @@ export function Prompt(props: PromptProps) {
           sessionID,
           command: command.slice(1),
           arguments: args.join(" "),
-          agent: local.agent.current().name,
+          agent: local.agent.current()?.name ?? "build",
           model: `${selectedModel.providerID}/${selectedModel.modelID}`,
           messageID,
           variant,
@@ -569,7 +569,7 @@ export function Prompt(props: PromptProps) {
         sessionID,
         ...selectedModel,
         messageID,
-        agent: local.agent.current().name,
+        agent: local.agent.current()?.name ?? "build",
         model: selectedModel,
         variant,
         parts: [
@@ -690,7 +690,7 @@ export function Prompt(props: PromptProps) {
   const highlight = createMemo(() => {
     if (keybind.leader) return theme.border
     if (store.mode === "shell") return theme.primary
-    return local.agent.color(local.agent.current().name)
+    return local.agent.color(local.agent.current()?.name ?? "build")
   })
 
   const showVariant = createMemo(() => {
@@ -701,7 +701,7 @@ export function Prompt(props: PromptProps) {
   })
 
   const spinnerDef = createMemo(() => {
-    const color = local.agent.color(local.agent.current().name)
+    const color = local.agent.color(local.agent.current()?.name ?? "build")
     return {
       frames: createFrames({
         color,
@@ -935,7 +935,7 @@ export function Prompt(props: PromptProps) {
           />
-          {store.mode === "shell" ? "Shell" : Locale.titlecase(local.agent.current().name)}{" "}
+          {store.mode === "shell" ? "Shell" : Locale.titlecase(local.agent.current()?.name ?? "build")}{" "}
diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts
index 5b6178bc01b..d36af37f7d1 100644
--- a/packages/opencode/src/session/llm.ts
+++ b/packages/opencode/src/session/llm.ts
@@ -10,6 +10,8 @@ import {
   type Tool,
   type ToolSet,
   extractReasoningMiddleware,
+  tool,
+  jsonSchema,
 } from "ai"
 import { clone, mergeDeep, pipe } from "remeda"
 import { ProviderTransform } from "@/provider/transform"
@@ -140,6 +142,26 @@ export namespace LLM {
 
     const tools = await resolveTools(input)
 
+    // LiteLLM and some Anthropic proxies require the tools parameter to be present
+    // when message history contains tool calls, even if no tools are being used.
+    // Add a dummy tool that is never called to satisfy this validation.
+    // This is enabled for:
+    // 1. Providers with "litellm" in their ID or API ID (auto-detected)
+    // 2. Providers with explicit "litellmProxy: true" option (opt-in for custom gateways)
+    const isLiteLLMProxy =
+      provider.options?.["litellmProxy"] === true ||
+      input.model.providerID.toLowerCase().includes("litellm") ||
+      input.model.api.id.toLowerCase().includes("litellm")
+
+    if (isLiteLLMProxy && Object.keys(tools).length === 0 && hasToolCalls(input.messages)) {
+      tools["_noop"] = tool({
+        description:
+          "Placeholder for LiteLLM/Anthropic proxy compatibility - required when message history contains tool calls but no active tools are needed",
+        inputSchema: jsonSchema({ type: "object", properties: {} }),
+        execute: async () => ({ output: "", title: "", metadata: {} }),
+      })
+    }
+
     return streamText({
       onError(error) {
         l.error("stream error", {
@@ -171,7 +193,7 @@ export namespace LLM {
       topP: params.topP,
       topK: params.topK,
       providerOptions: ProviderTransform.providerOptions(input.model, params.options),
-      activeTools: Object.keys(tools).filter((x) => x !== "invalid"),
+      activeTools: Object.keys(tools).filter((x) => x !== "invalid" && x !== "_noop"),
       tools,
       maxOutputTokens,
       abortSignal: input.abort,
@@ -238,4 +260,16 @@ export namespace LLM {
     }
     return input.tools
   }
+
+  // Check if messages contain any tool-call content
+  // Used to determine if a dummy tool should be added for LiteLLM proxy compatibility
+  export function hasToolCalls(messages: ModelMessage[]): boolean {
+    for (const msg of messages) {
+      if (!Array.isArray(msg.content)) continue
+      for (const part of msg.content) {
+        if (part.type === "tool-call" || part.type === "tool-result") return true
+      }
+    }
+    return false
+  }
 }
diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts
index c1d4015f6d3..7a55599fd55 100644
--- a/packages/opencode/src/session/message-v2.ts
+++ b/packages/opencode/src/session/message-v2.ts
@@ -533,6 +533,17 @@ export namespace MessageV2 {
             errorText: part.state.error,
             callProviderMetadata: part.metadata,
           })
+        // Handle pending/running tool calls to prevent dangling tool_use blocks
+        // Anthropic/Claude APIs require every tool_use to have a corresponding tool_result
+        if (part.state.status === "pending" || part.state.status === "running")
+          assistantMessage.parts.push({
+            type: ("tool-" + part.tool) as `tool-${string}`,
+            state: "output-error",
+            toolCallId: part.callID,
+            input: part.state.input,
+            errorText: "[Tool execution was interrupted]",
+            callProviderMetadata: part.metadata,
+          })
       }
       if (part.type === "reasoning") {
         assistantMessage.parts.push({
diff --git a/packages/opencode/test/session/llm.test.ts b/packages/opencode/test/session/llm.test.ts
new file mode 100644
index 00000000000..779cbc48f7d
--- /dev/null
+++ b/packages/opencode/test/session/llm.test.ts
@@ -0,0 +1,90 @@
+import { describe, expect, test } from "bun:test"
+import { LLM } from "../../src/session/llm"
+import type { ModelMessage } from "ai"
+
+describe("session.llm.hasToolCalls", () => {
+  test("returns false for empty messages array", () => {
+    expect(LLM.hasToolCalls([])).toBe(false)
+  })
+
+  test("returns false for messages with only text content", () => {
+    const messages: ModelMessage[] = [
+      {
+        role: "user",
+        content: [{ type: "text", text: "Hello" }],
+      },
+      {
+        role: "assistant",
+        content: [{ type: "text", text: "Hi there" }],
+      },
+    ]
+    expect(LLM.hasToolCalls(messages)).toBe(false)
+  })
+
+  test("returns true when messages contain tool-call", () => {
+    const messages = [
+      {
+        role: "user",
+        content: [{ type: "text", text: "Run a command" }],
+      },
+      {
+        role: "assistant",
+        content: [
+          {
+            type: "tool-call",
+            toolCallId: "call-123",
+            toolName: "bash",
+          },
+        ],
+      },
+    ] as ModelMessage[]
+    expect(LLM.hasToolCalls(messages)).toBe(true)
+  })
+
+  test("returns true when messages contain tool-result", () => {
+    const messages = [
+      {
+        role: "tool",
+        content: [
+          {
+            type: "tool-result",
+            toolCallId: "call-123",
+            toolName: "bash",
+          },
+        ],
+      },
+    ] as ModelMessage[]
+    expect(LLM.hasToolCalls(messages)).toBe(true)
+  })
+
+  test("returns false for messages with string content", () => {
+    const messages: ModelMessage[] = [
+      {
+        role: "user",
+        content: "Hello world",
+      },
+      {
+        role: "assistant",
+        content: "Hi there",
+      },
+    ]
+    expect(LLM.hasToolCalls(messages)).toBe(false)
+  })
+
+  test("returns true when tool-call is mixed with text content", () => {
+    const messages = [
+      {
+        role: "assistant",
+        content: [
+          { type: "text", text: "Let me run that command" },
+          {
+            type: "tool-call",
+            toolCallId: "call-456",
+            toolName: "read",
+          },
+        ],
+      },
+    ] as ModelMessage[]
+    expect(LLM.hasToolCalls(messages)).toBe(true)
+  })
+})
diff --git a/packages/opencode/test/session/message-v2.test.ts b/packages/opencode/test/session/message-v2.test.ts
index 071da270c9c..f069f6ba68a 100644
--- a/packages/opencode/test/session/message-v2.test.ts
+++ b/packages/opencode/test/session/message-v2.test.ts
@@ -569,4 +569,94 @@ describe("session.message-v2.toModelMessage", () => {
 
     expect(MessageV2.toModelMessage(input)).toStrictEqual([])
   })
+
+  test("converts pending/running tool calls to error results to prevent dangling tool_use", () => {
+    const userID = "m-user"
+    const assistantID = "m-assistant"
+
+    const input: MessageV2.WithParts[] = [
+      {
+        info: userInfo(userID),
+        parts: [
+          {
+            ...basePart(userID, "u1"),
+            type: "text",
+            text: "run tool",
+          },
+        ] as MessageV2.Part[],
+      },
+      {
+        info: assistantInfo(assistantID, userID),
+        parts: [
+          {
+            ...basePart(assistantID, "a1"),
+            type: "tool",
+            callID: "call-pending",
+            tool: "bash",
+            state: {
+              status: "pending",
+              input: { cmd: "ls" },
+              raw: "",
+            },
+          },
+          {
+            ...basePart(assistantID, "a2"),
+            type: "tool",
+            callID: "call-running",
+            tool: "read",
+            state: {
+              status: "running",
+              input: { path: "/tmp" },
+              time: { start: 0 },
+            },
+          },
+        ] as MessageV2.Part[],
+      },
+    ]
+
+    const result = MessageV2.toModelMessage(input)
+
+    expect(result).toStrictEqual([
+      {
+        role: "user",
+        content: [{ type: "text", text: "run tool" }],
+      },
+      {
+        role: "assistant",
+        content: [
+          {
+            type: "tool-call",
+            toolCallId: "call-pending",
+            toolName: "bash",
+            input: { cmd: "ls" },
+            providerExecuted: undefined,
+          },
+          {
+            type: "tool-call",
+            toolCallId: "call-running",
+            toolName: "read",
+            input: { path: "/tmp" },
+            providerExecuted: undefined,
+          },
+        ],
+      },
+      {
+        role: "tool",
+        content: [
+          {
+            type: "tool-result",
+            toolCallId: "call-pending",
+            toolName: "bash",
+            output: { type: "error-text", value: "[Tool execution was interrupted]" },
+          },
+          {
+            type: "tool-result",
+            toolCallId: "call-running",
+            toolName: "read",
+            output: { type: "error-text", value: "[Tool execution was interrupted]" },
+          },
+        ],
+      },
+    ])
+  })
 })
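
Note on the opt-in: the auto-detection in llm.ts only covers providers whose ID or API ID contains "litellm"; a custom gateway in front of Anthropic opts in through the "litellmProxy" provider option that the patch reads via provider.options?.["litellmProxy"]. A minimal sketch of that opt-in in an opencode provider config, assuming the usual provider-options passthrough (the "my-gateway" name and the surrounding file shape are illustrative, not taken from this patch):

    {
      "provider": {
        "my-gateway": {
          "options": {
            "litellmProxy": true
          }
        }
      }
    }

With the flag set, the "_noop" placeholder is injected only when the resolved tool set is empty and the message history already contains tool calls, and it is excluded from activeTools, so it exists purely to satisfy the proxy's request validation.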