diff --git a/.opencode/opencode.jsonc b/.opencode/opencode.jsonc index fe70e35fa76..d5d97f4c9d7 100644 --- a/.opencode/opencode.jsonc +++ b/.opencode/opencode.jsonc @@ -10,4 +10,5 @@ "options": {}, }, }, + "mcp": {}, } diff --git a/packages/desktop/src/context/local.tsx b/packages/desktop/src/context/local.tsx index 39fd1f98744..181a4d24747 100644 --- a/packages/desktop/src/context/local.tsx +++ b/packages/desktop/src/context/local.tsx @@ -78,7 +78,7 @@ export const { use: useLocal, provider: LocalProvider } = createSimpleContext({ }) const agent = (() => { - const list = createMemo(() => sync.data.agent.filter((x) => x.mode !== "subagent")) + const list = createMemo(() => sync.data.agent.filter((x) => x.mode !== "subagent" && !x.hidden)) const [store, setStore] = createStore<{ current: string }>({ diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts index 94127e51ceb..ef007df136a 100644 --- a/packages/opencode/src/agent/agent.ts +++ b/packages/opencode/src/agent/agent.ts @@ -2,18 +2,24 @@ import { Config } from "../config/config" import z from "zod" import { Provider } from "../provider/provider" import { generateObject, type ModelMessage } from "ai" -import PROMPT_GENERATE from "./generate.txt" import { SystemPrompt } from "../session/system" import { Instance } from "../project/instance" import { mergeDeep } from "remeda" +import PROMPT_GENERATE from "./generate.txt" +import PROMPT_COMPACTION from "./prompt/compaction.txt" +import PROMPT_EXPLORE from "./prompt/explore.txt" +import PROMPT_SUMMARY from "./prompt/summary.txt" +import PROMPT_TITLE from "./prompt/title.txt" + export namespace Agent { export const Info = z .object({ name: z.string(), description: z.string().optional(), mode: z.enum(["subagent", "primary", "all"]), - builtIn: z.boolean(), + native: z.boolean().optional(), + hidden: z.boolean().optional(), topP: z.number().optional(), temperature: z.number().optional(), color: z.string().optional(), @@ -112,7 +118,8 @@ export namespace Agent { options: {}, permission: agentPermission, mode: "subagent", - builtIn: true, + native: true, + hidden: true, }, explore: { name: "explore", @@ -124,30 +131,23 @@ export namespace Agent { ...defaultTools, }, description: `Fast agent specialized for exploring codebases. Use this when you need to quickly find files by patterns (eg. "src/components/**/*.tsx"), search code for keywords (eg. "API endpoints"), or answer questions about the codebase (eg. "how do API endpoints work?"). When calling this agent, specify the desired thoroughness level: "quick" for basic searches, "medium" for moderate exploration, or "very thorough" for comprehensive analysis across multiple locations and naming conventions.`, - prompt: [ - `You are a file search specialist. 
You excel at thoroughly navigating and exploring codebases.`, - ``, - `Your strengths:`, - `- Rapidly finding files using glob patterns`, - `- Searching code and text with powerful regex patterns`, - `- Reading and analyzing file contents`, - ``, - `Guidelines:`, - `- Use Glob for broad file pattern matching`, - `- Use Grep for searching file contents with regex`, - `- Use Read when you know the specific file path you need to read`, - `- Use Bash for file operations like copying, moving, or listing directory contents`, - `- Adapt your search approach based on the thoroughness level specified by the caller`, - `- Return file paths as absolute paths in your final response`, - `- For clear communication, avoid using emojis`, - `- Do not create any files, or run bash commands that modify the user's system state in any way`, - ``, - `Complete the user's search request efficiently and report your findings clearly.`, - ].join("\n"), + prompt: PROMPT_EXPLORE, options: {}, permission: agentPermission, mode: "subagent", - builtIn: true, + native: true, + }, + compaction: { + name: "compaction", + mode: "primary", + native: true, + hidden: true, + prompt: PROMPT_COMPACTION, + tools: { + "*": false, + }, + options: {}, + permission: agentPermission, }, build: { name: "build", @@ -155,7 +155,27 @@ export namespace Agent { options: {}, permission: agentPermission, mode: "primary", - builtIn: true, + native: true, + }, + title: { + name: "title", + mode: "primary", + options: {}, + native: true, + hidden: true, + permission: agentPermission, + prompt: PROMPT_TITLE, + tools: {}, + }, + summary: { + name: "summary", + mode: "primary", + options: {}, + native: true, + hidden: true, + permission: agentPermission, + prompt: PROMPT_SUMMARY, + tools: {}, }, plan: { name: "plan", @@ -165,7 +185,7 @@ export namespace Agent { ...defaultTools, }, mode: "primary", - builtIn: true, + native: true, }, } for (const [key, value] of Object.entries(cfg.agent ?? {})) { @@ -181,7 +201,7 @@ export namespace Agent { permission: agentPermission, options: {}, tools: {}, - builtIn: false, + native: false, } const { name, diff --git a/packages/opencode/src/session/prompt/compaction.txt b/packages/opencode/src/agent/prompt/compaction.txt similarity index 100% rename from packages/opencode/src/session/prompt/compaction.txt rename to packages/opencode/src/agent/prompt/compaction.txt diff --git a/packages/opencode/src/agent/prompt/explore.txt b/packages/opencode/src/agent/prompt/explore.txt new file mode 100644 index 00000000000..5761077cbd8 --- /dev/null +++ b/packages/opencode/src/agent/prompt/explore.txt @@ -0,0 +1,18 @@ +You are a file search specialist. You excel at thoroughly navigating and exploring codebases. + +Your strengths: +- Rapidly finding files using glob patterns +- Searching code and text with powerful regex patterns +- Reading and analyzing file contents + +Guidelines: +- Use Glob for broad file pattern matching +- Use Grep for searching file contents with regex +- Use Read when you know the specific file path you need to read +- Use Bash for file operations like copying, moving, or listing directory contents +- Adapt your search approach based on the thoroughness level specified by the caller +- Return file paths as absolute paths in your final response +- For clear communication, avoid using emojis +- Do not create any files, or run bash commands that modify the user's system state in any way + +Complete the user's search request efficiently and report your findings clearly. 
diff --git a/packages/opencode/src/session/prompt/summarize.txt b/packages/opencode/src/agent/prompt/summary.txt similarity index 100% rename from packages/opencode/src/session/prompt/summarize.txt rename to packages/opencode/src/agent/prompt/summary.txt diff --git a/packages/opencode/src/session/prompt/title.txt b/packages/opencode/src/agent/prompt/title.txt similarity index 84% rename from packages/opencode/src/session/prompt/title.txt rename to packages/opencode/src/agent/prompt/title.txt index e297dc460b1..f67aaa95bac 100644 --- a/packages/opencode/src/session/prompt/title.txt +++ b/packages/opencode/src/agent/prompt/title.txt @@ -22,8 +22,8 @@ Your output must be: - The title should NEVER include "summarizing" or "generating" when generating a title - DO NOT SAY YOU CANNOT GENERATE A TITLE OR COMPLAIN ABOUT THE INPUT - Always output something meaningful, even if the input is minimal. -- If the user message is short or conversational (e.g. “hello”, “lol”, “whats up”, “hey”): - → create a title that reflects the user’s tone or intent (such as Greeting, Quick check-in, Light chat, Intro message, etc.) +- If the user message is short or conversational (e.g. "hello", "lol", "whats up", "hey"): + → create a title that reflects the user's tone or intent (such as Greeting, Quick check-in, Light chat, Intro message, etc.) diff --git a/packages/opencode/src/cli/cmd/agent.ts b/packages/opencode/src/cli/cmd/agent.ts index 812e97423a9..2cbcfbfe94b 100644 --- a/packages/opencode/src/cli/cmd/agent.ts +++ b/packages/opencode/src/cli/cmd/agent.ts @@ -227,8 +227,8 @@ const AgentListCommand = cmd({ async fn() { const agents = await Agent.list() const sortedAgents = agents.sort((a, b) => { - if (a.builtIn !== b.builtIn) { - return a.builtIn ? -1 : 1 + if (a.native !== b.native) { + return a.native ? -1 : 1 } return a.name.localeCompare(b.name) }) diff --git a/packages/opencode/src/cli/cmd/tui/component/dialog-agent.tsx b/packages/opencode/src/cli/cmd/tui/component/dialog-agent.tsx index 65aaeb22bf9..365a22445b4 100644 --- a/packages/opencode/src/cli/cmd/tui/component/dialog-agent.tsx +++ b/packages/opencode/src/cli/cmd/tui/component/dialog-agent.tsx @@ -12,7 +12,7 @@ export function DialogAgent() { return { value: item.name, title: item.name, - description: item.builtIn ? "native" : item.description, + description: item.native ? 
"native" : item.description, } }), ) diff --git a/packages/opencode/src/cli/cmd/tui/component/prompt/autocomplete.tsx b/packages/opencode/src/cli/cmd/tui/component/prompt/autocomplete.tsx index c40aa114ac8..37e6ccda5de 100644 --- a/packages/opencode/src/cli/cmd/tui/component/prompt/autocomplete.tsx +++ b/packages/opencode/src/cli/cmd/tui/component/prompt/autocomplete.tsx @@ -184,7 +184,7 @@ export function Autocomplete(props: { const agents = createMemo(() => { const agents = sync.data.agent return agents - .filter((agent) => !agent.builtIn && agent.mode !== "primary") + .filter((agent) => !agent.hidden && agent.mode !== "primary") .map( (agent): AutocompleteOption => ({ display: "@" + agent.name, diff --git a/packages/opencode/src/cli/cmd/tui/context/local.tsx b/packages/opencode/src/cli/cmd/tui/context/local.tsx index 6cc97e04167..f04b79685c1 100644 --- a/packages/opencode/src/cli/cmd/tui/context/local.tsx +++ b/packages/opencode/src/cli/cmd/tui/context/local.tsx @@ -52,7 +52,7 @@ export const { use: useLocal, provider: LocalProvider } = createSimpleContext({ }) const agent = iife(() => { - const agents = createMemo(() => sync.data.agent.filter((x) => x.mode !== "subagent")) + const agents = createMemo(() => sync.data.agent.filter((x) => x.mode !== "subagent" && !x.hidden)) const [agentStore, setAgentStore] = createStore<{ current: string }>({ diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts index 60ce2297b9b..d4755af17a6 100644 --- a/packages/opencode/src/provider/provider.ts +++ b/packages/opencode/src/provider/provider.ts @@ -858,7 +858,7 @@ export namespace Provider { return info } - export async function getLanguage(model: Model) { + export async function getLanguage(model: Model): Promise { const s = await state() const key = `${model.providerID}/${model.id}` if (s.models.has(key)) return s.models.get(key)! diff --git a/packages/opencode/src/session/compaction.ts b/packages/opencode/src/session/compaction.ts index f9d1b1c0476..f8ed149ba40 100644 --- a/packages/opencode/src/session/compaction.ts +++ b/packages/opencode/src/session/compaction.ts @@ -1,22 +1,18 @@ import { BusEvent } from "@/bus/bus-event" import { Bus } from "@/bus" -import { wrapLanguageModel, type ModelMessage } from "ai" import { Session } from "." 
import { Identifier } from "../id/id" import { Instance } from "../project/instance" import { Provider } from "../provider/provider" import { MessageV2 } from "./message-v2" -import { SystemPrompt } from "./system" import z from "zod" import { SessionPrompt } from "./prompt" import { Flag } from "../flag/flag" import { Token } from "../util/token" -import { Config } from "../config/config" import { Log } from "../util/log" -import { ProviderTransform } from "@/provider/transform" import { SessionProcessor } from "./processor" import { fn } from "@/util/fn" -import { mergeDeep, pipe } from "remeda" +import { Agent } from "@/agent/agent" export namespace SessionCompaction { const log = Log.create({ service: "session.compaction" }) @@ -90,24 +86,21 @@ export namespace SessionCompaction { parentID: string messages: MessageV2.WithParts[] sessionID: string - model: { - providerID: string - modelID: string - } - agent: string abort: AbortSignal auto: boolean }) { - const cfg = await Config.get() - const model = await Provider.getModel(input.model.providerID, input.model.modelID) - const language = await Provider.getLanguage(model) - const system = [...SystemPrompt.compaction(model.providerID)] + const userMessage = input.messages.findLast((m) => m.info.id === input.parentID)!.info as MessageV2.User + const agent = await Agent.get("compaction") + const model = agent.model + ? await Provider.getModel(agent.model.providerID, agent.model.modelID) + : await Provider.getModel(userMessage.model.providerID, userMessage.model.modelID) const msg = (await Session.updateMessage({ id: Identifier.ascending("message"), role: "assistant", parentID: input.parentID, sessionID: input.sessionID, - mode: input.agent, + mode: "compaction", + agent: "compaction", summary: true, path: { cwd: Instance.directory, @@ -120,7 +113,7 @@ export namespace SessionCompaction { reasoning: 0, cache: { read: 0, write: 0 }, }, - modelID: input.model.modelID, + modelID: model.id, providerID: model.providerID, time: { created: Date.now(), @@ -129,46 +122,18 @@ export namespace SessionCompaction { const processor = SessionProcessor.create({ assistantMessage: msg, sessionID: input.sessionID, - model: model, + model, abort: input.abort, }) const result = await processor.process({ - onError(error) { - log.error("stream error", { - error, - }) - }, - // set to 0, we handle loop - maxRetries: 0, - providerOptions: ProviderTransform.providerOptions( - model, - pipe({}, mergeDeep(ProviderTransform.options(model, input.sessionID)), mergeDeep(model.options)), - ), - headers: model.headers, - abortSignal: input.abort, - tools: model.capabilities.toolcall ? 
{} : undefined,
+      user: userMessage,
+      agent,
+      abort: input.abort,
+      sessionID: input.sessionID,
+      tools: {},
+      system: [],
       messages: [
-        ...system.map(
-          (x): ModelMessage => ({
-            role: "system",
-            content: x,
-          }),
-        ),
-        ...MessageV2.toModelMessage(
-          input.messages.filter((m) => {
-            if (m.info.role !== "assistant" || m.info.error === undefined) {
-              return true
-            }
-            if (
-              MessageV2.AbortedError.isInstance(m.info.error) &&
-              m.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning")
-            ) {
-              return true
-            }
-
-            return false
-          }),
-        ),
+        ...MessageV2.toModelMessage(input.messages),
         {
           role: "user",
           content: [
@@ -179,28 +144,9 @@
         ],
       },
     ],
-      model: wrapLanguageModel({
-        model: language,
-        middleware: [
-          {
-            async transformParams(args) {
-              if (args.type === "stream") {
-                // @ts-expect-error
-                args.params.prompt = ProviderTransform.message(args.params.prompt, model)
-              }
-              return args.params
-            },
-          },
-        ],
-      }),
-      experimental_telemetry: {
-        isEnabled: cfg.experimental?.openTelemetry,
-        metadata: {
-          userId: cfg.username ?? "unknown",
-          sessionId: input.sessionID,
-        },
-      },
+      model,
     })
+
     if (result === "continue" && input.auto) {
       const continueMsg = await Session.updateMessage({
         id: Identifier.ascending("message"),
@@ -209,8 +155,8 @@
         time: {
           created: Date.now(),
         },
-        agent: input.agent,
-        model: input.model,
+        agent: userMessage.agent,
+        model: userMessage.model,
       })
       await Session.updatePart({
         id: Identifier.ascending("part"),
diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts
new file mode 100644
index 00000000000..97b8aae2bd8
--- /dev/null
+++ b/packages/opencode/src/session/llm.ts
@@ -0,0 +1,184 @@
+import { Provider } from "@/provider/provider"
+import { Log } from "@/util/log"
+import { streamText, wrapLanguageModel, type ModelMessage, type StreamTextResult, type Tool, type ToolSet } from "ai"
+import { mergeDeep, pipe } from "remeda"
+import { ProviderTransform } from "@/provider/transform"
+import { Config } from "@/config/config"
+import { Instance } from "@/project/instance"
+import type { Agent } from "@/agent/agent"
+import type { MessageV2 } from "./message-v2"
+import { Plugin } from "@/plugin"
+import { SystemPrompt } from "./system"
+import { ToolRegistry } from "@/tool/registry"
+import { Flag } from "@/flag/flag"
+
+export namespace LLM {
+  const log = Log.create({ service: "llm" })
+
+  export const OUTPUT_TOKEN_MAX = 32_000
+
+  export type StreamInput = {
+    user: MessageV2.User
+    sessionID: string
+    model: Provider.Model
+    agent: Agent.Info
+    system: string[]
+    abort: AbortSignal
+    messages: ModelMessage[]
+    small?: boolean
+    tools: Record<string, Tool>
+    retries?: number
+  }
+
+  export type StreamOutput = StreamTextResult<ToolSet, never>
+
+  export async function stream(input: StreamInput) {
+    const l = log
+      .clone()
+      .tag("providerID", input.model.providerID)
+      .tag("modelID", input.model.id)
+      .tag("sessionID", input.sessionID)
+      .tag("small", (input.small ?? false).toString())
+      .tag("agent", input.agent.name)
+    l.info("stream", {
+      modelID: input.model.id,
+      providerID: input.model.providerID,
+    })
+    const [language, cfg] = await Promise.all([Provider.getLanguage(input.model), Config.get()])
+
+    const system = SystemPrompt.header(input.model.providerID)
+    system.push(
+      [
+        // use agent prompt otherwise provider prompt
+        ...(input.agent.prompt ? [input.agent.prompt] : SystemPrompt.provider(input.model)),
+        // any custom prompt passed into this call
+        ...input.system,
+        // any custom prompt from last user message
+        ...(input.user.system ? [input.user.system] : []),
+      ]
+        .filter((x) => x)
+        .join("\n"),
+    )
+
+    const params = await Plugin.trigger(
+      "chat.params",
+      {
+        sessionID: input.sessionID,
+        agent: input.agent,
+        model: input.model,
+        provider: Provider.getProvider(input.model.providerID),
+        message: input.user,
+      },
+      {
+        temperature: input.model.capabilities.temperature
+          ? (input.agent.temperature ?? ProviderTransform.temperature(input.model))
+          : undefined,
+        topP: input.agent.topP ?? ProviderTransform.topP(input.model),
+        options: pipe(
+          {},
+          mergeDeep(ProviderTransform.options(input.model, input.sessionID)),
+          input.small ? mergeDeep(ProviderTransform.smallOptions(input.model)) : mergeDeep({}),
+          mergeDeep(input.model.options),
+          mergeDeep(input.agent.options),
+        ),
+      },
+    )
+
+    l.info("params", {
+      params,
+    })
+
+    const maxOutputTokens = ProviderTransform.maxOutputTokens(
+      input.model.api.npm,
+      params.options,
+      input.model.limit.output,
+      OUTPUT_TOKEN_MAX,
+    )
+
+    const tools = await resolveTools(input)
+
+    return streamText({
+      onError(error) {
+        l.error("stream error", {
+          error,
+        })
+      },
+      async experimental_repairToolCall(failed) {
+        const lower = failed.toolCall.toolName.toLowerCase()
+        if (lower !== failed.toolCall.toolName && tools[lower]) {
+          l.info("repairing tool call", {
+            tool: failed.toolCall.toolName,
+            repaired: lower,
+          })
+          return {
+            ...failed.toolCall,
+            toolName: lower,
+          }
+        }
+        return {
+          ...failed.toolCall,
+          input: JSON.stringify({
+            tool: failed.toolCall.toolName,
+            error: failed.error.message,
+          }),
+          toolName: "invalid",
+        }
+      },
+      temperature: params.temperature,
+      topP: params.topP,
+      providerOptions: ProviderTransform.providerOptions(input.model, params.options),
+      activeTools: Object.keys(tools).filter((x) => x !== "invalid"),
+      tools,
+      maxOutputTokens,
+      abortSignal: input.abort,
+      headers: {
+        ...(input.model.providerID.startsWith("opencode")
+          ? {
+              "x-opencode-project": Instance.project.id,
+              "x-opencode-session": input.sessionID,
+              "x-opencode-request": input.user.id,
+              "x-opencode-client": Flag.OPENCODE_CLIENT,
+            }
+          : undefined),
+        ...input.model.headers,
+      },
+      maxRetries: input.retries ?? 0,
+      messages: [
+        ...system.map(
+          (x): ModelMessage => ({
+            role: "system",
+            content: x,
+          }),
+        ),
+        ...input.messages,
+      ],
+      model: wrapLanguageModel({
+        model: language,
+        middleware: [
+          {
+            async transformParams(args) {
+              if (args.type === "stream") {
+                // @ts-expect-error
+                args.params.prompt = ProviderTransform.message(args.params.prompt, input.model)
+              }
+              return args.params
+            },
+          },
+        ],
+      }),
+      experimental_telemetry: { isEnabled: cfg.experimental?.openTelemetry },
+    })
+  }
+
+  async function resolveTools(input: Pick<StreamInput, "agent" | "user" | "tools">) {
+    const enabled = pipe(
+      input.agent.tools,
+      mergeDeep(await ToolRegistry.enabled(input.agent)),
+      mergeDeep(input.user.tools ??
{}), + ) + for (const [key, value] of Object.entries(enabled)) { + if (value === false) delete input.tools[key] + } + return input.tools + } +} diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts index 1f4fffaa66b..76162c79780 100644 --- a/packages/opencode/src/session/message-v2.ts +++ b/packages/opencode/src/session/message-v2.ts @@ -348,7 +348,11 @@ export namespace MessageV2 { parentID: z.string(), modelID: z.string(), providerID: z.string(), + /** + * @deprecated + */ mode: z.string(), + agent: z.string(), path: z.object({ cwd: z.string(), root: z.string(), @@ -412,12 +416,7 @@ export namespace MessageV2 { }) export type WithParts = z.infer - export function toModelMessage( - input: { - info: Info - parts: Part[] - }[], - ): ModelMessage[] { + export function toModelMessage(input: WithParts[]): ModelMessage[] { const result: UIMessage[] = [] for (const msg of input) { @@ -461,6 +460,15 @@ export namespace MessageV2 { } if (msg.info.role === "assistant") { + if ( + msg.info.error && + !( + MessageV2.AbortedError.isInstance(msg.info.error) && + msg.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning") + ) + ) { + continue + } const assistantMessage: UIMessage = { id: msg.info.id, role: "assistant", diff --git a/packages/opencode/src/session/processor.ts b/packages/opencode/src/session/processor.ts index a1244d1df53..1d4d2430311 100644 --- a/packages/opencode/src/session/processor.ts +++ b/packages/opencode/src/session/processor.ts @@ -1,5 +1,4 @@ import { MessageV2 } from "./message-v2" -import { streamText } from "ai" import { Log } from "@/util/log" import { Identifier } from "@/id/id" import { Session } from "." @@ -12,6 +11,7 @@ import { SessionRetry } from "./retry" import { SessionStatus } from "./status" import { Plugin } from "@/plugin" import type { Provider } from "@/provider/provider" +import { LLM } from "./llm" import { Config } from "@/config/config" export namespace SessionProcessor { @@ -21,15 +21,6 @@ export namespace SessionProcessor { export type Info = Awaited> export type Result = Awaited> - export type StreamInput = Parameters[0] - - export type TBD = { - model: { - modelID: string - providerID: string - } - } - export function create(input: { assistantMessage: MessageV2.Assistant sessionID: string @@ -48,14 +39,14 @@ export namespace SessionProcessor { partFromToolCall(toolCallID: string) { return toolcalls[toolCallID] }, - async process(streamInput: StreamInput) { + async process(streamInput: LLM.StreamInput) { log.info("process") const shouldBreak = (await Config.get()).experimental?.continue_loop_on_deny !== true while (true) { try { let currentText: MessageV2.TextPart | undefined let reasoningMap: Record = {} - const stream = streamText(streamInput) + const stream = await LLM.stream(streamInput) for await (const value of stream.fullStream) { input.abort.throwIfAborted() diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts index 51fb49af420..9a36c5c62e3 100644 --- a/packages/opencode/src/session/prompt.ts +++ b/packages/opencode/src/session/prompt.ts @@ -5,32 +5,22 @@ import z from "zod" import { Identifier } from "../id/id" import { MessageV2 } from "./message-v2" import { Log } from "../util/log" -import { Flag } from "../flag/flag" import { SessionRevert } from "./revert" import { Session } from "." 
import { Agent } from "../agent/agent" import { Provider } from "../provider/provider" -import { - generateText, - type ModelMessage, - type Tool as AITool, - tool, - wrapLanguageModel, - stepCountIs, - jsonSchema, -} from "ai" +import { type Tool as AITool, tool, jsonSchema } from "ai" import { SessionCompaction } from "./compaction" import { Instance } from "../project/instance" import { Bus } from "../bus" import { ProviderTransform } from "../provider/transform" import { SystemPrompt } from "./system" import { Plugin } from "../plugin" - import PROMPT_PLAN from "../session/prompt/plan.txt" import BUILD_SWITCH from "../session/prompt/build-switch.txt" import MAX_STEPS from "../session/prompt/max-steps.txt" import { defer } from "../util/defer" -import { clone, mergeDeep, pipe } from "remeda" +import { mergeDeep, pipe } from "remeda" import { ToolRegistry } from "../tool/registry" import { Wildcard } from "../util/wildcard" import { MCP } from "../mcp" @@ -44,12 +34,13 @@ import { Command } from "../command" import { $, fileURLToPath } from "bun" import { ConfigMarkdown } from "../config/markdown" import { SessionSummary } from "./summary" -import { Config } from "../config/config" import { NamedError } from "@opencode-ai/util/error" import { fn } from "@/util/fn" import { SessionProcessor } from "./processor" import { TaskTool } from "@/tool/task" import { SessionStatus } from "./status" +import { LLM } from "./llm" +import { iife } from "@/util/iife" import { Shell } from "@/shell/shell" // @ts-ignore @@ -96,8 +87,8 @@ export namespace SessionPrompt { .optional(), agent: z.string().optional(), noReply: z.boolean().optional(), - system: z.string().optional(), tools: z.record(z.string(), z.boolean()).optional(), + system: z.string().optional(), parts: z.array( z.discriminatedUnion("type", [ MessageV2.TextPart.omit({ @@ -145,6 +136,20 @@ export namespace SessionPrompt { }) export type PromptInput = z.infer + export const prompt = fn(PromptInput, async (input) => { + const session = await Session.get(input.sessionID) + await SessionRevert.cleanup(session) + + const message = await createUserMessage(input) + await Session.touch(input.sessionID) + + if (input.noReply === true) { + return message + } + + return loop(input.sessionID) + }) + export async function resolvePromptParts(template: string): Promise { const parts: PromptInput["parts"] = [ { @@ -196,20 +201,6 @@ export namespace SessionPrompt { return parts } - export const prompt = fn(PromptInput, async (input) => { - const session = await Session.get(input.sessionID) - await SessionRevert.cleanup(session) - - const message = await createUserMessage(input) - await Session.touch(input.sessionID) - - if (input.noReply === true) { - return message - } - - return loop(input.sessionID) - }) - function start(sessionID: string) { const s = state() if (s[sessionID]) return @@ -291,7 +282,6 @@ export namespace SessionPrompt { }) const model = await Provider.getModel(lastUser.model.providerID, lastUser.model.modelID) - const language = await Provider.getLanguage(model) const task = tasks.pop() // pending subtask @@ -304,6 +294,7 @@ export namespace SessionPrompt { parentID: lastUser.id, sessionID, mode: task.agent, + agent: task.agent, path: { cwd: Instance.directory, root: Instance.worktree, @@ -414,11 +405,6 @@ export namespace SessionPrompt { messages: msgs, parentID: lastUser.id, abort, - agent: lastUser.agent, - model: { - providerID: model.providerID, - modelID: model.id, - }, sessionID, auto: task.auto, }) @@ -442,7 +428,6 @@ export 
namespace SessionPrompt { } // normal processing - const cfg = await Config.get() const agent = await Agent.get(lastUser.agent) const maxSteps = agent.maxSteps ?? Infinity const isLastStep = step >= maxSteps @@ -450,12 +435,14 @@ export namespace SessionPrompt { messages: msgs, agent, }) + const processor = SessionProcessor.create({ assistantMessage: (await Session.updateMessage({ id: Identifier.ascending("message"), parentID: lastUser.id, role: "assistant", mode: agent.name, + agent: agent.name, path: { cwd: Instance.directory, root: Instance.worktree, @@ -478,12 +465,6 @@ export namespace SessionPrompt { model, abort, }) - const system = await resolveSystemPrompt({ - model, - agent, - system: lastUser.system, - isLastStep, - }) const tools = await resolveTools({ agent, sessionID, @@ -491,30 +472,6 @@ export namespace SessionPrompt { tools: lastUser.tools, processor, }) - const provider = await Provider.getProvider(model.providerID) - const params = await Plugin.trigger( - "chat.params", - { - sessionID: sessionID, - agent: lastUser.agent, - model: model, - provider, - message: lastUser, - }, - { - temperature: model.capabilities.temperature - ? (agent.temperature ?? ProviderTransform.temperature(model)) - : undefined, - topP: agent.topP ?? ProviderTransform.topP(model), - topK: ProviderTransform.topK(model), - options: pipe( - {}, - mergeDeep(ProviderTransform.options(model, sessionID, provider?.options)), - mergeDeep(model.options), - mergeDeep(agent.options), - ), - }, - ) if (step === 1) { SessionSummary.summarize({ @@ -523,135 +480,25 @@ export namespace SessionPrompt { }) } - // Deep copy message history so that modifications made by plugins do not - // affect the original messages - const sessionMessages = clone( - msgs.filter((m) => { - if (m.info.role !== "assistant" || m.info.error === undefined) { - return true - } - if ( - MessageV2.AbortedError.isInstance(m.info.error) && - m.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning") - ) { - return true - } - return false - }), - ) - - await Plugin.trigger("experimental.chat.messages.transform", {}, { messages: sessionMessages }) - - const messages: ModelMessage[] = [ - ...system.map( - (x): ModelMessage => ({ - role: "system", - content: x, - }), - ), - ...MessageV2.toModelMessage(sessionMessages), - ...(isLastStep - ? [ - { - role: "assistant" as const, - content: MAX_STEPS, - }, - ] - : []), - ] - const result = await processor.process({ - onError(error) { - log.error("stream error", { - error, - }) - }, - async experimental_repairToolCall(input) { - const lower = input.toolCall.toolName.toLowerCase() - if (lower !== input.toolCall.toolName && tools[lower]) { - log.info("repairing tool call", { - tool: input.toolCall.toolName, - repaired: lower, - }) - return { - ...input.toolCall, - toolName: lower, - } - } - return { - ...input.toolCall, - input: JSON.stringify({ - tool: input.toolCall.toolName, - error: input.error.message, - }), - toolName: "invalid", - } - }, - headers: { - ...(model.providerID.startsWith("opencode") - ? 
{ - "x-opencode-project": Instance.project.id, - "x-opencode-session": sessionID, - "x-opencode-request": lastUser.id, - "x-opencode-client": Flag.OPENCODE_CLIENT, - } - : undefined), - ...model.headers, - }, - // set to 0, we handle loop - maxRetries: 0, - activeTools: Object.keys(tools).filter((x) => x !== "invalid"), - maxOutputTokens: ProviderTransform.maxOutputTokens( - model.api.npm, - params.options, - model.limit.output, - OUTPUT_TOKEN_MAX, - ), - abortSignal: abort, - providerOptions: ProviderTransform.providerOptions(model, params.options), - stopWhen: stepCountIs(1), - temperature: params.temperature, - topP: params.topP, - topK: params.topK, - toolChoice: isLastStep ? "none" : undefined, - messages, - tools: model.capabilities.toolcall === false ? undefined : tools, - model: wrapLanguageModel({ - model: language, - middleware: [ - { - async transformParams(args) { - if (args.type === "stream") { - // @ts-expect-error - prompt types are compatible at runtime - args.params.prompt = ProviderTransform.message(args.params.prompt, model) - } - // Transform tool schemas for provider compatibility - if (args.params.tools && Array.isArray(args.params.tools)) { - args.params.tools = args.params.tools.map((tool: any) => { - // Tools at middleware level have inputSchema, not parameters - if (tool.inputSchema && typeof tool.inputSchema === "object") { - // Transform the inputSchema for provider compatibility - return { - ...tool, - inputSchema: ProviderTransform.schema(model, tool.inputSchema), - } - } - // If no inputSchema, return tool unchanged - return tool - }) - } - return args.params - }, - }, - ], - }), - experimental_telemetry: { - isEnabled: cfg.experimental?.openTelemetry, - metadata: { - userId: cfg.username ?? "unknown", - sessionId: sessionID, - }, - }, + user: lastUser, + agent, + abort, + sessionID, + system: [...(await SystemPrompt.environment()), ...(await SystemPrompt.custom())], + messages: [ + ...MessageV2.toModelMessage(msgs), + ...(isLastStep + ? 
[ + { + role: "assistant" as const, + content: MAX_STEPS, + }, + ] + : []), + ], + tools, + model, }) if (result === "stop") break continue @@ -675,33 +522,6 @@ export namespace SessionPrompt { return Provider.defaultModel() } - async function resolveSystemPrompt(input: { - system?: string - agent: Agent.Info - model: Provider.Model - isLastStep?: boolean - }) { - let system = SystemPrompt.header(input.model.providerID) - system.push( - ...(() => { - if (input.system) return [input.system] - if (input.agent.prompt) return [input.agent.prompt] - return SystemPrompt.provider(input.model) - })(), - ) - system.push(...(await SystemPrompt.environment())) - system.push(...(await SystemPrompt.custom())) - - if (input.isLastStep) { - system.push(MAX_STEPS) - } - - // max 2 system prompt messages for caching purposes - const [first, ...rest] = system - system = [first, rest.join("\n")] - return system - } - async function resolveTools(input: { agent: Agent.Info model: Provider.Model @@ -709,6 +529,7 @@ export namespace SessionPrompt { tools?: Record processor: SessionProcessor.Info }) { + using _ = log.time("resolveTools") const tools: Record = {} const enabledTools = pipe( input.agent.tools, @@ -778,7 +599,6 @@ export namespace SessionPrompt { }, }) } - for (const [key, item] of Object.entries(await MCP.tools())) { if (Wildcard.all(key, enabledTools) === false) continue const execute = item.execute @@ -857,7 +677,6 @@ export namespace SessionPrompt { created: Date.now(), }, tools: input.tools, - system: input.system, agent: agent.name, model: input.model ?? agent.model ?? (await lastModel(input.sessionID)), } @@ -1148,7 +967,7 @@ export namespace SessionPrompt { synthetic: true, }) } - const wasPlan = input.messages.some((msg) => msg.info.role === "assistant" && msg.info.mode === "plan") + const wasPlan = input.messages.some((msg) => msg.info.role === "assistant" && msg.info.agent === "plan") if (wasPlan && input.agent.name === "build") { userMessage.parts.push({ id: Identifier.ascending("part"), @@ -1216,6 +1035,7 @@ export namespace SessionPrompt { sessionID: input.sessionID, parentID: userMsg.id, mode: input.agent, + agent: input.agent, cost: 0, path: { cwd: Instance.directory, @@ -1510,28 +1330,24 @@ export namespace SessionPrompt { input.history.filter((m) => m.info.role === "user" && !m.parts.every((p) => "synthetic" in p && p.synthetic)) .length === 1 if (!isFirst) return - const cfg = await Config.get() - const small = - (await Provider.getSmallModel(input.providerID)) ?? (await Provider.getModel(input.providerID, input.modelID)) - const language = await Provider.getLanguage(small) - const provider = await Provider.getProvider(small.providerID) - const options = pipe( - {}, - mergeDeep(ProviderTransform.options(small, input.session.id, provider?.options)), - mergeDeep(ProviderTransform.smallOptions(small)), - mergeDeep(small.options), - ) - await generateText({ - // use higher # for reasoning models since reasoning tokens eat up a lot of the budget - maxOutputTokens: small.capabilities.reasoning ? 3000 : 20, - providerOptions: ProviderTransform.providerOptions(small, options), + const agent = await Agent.get("title") + if (!agent) return + const result = await LLM.stream({ + agent, + user: input.message.info as MessageV2.User, + system: [], + small: true, + tools: {}, + model: await iife(async () => { + if (agent.model) return await Provider.getModel(agent.model.providerID, agent.model.modelID) + return ( + (await Provider.getSmallModel(input.providerID)) ?? 
(await Provider.getModel(input.providerID, input.modelID)) + ) + }), + abort: new AbortController().signal, + sessionID: input.session.id, + retries: 2, messages: [ - ...SystemPrompt.title(small.providerID).map( - (x): ModelMessage => ({ - role: "system", - content: x, - }), - ), { role: "user", content: "Generate a title for this conversation:\n", @@ -1555,32 +1371,19 @@ export namespace SessionPrompt { }, ]), ], - headers: small.headers, - model: language, - experimental_telemetry: { - isEnabled: cfg.experimental?.openTelemetry, - metadata: { - userId: cfg.username ?? "unknown", - sessionId: input.session.id, - }, - }, }) - .then((result) => { - if (result.text) - return Session.update(input.session.id, (draft) => { - const cleaned = result.text - .replace(/[\s\S]*?<\/think>\s*/g, "") - .split("\n") - .map((line) => line.trim()) - .find((line) => line.length > 0) - if (!cleaned) return - - const title = cleaned.length > 100 ? cleaned.substring(0, 97) + "..." : cleaned - draft.title = title - }) - }) - .catch((error) => { - log.error("failed to generate title", { error, model: small.id }) + const text = await result.text.catch((err) => log.error("failed to generate title", { error: err })) + if (text) + return Session.update(input.session.id, (draft) => { + const cleaned = text + .replace(/[\s\S]*?<\/think>\s*/g, "") + .split("\n") + .map((line) => line.trim()) + .find((line) => line.length > 0) + if (!cleaned) return + + const title = cleaned.length > 100 ? cleaned.substring(0, 97) + "..." : cleaned + draft.title = title }) } } diff --git a/packages/opencode/src/session/summary.ts b/packages/opencode/src/session/summary.ts index 4761c9d2feb..83519307a32 100644 --- a/packages/opencode/src/session/summary.ts +++ b/packages/opencode/src/session/summary.ts @@ -1,20 +1,21 @@ import { Provider } from "@/provider/provider" -import { Config } from "@/config/config" + import { fn } from "@/util/fn" import z from "zod" import { Session } from "." -import { generateText, type ModelMessage } from "ai" + import { MessageV2 } from "./message-v2" import { Identifier } from "@/id/id" import { Snapshot } from "@/snapshot" -import { ProviderTransform } from "@/provider/transform" -import { SystemPrompt } from "./system" + import { Log } from "@/util/log" import path from "path" import { Instance } from "@/project/instance" import { Storage } from "@/storage/storage" import { Bus } from "@/bus" -import { mergeDeep, pipe } from "remeda" + +import { LLM } from "./llm" +import { Agent } from "@/agent/agent" export namespace SessionSummary { const log = Log.create({ service: "session.summary" }) @@ -61,7 +62,6 @@ export namespace SessionSummary { } async function summarizeMessage(input: { messageID: string; messages: MessageV2.WithParts[] }) { - const cfg = await Config.get() const messages = input.messages.filter( (m) => m.info.id === input.messageID || (m.info.role === "assistant" && m.info.parentID === input.messageID), ) @@ -78,27 +78,17 @@ export namespace SessionSummary { const small = (await Provider.getSmallModel(assistantMsg.providerID)) ?? 
(await Provider.getModel(assistantMsg.providerID, assistantMsg.modelID)) - const language = await Provider.getLanguage(small) - - const options = pipe( - {}, - mergeDeep(ProviderTransform.options(small, assistantMsg.sessionID)), - mergeDeep(ProviderTransform.smallOptions(small)), - mergeDeep(small.options), - ) const textPart = msgWithParts.parts.find((p) => p.type === "text" && !p.synthetic) as MessageV2.TextPart if (textPart && !userMsg.summary?.title) { - const result = await generateText({ - maxOutputTokens: small.capabilities.reasoning ? 1500 : 20, - providerOptions: ProviderTransform.providerOptions(small, options), + const agent = await Agent.get("title") + const stream = await LLM.stream({ + agent, + user: userMsg, + tools: {}, + model: agent.model ? await Provider.getModel(agent.model.providerID, agent.model.modelID) : small, + small: true, messages: [ - ...SystemPrompt.title(small.providerID).map( - (x): ModelMessage => ({ - role: "system", - content: x, - }), - ), { role: "user" as const, content: ` @@ -109,18 +99,14 @@ export namespace SessionSummary { `, }, ], - headers: small.headers, - model: language, - experimental_telemetry: { - isEnabled: cfg.experimental?.openTelemetry, - metadata: { - userId: cfg.username ?? "unknown", - sessionId: assistantMsg.sessionID, - }, - }, + abort: new AbortController().signal, + sessionID: userMsg.sessionID, + system: [], + retries: 3, }) - log.info("title", { title: result.text }) - userMsg.summary.title = result.text + const result = await stream.text + log.info("title", { title: result }) + userMsg.summary.title = result await Session.updateMessage(userMsg) } @@ -138,34 +124,30 @@ export namespace SessionSummary { } } } - const result = await generateText({ - model: language, - maxOutputTokens: 100, - providerOptions: ProviderTransform.providerOptions(small, options), + const summaryAgent = await Agent.get("summary") + const stream = await LLM.stream({ + agent: summaryAgent, + user: userMsg, + tools: {}, + model: summaryAgent.model + ? await Provider.getModel(summaryAgent.model.providerID, summaryAgent.model.modelID) + : small, + small: true, messages: [ - ...SystemPrompt.summarize(small.providerID).map( - (x): ModelMessage => ({ - role: "system", - content: x, - }), - ), ...MessageV2.toModelMessage(messages), { - role: "user", + role: "user" as const, content: `Summarize the above conversation according to your system prompts.`, }, ], - headers: small.headers, - experimental_telemetry: { - isEnabled: cfg.experimental?.openTelemetry, - metadata: { - userId: cfg.username ?? 
"unknown", - sessionId: assistantMsg.sessionID, - }, - }, - }).catch(() => {}) + abort: new AbortController().signal, + sessionID: userMsg.sessionID, + system: [], + retries: 3, + }) + const result = await stream.text if (result) { - userMsg.summary.body = result.text + userMsg.summary.body = result } } await Session.updateMessage(userMsg) diff --git a/packages/opencode/src/session/system.ts b/packages/opencode/src/session/system.ts index 3146110cf3f..e15185b38b7 100644 --- a/packages/opencode/src/session/system.ts +++ b/packages/opencode/src/session/system.ts @@ -14,8 +14,7 @@ import PROMPT_BEAST from "./prompt/beast.txt" import PROMPT_GEMINI from "./prompt/gemini.txt" import PROMPT_ANTHROPIC_SPOOF from "./prompt/anthropic_spoof.txt" import PROMPT_COMPACTION from "./prompt/compaction.txt" -import PROMPT_SUMMARIZE from "./prompt/summarize.txt" -import PROMPT_TITLE from "./prompt/title.txt" + import PROMPT_CODEX from "./prompt/codex.txt" import type { Provider } from "@/provider/provider" @@ -118,31 +117,4 @@ export namespace SystemPrompt { ) return Promise.all(found).then((result) => result.filter(Boolean)) } - - export function compaction(providerID: string) { - switch (providerID) { - case "anthropic": - return [PROMPT_ANTHROPIC_SPOOF.trim(), PROMPT_COMPACTION] - default: - return [PROMPT_COMPACTION] - } - } - - export function summarize(providerID: string) { - switch (providerID) { - case "anthropic": - return [PROMPT_ANTHROPIC_SPOOF.trim(), PROMPT_SUMMARIZE] - default: - return [PROMPT_SUMMARIZE] - } - } - - export function title(providerID: string) { - switch (providerID) { - case "anthropic": - return [PROMPT_ANTHROPIC_SPOOF.trim(), PROMPT_TITLE] - default: - return [PROMPT_TITLE] - } - } } diff --git a/packages/opencode/src/tool/bash.ts b/packages/opencode/src/tool/bash.ts index 6b84d1bff8a..115d8f8b29d 100644 --- a/packages/opencode/src/tool/bash.ts +++ b/packages/opencode/src/tool/bash.ts @@ -50,7 +50,6 @@ const parser = lazy(async () => { }) // TODO: we may wanna rename this tool so it works better on other shells - export const BashTool = Tool.define("bash", async () => { const shell = Shell.acceptable() log.info("bash tool using shell", { shell }) diff --git a/packages/opencode/src/tool/registry.ts b/packages/opencode/src/tool/registry.ts index 7e440a78aa5..647c7426715 100644 --- a/packages/opencode/src/tool/registry.ts +++ b/packages/opencode/src/tool/registry.ts @@ -21,8 +21,11 @@ import { Plugin } from "../plugin" import { WebSearchTool } from "./websearch" import { CodeSearchTool } from "./codesearch" import { Flag } from "@/flag/flag" +import { Log } from "@/util/log" export namespace ToolRegistry { + const log = Log.create({ service: "tool.registry" }) + export const state = Instance.state(async () => { const custom = [] as Tool.Info[] const glob = new Bun.Glob("tool/*.{js,ts}") @@ -119,10 +122,13 @@ export namespace ToolRegistry { } return true }) - .map(async (t) => ({ - id: t.id, - ...(await t.init()), - })), + .map(async (t) => { + using _ = log.time(t.id) + return { + id: t.id, + ...(await t.init()), + } + }), ) return result } diff --git a/packages/sdk/js/src/v2/gen/sdk.gen.ts b/packages/sdk/js/src/v2/gen/sdk.gen.ts index 90df76c2234..16fe07ae4a8 100644 --- a/packages/sdk/js/src/v2/gen/sdk.gen.ts +++ b/packages/sdk/js/src/v2/gen/sdk.gen.ts @@ -1203,10 +1203,10 @@ export class Session extends HeyApiClient { } agent?: string noReply?: boolean - system?: string tools?: { [key: string]: boolean } + system?: string parts?: Array }, options?: Options, @@ -1222,8 +1222,8 
@@ export class Session extends HeyApiClient { { in: "body", key: "model" }, { in: "body", key: "agent" }, { in: "body", key: "noReply" }, - { in: "body", key: "system" }, { in: "body", key: "tools" }, + { in: "body", key: "system" }, { in: "body", key: "parts" }, ], }, @@ -1289,10 +1289,10 @@ export class Session extends HeyApiClient { } agent?: string noReply?: boolean - system?: string tools?: { [key: string]: boolean } + system?: string parts?: Array }, options?: Options, @@ -1308,8 +1308,8 @@ export class Session extends HeyApiClient { { in: "body", key: "model" }, { in: "body", key: "agent" }, { in: "body", key: "noReply" }, - { in: "body", key: "system" }, { in: "body", key: "tools" }, + { in: "body", key: "system" }, { in: "body", key: "parts" }, ], }, diff --git a/packages/sdk/js/src/v2/gen/types.gen.ts b/packages/sdk/js/src/v2/gen/types.gen.ts index 9dc057ba5c2..31d5b85610a 100644 --- a/packages/sdk/js/src/v2/gen/types.gen.ts +++ b/packages/sdk/js/src/v2/gen/types.gen.ts @@ -147,6 +147,7 @@ export type AssistantMessage = { modelID: string providerID: string mode: string + agent: string path: { cwd: string root: string @@ -475,6 +476,40 @@ export type EventPermissionReplied = { } } +export type EventFileEdited = { + type: "file.edited" + properties: { + file: string + } +} + +export type Todo = { + /** + * Brief description of the task + */ + content: string + /** + * Current status of the task: pending, in_progress, completed, cancelled + */ + status: string + /** + * Priority level of the task: high, medium, low + */ + priority: string + /** + * Unique identifier for the todo item + */ + id: string +} + +export type EventTodoUpdated = { + type: "todo.updated" + properties: { + sessionID: string + todos: Array + } +} + export type SessionStatus = | { type: "idle" @@ -511,40 +546,6 @@ export type EventSessionCompacted = { } } -export type EventFileEdited = { - type: "file.edited" - properties: { - file: string - } -} - -export type Todo = { - /** - * Brief description of the task - */ - content: string - /** - * Current status of the task: pending, in_progress, completed, cancelled - */ - status: string - /** - * Priority level of the task: high, medium, low - */ - priority: string - /** - * Unique identifier for the todo item - */ - id: string -} - -export type EventTodoUpdated = { - type: "todo.updated" - properties: { - sessionID: string - todos: Array - } -} - export type EventCommandExecuted = { type: "command.executed" properties: { @@ -745,11 +746,11 @@ export type Event = | EventMessagePartRemoved | EventPermissionUpdated | EventPermissionReplied + | EventFileEdited + | EventTodoUpdated | EventSessionStatus | EventSessionIdle | EventSessionCompacted - | EventFileEdited - | EventTodoUpdated | EventCommandExecuted | EventSessionCreated | EventSessionUpdated @@ -1738,7 +1739,8 @@ export type Agent = { name: string description?: string mode: "subagent" | "primary" | "all" - builtIn: boolean + native?: boolean + hidden?: boolean topP?: number temperature?: number color?: string @@ -2801,10 +2803,10 @@ export type SessionPromptData = { } agent?: string noReply?: boolean - system?: string tools?: { [key: string]: boolean } + system?: string parts: Array } path: { @@ -2896,10 +2898,10 @@ export type SessionPromptAsyncData = { } agent?: string noReply?: boolean - system?: string tools?: { [key: string]: boolean } + system?: string parts: Array } path: { diff --git a/packages/sdk/openapi.json b/packages/sdk/openapi.json index 372b0a63db8..21928684a07 100644 --- 
a/packages/sdk/openapi.json +++ b/packages/sdk/openapi.json @@ -1997,9 +1997,6 @@ "noReply": { "type": "boolean" }, - "system": { - "type": "string" - }, "tools": { "type": "object", "propertyNames": { @@ -2009,6 +2006,9 @@ "type": "boolean" } }, + "system": { + "type": "string" + }, "parts": { "type": "array", "items": { @@ -2202,9 +2202,6 @@ "noReply": { "type": "boolean" }, - "system": { - "type": "string" - }, "tools": { "type": "object", "propertyNames": { @@ -2214,6 +2211,9 @@ "type": "boolean" } }, + "system": { + "type": "string" + }, "parts": { "type": "array", "items": { @@ -5193,6 +5193,9 @@ "mode": { "type": "string" }, + "agent": { + "type": "string" + }, "path": { "type": "object", "properties": { @@ -5251,6 +5254,7 @@ "modelID", "providerID", "mode", + "agent", "path", "cost", "tokens" @@ -6152,6 +6156,72 @@ }, "required": ["type", "properties"] }, + "Event.file.edited": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "file.edited" + }, + "properties": { + "type": "object", + "properties": { + "file": { + "type": "string" + } + }, + "required": ["file"] + } + }, + "required": ["type", "properties"] + }, + "Todo": { + "type": "object", + "properties": { + "content": { + "description": "Brief description of the task", + "type": "string" + }, + "status": { + "description": "Current status of the task: pending, in_progress, completed, cancelled", + "type": "string" + }, + "priority": { + "description": "Priority level of the task: high, medium, low", + "type": "string" + }, + "id": { + "description": "Unique identifier for the todo item", + "type": "string" + } + }, + "required": ["content", "status", "priority", "id"] + }, + "Event.todo.updated": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "todo.updated" + }, + "properties": { + "type": "object", + "properties": { + "sessionID": { + "type": "string" + }, + "todos": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Todo" + } + } + }, + "required": ["sessionID", "todos"] + } + }, + "required": ["type", "properties"] + }, "SessionStatus": { "anyOf": [ { @@ -6255,72 +6325,6 @@ }, "required": ["type", "properties"] }, - "Event.file.edited": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "file.edited" - }, - "properties": { - "type": "object", - "properties": { - "file": { - "type": "string" - } - }, - "required": ["file"] - } - }, - "required": ["type", "properties"] - }, - "Todo": { - "type": "object", - "properties": { - "content": { - "description": "Brief description of the task", - "type": "string" - }, - "status": { - "description": "Current status of the task: pending, in_progress, completed, cancelled", - "type": "string" - }, - "priority": { - "description": "Priority level of the task: high, medium, low", - "type": "string" - }, - "id": { - "description": "Unique identifier for the todo item", - "type": "string" - } - }, - "required": ["content", "status", "priority", "id"] - }, - "Event.todo.updated": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "todo.updated" - }, - "properties": { - "type": "object", - "properties": { - "sessionID": { - "type": "string" - }, - "todos": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Todo" - } - } - }, - "required": ["sessionID", "todos"] - } - }, - "required": ["type", "properties"] - }, "Event.command.executed": { "type": "object", "properties": { @@ -6887,19 +6891,19 @@ "$ref": 
"#/components/schemas/Event.permission.replied" }, { - "$ref": "#/components/schemas/Event.session.status" + "$ref": "#/components/schemas/Event.file.edited" }, { - "$ref": "#/components/schemas/Event.session.idle" + "$ref": "#/components/schemas/Event.todo.updated" }, { - "$ref": "#/components/schemas/Event.session.compacted" + "$ref": "#/components/schemas/Event.session.status" }, { - "$ref": "#/components/schemas/Event.file.edited" + "$ref": "#/components/schemas/Event.session.idle" }, { - "$ref": "#/components/schemas/Event.todo.updated" + "$ref": "#/components/schemas/Event.session.compacted" }, { "$ref": "#/components/schemas/Event.command.executed" @@ -8920,7 +8924,10 @@ "type": "string", "enum": ["subagent", "primary", "all"] }, - "builtIn": { + "native": { + "type": "boolean" + }, + "hidden": { "type": "boolean" }, "topP": { @@ -9001,7 +9008,7 @@ "maximum": 9007199254740991 } }, - "required": ["name", "mode", "builtIn", "permission", "tools", "options"] + "required": ["name", "mode", "permission", "tools", "options"] }, "MCPStatusConnected": { "type": "object",