Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions src/core/task-persistence/apiMessages.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ export type ApiMessage = Anthropic.MessageParam & {
type?: "reasoning"
summary?: any[]
encrypted_content?: string
text?: string
}

export async function readApiMessages({
Expand Down
86 changes: 65 additions & 21 deletions src/core/task/Task.ts
Original file line number Diff line number Diff line change
Expand Up @@ -646,19 +646,21 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
return readApiMessages({ taskId: this.taskId, globalStoragePath: this.globalStoragePath })
}

private async addToApiConversationHistory(message: Anthropic.MessageParam) {
private async addToApiConversationHistory(message: Anthropic.MessageParam, reasoning?: string) {
// Capture the encrypted_content / thought signatures from the provider (e.g., OpenAI Responses API, Google GenAI) if present.
// We only persist data reported by the current response body.
const handler = this.api as ApiHandler & {
getResponseId?: () => string | undefined
getEncryptedContent?: () => { encrypted_content: string; id?: string } | undefined
getThoughtSignature?: () => string | undefined
getSummary?: () => any[] | undefined
}

if (message.role === "assistant") {
const responseId = handler.getResponseId?.()
const reasoningData = handler.getEncryptedContent?.()
const thoughtSignature = handler.getThoughtSignature?.()
const reasoningSummary = handler.getSummary?.()

// Start from the original assistant message
const messageWithTs: any = {
Expand All @@ -667,10 +669,26 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
ts: Date.now(),
}

// If we have encrypted_content, embed it as the first content block on the assistant message.
// This keeps reasoning + assistant atomic for context management while still allowing providers
// to receive a separate reasoning item when we build the request.
if (reasoningData?.encrypted_content) {
// Store reasoning: plain text (most providers) or encrypted (OpenAI Native)
if (reasoning) {
const reasoningBlock = {
type: "reasoning",
text: reasoning,
summary: reasoningSummary ?? ([] as any[]),
}

if (typeof messageWithTs.content === "string") {
messageWithTs.content = [
reasoningBlock,
{ type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
]
} else if (Array.isArray(messageWithTs.content)) {
messageWithTs.content = [reasoningBlock, ...messageWithTs.content]
} else if (!messageWithTs.content) {
messageWithTs.content = [reasoningBlock]
}
} else if (reasoningData?.encrypted_content) {
// OpenAI Native encrypted reasoning
const reasoningBlock = {
type: "reasoning",
summary: [] as any[],
Expand Down Expand Up @@ -2661,10 +2679,13 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
}
}

await this.addToApiConversationHistory({
role: "assistant",
content: assistantContent,
})
await this.addToApiConversationHistory(
{
role: "assistant",
content: assistantContent,
},
reasoningMessage || undefined,
)

TelemetryService.instance.captureConversationMessage(this.taskId, "assistant")

Expand Down Expand Up @@ -3339,14 +3360,16 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
const cleanConversationHistory: (Anthropic.Messages.MessageParam | ReasoningItemForRequest)[] = []

for (const msg of messages) {
// Legacy path: standalone reasoning items stored as separate messages
if (msg.type === "reasoning" && msg.encrypted_content) {
cleanConversationHistory.push({
type: "reasoning",
summary: msg.summary,
encrypted_content: msg.encrypted_content!,
...(msg.id ? { id: msg.id } : {}),
})
// Standalone reasoning: send encrypted, skip plain text
if (msg.type === "reasoning") {
if (msg.encrypted_content) {
cleanConversationHistory.push({
type: "reasoning",
summary: msg.summary,
encrypted_content: msg.encrypted_content!,
...(msg.id ? { id: msg.id } : {}),
})
}
continue
}

Expand All @@ -3364,21 +3387,42 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {

const [first, ...rest] = contentArray

const hasEmbeddedReasoning =
// Embedded reasoning: encrypted (send) or plain text (skip)
const hasEncryptedReasoning =
first && (first as any).type === "reasoning" && typeof (first as any).encrypted_content === "string"
const hasPlainTextReasoning =
first && (first as any).type === "reasoning" && typeof (first as any).text === "string"

if (hasEmbeddedReasoning) {
if (hasEncryptedReasoning) {
const reasoningBlock = first as any

// Emit a separate reasoning item for the provider
// Send as separate reasoning item (OpenAI Native)
cleanConversationHistory.push({
type: "reasoning",
summary: reasoningBlock.summary ?? [],
encrypted_content: reasoningBlock.encrypted_content,
...(reasoningBlock.id ? { id: reasoningBlock.id } : {}),
})

// Build assistant message without the embedded reasoning block
// Send assistant message without reasoning
let assistantContent: Anthropic.Messages.MessageParam["content"]

if (rest.length === 0) {
assistantContent = ""
} else if (rest.length === 1 && rest[0].type === "text") {
assistantContent = (rest[0] as Anthropic.Messages.TextBlockParam).text
} else {
assistantContent = rest
}

cleanConversationHistory.push({
role: "assistant",
content: assistantContent,
} satisfies Anthropic.Messages.MessageParam)

continue
} else if (hasPlainTextReasoning) {
// Strip plain text reasoning, send assistant message only
let assistantContent: Anthropic.Messages.MessageParam["content"]

if (rest.length === 0) {
Expand Down
58 changes: 58 additions & 0 deletions src/core/task/__tests__/reasoning-preservation.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -371,4 +371,62 @@ describe("Task reasoning preservation", () => {
text: "Here is my response.",
})
})

// Verifies the new plain-text reasoning path: when a provider exposes no
// getEncryptedContent() (e.g. Anthropic, Gemini), reasoning passed as the
// second argument to addToApiConversationHistory must be persisted as a
// plain-text "reasoning" block prepended to the assistant message content.
it("should store plain text reasoning from streaming for all providers", async () => {
// startTask: false keeps the constructor from kicking off the task loop
const task = new Task({
provider: mockProvider as ClineProvider,
apiConfiguration: mockApiConfiguration,
task: "Test task",
startTask: false,
})

// Avoid disk writes in this test
;(task as any).saveApiConversationHistory = vi.fn().mockResolvedValue(undefined)

// Mock API handler without getEncryptedContent (like Anthropic, Gemini, etc.)
// so the plain-text branch — not the encrypted OpenAI Native branch — is taken
task.api = {
getModel: vi.fn().mockReturnValue({
id: "test-model",
info: {
contextWindow: 16000,
supportsPromptCache: true,
},
}),
} as any

// Simulate the new path: passing reasoning as a parameter
const reasoningText = "Let me analyze this carefully. First, I'll consider the requirements..."
const assistantText = "Here is my response."

await (task as any).addToApiConversationHistory(
{
role: "assistant",
content: [{ type: "text", text: assistantText }],
},
reasoningText,
)

// Reasoning must be embedded in the single assistant entry, not stored
// as a separate standalone message
expect(task.apiConversationHistory).toHaveLength(1)
const stored = task.apiConversationHistory[0] as any

expect(stored.role).toBe("assistant")
expect(Array.isArray(stored.content)).toBe(true)

// The reasoning block is expected to be prepended as the first content block
const [reasoningBlock, textBlock] = stored.content

// Verify reasoning is stored with plain text, not encrypted
expect(reasoningBlock).toMatchObject({
type: "reasoning",
text: reasoningText,
summary: [],
})

// Verify there's no encrypted_content field (that's only for OpenAI Native)
expect(reasoningBlock.encrypted_content).toBeUndefined()

// The original assistant text block must survive, unchanged, after the reasoning block
expect(textBlock).toMatchObject({
type: "text",
text: assistantText,
})
})
})
Loading