diff --git a/src/api/providers/base-openai-compatible-provider.ts b/src/api/providers/base-openai-compatible-provider.ts
index 5adf56b001..5aee7267b3 100644
--- a/src/api/providers/base-openai-compatible-provider.ts
+++ b/src/api/providers/base-openai-compatible-provider.ts
@@ -90,13 +90,7 @@ export abstract class BaseOpenAiCompatibleProvider
 			model,
 			max_tokens,
 			temperature,
-			// Enable mergeToolResultText to merge environment_details and other text content
-			// after tool_results into the last tool message. This prevents reasoning/thinking
-			// models from dropping reasoning_content when they see a user message after tool results.
-			messages: [
-				{ role: "system", content: systemPrompt },
-				...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
-			],
+			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
 			stream: true,
 			stream_options: { include_usage: true },
 			...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
diff --git a/src/api/providers/cerebras.ts b/src/api/providers/cerebras.ts
index d0ff747688..99e7c4cc3d 100644
--- a/src/api/providers/cerebras.ts
+++ b/src/api/providers/cerebras.ts
@@ -106,7 +106,7 @@ export class CerebrasHandler extends BaseProvider implements SingleCompletionHan
 			supportsNativeTools && metadata?.tools && metadata.tools.length > 0 && metadata?.toolProtocol !== "xml"
 
 		// Convert Anthropic messages to OpenAI format (Cerebras is OpenAI-compatible)
-		const openaiMessages = convertToOpenAiMessages(messages, { mergeToolResultText: true })
+		const openaiMessages = convertToOpenAiMessages(messages)
 
 		// Prepare request body following Cerebras API specification exactly
 		const requestBody: Record = {
diff --git a/src/api/providers/chutes.ts b/src/api/providers/chutes.ts
index ac2281190e..78ac7e591f 100644
--- a/src/api/providers/chutes.ts
+++ b/src/api/providers/chutes.ts
@@ -44,10 +44,7 @@ export class ChutesHandler extends RouterProvider implements SingleCompletionHan
 		const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 			model,
 			max_tokens,
-			messages: [
-				{ role: "system", content: systemPrompt },
-				...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
-			],
+			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
 			stream: true,
 			stream_options: { include_usage: true },
 			...(metadata?.tools && { tools: metadata.tools }),
diff --git a/src/api/providers/deepinfra.ts b/src/api/providers/deepinfra.ts
index a221927c2b..4dfad2689a 100644
--- a/src/api/providers/deepinfra.ts
+++ b/src/api/providers/deepinfra.ts
@@ -72,10 +72,7 @@ export class DeepInfraHandler extends RouterProvider implements SingleCompletion
 
 		const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 			model: modelId,
-			messages: [
-				{ role: "system", content: systemPrompt },
-				...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
-			],
+			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
 			stream: true,
 			stream_options: { include_usage: true },
 			reasoning_effort,
diff --git a/src/api/providers/featherless.ts b/src/api/providers/featherless.ts
index 64df12bc97..3dcd0821b8 100644
--- a/src/api/providers/featherless.ts
+++ b/src/api/providers/featherless.ts
@@ -44,10 +44,7 @@ export class FeatherlessHandler extends BaseOpenAiCompatibleProvider