Skip to content

Commit 097781c

Browse files
committed
Revert "feat: enable mergeToolResultText for all OpenAI-compatible providers (#10299)"
This reverts commit ded6486.
1 parent e851b93 commit 097781c

File tree

15 files changed

+21
-41
lines changed

15 files changed

+21
-41
lines changed

src/api/providers/base-openai-compatible-provider.ts

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -90,13 +90,7 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
9090
model,
9191
max_tokens,
9292
temperature,
93-
// Enable mergeToolResultText to merge environment_details and other text content
94-
// after tool_results into the last tool message. This prevents reasoning/thinking
95-
// models from dropping reasoning_content when they see a user message after tool results.
96-
messages: [
97-
{ role: "system", content: systemPrompt },
98-
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
99-
],
93+
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
10094
stream: true,
10195
stream_options: { include_usage: true },
10296
...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),

src/api/providers/cerebras.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@ export class CerebrasHandler extends BaseProvider implements SingleCompletionHan
106106
supportsNativeTools && metadata?.tools && metadata.tools.length > 0 && metadata?.toolProtocol !== "xml"
107107

108108
// Convert Anthropic messages to OpenAI format (Cerebras is OpenAI-compatible)
109-
const openaiMessages = convertToOpenAiMessages(messages, { mergeToolResultText: true })
109+
const openaiMessages = convertToOpenAiMessages(messages)
110110

111111
// Prepare request body following Cerebras API specification exactly
112112
const requestBody: Record<string, any> = {

src/api/providers/chutes.ts

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -44,10 +44,7 @@ export class ChutesHandler extends RouterProvider implements SingleCompletionHan
4444
const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
4545
model,
4646
max_tokens,
47-
messages: [
48-
{ role: "system", content: systemPrompt },
49-
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
50-
],
47+
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
5148
stream: true,
5249
stream_options: { include_usage: true },
5350
...(metadata?.tools && { tools: metadata.tools }),

src/api/providers/deepinfra.ts

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -72,10 +72,7 @@ export class DeepInfraHandler extends RouterProvider implements SingleCompletion
7272

7373
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
7474
model: modelId,
75-
messages: [
76-
{ role: "system", content: systemPrompt },
77-
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
78-
],
75+
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
7976
stream: true,
8077
stream_options: { include_usage: true },
8178
reasoning_effort,

src/api/providers/featherless.ts

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -44,10 +44,7 @@ export class FeatherlessHandler extends BaseOpenAiCompatibleProvider<Featherless
4444
model,
4545
max_tokens,
4646
temperature,
47-
messages: [
48-
{ role: "system", content: systemPrompt },
49-
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
50-
],
47+
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
5148
stream: true,
5249
stream_options: { include_usage: true },
5350
}

src/api/providers/huggingface.ts

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -56,10 +56,7 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
5656
const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
5757
model: modelId,
5858
temperature,
59-
messages: [
60-
{ role: "system", content: systemPrompt },
61-
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
62-
],
59+
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
6360
stream: true,
6461
stream_options: { include_usage: true },
6562
}

src/api/providers/lite-llm.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ export class LiteLLMHandler extends RouterProvider implements SingleCompletionHa
4545
): ApiStream {
4646
const { id: modelId, info } = await this.fetchModel()
4747

48-
const openAiMessages = convertToOpenAiMessages(messages, { mergeToolResultText: true })
48+
const openAiMessages = convertToOpenAiMessages(messages)
4949

5050
// Prepare messages with cache control if enabled and supported
5151
let systemMessage: OpenAI.Chat.ChatCompletionMessageParam

src/api/providers/lm-studio.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
4444
): ApiStream {
4545
const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
4646
{ role: "system", content: systemPrompt },
47-
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
47+
...convertToOpenAiMessages(messages),
4848
]
4949

5050
// LM Studio always supports native tools (https://lmstudio.ai/docs/developer/core/tools)

src/api/providers/openai.ts

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
127127
}
128128
}
129129

130-
convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages, { mergeToolResultText: true })]
130+
convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages)]
131131

132132
if (modelInfo.supportsPromptCache) {
133133
// Note: the following logic is copied from openrouter:
@@ -235,7 +235,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
235235
? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
236236
: enabledLegacyFormat
237237
? [systemMessage, ...convertToSimpleMessages(messages)]
238-
: [systemMessage, ...convertToOpenAiMessages(messages, { mergeToolResultText: true })],
238+
: [systemMessage, ...convertToOpenAiMessages(messages)],
239239
...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
240240
...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
241241
...(metadata?.toolProtocol === "native" && {
@@ -356,7 +356,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
356356
role: "developer",
357357
content: `Formatting re-enabled\n${systemPrompt}`,
358358
},
359-
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
359+
...convertToOpenAiMessages(messages),
360360
],
361361
stream: true,
362362
...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
@@ -393,7 +393,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
393393
role: "developer",
394394
content: `Formatting re-enabled\n${systemPrompt}`,
395395
},
396-
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
396+
...convertToOpenAiMessages(messages),
397397
],
398398
reasoning_effort: modelInfo.reasoningEffort as "low" | "medium" | "high" | undefined,
399399
temperature: undefined,

src/api/providers/openrouter.ts

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -229,15 +229,13 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
229229

230230
// Convert Anthropic messages to OpenAI format.
231231
// Pass normalization function for Mistral compatibility (requires 9-char alphanumeric IDs)
232-
// Enable mergeToolResultText to merge environment_details after tool_results into the last
233-
// tool message, preventing reasoning/thinking models from dropping reasoning_content.
234232
const isMistral = modelId.toLowerCase().includes("mistral")
235233
let openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
236234
{ role: "system", content: systemPrompt },
237-
...convertToOpenAiMessages(messages, {
238-
mergeToolResultText: true,
239-
...(isMistral && { normalizeToolCallId: normalizeMistralToolCallId }),
240-
}),
235+
...convertToOpenAiMessages(
236+
messages,
237+
isMistral ? { normalizeToolCallId: normalizeMistralToolCallId } : undefined,
238+
),
241239
]
242240

243241
// DeepSeek highly recommends using user instead of system role.

0 commit comments

Comments (0)