8 changes: 7 additions & 1 deletion src/api/providers/base-openai-compatible-provider.ts
@@ -90,7 +90,13 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 model,
 max_tokens,
 temperature,
- messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+ // Enable mergeToolResultText to merge environment_details and other text content
+ // after tool_results into the last tool message. This prevents reasoning/thinking
+ // models from dropping reasoning_content when they see a user message after tool results.
+ messages: [
+  { role: "system", content: systemPrompt },
+  ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
+ ],
 stream: true,
 stream_options: { include_usage: true },
 ...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
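For context, a minimal sketch of the behavior this option enables, mirroring the tests added at the end of this diff. This is illustration only, not part of the change; the import paths are assumed from the test file's location.

import { Anthropic } from "@anthropic-ai/sdk"
import { convertToOpenAiMessages } from "../transform/openai-format"

// A user turn as Roo builds it: a tool result followed by trailing text
// such as environment_details.
const history: Anthropic.Messages.MessageParam[] = [
	{
		role: "user",
		content: [
			{ type: "tool_result", tool_use_id: "tool-123", content: "Tool result content" },
			{ type: "text", text: "<environment_details>\nSome context\n</environment_details>" },
		],
	},
]

// Default behavior: a `tool` message plus a separate trailing `user` message.
// Some reasoning/thinking models drop reasoning_content when they see a user
// message directly after tool results.
const split = convertToOpenAiMessages(history)
// split => [{ role: "tool", ... }, { role: "user", ... }]

// With the option: a single `tool` message with the text appended after a
// blank line. Images still fall back to a separate user message, since tool
// messages cannot carry image parts.
const merged = convertToOpenAiMessages(history, { mergeToolResultText: true })
// merged[0].content ===
//   "Tool result content\n\n<environment_details>\nSome context\n</environment_details>"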
2 changes: 1 addition & 1 deletion src/api/providers/cerebras.ts
@@ -106,7 +106,7 @@ export class CerebrasHandler extends BaseProvider implements SingleCompletionHandler
 supportsNativeTools && metadata?.tools && metadata.tools.length > 0 && metadata?.toolProtocol !== "xml"

 // Convert Anthropic messages to OpenAI format (Cerebras is OpenAI-compatible)
- const openaiMessages = convertToOpenAiMessages(messages)
+ const openaiMessages = convertToOpenAiMessages(messages, { mergeToolResultText: true })

 // Prepare request body following Cerebras API specification exactly
 const requestBody: Record<string, any> = {
5 changes: 4 additions & 1 deletion src/api/providers/chutes.ts
@@ -44,7 +44,10 @@ export class ChutesHandler extends RouterProvider implements SingleCompletionHandler
 const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 model,
 max_tokens,
- messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+ messages: [
+  { role: "system", content: systemPrompt },
+  ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
+ ],
 stream: true,
 stream_options: { include_usage: true },
 ...(metadata?.tools && { tools: metadata.tools }),
5 changes: 4 additions & 1 deletion src/api/providers/deepinfra.ts
@@ -72,7 +72,10 @@ export class DeepInfraHandler extends RouterProvider implements SingleCompletionHandler

 const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 model: modelId,
- messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+ messages: [
+  { role: "system", content: systemPrompt },
+  ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
+ ],
 stream: true,
 stream_options: { include_usage: true },
 reasoning_effort,
5 changes: 4 additions & 1 deletion src/api/providers/featherless.ts
@@ -44,7 +44,10 @@ export class FeatherlessHandler extends BaseOpenAiCompatibleProvider<Featherless
 model,
 max_tokens,
 temperature,
- messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+ messages: [
+  { role: "system", content: systemPrompt },
+  ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
+ ],
 stream: true,
 stream_options: { include_usage: true },
 }
5 changes: 4 additions & 1 deletion src/api/providers/huggingface.ts
@@ -56,7 +56,10 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletionHandler
 const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 model: modelId,
 temperature,
- messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+ messages: [
+  { role: "system", content: systemPrompt },
+  ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
+ ],
 stream: true,
 stream_options: { include_usage: true },
 }
2 changes: 1 addition & 1 deletion src/api/providers/lite-llm.ts
@@ -45,7 +45,7 @@ export class LiteLLMHandler extends RouterProvider implements SingleCompletionHandler
 ): ApiStream {
 const { id: modelId, info } = await this.fetchModel()

- const openAiMessages = convertToOpenAiMessages(messages)
+ const openAiMessages = convertToOpenAiMessages(messages, { mergeToolResultText: true })

 // Prepare messages with cache control if enabled and supported
 let systemMessage: OpenAI.Chat.ChatCompletionMessageParam
2 changes: 1 addition & 1 deletion src/api/providers/lm-studio.ts
@@ -44,7 +44,7 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler
 ): ApiStream {
 const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
  { role: "system", content: systemPrompt },
-  ...convertToOpenAiMessages(messages),
+  ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
 ]

 // LM Studio always supports native tools (https://lmstudio.ai/docs/developer/core/tools)
8 changes: 4 additions & 4 deletions src/api/providers/openai.ts
@@ -126,7 +126,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler
  }
 }

- convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages)]
+ convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages, { mergeToolResultText: true })]

 if (modelInfo.supportsPromptCache) {
  // Note: the following logic is copied from openrouter:
@@ -234,7 +234,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler
 ? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
 : enabledLegacyFormat
  ? [systemMessage, ...convertToSimpleMessages(messages)]
-  : [systemMessage, ...convertToOpenAiMessages(messages)],
+  : [systemMessage, ...convertToOpenAiMessages(messages, { mergeToolResultText: true })],
 ...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
 ...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
 ...(metadata?.toolProtocol === "native" && {
@@ -349,7 +349,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler
  role: "developer",
  content: `Formatting re-enabled\n${systemPrompt}`,
 },
- ...convertToOpenAiMessages(messages),
+ ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
 ],
 stream: true,
 ...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
@@ -386,7 +386,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler
  role: "developer",
  content: `Formatting re-enabled\n${systemPrompt}`,
 },
- ...convertToOpenAiMessages(messages),
+ ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
 ],
 reasoning_effort: modelInfo.reasoningEffort as "low" | "medium" | "high" | undefined,
 temperature: undefined,
10 changes: 6 additions & 4 deletions src/api/providers/openrouter.ts
@@ -229,13 +229,15 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionHandler

 // Convert Anthropic messages to OpenAI format.
 // Pass normalization function for Mistral compatibility (requires 9-char alphanumeric IDs)
+ // Enable mergeToolResultText to merge environment_details after tool_results into the last
+ // tool message, preventing reasoning/thinking models from dropping reasoning_content.
 const isMistral = modelId.toLowerCase().includes("mistral")
 let openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
  { role: "system", content: systemPrompt },
-  ...convertToOpenAiMessages(
-   messages,
-   isMistral ? { normalizeToolCallId: normalizeMistralToolCallId } : undefined,
-  ),
+  ...convertToOpenAiMessages(messages, {
+   mergeToolResultText: true,
+   ...(isMistral && { normalizeToolCallId: normalizeMistralToolCallId }),
+  }),
 ]

 // DeepSeek highly recommends using user instead of system role.
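The Mistral constraint referenced above (9-character alphanumeric tool-call IDs) is handled by normalizeMistralToolCallId, whose implementation sits outside this diff. A hypothetical sketch of what such a normalizer has to guarantee — a deterministic mapping, so that tool_use and tool_result IDs stay paired after normalization:

// Hypothetical illustration only; the real normalizeMistralToolCallId may differ.
function toNineCharAlnumId(id: string): string {
	// Strip anything that is not alphanumeric.
	const alnum = id.replace(/[^a-zA-Z0-9]/g, "")
	// Keep the last 9 characters so IDs that differ in their suffix stay
	// distinct; left-pad short IDs with "0" to reach the required length.
	return alnum.slice(-9).padStart(9, "0")
}

// e.g. toNineCharAlnumId("call_5019f900a247472bacde0b82") === "bacde0b82"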
2 changes: 1 addition & 1 deletion src/api/providers/qwen-code.ts
@@ -222,7 +222,7 @@ export class QwenCodeHandler extends BaseProvider implements SingleCompletionHandler
 content: systemPrompt,
 }

- const convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages)]
+ const convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages, { mergeToolResultText: true })]

 const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 model: model.id,
2 changes: 1 addition & 1 deletion src/api/providers/requesty.ts
@@ -140,7 +140,7 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHandler

 const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
  { role: "system", content: systemPrompt },
-  ...convertToOpenAiMessages(messages),
+  ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
 ]

 // Map extended efforts to OpenAI Chat Completions-accepted values (omit unsupported)
8 changes: 7 additions & 1 deletion src/api/providers/roo.ts
@@ -100,7 +100,13 @@ export class RooHandler extends BaseOpenAiCompatibleProvider<string> {
 model,
 max_tokens,
 temperature,
- messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+ // Enable mergeToolResultText to merge environment_details and other text content
+ // after tool_results into the last tool message. This prevents reasoning/thinking
+ // models from dropping reasoning_content when they see a user message after tool results.
+ messages: [
+  { role: "system", content: systemPrompt },
+  ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
+ ],
 stream: true,
 stream_options: { include_usage: true },
 ...(reasoning && { reasoning }),
2 changes: 1 addition & 1 deletion src/api/providers/unbound.ts
@@ -86,7 +86,7 @@ export class UnboundHandler extends RouterProvider implements SingleCompletionHandler

 const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
  { role: "system", content: systemPrompt },
-  ...convertToOpenAiMessages(messages),
+  ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
 ]

 if (info.supportsPromptCache) {
2 changes: 1 addition & 1 deletion src/api/providers/vercel-ai-gateway.ts
@@ -45,7 +45,7 @@ export class VercelAiGatewayHandler extends RouterProvider implements SingleCompletionHandler

 const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
  { role: "system", content: systemPrompt },
-  ...convertToOpenAiMessages(messages),
+  ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
 ]

 if (VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS.has(modelId) && info.supportsPromptCache) {
2 changes: 1 addition & 1 deletion src/api/providers/xai.ts
@@ -66,7 +66,7 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 temperature: this.options.modelTemperature ?? XAI_DEFAULT_TEMPERATURE,
 messages: [
  { role: "system", content: systemPrompt },
-  ...convertToOpenAiMessages(messages),
+  ...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
 ] as OpenAI.Chat.ChatCompletionMessageParam[],
 stream: true as const,
 stream_options: { include_usage: true },
177 changes: 177 additions & 0 deletions src/api/transform/__tests__/openai-format.spec.ts
@@ -224,4 +224,181 @@ describe("convertToOpenAiMessages", () => {
const assistantMessage = openAiMessages[0] as OpenAI.Chat.ChatCompletionAssistantMessageParam
expect(assistantMessage.tool_calls![0].id).toBe("custom_toolu_123")
})

describe("mergeToolResultText option", () => {
it("should merge text content into last tool message when mergeToolResultText is true", () => {
const anthropicMessages: Anthropic.Messages.MessageParam[] = [
{
role: "user",
content: [
{
type: "tool_result",
tool_use_id: "tool-123",
content: "Tool result content",
},
{
type: "text",
text: "<environment_details>\nSome context\n</environment_details>",
},
],
},
]

const openAiMessages = convertToOpenAiMessages(anthropicMessages, { mergeToolResultText: true })

// Should produce only one tool message with merged content
expect(openAiMessages).toHaveLength(1)
const toolMessage = openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam
expect(toolMessage.role).toBe("tool")
expect(toolMessage.tool_call_id).toBe("tool-123")
expect(toolMessage.content).toBe(
"Tool result content\n\n<environment_details>\nSome context\n</environment_details>",
)
})

it("should merge text into last tool message when multiple tool results exist", () => {
const anthropicMessages: Anthropic.Messages.MessageParam[] = [
{
role: "user",
content: [
{
type: "tool_result",
tool_use_id: "call_1",
content: "First result",
},
{
type: "tool_result",
tool_use_id: "call_2",
content: "Second result",
},
{
type: "text",
text: "<environment_details>Context</environment_details>",
},
],
},
]

const openAiMessages = convertToOpenAiMessages(anthropicMessages, { mergeToolResultText: true })

// Should produce two tool messages, with text merged into the last one
expect(openAiMessages).toHaveLength(2)
expect((openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam).content).toBe("First result")
expect((openAiMessages[1] as OpenAI.Chat.ChatCompletionToolMessageParam).content).toBe(
"Second result\n\n<environment_details>Context</environment_details>",
)
})

it("should NOT merge text when images are present (fall back to user message)", () => {
const anthropicMessages: Anthropic.Messages.MessageParam[] = [
{
role: "user",
content: [
{
type: "tool_result",
tool_use_id: "tool-123",
content: "Tool result content",
},
{
type: "image",
source: {
type: "base64",
media_type: "image/png",
data: "base64data",
},
},
],
},
]

const openAiMessages = convertToOpenAiMessages(anthropicMessages, { mergeToolResultText: true })

// Should produce a tool message AND a user message (because image is present)
expect(openAiMessages).toHaveLength(2)
expect((openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam).role).toBe("tool")
expect(openAiMessages[1].role).toBe("user")
})

it("should create separate user message when mergeToolResultText is false", () => {
const anthropicMessages: Anthropic.Messages.MessageParam[] = [
{
role: "user",
content: [
{
type: "tool_result",
tool_use_id: "tool-123",
content: "Tool result content",
},
{
type: "text",
text: "<environment_details>\nSome context\n</environment_details>",
},
],
},
]

const openAiMessages = convertToOpenAiMessages(anthropicMessages, { mergeToolResultText: false })

// Should produce a tool message AND a separate user message (default behavior)
expect(openAiMessages).toHaveLength(2)
expect((openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam).role).toBe("tool")
expect((openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam).content).toBe(
"Tool result content",
)
expect(openAiMessages[1].role).toBe("user")
})

it("should work with normalizeToolCallId when mergeToolResultText is true", () => {
const anthropicMessages: Anthropic.Messages.MessageParam[] = [
{
role: "user",
content: [
{
type: "tool_result",
tool_use_id: "call_5019f900a247472bacde0b82",
content: "Tool result content",
},
{
type: "text",
text: "<environment_details>Context</environment_details>",
},
],
},
]

const openAiMessages = convertToOpenAiMessages(anthropicMessages, {
mergeToolResultText: true,
normalizeToolCallId: normalizeMistralToolCallId,
})

// Should merge AND normalize the ID
expect(openAiMessages).toHaveLength(1)
const toolMessage = openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam
expect(toolMessage.role).toBe("tool")
expect(toolMessage.tool_call_id).toBe(normalizeMistralToolCallId("call_5019f900a247472bacde0b82"))
expect(toolMessage.content).toBe(
"Tool result content\n\n<environment_details>Context</environment_details>",
)
})

it("should handle user messages with only text content (no tool results)", () => {
const anthropicMessages: Anthropic.Messages.MessageParam[] = [
{
role: "user",
content: [
{
type: "text",
text: "Hello, how are you?",
},
],
},
]

const openAiMessages = convertToOpenAiMessages(anthropicMessages, { mergeToolResultText: true })

// Should produce a normal user message
expect(openAiMessages).toHaveLength(1)
expect(openAiMessages[0].role).toBe("user")
})
})
})