diff --git a/messages/zh-CN/settings.json b/messages/zh-CN/settings.json
index 0d73e2715..1724e3a73 100644
--- a/messages/zh-CN/settings.json
+++ b/messages/zh-CN/settings.json
@@ -694,7 +694,7 @@
       "summary": "验证供应商与模型连通性",
       "desc": "测试供应商模型是否可用,默认与路由配置中选择的供应商类型保持一致。",
       "testLabel": "供应商模型测试",
-      "notice": "注意:测试将向供应商发送真实请求(非流式),可能消耗少量额度。请确认供应商 URL、API 密钥及模型配置正确。"
+      "notice": "注意:测试将向供应商发送真实请求,可能消耗少量额度。请确认供应商 URL、API 密钥及模型配置正确。"
     },
     "codexStrategy": {
       "title": "Codex Instructions 策略",
diff --git a/src/actions/providers.ts b/src/actions/providers.ts
index 7b63cfdaa..f88a3cb04 100644
--- a/src/actions/providers.ts
+++ b/src/actions/providers.ts
@@ -33,9 +33,13 @@ import { GeminiAuth } from "@/app/v1/_lib/gemini/auth";
 // API 测试配置常量
 const API_TEST_CONFIG = {
   TIMEOUT_MS: 10000, // 10 秒超时
-  MAX_RESPONSE_PREVIEW_LENGTH: 100, // 响应内容预览最大长度
+  MAX_RESPONSE_PREVIEW_LENGTH: 500, // 响应内容预览最大长度(增加到 500 字符以显示更多内容)
   TEST_MAX_TOKENS: 100, // 测试请求的最大 token 数
   TEST_PROMPT: "Hello", // 测试请求的默认提示词
+  // 流式响应资源限制(防止 DoS 攻击)
+  MAX_STREAM_CHUNKS: 1000, // 最大数据块数量
+  MAX_STREAM_BUFFER_SIZE: 10 * 1024 * 1024, // 10MB 最大缓冲区大小
+  MAX_STREAM_ITERATIONS: 10000, // 最大迭代次数(防止无限循环)
 } as const;

 // 获取服务商数据
@@ -829,6 +833,10 @@ type ProviderApiTestResult = ActionResult<
       model?: string;
       usage?: Record<string, unknown>;
       content?: string;
+      streamInfo?: {
+        chunksReceived: number;
+        format: "sse" | "ndjson";
+      };
     };
   }
   | {
@@ -979,6 +987,405 @@ function clipText(value: unknown, maxLength?: number): string | undefined {
   return typeof value === "string" ? value.substring(0, limit) : undefined;
 }

+/**
+ * 流式响应解析结果
+ */
+type StreamParseResult = {
+  data: ProviderApiResponse;
+  chunksReceived: number;
+  format: "sse" | "ndjson";
+};
+
+/**
+ * 解析 SSE 文本格式的流式响应
+ */
+function parseSSEText(text: string): StreamParseResult {
+  // 验证输入大小(防止 DoS)
+  if (text.length > API_TEST_CONFIG.MAX_STREAM_BUFFER_SIZE) {
+    throw new Error(`SSE 文本超过最大大小 (${API_TEST_CONFIG.MAX_STREAM_BUFFER_SIZE} 字节)`);
+  }
+
+  const lines = text.split("\n");
+
+  // 防止过多行数(防止 DoS)
+  if (lines.length > API_TEST_CONFIG.MAX_STREAM_ITERATIONS) {
+    throw new Error(`SSE 超过最大行数 (${API_TEST_CONFIG.MAX_STREAM_ITERATIONS})`);
+  }
+
+  const chunks: ProviderApiResponse[] = [];
+  let currentData = "";
+  let skippedChunks = 0;
+
+  for (const line of lines) {
+    const trimmed = line.trim();
+
+    if (trimmed.startsWith("data:")) {
+      const dataContent = trimmed.slice(5).trim();
+
+      // 跳过 [DONE] 标记
+      if (dataContent === "[DONE]") {
+        continue;
+      }
+
+      if (dataContent) {
+        currentData = dataContent;
+      }
+    } else if (trimmed === "" && currentData) {
+      // 防止过多数据块(防止 DoS)
+      if (chunks.length >= API_TEST_CONFIG.MAX_STREAM_CHUNKS) {
+        logger.warn("SSE 解析达到最大数据块限制", {
+          maxChunks: API_TEST_CONFIG.MAX_STREAM_CHUNKS,
+          skipped: skippedChunks,
+        });
+        break;
+      }
+
+      // 空行表示一个完整的 SSE 事件结束
+      try {
+        const parsed = JSON.parse(currentData) as ProviderApiResponse;
+        chunks.push(parsed);
+        currentData = "";
+      } catch (parseError) {
+        // 记录解析失败的 chunk(用于调试)
+        skippedChunks++;
+        logger.warn("SSE chunk 解析失败", {
+          chunkPreview: clipText(currentData, 100),
+          error: parseError instanceof Error ? parseError.message : "Unknown",
+        });
+        currentData = "";
+      }
+    }
+  }
+
+  // 处理最后一个未结束的 data
+  if (currentData && chunks.length < API_TEST_CONFIG.MAX_STREAM_CHUNKS) {
+    try {
+      const parsed = JSON.parse(currentData) as ProviderApiResponse;
+      chunks.push(parsed);
+    } catch (parseError) {
+      skippedChunks++;
+      logger.warn("SSE 最后一个 chunk 解析失败", {
+        chunkPreview: clipText(currentData, 100),
+        error: parseError instanceof Error ? parseError.message : "Unknown",
+      });
+    }
+  }
+
+  if (chunks.length === 0) {
+    throw new Error(
+      `未能从 SSE 响应中解析出有效数据${skippedChunks > 0 ? `(跳过 ${skippedChunks} 个无效 chunk)` : ""}`
+    );
+  }
+
+  logger.info("SSE 文本解析完成", {
+    totalChunks: chunks.length,
+    skippedChunks,
+    textLength: text.length,
+  });
+
+  // 合并所有 chunks 为完整响应
+  const mergedResponse = mergeStreamChunks(chunks);
+
+  return {
+    data: mergedResponse,
+    chunksReceived: chunks.length,
+    format: "sse",
+  };
+}
+
+/**
+ * 解析流式响应(从 Response 对象读取)
+ */
+async function parseStreamResponse(response: Response): Promise<StreamParseResult> {
+  if (!response.body) {
+    throw new Error("响应体为空");
+  }
+
+  const reader = response.body.getReader();
+  const decoder = new TextDecoder();
+  const chunks: ProviderApiResponse[] = [];
+
+  let buffer = "";
+  let currentData = "";
+  let skippedChunks = 0;
+
+  try {
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+
+      buffer += decoder.decode(value, { stream: true });
+      const lines = buffer.split("\n");
+
+      // 保留最后一行(可能不完整)
+      buffer = lines.pop() || "";
+
+      for (const line of lines) {
+        const trimmed = line.trim();
+
+        if (trimmed.startsWith("data:")) {
+          const dataContent = trimmed.slice(5).trim();
+
+          // 跳过 [DONE] 标记
+          if (dataContent === "[DONE]") {
+            continue;
+          }
+
+          if (dataContent) {
+            currentData = dataContent;
+          }
+        } else if (trimmed === "" && currentData) {
+          // 空行表示一个完整的 SSE 事件结束
+          try {
+            const parsed = JSON.parse(currentData) as ProviderApiResponse;
+            chunks.push(parsed);
+            currentData = "";
+          } catch (parseError) {
+            // 记录解析失败的 chunk
+            skippedChunks++;
+            logger.warn("流式响应 chunk 解析失败", {
+              chunkPreview: clipText(currentData, 100),
+              error: parseError instanceof Error ? parseError.message : "Unknown",
+            });
+            currentData = "";
+          }
+        }
+      }
+    }
+
+    // 处理剩余的 buffer
+    if (buffer.trim()) {
+      const trimmed = buffer.trim();
+      if (trimmed.startsWith("data:")) {
+        const dataContent = trimmed.slice(5).trim();
+        if (dataContent && dataContent !== "[DONE]") {
+          try {
+            const parsed = JSON.parse(dataContent) as ProviderApiResponse;
+            chunks.push(parsed);
+          } catch (parseError) {
+            skippedChunks++;
+            logger.warn("流式响应剩余 buffer 解析失败", {
+              chunkPreview: clipText(dataContent, 100),
+              error: parseError instanceof Error ? parseError.message : "Unknown",
+            });
+          }
+        }
+      }
+    }
+
+    // 处理最后一个未结束的 data
+    if (currentData) {
+      try {
+        const parsed = JSON.parse(currentData) as ProviderApiResponse;
+        chunks.push(parsed);
+      } catch (parseError) {
+        skippedChunks++;
+        logger.warn("流式响应最后一个 chunk 解析失败", {
+          chunkPreview: clipText(currentData, 100),
+          error: parseError instanceof Error ? parseError.message : "Unknown",
+        });
+      }
+    }
+  } catch (error) {
+    // 在错误路径中取消 reader,防止资源泄漏
+    await reader.cancel();
+    throw error;
+  } finally {
+    reader.releaseLock();
+  }
+
+  if (chunks.length === 0) {
+    throw new Error(
+      `未能从流式响应中解析出有效数据${skippedChunks > 0 ? `(跳过 ${skippedChunks} 个无效 chunk)` : ""}`
+    );
+  }
+
+  logger.info("流式响应解析完成", {
+    totalChunks: chunks.length,
+    skippedChunks,
+  });
+
+  // 合并所有 chunks 为完整响应
+  const mergedResponse = mergeStreamChunks(chunks);
+
+  return {
+    data: mergedResponse,
+    chunksReceived: chunks.length,
+    format: "sse",
+  };
+}
+
+/**
+ * 合并流式 chunks 为完整响应
+ */
+function mergeStreamChunks(chunks: ProviderApiResponse[]): ProviderApiResponse {
+  if (chunks.length === 0) {
+    throw new Error("没有可合并的 chunks");
+  }
+
+  // 使用第一个 chunk 作为基础
+  const base = { ...chunks[0] };
+
+  // 合并 usage 信息(取最后一个非空的)
+  for (let i = chunks.length - 1; i >= 0; i--) {
+    const chunk = chunks[i];
+    // Anthropic/OpenAI Chat/OpenAI Responses
+    if ("usage" in chunk && chunk.usage) {
+      if ("usage" in base) {
+        (base as AnthropicMessagesResponse | OpenAIChatResponse | OpenAIResponsesResponse).usage =
+          chunk.usage as (
+            | AnthropicMessagesResponse
+            | OpenAIChatResponse
+            | OpenAIResponsesResponse
+          )["usage"];
+      }
+      break;
+    }
+    // Gemini
+    if ("usageMetadata" in chunk && chunk.usageMetadata) {
+      (base as GeminiResponse).usageMetadata = chunk.usageMetadata;
+      break;
+    }
+  }
+
+  // 合并文本内容
+  let mergedText = "";
+
+  for (const chunk of chunks) {
+    // Anthropic Messages API
+    if ("content" in chunk && Array.isArray(chunk.content)) {
+      for (const content of chunk.content) {
+        if (content.type === "text" && "text" in content) {
+          mergedText += content.text;
+        }
+      }
+    }
+
+    // OpenAI Chat Completions API (流式响应有 delta 字段)
+    if ("choices" in chunk && Array.isArray(chunk.choices)) {
+      const firstChoice = chunk.choices[0];
+      // 流式响应使用 delta
+      if (firstChoice && "delta" in firstChoice) {
+        const delta = firstChoice.delta as { content?: string };
+        if (delta.content) {
+          mergedText += delta.content;
+        }
+      }
+      // 非流式响应使用 message
+      else if (firstChoice?.message?.content) {
+        mergedText += firstChoice.message.content;
+      }
+    }
+
+    // OpenAI Responses API
+    if ("output" in chunk && Array.isArray(chunk.output)) {
+      const firstOutput = chunk.output[0];
+      if (firstOutput?.type === "message" && Array.isArray(firstOutput.content)) {
+        for (const content of firstOutput.content) {
+          if (content.type === "output_text" && "text" in content) {
+            mergedText += content.text;
+          }
+        }
+      }
+    }
+
+    // Gemini API
+    if ("candidates" in chunk && Array.isArray(chunk.candidates)) {
+      const firstCandidate = chunk.candidates[0];
+      if (firstCandidate?.content?.parts) {
+        for (const part of firstCandidate.content.parts) {
+          if (part.text) {
+            mergedText += part.text;
+          }
+        }
+      }
+    }
+  }
+
+  // 将合并后的文本写回到响应对象
+  if (mergedText) {
+    // Anthropic Messages API
+    if ("content" in base && Array.isArray(base.content)) {
+      base.content = [{ type: "text", text: mergedText }];
+    }
+
+    // OpenAI Chat Completions API
+    if ("choices" in base && Array.isArray(base.choices)) {
+      // 类型守卫:确保 base.choices[0] 存在
+      const firstChoice = base.choices[0];
+      if (firstChoice) {
+        base.choices = [
+          {
+            ...firstChoice,
+            message: { role: "assistant", content: mergedText },
+            finish_reason: "stop",
+          },
+        ];
+      } else {
+        // 如果没有 choices,创建一个默认的
+        base.choices = [
+          {
+            index: 0,
+            message: { role: "assistant", content: mergedText },
+            finish_reason: "stop",
+          },
+        ];
+      }
+    }
+
+    // OpenAI Responses API
+    if ("output" in base && Array.isArray(base.output)) {
+      const firstOutput = base.output[0];
+      // 类型守卫:确保这是 OpenAI Responses 格式
+      if (
+        "id" in base &&
+        typeof base.id === "string" &&
+        "type" in base &&
+        base.type === "response"
+      ) {
+        (base as OpenAIResponsesResponse).output = [
+          {
+            type: "message",
+            id: firstOutput?.id || "msg_" + Date.now(),
+            status: firstOutput?.status || "completed",
+            role: "assistant",
+            content: [{ type: "output_text", text: mergedText }],
+          },
+        ];
+      }
+    }
+
+    // Gemini API
+    if ("candidates" in base && Array.isArray(base.candidates)) {
+      const firstCandidate = base.candidates[0];
+      // 类型守卫:确保这是 Gemini 格式
+      if (firstCandidate && "content" in firstCandidate) {
+        (base as GeminiResponse).candidates = [
+          {
+            ...firstCandidate,
+            content: {
+              parts: [{ text: mergedText }],
+            },
+            finishReason: "STOP",
+          },
+        ];
+      } else {
+        // 如果没有 candidates,创建一个默认的
+        (base as GeminiResponse).candidates = [
+          {
+            content: {
+              parts: [{ text: mergedText }],
+            },
+            finishReason: "STOP",
+          },
+        ];
+      }
+    }
+  }
+
+  return base;
+}
+
 type ProviderUrlValidationError = {
   message: string;
   details: {
@@ -1181,7 +1588,136 @@ async function executeProviderApiTest(
     };
   }

-  const result = (await response.json()) as ProviderApiResponse;
+  // 检查响应是否为流式响应(SSE)
+  const contentType = response.headers.get("content-type") || "";
+  const isStreamResponse =
+    contentType.includes("text/event-stream") || contentType.includes("application/x-ndjson");
+
+  if (isStreamResponse) {
+    // 流式响应:读取并解析流式数据
+    logger.info("Provider API test received streaming response", {
+      providerUrl: normalizedProviderUrl.replace(/:\/\/[^@]*@/, "://***@"),
+      contentType,
+    });
+
+    try {
+      const streamResult = await parseStreamResponse(response);
+      const extracted = options.extract(streamResult.data);
+
+      return {
+        ok: true,
+        data: {
+          success: true,
+          message: `${options.successMessage}(流式响应)`,
+          details: {
+            responseTime,
+            ...extracted,
+            streamInfo: {
+              chunksReceived: streamResult.chunksReceived,
+              format: streamResult.format,
+            },
+          },
+        },
+      };
+    } catch (streamError) {
+      logger.error("Provider API test stream parsing failed", {
+        providerUrl: normalizedProviderUrl.replace(/:\/\/[^@]*@/, "://***@"),
+        error: streamError instanceof Error ? streamError.message : String(streamError),
+      });
+
+      return {
+        ok: true,
+        data: {
+          success: false,
+          message: "流式响应解析失败",
+          details: {
+            responseTime,
+            error: streamError instanceof Error ? streamError.message : "无法解析流式响应数据",
+          },
+        },
+      };
+    }
+  }
+
+  // 先读取响应文本,然后尝试解析 JSON
+  const responseText = await response.text();
+
+  // 检查是否为 SSE 格式(即使 Content-Type 未正确设置)
+  // 使用正则表达式进行更健壮的检测
+  const ssePattern = /^(event:|data:)|\n\n(event:|data:)/;
+  const isLikelySSE = ssePattern.test(responseText);
+
+  if (isLikelySSE) {
+    logger.info("Provider API test received SSE response without proper Content-Type", {
+      providerUrl: normalizedProviderUrl.replace(/:\/\/[^@]*@/, "://***@"),
+      contentType,
+      responsePreview: clipText(responseText, 100),
+    });
+
+    try {
+      const streamResult = parseSSEText(responseText);
+      const extracted = options.extract(streamResult.data);
+
+      return {
+        ok: true,
+        data: {
+          success: true,
+          message: `${options.successMessage}(流式响应,Content-Type 未正确设置)`,
+          details: {
+            responseTime,
+            ...extracted,
+            streamInfo: {
+              chunksReceived: streamResult.chunksReceived,
+              format: streamResult.format,
+            },
+          },
+        },
+      };
+    } catch (streamError) {
+      logger.error("Provider API test SSE text parsing failed", {
+        providerUrl: normalizedProviderUrl.replace(/:\/\/[^@]*@/, "://***@"),
+        error: streamError instanceof Error ? streamError.message : String(streamError),
+      });
+
+      return {
+        ok: true,
+        data: {
+          success: false,
+          message: "流式响应解析失败",
+          details: {
+            responseTime,
+            error: streamError instanceof Error ? streamError.message : "无法解析 SSE 格式数据",
+          },
+        },
+      };
+    }
+  }
+
+  // 尝试解析 JSON
+  let result: ProviderApiResponse;
+  try {
+    result = JSON.parse(responseText) as ProviderApiResponse;
+  } catch (jsonError) {
+    logger.error("Provider API test JSON parse failed", {
+      providerUrl: normalizedProviderUrl.replace(/:\/\/[^@]*@/, "://***@"),
+      contentType,
+      responsePreview: clipText(responseText, 100),
+      jsonError: jsonError instanceof Error ? jsonError.message : String(jsonError),
+    });
+
+    return {
+      ok: true,
+      data: {
+        success: false,
+        message: "响应格式无效: 无法解析 JSON",
+        details: {
+          responseTime,
+          error: `JSON 解析失败: ${jsonError instanceof Error ? jsonError.message : "未知错误"}`,
+        },
+      },
+    };
+  }
+
   const extracted = options.extract(result);

   return {
@@ -1291,8 +1827,19 @@ export async function testProviderOpenAIResponses(
     }),
     body: (model) => ({
       model,
-      max_output_tokens: API_TEST_CONFIG.TEST_MAX_TOKENS,
-      input: "讲一个简短的故事",
+      // 注意:不包含 max_output_tokens,因为某些中转服务不支持此参数
+      // input 必须是数组格式,符合 OpenAI Responses API 规范
+      input: [
+        {
+          role: "user",
+          content: [
+            {
+              type: "input_text",
+              text: API_TEST_CONFIG.TEST_PROMPT,
+            },
+          ],
+        },
+      ],
     }),
     successMessage: "OpenAI Responses API 测试成功",
     extract: (result) => ({
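Review note on the stream handling added above: the new parsing keeps only `data:` payloads, skips the `[DONE]` terminator, JSON-parses each event, and merges the per-chunk deltas into a single response object. The sketch below re-implements that idea in isolation for an OpenAI-style chat stream, in a simplified form (it parses each `data:` line directly instead of waiting for the blank-line event boundary the patch uses). It is illustrative only; the `ChatChunk` type, the `mergeChatDeltas` helper, and the sample payload are invented for this note and are not part of the patch.

// Minimal sketch of the SSE accumulation/merge idea behind parseSSEText and
// mergeStreamChunks. Names and payload below are illustrative only.
type ChatChunk = { choices?: Array<{ delta?: { content?: string } }> };

function mergeChatDeltas(sse: string): string {
  let text = "";
  for (const line of sse.split("\n")) {
    const trimmed = line.trim();
    if (!trimmed.startsWith("data:")) continue;      // only SSE data lines carry payloads
    const payload = trimmed.slice(5).trim();
    if (!payload || payload === "[DONE]") continue;  // [DONE] is a terminator, not JSON
    try {
      const chunk = JSON.parse(payload) as ChatChunk;
      text += chunk.choices?.[0]?.delta?.content ?? "";
    } catch {
      // malformed chunks are skipped, mirroring the skippedChunks counter in the patch
    }
  }
  return text;
}

// Example: two delta events followed by the [DONE] marker.
const sample = [
  'data: {"choices":[{"delta":{"content":"Hel"}}]}',
  "",
  'data: {"choices":[{"delta":{"content":"lo"}}]}',
  "",
  "data: [DONE]",
  "",
].join("\n");

console.log(mergeChatDeltas(sample)); // "Hello"

Running the snippet prints "Hello", which is the kind of merged `message.content` the test result would report for such a stream.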
diff --git a/src/app/[locale]/settings/providers/_components/forms/api-test-button.tsx b/src/app/[locale]/settings/providers/_components/forms/api-test-button.tsx
index f6fda3d33..cf763183a 100644
--- a/src/app/[locale]/settings/providers/_components/forms/api-test-button.tsx
+++ b/src/app/[locale]/settings/providers/_components/forms/api-test-button.tsx
@@ -126,6 +126,10 @@ export function ApiTestButton({
       usage?: Record<string, unknown> | string | number;
       content?: string;
       error?: string;
+      streamInfo?: {
+        chunksReceived: number;
+        format: "sse" | "ndjson";
+      };
     };
   } | null>(null);
@@ -325,6 +329,8 @@
           `响应内容: ${testResult.details.content.slice(0, API_TEST_UI_CONFIG.MAX_PREVIEW_LENGTH)}${
             testResult.details.content.length > API_TEST_UI_CONFIG.MAX_PREVIEW_LENGTH ? "..." : ""
           }`,
+        testResult.details?.streamInfo &&
+          `流式响应: 接收 ${testResult.details.streamInfo.chunksReceived} 个数据块 (${testResult.details.streamInfo.format.toUpperCase()})`,
         testResult.details?.error && `错误详情: ${testResult.details.error}`,
       ]
         .filter(Boolean)
@@ -485,6 +491,25 @@
             )}

+            {/* 流式响应信息 */}
+            {testResult.details.streamInfo && (
+              <div>
+                <div>
+                  流式响应信息
+                </div>
+                <div>
+                  <div>
+                    接收到的数据块:{" "}
+                    {testResult.details.streamInfo.chunksReceived}
+                  </div>
+                  <div>
+                    流式格式:{" "}
+                    {testResult.details.streamInfo.format.toUpperCase()}
+                  </div>
+                </div>
+              </div>
+            )}
+
             {/* 错误详情 */}
             {testResult.details.error && (
@@ -577,6 +602,19 @@ export function ApiTestButton({
             )}
+            {testResult.details.streamInfo && (
+              <div>
+                <span>
+                  流式响应:
+                </span>
+                <div>
+                  <span>
+                    接收 {testResult.details.streamInfo.chunksReceived} 个数据块 (
+                    {testResult.details.streamInfo.format.toUpperCase()})
+                  </span>
+                </div>
+              </div>
+            )}
             {testResult.details.error && (
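A second review note: even when a provider streams SSE but mislabels the Content-Type, the patch still detects the stream by matching the body against the `^(event:|data:)|\n\n(event:|data:)` pattern before falling back to plain `JSON.parse`. A minimal sketch of that decision, with invented sample responses:

// Sketch of the fallback SSE detection used when Content-Type is not
// text/event-stream. The sample responses are invented for illustration.
const ssePattern = /^(event:|data:)|\n\n(event:|data:)/;

const misLabeledStream = 'data: {"choices":[{"delta":{"content":"Hi"}}]}\n\ndata: [DONE]\n\n';
const plainJson = '{"choices":[{"message":{"content":"Hi"}}]}';

console.log(ssePattern.test(misLabeledStream)); // true  -> parsed as SSE text
console.log(ssePattern.test(plainJson));        // false -> falls through to JSON.parse

If the pattern matches, the body goes through the same SSE text parser and merge step as a properly labelled stream; otherwise it is treated as a normal non-streaming JSON response.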