diff --git a/messages/en/customs.json b/messages/en/customs.json index c23d942b1..6ffc5aa0e 100644 --- a/messages/en/customs.json +++ b/messages/en/customs.json @@ -42,11 +42,10 @@ "loading": "Loading...", "unknownUser": "unknown", "status": { - "running": "RUNNING", - "init": "INIT", - "idle": "IDLE", - "error": "ERROR", - "done": "DONE" + "inProgressTooltip": "Session has active requests being processed", + "initializingTooltip": "Session is initializing with its first request", + "idleTooltip": "Session is idle with no active requests", + "errorTooltip": "Session encountered an error during processing" } } } diff --git a/messages/en/dashboard.json b/messages/en/dashboard.json index 2cf6b7930..573c8bccf 100644 --- a/messages/en/dashboard.json +++ b/messages/en/dashboard.json @@ -934,6 +934,17 @@ "title": "Provider Availability Monitor", "description": "Real-time monitoring of provider availability and performance metrics", "nav": "Availability Monitor", + "tabs": { + "provider": "Provider Availability", + "endpoint": "Endpoint Health" + }, + "overview": { + "systemAvailability": "System Availability", + "avgLatency": "Avg Latency", + "errorRate": "Error Rate", + "activeProbes": "Active Probes", + "load": "Load" + }, "status": { "green": "Healthy", "red": "Unhealthy", @@ -957,6 +968,11 @@ }, "timeRange": { "label": "Time Range", + "15min": "15 min", + "1h": "1 hour", + "6h": "6 hours", + "24h": "24 hours", + "7d": "7 days", "last15min": "Last 15 minutes", "last1h": "Last 1 hour", "last6h": "Last 6 hours", @@ -1007,7 +1023,13 @@ "autoRefresh": "Auto Refresh", "stopAutoRefresh": "Stop Auto Refresh", "viewDetails": "View Details", - "testProvider": "Test Provider" + "testProvider": "Test Provider", + "retry": "Retry", + "probeNow": "Probe Now", + "probing": "Probing...", + "probeAll": "Probe All", + "probeSuccess": "Probe successful", + "probeFailed": "Probe failed" }, "states": { "loading": "Loading...", @@ -1059,6 +1081,62 @@ "probeSuccess": "Probe successful", "probeFailed": "Probe failed" }, + "laneChart": { + "title": "Provider Availability Timeline", + "noData": "No data available", + "requests": "{count} requests", + "availability": "{value}% available", + "noRequests": "No requests", + "denseData": "Dense", + "sparseData": "Sparse", + "latency": "Latency" + }, + "latencyChart": { + "title": "Latency Distribution", + "p50": "P50", + "p95": "P95", + "p99": "P99", + "noData": "No latency data available" + }, + "latencyCurve": { + "title": "Latency Trend", + "noData": "No latency data available", + "avg": "Avg", + "min": "Min", + "max": "Max", + "latency": "Latency" + }, + "terminal": { + "title": "Probe Logs", + "live": "LIVE", + "download": "Download logs", + "noLogs": "No probe logs available", + "manual": "MANUAL", + "auto": "AUTO", + "filterPlaceholder": "Filter logs..." + }, + "probeGrid": { + "title": "Endpoint Status", + "noEndpoints": "No endpoints configured", + "lastProbe": "Last probe", + "status": { + "unknown": "Unknown", + "healthy": "Healthy", + "unhealthy": "Unhealthy" + } + }, + "endpoint": { + "selectVendor": "Select vendor", + "selectType": "Select type" + }, + "confidence": { + "low": "Low", + "medium": "Medium", + "high": "High", + "lowTooltip": "Less than {count} requests. Data may not be representative.", + "mediumTooltip": "Moderate request volume. Data is reasonably reliable.", + "highTooltip": "High request volume. Data is reliable." 
+ }, "toast": { "refreshSuccess": "Availability data refreshed", "refreshFailed": "Refresh failed, please retry" diff --git a/messages/en/settings/index.ts b/messages/en/settings/index.ts index db8bf1a03..47a6b5424 100644 --- a/messages/en/settings/index.ts +++ b/messages/en/settings/index.ts @@ -13,6 +13,7 @@ import sensitiveWords from "./sensitiveWords.json"; import strings from "./strings.json"; import providersAutoSort from "./providers/autoSort.json"; +import providersBatchEdit from "./providers/batchEdit.json"; import providersFilter from "./providers/filter.json"; import providersGuide from "./providers/guide.json"; import providersInlineEdit from "./providers/inlineEdit.json"; @@ -74,6 +75,7 @@ const providersForm = { const providers = { ...providersStrings, autoSort: providersAutoSort, + batchEdit: providersBatchEdit, filter: providersFilter, form: providersForm, guide: providersGuide, diff --git a/messages/en/settings/providers/form/sections.json b/messages/en/settings/providers/form/sections.json index 62dfad529..8a57f1e5e 100644 --- a/messages/en/settings/providers/form/sections.json +++ b/messages/en/settings/providers/form/sections.json @@ -312,21 +312,21 @@ "disableHint": "Set to 0 to disable the timeout (for canary rollback scenarios only, not recommended)", "nonStreamingTotal": { "core": "true", - "desc": "Non-streaming request total timeout, range 60-1200 seconds, default 600 seconds (10 minutes)", + "desc": "Non-streaming request total timeout, range 60-1200 seconds, enter 0 to disable (default: no limit)", "label": "Non-streaming Total Timeout (seconds)", - "placeholder": "600" + "placeholder": "0" }, "streamingFirstByte": { "core": "true", - "desc": "Streaming request first byte timeout, range 1-120 seconds, default 30 seconds", + "desc": "Streaming request first byte timeout, range 1-180 seconds, enter 0 to disable (default: no limit)", "label": "Streaming First Byte Timeout (seconds)", - "placeholder": "30" + "placeholder": "0" }, "streamingIdle": { "core": "true", - "desc": "Streaming request idle timeout, range 60-600 seconds, enter 0 to disable (prevent mid-stream stalling)", + "desc": "Streaming request idle timeout, range 60-600 seconds, enter 0 to disable (default: no limit)", "label": "Streaming Idle Timeout (seconds)", - "placeholder": "60" + "placeholder": "0" }, "summary": "First byte: {streaming}s | Stream interval: {idle}s | Non-streaming: {nonStreaming}s", "title": "Timeout Configuration" diff --git a/messages/en/settings/providers/form/success.json b/messages/en/settings/providers/form/success.json index d877e6686..f00d85750 100644 --- a/messages/en/settings/providers/form/success.json +++ b/messages/en/settings/providers/form/success.json @@ -1,4 +1,6 @@ { "created": "Provider added successfully", - "createdDesc": "Provider \"{name}\" has been added" + "createdDesc": "Provider \"{name}\" has been added", + "updated": "Provider updated successfully", + "updatedDesc": "Provider \"{name}\" has been updated" } diff --git a/messages/ja/customs.json b/messages/ja/customs.json index 72a1a396d..e68e1a122 100644 --- a/messages/ja/customs.json +++ b/messages/ja/customs.json @@ -42,11 +42,10 @@ "loading": "読み込み中...", "unknownUser": "不明", "status": { - "running": "実行中", - "init": "初期化", - "idle": "アイドル", - "error": "エラー", - "done": "完了" + "inProgressTooltip": "セッションはアクティブなリクエストを処理中です", + "initializingTooltip": "セッションは最初のリクエストで初期化中です", + "idleTooltip": "セッションはアイドル状態で、アクティブなリクエストはありません", + "errorTooltip": "セッション処理中にエラーが発生しました" } } } diff --git 
a/messages/ja/dashboard.json b/messages/ja/dashboard.json index 52c6e2a09..d52587a28 100644 --- a/messages/ja/dashboard.json +++ b/messages/ja/dashboard.json @@ -920,6 +920,82 @@ "title": "プロバイダー可用性モニター", "description": "プロバイダーの可用性とパフォーマンス指標をリアルタイムで監視", "nav": "可用性モニター", + "tabs": { + "provider": "プロバイダー可用性", + "endpoint": "エンドポイント健全性" + }, + "overview": { + "systemAvailability": "システム可用性", + "avgLatency": "平均遅延", + "errorRate": "エラー率", + "activeProbes": "アクティブプローブ", + "load": "負荷" + }, + "timeRange": { + "15min": "15分", + "1h": "1時間", + "6h": "6時間", + "24h": "24時間", + "7d": "7日" + }, + "laneChart": { + "title": "プロバイダー可用性タイムライン", + "noData": "データがありません", + "requests": "{count} リクエスト", + "availability": "{value}% 可用", + "noRequests": "リクエストなし" + }, + "latencyChart": { + "title": "遅延分布", + "p50": "P50", + "p95": "P95", + "p99": "P99", + "noData": "遅延データがありません" + }, + "latencyCurve": { + "title": "遅延トレンド", + "noData": "遅延データがありません", + "avg": "平均", + "min": "最小", + "max": "最大", + "latency": "遅延" + }, + "terminal": { + "title": "プローブログ", + "live": "LIVE", + "download": "ログをダウンロード", + "noLogs": "プローブログがありません", + "manual": "手動", + "auto": "自動", + "filterPlaceholder": "ログをフィルター..." + }, + "probeGrid": { + "noEndpoints": "エンドポイントが設定されていません", + "lastProbe": "最終プローブ", + "status": { + "unknown": "不明", + "healthy": "正常", + "unhealthy": "異常" + } + }, + "endpoint": { + "selectVendor": "ベンダーを選択", + "selectType": "タイプを選択" + }, + "confidence": { + "low": "低", + "medium": "中", + "high": "高", + "lowTooltip": "{count} 件未満のリクエスト。データが代表的でない可能性があります。", + "mediumTooltip": "中程度のリクエスト量。データは比較的信頼できます。", + "highTooltip": "高いリクエスト量。データは信頼できます。" + }, + "actions": { + "probeNow": "今すぐプローブ", + "probing": "プローブ中...", + "probeSuccess": "プローブ成功", + "probeFailed": "プローブ失敗" + }, "status": { "green": "正常(OK)", "red": "異常", @@ -943,6 +1019,11 @@ }, "timeRange": { "label": "時間範囲", + "15min": "15分", + "1h": "1時間", + "6h": "6時間", + "24h": "24時間", + "7d": "7日間", "last15min": "過去15分", "last1h": "過去1時間", "last6h": "過去6時間", @@ -993,7 +1074,13 @@ "autoRefresh": "自動更新", "stopAutoRefresh": "自動更新を停止", "viewDetails": "詳細を表示", - "testProvider": "プロバイダーをテスト" + "testProvider": "プロバイダーをテスト", + "retry": "再試行", + "probeNow": "今すぐプローブ", + "probing": "プローブ中...", + "probeAll": "すべてプローブ", + "probeSuccess": "プローブ成功", + "probeFailed": "プローブ失敗" }, "states": { "loading": "読み込み中...", @@ -1045,6 +1132,62 @@ "probeSuccess": "プローブ成功", "probeFailed": "プローブ失敗" }, + "laneChart": { + "title": "プロバイダー可用性タイムライン", + "noData": "データなし", + "requests": "{count} リクエスト", + "availability": "可用性 {value}%", + "noRequests": "リクエストなし", + "denseData": "高密度", + "sparseData": "低密度", + "latency": "レイテンシ" + }, + "latencyChart": { + "title": "レイテンシ分布", + "p50": "P50", + "p95": "P95", + "p99": "P99", + "noData": "レイテンシデータなし" + }, + "latencyCurve": { + "title": "レイテンシトレンド", + "noData": "レイテンシデータなし", + "avg": "平均", + "min": "最小", + "max": "最大", + "latency": "レイテンシ" + }, + "terminal": { + "title": "プローブログ", + "live": "ライブ", + "download": "ログをダウンロード", + "noLogs": "プローブログなし", + "manual": "手動", + "auto": "自動", + "filterPlaceholder": "ログをフィルター..." 
+ }, + "probeGrid": { + "title": "エンドポイントステータス", + "noEndpoints": "エンドポイント未設定", + "lastProbe": "最終プローブ", + "status": { + "unknown": "不明", + "healthy": "正常", + "unhealthy": "異常" + } + }, + "endpoint": { + "selectVendor": "ベンダーを選択", + "selectType": "タイプを選択" + }, + "confidence": { + "low": "低", + "medium": "中", + "high": "高", + "lowTooltip": "リクエスト数が {count} 未満です。データが代表的でない可能性があります。", + "mediumTooltip": "リクエスト量は適度です。データは比較的信頼できます。", + "highTooltip": "リクエスト量が十分です。データは信頼できます。" + }, "toast": { "refreshSuccess": "可用性データを更新しました", "refreshFailed": "更新に失敗しました。再試行してください" diff --git a/messages/ja/settings/index.ts b/messages/ja/settings/index.ts index db8bf1a03..47a6b5424 100644 --- a/messages/ja/settings/index.ts +++ b/messages/ja/settings/index.ts @@ -13,6 +13,7 @@ import sensitiveWords from "./sensitiveWords.json"; import strings from "./strings.json"; import providersAutoSort from "./providers/autoSort.json"; +import providersBatchEdit from "./providers/batchEdit.json"; import providersFilter from "./providers/filter.json"; import providersGuide from "./providers/guide.json"; import providersInlineEdit from "./providers/inlineEdit.json"; @@ -74,6 +75,7 @@ const providersForm = { const providers = { ...providersStrings, autoSort: providersAutoSort, + batchEdit: providersBatchEdit, filter: providersFilter, form: providersForm, guide: providersGuide, diff --git a/messages/ja/settings/providers/form/sections.json b/messages/ja/settings/providers/form/sections.json index 84c69e2bf..83323980c 100644 --- a/messages/ja/settings/providers/form/sections.json +++ b/messages/ja/settings/providers/form/sections.json @@ -313,21 +313,21 @@ "disableHint": "0に設定するとタイムアウトを無効にします(カナリアロールバックシナリオのみ、非推奨)", "nonStreamingTotal": { "core": "true", - "desc": "非ストリーミングリクエストの総タイムアウト、範囲60~1200秒、デフォルト600秒(10分)", + "desc": "非ストリーミングリクエストの総タイムアウト、範囲60~1200秒、0で無効化(デフォルト: 無制限)", "label": "非ストリーミング総タイムアウト(秒)", - "placeholder": "600" + "placeholder": "0" }, "streamingFirstByte": { "core": "true", - "desc": "ストリーミングリクエストの初バイトタイムアウト、範囲1~120秒、デフォルト30秒", + "desc": "ストリーミングリクエストの初バイトタイムアウト、範囲1~180秒、0で無効化(デフォルト: 無制限)", "label": "ストリーミング初バイトタイムアウト(秒)", - "placeholder": "30" + "placeholder": "0" }, "streamingIdle": { "core": "true", - "desc": "ストリーミングリクエストのアイドルタイムアウト、範囲60~600秒、0で無効化(途中停止防止)", + "desc": "ストリーミングリクエストのアイドルタイムアウト、範囲60~600秒、0で無効化(デフォルト: 無制限)", "label": "ストリーミングアイドルタイムアウト(秒)", - "placeholder": "60" + "placeholder": "0" }, "summary": "初回バイト: {streaming}s | ストリーム間隔: {idle}s | 非ストリーミング: {nonStreaming}s", "title": "タイムアウト設定" diff --git a/messages/ja/settings/providers/form/success.json b/messages/ja/settings/providers/form/success.json index 0fa668b07..52fd7be53 100644 --- a/messages/ja/settings/providers/form/success.json +++ b/messages/ja/settings/providers/form/success.json @@ -1,4 +1,6 @@ { "created": "プロバイダーを追加しました", - "createdDesc": "「{name}」を追加しました" + "createdDesc": "「{name}」を追加しました", + "updated": "プロバイダーを更新しました", + "updatedDesc": "「{name}」を更新しました" } diff --git a/messages/ru/customs.json b/messages/ru/customs.json index aa7fad617..52f236d90 100644 --- a/messages/ru/customs.json +++ b/messages/ru/customs.json @@ -42,11 +42,10 @@ "loading": "Загрузка...", "unknownUser": "неизвестно", "status": { - "running": "РАБОТАЕТ", - "init": "ИНИЦИАЛИЗАЦИЯ", - "idle": "ОЖИДАНИЕ", - "error": "ОШИБКА", - "done": "ЗАВЕРШЕНО" + "inProgressTooltip": "Сеанс обрабатывает активные запросы", + "initializingTooltip": "Сеанс инициализируется с первым запросом", + "idleTooltip": "Сеанс простаивает, нет активных запросов", + "errorTooltip": "Во время 
обработки сеанса произошла ошибка" } } } diff --git a/messages/ru/dashboard.json b/messages/ru/dashboard.json index 9a93d4299..f673bec36 100644 --- a/messages/ru/dashboard.json +++ b/messages/ru/dashboard.json @@ -922,6 +922,95 @@ "title": "Мониторинг доступности провайдеров", "description": "Мониторинг доступности и показателей производительности провайдеров в реальном времени", "nav": "Мониторинг доступности", + "tabs": { + "provider": "Доступность провайдеров", + "endpoint": "Здоровье эндпоинтов" + }, + "overview": { + "systemAvailability": "Доступность системы", + "avgLatency": "Средняя задержка", + "errorRate": "Коэффициент ошибок", + "activeProbes": "Активные проверки", + "load": "Нагрузка" + }, + "timeRange": { + "label": "Временной диапазон", + "15min": "15 мин", + "1h": "1 час", + "6h": "6 часов", + "24h": "24 часа", + "7d": "7 дней", + "last15min": "Последние 15 минут", + "last1h": "Последний час", + "last6h": "Последние 6 часов", + "last24h": "Последние 24 часа", + "last7d": "7д", + "custom": "Настраиваемый" + }, + "laneChart": { + "title": "Хронология доступности провайдеров", + "noData": "Нет данных", + "requests": "{count} запросов", + "availability": "{value}% доступно", + "noRequests": "Нет запросов", + "denseData": "Плотные", + "sparseData": "Разреженные", + "latency": "Задержка" + }, + "latencyChart": { + "title": "Распределение задержки", + "p50": "P50", + "p95": "P95", + "p99": "P99", + "noData": "Нет данных о задержке" + }, + "latencyCurve": { + "title": "Тренд задержки", + "noData": "Нет данных о задержке", + "avg": "Средн.", + "min": "Мин.", + "max": "Макс.", + "latency": "Задержка" + }, + "terminal": { + "title": "Журнал проверок", + "live": "LIVE", + "download": "Скачать журнал", + "noLogs": "Нет журналов проверок", + "manual": "Ручная", + "auto": "Авто", + "filterPlaceholder": "Фильтр журналов..." + }, + "probeGrid": { + "title": "Статус эндпоинтов", + "noEndpoints": "Эндпоинты не настроены", + "lastProbe": "Последняя проверка", + "status": { + "unknown": "Неизвестно", + "healthy": "Здоров", + "unhealthy": "Нездоров" + } + }, + "endpoint": { + "selectVendor": "Выберите вендора", + "selectType": "Выберите тип" + }, + "confidence": { + "low": "Низкая", + "medium": "Средняя", + "high": "Высокая", + "lowTooltip": "Менее {count} запросов. Данные могут быть нерепрезентативными.", + "mediumTooltip": "Умеренный объём запросов. Данные достаточно надёжны.", + "highTooltip": "Высокий объём запросов. Данные надёжны." 
+ }, + "actions": { + "retry": "Повторить", + "probeNow": "Проверить сейчас", + "probing": "Проверка...", + "probeAll": "Проверить все", + "probeSuccess": "Проверка успешна", + "probeFailed": "Проверка не удалась" + }, "status": { "green": "Здоров", "red": "Недоступен", @@ -943,15 +1032,6 @@ "lastRequest": "Последний запрос", "requestCount": "Кол-во запросов" }, - "timeRange": { - "label": "Временной диапазон", - "last15min": "Последние 15 минут", - "last1h": "Последний час", - "last6h": "Последние 6 часов", - "last24h": "Последние 24 часа", - "last7d": "7д", - "custom": "Настраиваемый" - }, "filters": { "provider": "Провайдер", "allProviders": "Все провайдеры", diff --git a/messages/ru/settings/index.ts b/messages/ru/settings/index.ts index db8bf1a03..47a6b5424 100644 --- a/messages/ru/settings/index.ts +++ b/messages/ru/settings/index.ts @@ -13,6 +13,7 @@ import sensitiveWords from "./sensitiveWords.json"; import strings from "./strings.json"; import providersAutoSort from "./providers/autoSort.json"; +import providersBatchEdit from "./providers/batchEdit.json"; import providersFilter from "./providers/filter.json"; import providersGuide from "./providers/guide.json"; import providersInlineEdit from "./providers/inlineEdit.json"; @@ -74,6 +75,7 @@ const providersForm = { const providers = { ...providersStrings, autoSort: providersAutoSort, + batchEdit: providersBatchEdit, filter: providersFilter, form: providersForm, guide: providersGuide, diff --git a/messages/ru/settings/providers/form/sections.json b/messages/ru/settings/providers/form/sections.json index afea91597..561fa7802 100644 --- a/messages/ru/settings/providers/form/sections.json +++ b/messages/ru/settings/providers/form/sections.json @@ -313,21 +313,21 @@ "disableHint": "Установите 0 для отключения тайм-аута (только для сценариев отката канарейки, не рекомендуется)", "nonStreamingTotal": { "core": "true", - "desc": "Полный тайм-аут непотоковой передачи, диапазон 60-1200 секунд, значение по умолчанию 600 секунд (10 минут)", + "desc": "Полный тайм-аут непотоковой передачи, диапазон 60-1200 секунд, 0 для отключения (по умолчанию: без ограничений)", "label": "Полный тайм-аут непотоковой передачи (секунды)", - "placeholder": "600" + "placeholder": "0" }, "streamingFirstByte": { "core": "true", - "desc": "Тайм-аут первого байта потоковой передачи, диапазон 1-120 секунд, значение по умолчанию 30 секунд", + "desc": "Тайм-аут первого байта потоковой передачи, диапазон 1-180 секунд, 0 для отключения (по умолчанию: без ограничений)", "label": "Тайм-аут первого байта потока (секунды)", - "placeholder": "30" + "placeholder": "0" }, "streamingIdle": { "core": "true", - "desc": "Тайм-аут простоя потоковой передачи, диапазон 60-600 секунд, введите 0 для отключения (предотвращение застревания)", + "desc": "Тайм-аут простоя потоковой передачи, диапазон 60-600 секунд, 0 для отключения (по умолчанию: без ограничений)", "label": "Тайм-аут простоя потока (секунды)", - "placeholder": "60" + "placeholder": "0" }, "summary": "1 байт: {streaming}с | поток: {idle}с | не поток: {nonStreaming}с", "title": "Конфигурация тайм-аута" diff --git a/messages/ru/settings/providers/form/success.json b/messages/ru/settings/providers/form/success.json index 8a6de6453..e009eba99 100644 --- a/messages/ru/settings/providers/form/success.json +++ b/messages/ru/settings/providers/form/success.json @@ -1,4 +1,6 @@ { "created": "Провайдер успешно добавлен", - "createdDesc": "Провайдер «{name}» добавлен" + "createdDesc": "Провайдер «{name}» добавлен", + "updated": 
"Провайдер успешно обновлен", + "updatedDesc": "Провайдер «{name}» обновлен" } diff --git a/messages/zh-CN/customs.json b/messages/zh-CN/customs.json index f035820bf..0a42416d0 100644 --- a/messages/zh-CN/customs.json +++ b/messages/zh-CN/customs.json @@ -42,11 +42,10 @@ "loading": "加载中...", "unknownUser": "未知", "status": { - "running": "运行中", - "init": "初始化", - "idle": "空闲", - "error": "错误", - "done": "完成" + "inProgressTooltip": "会话正在处理活跃请求", + "initializingTooltip": "会话正在初始化首个请求", + "idleTooltip": "会话空闲,无活跃请求", + "errorTooltip": "会话处理过程中发生错误" } } } diff --git a/messages/zh-CN/dashboard.json b/messages/zh-CN/dashboard.json index 9b04c2c95..495839c05 100644 --- a/messages/zh-CN/dashboard.json +++ b/messages/zh-CN/dashboard.json @@ -1039,6 +1039,17 @@ "title": "供应商可用性监控", "description": "实时监控供应商的可用性状态和性能指标", "nav": "可用性监控", + "tabs": { + "provider": "供应商可用性", + "endpoint": "端点健康" + }, + "overview": { + "systemAvailability": "系统可用性", + "avgLatency": "平均延迟", + "errorRate": "错误率", + "activeProbes": "活跃探测", + "load": "负载" + }, "status": { "green": "正常", "red": "异常", @@ -1062,6 +1073,11 @@ }, "timeRange": { "label": "时间范围", + "15min": "15 分钟", + "1h": "1 小时", + "6h": "6 小时", + "24h": "24 小时", + "7d": "7 天", "last15min": "最近 15 分钟", "last1h": "最近 1 小时", "last6h": "最近 6 小时", @@ -1112,7 +1128,13 @@ "autoRefresh": "自动刷新", "stopAutoRefresh": "停止自动刷新", "viewDetails": "查看详情", - "testProvider": "测试供应商" + "testProvider": "测试供应商", + "retry": "重试", + "probeNow": "立即探测", + "probing": "探测中...", + "probeAll": "探测全部", + "probeSuccess": "探测成功", + "probeFailed": "探测失败" }, "states": { "loading": "加载中...", @@ -1164,6 +1186,62 @@ "probeSuccess": "探测成功", "probeFailed": "探测失败" }, + "laneChart": { + "title": "供应商可用性时间线", + "noData": "暂无数据", + "requests": "{count} 个请求", + "availability": "可用性 {value}%", + "noRequests": "无请求", + "denseData": "密集", + "sparseData": "稀疏", + "latency": "延迟" + }, + "latencyChart": { + "title": "延迟分布", + "p50": "P50", + "p95": "P95", + "p99": "P99", + "noData": "暂无延迟数据" + }, + "latencyCurve": { + "title": "延迟趋势", + "noData": "暂无延迟数据", + "avg": "平均", + "min": "最小", + "max": "最大", + "latency": "延迟" + }, + "terminal": { + "title": "探测日志", + "live": "实时", + "download": "下载日志", + "noLogs": "暂无探测日志", + "manual": "手动", + "auto": "自动", + "filterPlaceholder": "筛选日志..." 
+ }, + "probeGrid": { + "title": "端点状态", + "noEndpoints": "未配置端点", + "lastProbe": "最后探测", + "status": { + "unknown": "未知", + "healthy": "健康", + "unhealthy": "异常" + } + }, + "endpoint": { + "selectVendor": "选择供应商", + "selectType": "选择类型" + }, + "confidence": { + "low": "低", + "medium": "中", + "high": "高", + "lowTooltip": "请求数少于 {count},数据可能不具代表性。", + "mediumTooltip": "请求量适中,数据较为可靠。", + "highTooltip": "请求量充足,数据可靠。" + }, "toast": { "refreshSuccess": "可用性数据已刷新", "refreshFailed": "刷新失败,请重试" diff --git a/messages/zh-CN/settings/index.ts b/messages/zh-CN/settings/index.ts index db8bf1a03..47a6b5424 100644 --- a/messages/zh-CN/settings/index.ts +++ b/messages/zh-CN/settings/index.ts @@ -13,6 +13,7 @@ import sensitiveWords from "./sensitiveWords.json"; import strings from "./strings.json"; import providersAutoSort from "./providers/autoSort.json"; +import providersBatchEdit from "./providers/batchEdit.json"; import providersFilter from "./providers/filter.json"; import providersGuide from "./providers/guide.json"; import providersInlineEdit from "./providers/inlineEdit.json"; @@ -74,6 +75,7 @@ const providersForm = { const providers = { ...providersStrings, autoSort: providersAutoSort, + batchEdit: providersBatchEdit, filter: providersFilter, form: providersForm, guide: providersGuide, diff --git a/messages/zh-CN/settings/providers/form/sections.json b/messages/zh-CN/settings/providers/form/sections.json index 831e8463a..36e89df83 100644 --- a/messages/zh-CN/settings/providers/form/sections.json +++ b/messages/zh-CN/settings/providers/form/sections.json @@ -246,20 +246,20 @@ "desc": "配置请求超时时间,0 表示禁用超时", "streamingFirstByte": { "label": "流式首字节超时(秒)", - "placeholder": "30", - "desc": "流式请求首字节超时,范围 1-120 秒,默认 30 秒", + "placeholder": "0", + "desc": "流式请求首字节超时,范围 1-180 秒,填 0 禁用(默认不限制)", "core": "true" }, "streamingIdle": { "label": "流式静默期超时(秒)", - "placeholder": "60", - "desc": "流式请求静默期超时,范围 60-600 秒,填 0 禁用(防止中途卡住)", + "placeholder": "0", + "desc": "流式请求静默期超时,范围 60-600 秒,填 0 禁用(默认不限制)", "core": "true" }, "nonStreamingTotal": { "label": "非流式总超时(秒)", - "placeholder": "600", - "desc": "非流式请求总超时,范围 60-1200 秒,默认 600 秒(10 分钟)", + "placeholder": "0", + "desc": "非流式请求总超时,范围 60-1200 秒,填 0 禁用(默认不限制)", "core": "true" }, "disableHint": "设为 0 表示禁用该超时(仅用于灰度回退场景,不推荐)" diff --git a/messages/zh-CN/settings/providers/form/success.json b/messages/zh-CN/settings/providers/form/success.json index 7f79b9556..a8ccefdbe 100644 --- a/messages/zh-CN/settings/providers/form/success.json +++ b/messages/zh-CN/settings/providers/form/success.json @@ -1,4 +1,6 @@ { "created": "添加服务商成功", - "createdDesc": "服务商 \"{name}\" 已添加" + "createdDesc": "服务商 \"{name}\" 已添加", + "updated": "更新服务商成功", + "updatedDesc": "服务商 \"{name}\" 已更新" } diff --git a/messages/zh-TW/customs.json b/messages/zh-TW/customs.json index e0e468de7..81822fdff 100644 --- a/messages/zh-TW/customs.json +++ b/messages/zh-TW/customs.json @@ -42,11 +42,10 @@ "loading": "載入中...", "unknownUser": "未知", "status": { - "running": "執行中", - "init": "初始化", - "idle": "閒置", - "error": "錯誤", - "done": "完成" + "inProgressTooltip": "會話正在處理活躍請求", + "initializingTooltip": "會話正在初始化首個請求", + "idleTooltip": "會話閒置,無活躍請求", + "errorTooltip": "會話處理過程中發生錯誤" } } } diff --git a/messages/zh-TW/dashboard.json b/messages/zh-TW/dashboard.json index 10ae12463..016e37293 100644 --- a/messages/zh-TW/dashboard.json +++ b/messages/zh-TW/dashboard.json @@ -920,6 +920,93 @@ "title": "供應商可用性監控", "description": "即時監控供應商的可用性狀態和效能指標", "nav": "可用性監控", + "tabs": { + "provider": "供應商可用性", + "endpoint": "端點健康" + }, + 
"overview": { + "systemAvailability": "系統可用性", + "avgLatency": "平均延遲", + "errorRate": "錯誤率", + "activeProbes": "活躍探測", + "load": "負載" + }, + "timeRange": { + "label": "時間範圍", + "15min": "15 分鐘", + "1h": "1 小時", + "6h": "6 小時", + "24h": "24 小時", + "7d": "7 天", + "last15min": "最近 15 分鐘", + "last1h": "最近 1 小時", + "last6h": "最近 6 小時", + "last24h": "最近 24 小時", + "last7d": "近 7 天", + "custom": "自訂" + }, + "laneChart": { + "title": "供應商可用性時間軸", + "noData": "暫無資料", + "requests": "{count} 個請求", + "availability": "可用性 {value}%", + "noRequests": "無請求", + "denseData": "密集", + "sparseData": "稀疏", + "latency": "延遲" + }, + "latencyChart": { + "title": "延遲分佈", + "p50": "P50", + "p95": "P95", + "p99": "P99", + "noData": "暫無延遲資料" + }, + "latencyCurve": { + "title": "延遲趨勢", + "noData": "暫無延遲資料", + "avg": "平均", + "min": "最小", + "max": "最大", + "latency": "延遲" + }, + "terminal": { + "title": "探測日誌", + "live": "即時", + "download": "下載日誌", + "noLogs": "暫無探測日誌", + "manual": "手動", + "auto": "自動", + "filterPlaceholder": "篩選日誌..." + }, + "probeGrid": { + "title": "端點狀態", + "noEndpoints": "未設定端點", + "lastProbe": "最後探測", + "status": { + "unknown": "未知", + "healthy": "健康", + "unhealthy": "異常" + } + }, + "endpoint": { + "selectVendor": "選擇供應商", + "selectType": "選擇類型" + }, + "confidence": { + "low": "低", + "medium": "中", + "high": "高", + "lowTooltip": "請求數少於 {count},資料可能不具代表性。", + "mediumTooltip": "請求量適中,資料較為可靠。", + "highTooltip": "請求量充足,資料可靠。" + }, + "actions": { + "probeNow": "立即探測", + "probing": "探測中...", + "probeSuccess": "探測成功", + "probeFailed": "探測失敗" + }, "status": { "green": "良好", "red": "異常", @@ -941,15 +1028,6 @@ "lastRequest": "最後請求", "requestCount": "請求數" }, - "timeRange": { - "label": "時間範圍", - "last15min": "最近 15 分鐘", - "last1h": "最近 1 小時", - "last6h": "最近 6 小時", - "last24h": "最近 24 小時", - "last7d": "近 7 天", - "custom": "自訂" - }, "filters": { "provider": "供應商", "allProviders": "全部供應商", @@ -993,7 +1071,13 @@ "autoRefresh": "自動重新整理", "stopAutoRefresh": "停止自動重新整理", "viewDetails": "檢視詳情", - "testProvider": "測試供應商" + "testProvider": "測試供應商", + "retry": "重試", + "probeNow": "立即探測", + "probing": "探測中...", + "probeAll": "探測全部", + "probeSuccess": "探測成功", + "probeFailed": "探測失敗" }, "states": { "loading": "載入中...", diff --git a/messages/zh-TW/settings/index.ts b/messages/zh-TW/settings/index.ts index db8bf1a03..47a6b5424 100644 --- a/messages/zh-TW/settings/index.ts +++ b/messages/zh-TW/settings/index.ts @@ -13,6 +13,7 @@ import sensitiveWords from "./sensitiveWords.json"; import strings from "./strings.json"; import providersAutoSort from "./providers/autoSort.json"; +import providersBatchEdit from "./providers/batchEdit.json"; import providersFilter from "./providers/filter.json"; import providersGuide from "./providers/guide.json"; import providersInlineEdit from "./providers/inlineEdit.json"; @@ -74,6 +75,7 @@ const providersForm = { const providers = { ...providersStrings, autoSort: providersAutoSort, + batchEdit: providersBatchEdit, filter: providersFilter, form: providersForm, guide: providersGuide, diff --git a/messages/zh-TW/settings/providers/form/sections.json b/messages/zh-TW/settings/providers/form/sections.json index e5cb7ba6a..779945de1 100644 --- a/messages/zh-TW/settings/providers/form/sections.json +++ b/messages/zh-TW/settings/providers/form/sections.json @@ -313,21 +313,21 @@ "disableHint": "設為 0 表示禁用該超時(僅用於灰度回退場景,不推薦)", "nonStreamingTotal": { "core": "true", - "desc": "非串流請求總超時,範圍 60-1200 秒,預設 600 秒(10 分鐘)", + "desc": "非串流請求總超時,範圍 60-1200 秒,填 0 禁用(預設不限制)", "label": "非串流總超時(秒)", - "placeholder": "600" 
+ "placeholder": "0" }, "streamingFirstByte": { "core": "true", - "desc": "串流請求首字節超時,範圍 1-120 秒,預設 30 秒", + "desc": "串流請求首字節超時,範圍 1-180 秒,填 0 禁用(預設不限制)", "label": "串流首字節超時(秒)", - "placeholder": "30" + "placeholder": "0" }, "streamingIdle": { "core": "true", - "desc": "串流請求靜默期超時,範圍 60-600 秒,填 0 禁用(防止中途卡住)", + "desc": "串流請求靜默期超時,範圍 60-600 秒,填 0 禁用(預設不限制)", "label": "串流靜默期超時(秒)", - "placeholder": "60" + "placeholder": "0" }, "summary": "首字: {streaming}s | 串流間隔: {idle}s | 非串流: {nonStreaming}s", "title": "超時配置" diff --git a/messages/zh-TW/settings/providers/form/success.json b/messages/zh-TW/settings/providers/form/success.json index f6bb1c35a..597eeaec8 100644 --- a/messages/zh-TW/settings/providers/form/success.json +++ b/messages/zh-TW/settings/providers/form/success.json @@ -1,4 +1,6 @@ { "created": "新增供應商成功", - "createdDesc": "供應商「{name}」已新增" + "createdDesc": "供應商「{name}」已新增", + "updated": "更新供應商成功", + "updatedDesc": "供應商「{name}」已更新" } diff --git a/src/actions/active-sessions.ts b/src/actions/active-sessions.ts index baeee2e32..2d5fb4cf3 100644 --- a/src/actions/active-sessions.ts +++ b/src/actions/active-sessions.ts @@ -43,33 +43,42 @@ export async function getActiveSessions(): Promise s.userId === currentUserId); + // 获取并发计数(即使缓存命中也需要实时获取) + const { SessionTracker } = await import("@/lib/session-tracker"); + const cachedSessionIds = filteredData.map((s) => s.sessionId); + const concurrentCounts = await SessionTracker.getConcurrentCountBatch(cachedSessionIds); + return { ok: true, - data: filteredData.map((s) => ({ - sessionId: s.sessionId, - userName: s.userName, - userId: s.userId, - keyId: s.keyId, - keyName: s.keyName, - providerId: s.providers[0]?.id || null, - providerName: s.providers.map((p) => p.name).join(", ") || null, - model: s.models.join(", ") || null, - apiType: (s.apiType as "chat" | "codex") || "chat", - startTime: s.firstRequestAt ? new Date(s.firstRequestAt).getTime() : Date.now(), - inputTokens: s.totalInputTokens, - outputTokens: s.totalOutputTokens, - cacheCreationInputTokens: s.totalCacheCreationTokens, - cacheReadInputTokens: s.totalCacheReadTokens, - totalTokens: - s.totalInputTokens + - s.totalOutputTokens + - s.totalCacheCreationTokens + - s.totalCacheReadTokens, - costUsd: s.totalCostUsd, - status: "completed", - durationMs: s.totalDurationMs, - requestCount: s.requestCount, - })), + data: filteredData.map((s) => { + const concurrentCount = concurrentCounts.get(s.sessionId) ?? 0; + return { + sessionId: s.sessionId, + userName: s.userName, + userId: s.userId, + keyId: s.keyId, + keyName: s.keyName, + providerId: s.providers[0]?.id || null, + providerName: s.providers.map((p) => p.name).join(", ") || null, + model: s.models.join(", ") || null, + apiType: (s.apiType as "chat" | "codex") || "chat", + startTime: s.firstRequestAt ? new Date(s.firstRequestAt).getTime() : Date.now(), + inputTokens: s.totalInputTokens, + outputTokens: s.totalOutputTokens, + cacheCreationInputTokens: s.totalCacheCreationTokens, + cacheReadInputTokens: s.totalCacheReadTokens, + totalTokens: + s.totalInputTokens + + s.totalOutputTokens + + s.totalCacheCreationTokens + + s.totalCacheReadTokens, + costUsd: s.totalCostUsd, + status: concurrentCount > 0 ? "in_progress" : "completed", + durationMs: s.totalDurationMs, + requestCount: s.requestCount, + concurrentCount, + }; + }), }; } @@ -85,6 +94,10 @@ export async function getActiveSessions(): Promise s.sessionId); + const concurrentCounts = await SessionTracker.getConcurrentCountBatch(allSessionIds); + // 4. 
写入缓存 setActiveSessionsCache(sessionsData); @@ -94,31 +107,35 @@ export async function getActiveSessions(): Promise s.userId === currentUserId); // 6. 转换格式 - const sessions: ActiveSessionInfo[] = filteredSessions.map((s) => ({ - sessionId: s.sessionId, - userName: s.userName, - userId: s.userId, - keyId: s.keyId, - keyName: s.keyName, - providerId: s.providers[0]?.id || null, - providerName: s.providers.map((p) => p.name).join(", ") || null, - model: s.models.join(", ") || null, - apiType: (s.apiType as "chat" | "codex") || "chat", - startTime: s.firstRequestAt ? new Date(s.firstRequestAt).getTime() : Date.now(), - inputTokens: s.totalInputTokens, - outputTokens: s.totalOutputTokens, - cacheCreationInputTokens: s.totalCacheCreationTokens, - cacheReadInputTokens: s.totalCacheReadTokens, - totalTokens: - s.totalInputTokens + - s.totalOutputTokens + - s.totalCacheCreationTokens + - s.totalCacheReadTokens, - costUsd: s.totalCostUsd, - status: "completed", - durationMs: s.totalDurationMs, - requestCount: s.requestCount, - })); + const sessions: ActiveSessionInfo[] = filteredSessions.map((s) => { + const concurrentCount = concurrentCounts.get(s.sessionId) ?? 0; + return { + sessionId: s.sessionId, + userName: s.userName, + userId: s.userId, + keyId: s.keyId, + keyName: s.keyName, + providerId: s.providers[0]?.id || null, + providerName: s.providers.map((p) => p.name).join(", ") || null, + model: s.models.join(", ") || null, + apiType: (s.apiType as "chat" | "codex") || "chat", + startTime: s.firstRequestAt ? new Date(s.firstRequestAt).getTime() : Date.now(), + inputTokens: s.totalInputTokens, + outputTokens: s.totalOutputTokens, + cacheCreationInputTokens: s.totalCacheCreationTokens, + cacheReadInputTokens: s.totalCacheReadTokens, + totalTokens: + s.totalInputTokens + + s.totalOutputTokens + + s.totalCacheCreationTokens + + s.totalCacheReadTokens, + costUsd: s.totalCostUsd, + status: concurrentCount > 0 ? 
"in_progress" : "completed", + durationMs: s.totalDurationMs, + requestCount: s.requestCount, + concurrentCount, + }; + }); logger.debug( `[SessionCache] Active sessions fetched and cached, count: ${sessions.length} (filtered for user: ${currentUserId})` diff --git a/src/app/[locale]/dashboard/_components/bento/dashboard-bento.tsx b/src/app/[locale]/dashboard/_components/bento/dashboard-bento.tsx index af074889f..c5a347cc6 100644 --- a/src/app/[locale]/dashboard/_components/bento/dashboard-bento.tsx +++ b/src/app/[locale]/dashboard/_components/bento/dashboard-bento.tsx @@ -258,11 +258,7 @@ export function DashboardBento({ {/* Live Sessions Panel - Right sidebar, spans 2 rows */} {isAdmin && ( - + )} {/* Leaderboard Cards - Below chart, 3 columns */} diff --git a/src/app/[locale]/dashboard/_components/bento/live-sessions-panel.tsx b/src/app/[locale]/dashboard/_components/bento/live-sessions-panel.tsx index 344470b4e..9c505d294 100644 --- a/src/app/[locale]/dashboard/_components/bento/live-sessions-panel.tsx +++ b/src/app/[locale]/dashboard/_components/bento/live-sessions-panel.tsx @@ -1,89 +1,62 @@ "use client"; -import { Activity, AlertCircle, CheckCircle2, Circle, XCircle } from "lucide-react"; +import { Activity, AlertCircle, Circle, XCircle } from "lucide-react"; import { useRouter } from "next/navigation"; import { useTranslations } from "next-intl"; import { useCallback, useEffect, useRef, useState } from "react"; +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip"; +import { + getSessionDisplayStatus, + SESSION_DISPLAY_STATUS, + type SessionStatusInfo, +} from "@/lib/session-status"; import { cn } from "@/lib/utils"; import type { ActiveSessionInfo } from "@/types/session"; import { BentoCard } from "./bento-grid"; interface LiveSessionsPanelProps { - sessions: (ActiveSessionInfo & { lastActivityAt?: number })[]; + sessions: ActiveSessionInfo[]; isLoading?: boolean; maxItems?: number; className?: string; } -type SessionStatus = "running" | "idle" | "error" | "done" | "init"; - -function getSessionStatus(session: ActiveSessionInfo & { lastActivityAt?: number }): SessionStatus { - // Determine status based on session activity and startTime - const now = Date.now(); - const lastActivity = session.lastActivityAt ?? 
session.startTime; - const idleThreshold = 60 * 1000; // 1 minute - - if (session.status === "error" || (session as { status?: string }).status === "error") { - return "error"; - } - - if (now - lastActivity < 5000) { - return "running"; - } - - if (now - lastActivity < idleThreshold) { - return "init"; - } - - return "idle"; -} - -const statusConfig: Record< - SessionStatus, - { icon: typeof Circle; color: string; pulse?: boolean; labelKey: string } -> = { - running: { - icon: Circle, - color: "text-emerald-500 dark:text-emerald-400", - pulse: true, - labelKey: "status.running", - }, - init: { - icon: Circle, - color: "text-amber-500 dark:text-amber-400", - pulse: true, - labelKey: "status.init", - }, - idle: { - icon: Circle, - color: "text-muted-foreground/50", - pulse: false, - labelKey: "status.idle", - }, - error: { - icon: XCircle, - color: "text-rose-500 dark:text-rose-400", - pulse: true, - labelKey: "status.error", - }, - done: { - icon: CheckCircle2, - color: "text-muted-foreground/50", - pulse: false, - labelKey: "status.done", - }, -}; - function SessionItem({ session }: { session: ActiveSessionInfo }) { const router = useRouter(); const t = useTranslations("customs.activeSessions"); - const status = getSessionStatus(session); - const config = statusConfig[status]; - const StatusIcon = config.icon; + const statusInfo = getSessionDisplayStatus({ + concurrentCount: session.concurrentCount, + requestCount: session.requestCount, + status: session.status, + }); const shortId = session.sessionId.slice(-6); const userName = session.userName || t("unknownUser"); + // Determine ping animation color based on status + const getPingColor = (info: SessionStatusInfo) => { + if (info.status === SESSION_DISPLAY_STATUS.IN_PROGRESS) { + return info.label === "ERROR" ? "bg-rose-500" : "bg-emerald-500"; + } + if (info.status === SESSION_DISPLAY_STATUS.INITIALIZING) { + return "bg-amber-500"; + } + return ""; + }; + + // Determine user name color based on status + const getUserNameColor = (info: SessionStatusInfo) => { + if (info.status === SESSION_DISPLAY_STATUS.IN_PROGRESS) { + return info.label === "ERROR" + ? 
"text-rose-500 dark:text-rose-400" + : "text-blue-500 dark:text-blue-400"; + } + if (info.status === SESSION_DISPLAY_STATUS.INITIALIZING) { + return "text-amber-600 dark:text-amber-300"; + } + return "text-muted-foreground"; + }; + return ( ); } diff --git a/src/app/[locale]/dashboard/availability/_components/availability-dashboard.tsx b/src/app/[locale]/dashboard/availability/_components/availability-dashboard.tsx new file mode 100644 index 000000000..6a552a79d --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/availability-dashboard.tsx @@ -0,0 +1,174 @@ +"use client"; + +import { useTranslations } from "next-intl"; +import { useCallback, useEffect, useState } from "react"; +import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"; +import type { AvailabilityQueryResult } from "@/lib/availability"; +import { cn } from "@/lib/utils"; +import { EndpointTab } from "./endpoint/endpoint-tab"; +import { OverviewSection } from "./overview/overview-section"; +import { ProviderTab } from "./provider/provider-tab"; +import { FloatingProbeButton } from "./shared/floating-probe-button"; + +export type TimeRangeOption = "15min" | "1h" | "6h" | "24h" | "7d"; + +// Target number of buckets to fill the heatmap width consistently +const TARGET_BUCKETS = 60; + +const TIME_RANGE_MAP: Record = { + "15min": 15 * 60 * 1000, + "1h": 60 * 60 * 1000, + "6h": 6 * 60 * 60 * 1000, + "24h": 24 * 60 * 60 * 1000, + "7d": 7 * 24 * 60 * 60 * 1000, +}; + +function calculateBucketSize(timeRangeMs: number): number { + const bucketSizeMs = timeRangeMs / TARGET_BUCKETS; + const bucketSizeMinutes = bucketSizeMs / (60 * 1000); + return Math.max(0.25, Math.round(bucketSizeMinutes * 4) / 4); +} + +export function AvailabilityDashboard() { + const t = useTranslations("dashboard.availability"); + const [activeTab, setActiveTab] = useState<"provider" | "endpoint">("provider"); + const [timeRange, setTimeRange] = useState("24h"); + const [data, setData] = useState(null); + const [loading, setLoading] = useState(true); + const [refreshing, setRefreshing] = useState(false); + const [error, setError] = useState(null); + + const fetchData = useCallback(async () => { + try { + setRefreshing(true); + const now = new Date(); + const timeRangeMs = TIME_RANGE_MAP[timeRange]; + const startTime = new Date(now.getTime() - timeRangeMs); + const bucketSizeMinutes = calculateBucketSize(timeRangeMs); + + const params = new URLSearchParams({ + startTime: startTime.toISOString(), + endTime: now.toISOString(), + bucketSizeMinutes: bucketSizeMinutes.toString(), + maxBuckets: TARGET_BUCKETS.toString(), + }); + + const res = await fetch(`/api/availability?${params}`); + if (!res.ok) { + throw new Error(t("states.fetchFailed")); + } + + const result: AvailabilityQueryResult = await res.json(); + setData(result); + setError(null); + } catch (err) { + console.error("Failed to fetch availability data:", err); + setError(err instanceof Error ? err.message : t("states.fetchFailed")); + } finally { + setLoading(false); + setRefreshing(false); + } + }, [timeRange, t]); + + useEffect(() => { + fetchData(); + }, [fetchData]); + + // Auto-refresh: 30s for provider tab, 10s for endpoint tab + useEffect(() => { + const interval = activeTab === "provider" ? 30000 : 10000; + const timer = setInterval(fetchData, interval); + return () => clearInterval(timer); + }, [activeTab, fetchData]); + + // Calculate overview metrics + const providers = data?.providers ?? 
[]; + const overviewMetrics = { + systemAvailability: data?.systemAvailability ?? 0, + avgLatency: + providers.length > 0 + ? providers.reduce((sum, p) => { + const latencies = p.timeBuckets + .filter((b) => b.avgLatencyMs > 0) + .map((b) => b.avgLatencyMs); + return ( + sum + + (latencies.length > 0 ? latencies.reduce((a, b) => a + b, 0) / latencies.length : 0) + ); + }, 0) / + Math.max(1, providers.filter((p) => p.timeBuckets.some((b) => b.avgLatencyMs > 0)).length) + : 0, + errorRate: + providers.length > 0 + ? providers.reduce((sum, p) => { + const total = p.totalRequests; + const errors = p.timeBuckets.reduce((s, b) => s + b.redCount, 0); + return sum + (total > 0 ? errors / total : 0); + }, 0) / providers.length + : 0, + activeProbes: providers.filter((p) => p.currentStatus !== "unknown").length, + totalProbes: providers.length, + healthyCount: providers.filter((p) => p.currentStatus === "green").length, + unhealthyCount: providers.filter((p) => p.currentStatus === "red").length, + }; + + return ( +
+      [render JSX lost in extraction — the Overview section, a Tabs control bound to activeTab ("provider" | "endpoint") via setActiveTab with triggers {t("tabs.provider")} and {t("tabs.endpoint")}, the ProviderTab and EndpointTab tab contents, and the Floating Probe Button]
+ ); +} diff --git a/src/app/[locale]/dashboard/availability/_components/availability-skeleton.tsx b/src/app/[locale]/dashboard/availability/_components/availability-skeleton.tsx index 484ff9098..972f6694a 100644 --- a/src/app/[locale]/dashboard/availability/_components/availability-skeleton.tsx +++ b/src/app/[locale]/dashboard/availability/_components/availability-skeleton.tsx @@ -1,18 +1,87 @@ -import { ListSkeleton, LoadingState } from "@/components/loading/page-skeletons"; import { Skeleton } from "@/components/ui/skeleton"; +import { cn } from "@/lib/utils"; -export function AvailabilityViewSkeleton() { +export function AvailabilityDashboardSkeleton() { return ( -
-      [removed skeleton markup (ListSkeleton / LoadingState layout) lost in extraction]
+      [new skeleton JSX lost in extraction — an Overview Section with 4 gauge-card Skeletons ([...Array(4)].map), a Tabs row, then a Main Content Area with a Time Range Selector ([...Array(5)].map of Skeletons), a Lane Chart Area ([...Array(5)].map of lane rows), and a Latency Chart Area]
); } + +// Keep the old skeleton for backward compatibility if needed elsewhere +export function AvailabilityViewSkeleton() { + return ; +} diff --git a/src/app/[locale]/dashboard/availability/_components/endpoint/endpoint-tab.tsx b/src/app/[locale]/dashboard/availability/_components/endpoint/endpoint-tab.tsx new file mode 100644 index 000000000..e4806fdb5 --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/endpoint/endpoint-tab.tsx @@ -0,0 +1,316 @@ +"use client"; + +import { Radio, RefreshCw } from "lucide-react"; +import { useTranslations } from "next-intl"; +import { useCallback, useEffect, useState } from "react"; +import { toast } from "sonner"; +import { + getProviderEndpointProbeLogs, + getProviderEndpoints, + getProviderVendors, + probeProviderEndpoint, +} from "@/actions/provider-endpoints"; +import { Button } from "@/components/ui/button"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { Skeleton } from "@/components/ui/skeleton"; +import { cn } from "@/lib/utils"; +import type { ProviderEndpoint, ProviderEndpointProbeLog, ProviderVendor } from "@/types/provider"; +import { LatencyCurve } from "./latency-curve"; +import { ProbeGrid } from "./probe-grid"; +import { ProbeTerminal } from "./probe-terminal"; + +type ProviderType = + | "claude" + | "claude-auth" + | "codex" + | "gemini" + | "gemini-cli" + | "openai-compatible"; + +const PROVIDER_TYPES: ProviderType[] = [ + "claude", + "claude-auth", + "codex", + "gemini", + "gemini-cli", + "openai-compatible", +]; + +export function EndpointTab() { + const t = useTranslations("dashboard.availability"); + + // State + const [vendors, setVendors] = useState([]); + const [selectedVendorId, setSelectedVendorId] = useState(null); + const [selectedType, setSelectedType] = useState(null); + const [endpoints, setEndpoints] = useState([]); + const [selectedEndpoint, setSelectedEndpoint] = useState(null); + const [probeLogs, setProbeLogs] = useState([]); + + // Loading states + const [loadingVendors, setLoadingVendors] = useState(true); + const [loadingEndpoints, setLoadingEndpoints] = useState(false); + const [loadingLogs, setLoadingLogs] = useState(false); + const [probing, setProbing] = useState(false); + + // Fetch vendors on mount + useEffect(() => { + const fetchVendors = async () => { + try { + const vendors = await getProviderVendors(); + setVendors(vendors); + if (vendors.length > 0) { + setSelectedVendorId(vendors[0].id); + } + } catch (error) { + console.error("Failed to fetch vendors:", error); + } finally { + setLoadingVendors(false); + } + }; + fetchVendors(); + }, []); + + // Fetch endpoints when vendor or type changes + useEffect(() => { + if (!selectedVendorId || !selectedType) { + setEndpoints([]); + return; + } + + const fetchEndpoints = async () => { + setLoadingEndpoints(true); + try { + const endpoints = await getProviderEndpoints({ + vendorId: selectedVendorId, + providerType: selectedType, + }); + setEndpoints(endpoints); + if (endpoints.length > 0) { + setSelectedEndpoint(endpoints[0]); + } else { + setSelectedEndpoint(null); + } + } catch (error) { + console.error("Failed to fetch endpoints:", error); + } finally { + setLoadingEndpoints(false); + } + }; + fetchEndpoints(); + }, [selectedVendorId, selectedType]); + + // Fetch probe logs when endpoint changes + const fetchProbeLogs = useCallback(async () => { + if (!selectedEndpoint) { + setProbeLogs([]); + return; + } + + setLoadingLogs(true); + try { + const result = 
await getProviderEndpointProbeLogs({ + endpointId: selectedEndpoint.id, + limit: 100, + }); + if (result.ok && result.data) { + setProbeLogs(result.data.logs); + } + } catch (error) { + console.error("Failed to fetch probe logs:", error); + } finally { + setLoadingLogs(false); + } + }, [selectedEndpoint]); + + useEffect(() => { + fetchProbeLogs(); + }, [fetchProbeLogs]); + + // Auto-refresh logs every 10 seconds + useEffect(() => { + if (!selectedEndpoint) return; + const timer = setInterval(fetchProbeLogs, 10000); + return () => clearInterval(timer); + }, [selectedEndpoint, fetchProbeLogs]); + + // Handle manual probe + const handleProbe = async () => { + if (!selectedEndpoint) return; + + setProbing(true); + try { + const result = await probeProviderEndpoint({ + endpointId: selectedEndpoint.id, + }); + if (result.ok) { + toast.success(t("actions.probeSuccess")); + // Refresh logs and endpoints + fetchProbeLogs(); + if (selectedVendorId && selectedType) { + const endpoints = await getProviderEndpoints({ + vendorId: selectedVendorId, + providerType: selectedType, + }); + setEndpoints(endpoints); + // Update selected endpoint with new data + const updated = endpoints.find((e) => e.id === selectedEndpoint.id); + if (updated) setSelectedEndpoint(updated); + } + } else { + toast.error(result.error || t("actions.probeFailed")); + } + } catch (error) { + console.error("Probe failed:", error); + toast.error(t("actions.probeFailed")); + } finally { + setProbing(false); + } + }; + + if (loadingVendors) { + return ( +
+      [loading-state JSX lost in extraction — Skeleton placeholders shown while loadingVendors]
+    );
+  }
+
+  return (
+      [main JSX lost in extraction — a Filters row (Vendor Select, Provider Type Select, Probe Button), then the Main Content: a Probe Grid section headed by {t("probeGrid.title")} showing [...Array(4)].map Skeletons while loadingEndpoints, a Latency Curve section gated on loadingLogs, and a Probe Terminal section gated on loadingLogs && probeLogs.length === 0]
+ ); +} diff --git a/src/app/[locale]/dashboard/availability/_components/endpoint/latency-curve.tsx b/src/app/[locale]/dashboard/availability/_components/endpoint/latency-curve.tsx new file mode 100644 index 000000000..acd2dcbf2 --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/endpoint/latency-curve.tsx @@ -0,0 +1,171 @@ +"use client"; + +import { useTranslations } from "next-intl"; +import { useMemo } from "react"; +import { CartesianGrid, Line, LineChart, ResponsiveContainer, XAxis, YAxis } from "recharts"; +import { + type ChartConfig, + ChartContainer, + ChartTooltip, + ChartTooltipContent, +} from "@/components/ui/chart"; +import { cn } from "@/lib/utils"; +import type { ProviderEndpointProbeLog } from "@/types/provider"; + +interface LatencyCurveProps { + logs: ProviderEndpointProbeLog[]; + className?: string; +} + +const chartConfig = { + latency: { + label: "Latency", + color: "hsl(var(--primary))", + }, +} satisfies ChartConfig; + +export function LatencyCurve({ logs, className }: LatencyCurveProps) { + const t = useTranslations("dashboard.availability.latencyCurve"); + + // Transform logs to chart data + const chartData = useMemo(() => { + return logs + .filter((log) => log.latencyMs !== null) + .map((log) => ({ + time: log.createdAt, + timestamp: new Date(log.createdAt).getTime(), + latency: log.latencyMs, + ok: log.ok, + statusCode: log.statusCode, + })) + .sort((a, b) => a.timestamp - b.timestamp); + }, [logs]); + + if (chartData.length === 0) { + return ( +
+ {t("noData")} +
+ ); + } + + const formatTime = (time: string) => { + const date = new Date(time); + return date.toLocaleTimeString(undefined, { + hour: "2-digit", + minute: "2-digit", + second: "2-digit", + }); + }; + + const formatLatency = (value: number) => { + if (value < 1000) return `${Math.round(value)}ms`; + return `${(value / 1000).toFixed(1)}s`; + }; + + // Calculate stats + const latencies = chartData.map((d) => d.latency).filter((l): l is number => l !== null); + const avgLatency = + latencies.length > 0 ? latencies.reduce((a, b) => a + b, 0) / latencies.length : 0; + const minLatency = latencies.length > 0 ? Math.min(...latencies) : 0; + const maxLatency = latencies.length > 0 ? Math.max(...latencies) : 0; + + return ( +
+      [chart JSX lost in extraction — a header with {t("title")} and an avg/min/max stats row ({t("avg")}, {t("min")}, {t("max")} via formatLatency), then a ChartContainer hosting a recharts LineChart over chartData with a custom ChartTooltip showing formatTime(label), formatLatency(value) and data.statusCode or OK/FAIL, and a latency Line whose dot renderer highlights failed probes (!payload.ok) and whose activeDot uses r: 6 with primary fill]
+ ); +} diff --git a/src/app/[locale]/dashboard/availability/_components/endpoint/probe-grid.tsx b/src/app/[locale]/dashboard/availability/_components/endpoint/probe-grid.tsx new file mode 100644 index 000000000..a3de6d58e --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/endpoint/probe-grid.tsx @@ -0,0 +1,165 @@ +"use client"; + +import { CheckCircle2, HelpCircle, XCircle } from "lucide-react"; +import { useTranslations } from "next-intl"; +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip"; +import { cn } from "@/lib/utils"; +import type { ProviderEndpoint } from "@/types/provider"; + +interface ProbeGridProps { + endpoints: ProviderEndpoint[]; + selectedEndpointId?: number | null; + onEndpointSelect?: (endpoint: ProviderEndpoint) => void; + className?: string; +} + +function getStatusConfig(endpoint: ProviderEndpoint) { + if (endpoint.lastProbeOk === null) { + return { + icon: HelpCircle, + color: "text-slate-400", + bgColor: "bg-slate-400/10", + borderColor: "border-slate-400/30", + label: "unknown", + }; + } + if (endpoint.lastProbeOk) { + return { + icon: CheckCircle2, + color: "text-emerald-500", + bgColor: "bg-emerald-500/10", + borderColor: "border-emerald-500/30", + label: "healthy", + }; + } + return { + icon: XCircle, + color: "text-rose-500", + bgColor: "bg-rose-500/10", + borderColor: "border-rose-500/30", + label: "unhealthy", + }; +} + +function formatLatency(ms: number | null): string { + if (ms === null) return "-"; + if (ms < 1000) return `${Math.round(ms)}ms`; + return `${(ms / 1000).toFixed(2)}s`; +} + +function formatTime(date: Date | string | null): string { + if (!date) return "-"; + const d = typeof date === "string" ? new Date(date) : date; + return d.toLocaleTimeString(undefined, { + hour: "2-digit", + minute: "2-digit", + second: "2-digit", + }); +} + +export function ProbeGrid({ + endpoints, + selectedEndpointId, + onEndpointSelect, + className, +}: ProbeGridProps) { + const t = useTranslations("dashboard.availability.probeGrid"); + + if (endpoints.length === 0) { + return ( +
+      [empty-state JSX lost in extraction — renders {t("noEndpoints")}]
+    );
+  }
+
+  return (
+      [grid JSX lost in extraction — endpoints.map derives status via getStatusConfig, StatusIcon and isSelected (selectedEndpointId === endpoint.id) for each endpoint, rendering a selectable tile with a Tooltip whose content shows endpoint.label || endpoint.url, t(`status.${status.label}`) and, when present, endpoint.lastProbeErrorMessage]
+ ); +} diff --git a/src/app/[locale]/dashboard/availability/_components/endpoint/probe-terminal.tsx b/src/app/[locale]/dashboard/availability/_components/endpoint/probe-terminal.tsx new file mode 100644 index 000000000..b31213083 --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/endpoint/probe-terminal.tsx @@ -0,0 +1,285 @@ +"use client"; + +import { AlertCircle, CheckCircle2, Download, Trash2, XCircle } from "lucide-react"; +import { useTranslations } from "next-intl"; +import { useEffect, useRef, useState } from "react"; +import { Button } from "@/components/ui/button"; +import { cn } from "@/lib/utils"; +import type { ProviderEndpointProbeLog } from "@/types/provider"; + +interface ProbeTerminalProps { + logs: ProviderEndpointProbeLog[]; + maxLines?: number; + autoScroll?: boolean; + onLogClick?: (log: ProviderEndpointProbeLog) => void; + className?: string; +} + +function formatTime(date: Date | string): string { + const d = typeof date === "string" ? new Date(date) : date; + return d.toLocaleTimeString(undefined, { + hour: "2-digit", + minute: "2-digit", + second: "2-digit", + }); +} + +function formatLatency(ms: number | null): string { + if (ms === null) return "-"; + if (ms < 1000) return `${Math.round(ms)}ms`; + return `${(ms / 1000).toFixed(2)}s`; +} + +function getLogLevel(log: ProviderEndpointProbeLog): "success" | "error" | "warn" { + if (log.ok) return "success"; + if (log.errorType === "timeout") return "warn"; + return "error"; +} + +const levelConfig = { + success: { + icon: CheckCircle2, + label: "OK", + color: "text-emerald-500", + bgColor: "bg-emerald-500/5", + borderColor: "border-l-emerald-500", + }, + error: { + icon: XCircle, + label: "FAIL", + color: "text-rose-500", + bgColor: "bg-rose-500/5", + borderColor: "border-l-rose-500", + }, + warn: { + icon: AlertCircle, + label: "WARN", + color: "text-amber-500", + bgColor: "bg-amber-500/5", + borderColor: "border-l-amber-500", + }, +}; + +export function ProbeTerminal({ + logs, + maxLines = 100, + autoScroll = true, + onLogClick, + className, +}: ProbeTerminalProps) { + const t = useTranslations("dashboard.availability.terminal"); + const containerRef = useRef(null); + const [userScrolled, setUserScrolled] = useState(false); + const [filter, setFilter] = useState(""); + + // Auto-scroll to bottom when new logs arrive + useEffect(() => { + if (autoScroll && !userScrolled && containerRef.current) { + containerRef.current.scrollTop = containerRef.current.scrollHeight; + } + }, [logs, autoScroll, userScrolled]); + + // Detect user scroll + const handleScroll = () => { + if (!containerRef.current) return; + const { scrollTop, scrollHeight, clientHeight } = containerRef.current; + const isAtBottom = scrollHeight - scrollTop - clientHeight < 50; + setUserScrolled(!isAtBottom); + }; + + // Filter logs + const filteredLogs = logs + .filter((log) => { + if (!filter) return true; + const searchLower = filter.toLowerCase(); + return ( + log.errorMessage?.toLowerCase().includes(searchLower) || + log.errorType?.toLowerCase().includes(searchLower) || + log.statusCode?.toString().includes(searchLower) + ); + }) + .slice(-maxLines); + + const handleDownload = () => { + const content = filteredLogs + .map((log) => { + const time = formatTime(log.createdAt); + const status = log.ok ? 
"OK" : "FAIL"; + const latency = formatLatency(log.latencyMs); + const error = log.errorMessage || ""; + return `[${time}] ${status} ${log.statusCode || "-"} ${latency} ${error}`; + }) + .join("\n"); + + const blob = new Blob([content], { type: "text/plain" }); + const url = URL.createObjectURL(blob); + const a = document.createElement("a"); + a.href = url; + a.download = `probe-logs-${new Date().toISOString().slice(0, 10)}.txt`; + a.click(); + URL.revokeObjectURL(url); + }; + + return ( +
+ {/* Scanline Overlay - only visible in dark mode */} +
+ + {/* Header */} +
+
+ {/* Traffic Lights */} +
+
+
+
+
+ + {t("title")} + +
+
+ {t("live")} +
+
+
+ +
+
+ + {/* Log Content */} +
+ {filteredLogs.length === 0 ? ( +
+ {t("noLogs")} +
+ ) : ( + filteredLogs.map((log) => { + const level = getLogLevel(log); + const config = levelConfig[level]; + const Icon = config.icon; + + return ( + + ); + }) + )} + + {/* Loading indicator */} + {logs.length > 0 && ( +
+ [{formatTime(new Date())}] + ... +
+ )} +
+ + {/* Filter Input */} +
+
+ > + setFilter(e.target.value)} + placeholder={t("filterPlaceholder")} + className={cn( + "flex-1 bg-transparent border-none text-sm font-mono", + "text-foreground placeholder:text-muted-foreground/50", + "focus:outline-none focus:ring-0" + )} + /> +
+
+
+ ); +} diff --git a/src/app/[locale]/dashboard/availability/_components/overview/gauge-card.tsx b/src/app/[locale]/dashboard/availability/_components/overview/gauge-card.tsx new file mode 100644 index 000000000..facd414ae --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/overview/gauge-card.tsx @@ -0,0 +1,222 @@ +"use client"; + +import type { LucideIcon } from "lucide-react"; +import { ArrowDown, ArrowRight, ArrowUp } from "lucide-react"; +import { useEffect, useRef, useState } from "react"; +import { cn } from "@/lib/utils"; + +interface GaugeCardProps { + value: number; + label: string; + icon: LucideIcon; + trend?: { + value: number; + direction: "up" | "down" | "stable"; + }; + thresholds?: { + warning: number; + critical: number; + }; + size?: "sm" | "md" | "lg"; + formatter?: (value: number) => string; + invertColors?: boolean; + className?: string; +} + +const sizeConfig = { + sm: { gauge: 64, stroke: 4, iconSize: 16, fontSize: "text-lg" }, + md: { gauge: 80, stroke: 5, iconSize: 20, fontSize: "text-2xl" }, + lg: { gauge: 96, stroke: 6, iconSize: 24, fontSize: "text-3xl" }, +}; + +function getGaugeColor( + value: number, + thresholds: { warning: number; critical: number }, + invertColors: boolean +): string { + if (invertColors) { + // For metrics where lower is better (error rate, latency) + if (value <= thresholds.critical) return "text-emerald-500"; + if (value <= thresholds.warning) return "text-amber-500"; + return "text-rose-500"; + } + // For metrics where higher is better (availability) + if (value >= thresholds.warning) return "text-emerald-500"; + if (value >= thresholds.critical) return "text-amber-500"; + return "text-rose-500"; +} + +function getTrendIcon(direction: "up" | "down" | "stable") { + switch (direction) { + case "up": + return ArrowUp; + case "down": + return ArrowDown; + default: + return ArrowRight; + } +} + +function getTrendColor(direction: "up" | "down" | "stable", invertColors: boolean) { + if (direction === "stable") return "text-muted-foreground bg-muted/50"; + if (invertColors) { + // For inverted metrics, down is good + return direction === "down" + ? "text-emerald-500 bg-emerald-500/10" + : "text-rose-500 bg-rose-500/10"; + } + // For normal metrics, up is good + return direction === "up" ? 
"text-emerald-500 bg-emerald-500/10" : "text-rose-500 bg-rose-500/10"; +} + +export function GaugeCard({ + value, + label, + icon: Icon, + trend, + thresholds = { warning: 80, critical: 50 }, + size = "md", + formatter = (v) => `${v.toFixed(1)}%`, + invertColors = false, + className, +}: GaugeCardProps) { + const [displayValue, setDisplayValue] = useState(0); + const prevValueRef = useRef(0); + const config = sizeConfig[size]; + + // Animate value changes + useEffect(() => { + let cancelled = false; + const duration = 800; + const startValue = prevValueRef.current; + const diff = value - startValue; + const startTime = Date.now(); + + const animate = () => { + if (cancelled) return; + const elapsed = Date.now() - startTime; + const progress = Math.min(elapsed / duration, 1); + // Ease out cubic + const easeProgress = 1 - (1 - progress) ** 3; + const currentValue = startValue + diff * easeProgress; + + setDisplayValue(currentValue); + + if (progress < 1) { + requestAnimationFrame(animate); + } else { + prevValueRef.current = value; + } + }; + + requestAnimationFrame(animate); + return () => { + cancelled = true; + }; + }, [value]); + + // SVG gauge calculations + const radius = (config.gauge - config.stroke) / 2; + const circumference = 2 * Math.PI * radius; + const normalizedValue = Math.min(Math.max(displayValue, 0), 100); + const offset = circumference - (normalizedValue / 100) * circumference; + const gaugeColor = getGaugeColor(displayValue, thresholds, invertColors); + + const TrendIcon = trend ? getTrendIcon(trend.direction) : null; + const trendColor = trend ? getTrendColor(trend.direction, invertColors) : ""; + + return ( +
+ {/* Subtle glow effect */} +
+ +
+ {/* Circular Gauge */} +
+ + {/* Background circle */} + + {/* Progress circle with gradient */} + + + {/* Center icon */} +
+ +
+
+ + {/* Content */} +
+

{label}

+

+ {formatter(displayValue)} +

+ {/* Trend indicator */} + {trend && TrendIcon && ( +
+ + + {trend.value > 0 ? "+" : ""} + {trend.value.toFixed(1)}% + +
+ )} +
+
+
+ ); +} diff --git a/src/app/[locale]/dashboard/availability/_components/overview/overview-section.tsx b/src/app/[locale]/dashboard/availability/_components/overview/overview-section.tsx new file mode 100644 index 000000000..87cbae350 --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/overview/overview-section.tsx @@ -0,0 +1,168 @@ +"use client"; + +import { Activity, AlertTriangle, Clock, ShieldCheck } from "lucide-react"; +import { useTranslations } from "next-intl"; +import { Skeleton } from "@/components/ui/skeleton"; +import { cn } from "@/lib/utils"; +import { GaugeCard } from "./gauge-card"; + +interface OverviewSectionProps { + systemAvailability: number; + avgLatency: number; + errorRate: number; + activeProbes: number; + totalProbes: number; + loading?: boolean; + refreshing?: boolean; +} + +export function OverviewSection({ + systemAvailability, + avgLatency, + errorRate, + activeProbes, + totalProbes, + loading, + refreshing, +}: OverviewSectionProps) { + const t = useTranslations("dashboard.availability.overview"); + + if (loading) { + return ( +
+ {[...Array(4)].map((_, i) => ( +
+
+ +
+ + +
+
+
+ ))} +
+ ); + } + + // Calculate trends (mock for now - would need historical data) + const availabilityTrend = + systemAvailability > 0.95 + ? { value: 0.1, direction: "up" as const } + : systemAvailability < 0.8 + ? { value: -2.5, direction: "down" as const } + : { value: 0, direction: "stable" as const }; + + const latencyTrend = + avgLatency < 200 + ? { value: -5, direction: "down" as const } + : avgLatency > 500 + ? { value: 15, direction: "up" as const } + : { value: 0, direction: "stable" as const }; + + const errorTrend = + errorRate < 0.01 + ? { value: 0, direction: "stable" as const } + : errorRate > 0.05 + ? { value: 2.3, direction: "up" as const } + : { value: -0.5, direction: "down" as const }; + + return ( +
+ {/* System Availability */} + `${v.toFixed(2)}%`} + /> + + {/* Average Latency */} + + avgLatency < 1000 ? `${Math.round(avgLatency)}ms` : `${(avgLatency / 1000).toFixed(2)}s` + } + invertColors + /> + + {/* Error Rate */} + `${v.toFixed(2)}%`} + invertColors + /> + + {/* Active Probes */} +
+ {/* Glow effect */} +
+ +
+
+
+

{t("activeProbes")}

+

+ {activeProbes} + /{totalProbes} +

+
+
+ +
+
+ + {/* Progress bar */} +
+
+ {t("load")} + {totalProbes > 0 ? Math.round((activeProbes / totalProbes) * 100) : 0}% +
+
+
0 ? (activeProbes / totalProbes) * 100 : 0}%` }} + /> +
+
+
+
+
+ ); +} diff --git a/src/app/[locale]/dashboard/availability/_components/provider/confidence-badge.tsx b/src/app/[locale]/dashboard/availability/_components/provider/confidence-badge.tsx new file mode 100644 index 000000000..008a5d7ff --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/provider/confidence-badge.tsx @@ -0,0 +1,98 @@ +"use client"; + +import { useTranslations } from "next-intl"; +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip"; +import { cn } from "@/lib/utils"; + +interface ConfidenceBadgeProps { + requestCount: number; + thresholds?: { + low: number; + medium: number; + high: number; + }; + className?: string; +} + +type ConfidenceLevel = "low" | "medium" | "high"; + +function getConfidenceLevel( + count: number, + thresholds: { low: number; medium: number; high: number } +): ConfidenceLevel { + if (count >= thresholds.high) return "high"; + if (count >= thresholds.medium) return "medium"; + return "low"; +} + +const confidenceConfig: Record< + ConfidenceLevel, + { bars: number; color: string; bgColor: string; borderStyle: string } +> = { + low: { + bars: 1, + color: "bg-slate-400", + bgColor: "bg-slate-400/10", + borderStyle: "border-dashed border-slate-400/50", + }, + medium: { + bars: 2, + color: "bg-amber-500", + bgColor: "bg-amber-500/10", + borderStyle: "border-solid border-amber-500/50", + }, + high: { + bars: 3, + color: "bg-emerald-500", + bgColor: "bg-emerald-500/10", + borderStyle: "border-solid border-emerald-500/50", + }, +}; + +export function ConfidenceBadge({ + requestCount, + thresholds = { low: 10, medium: 50, high: 200 }, + className, +}: ConfidenceBadgeProps) { + const t = useTranslations("dashboard.availability.confidence"); + const level = getConfidenceLevel(requestCount, thresholds); + const config = confidenceConfig[level]; + + return ( + + + +
+ {/* Signal bars */} +
+ {[1, 2, 3].map((bar) => ( +
+ ))} +
+
+ + +
+

{t(level)}

+

{t(`${level}Tooltip`, { count: requestCount })}

+
+
+ + + ); +} diff --git a/src/app/[locale]/dashboard/availability/_components/provider/lane-chart.tsx b/src/app/[locale]/dashboard/availability/_components/provider/lane-chart.tsx new file mode 100644 index 000000000..59966d575 --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/provider/lane-chart.tsx @@ -0,0 +1,332 @@ +"use client"; + +import { useTranslations } from "next-intl"; +import { useMemo } from "react"; +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip"; +import type { ProviderAvailabilitySummary, TimeBucketMetrics } from "@/lib/availability"; +import { cn } from "@/lib/utils"; +import { ConfidenceBadge } from "./confidence-badge"; + +interface LaneChartProps { + providers: ProviderAvailabilitySummary[]; + bucketSizeMinutes: number; + startTime: string; + endTime: string; + onProviderClick?: (providerId: number) => void; + className?: string; +} + +// Threshold for switching between dots and bars visualization +const HIGH_VOLUME_THRESHOLD = 50; + +function getAvailabilityColor(score: number, hasData: boolean): string { + if (!hasData) return "bg-slate-300/50 dark:bg-slate-600/50"; + if (score < 0.5) return "bg-rose-500"; + if (score < 0.8) return "bg-orange-500"; + if (score < 0.95) return "bg-lime-500"; + return "bg-emerald-500"; +} + +function getStatusColor(status: string): string { + switch (status) { + case "green": + return "text-emerald-500"; + case "red": + return "text-rose-500"; + default: + return "text-slate-400"; + } +} + +function formatBucketTime(isoString: string, bucketSizeMinutes: number): string { + const date = new Date(isoString); + if (bucketSizeMinutes >= 1440) { + return date.toLocaleDateString(undefined, { month: "short", day: "numeric" }); + } + if (bucketSizeMinutes >= 60) { + return date.toLocaleString(undefined, { + month: "short", + day: "numeric", + hour: "2-digit", + minute: "2-digit", + }); + } + if (bucketSizeMinutes < 1) { + return date.toLocaleTimeString(undefined, { + hour: "2-digit", + minute: "2-digit", + second: "2-digit", + }); + } + return date.toLocaleTimeString(undefined, { hour: "2-digit", minute: "2-digit" }); +} + +function formatLatency(ms: number): string { + if (ms < 1000) return `${Math.round(ms)}ms`; + return `${(ms / 1000).toFixed(2)}s`; +} + +function formatPercentage(value: number): string { + return `${(value * 100).toFixed(1)}%`; +} + +export function LaneChart({ + providers, + bucketSizeMinutes, + startTime, + endTime, + onProviderClick, + className, +}: LaneChartProps) { + const t = useTranslations("dashboard.availability.laneChart"); + + // Generate unified time buckets + const unifiedBuckets = useMemo(() => { + const start = new Date(startTime); + const end = new Date(endTime); + const bucketSizeMs = bucketSizeMinutes * 60 * 1000; + + const buckets: string[] = []; + let current = new Date(Math.floor(start.getTime() / bucketSizeMs) * bucketSizeMs); + + while (current.getTime() < end.getTime()) { + buckets.push(current.toISOString()); + current = new Date(current.getTime() + bucketSizeMs); + } + + return buckets; + }, [startTime, endTime, bucketSizeMinutes]); + + // Generate time labels (show ~7 labels) + const timeLabels = useMemo(() => { + if (unifiedBuckets.length === 0) return []; + const step = Math.max(1, Math.floor(unifiedBuckets.length / 6)); + const labels: { position: number; label: string }[] = []; + + for (let i = 0; i < unifiedBuckets.length; i += step) { + labels.push({ + position: (i / unifiedBuckets.length) * 100, + label: 
formatBucketTime(unifiedBuckets[i], bucketSizeMinutes), + }); + } + + return labels; + }, [unifiedBuckets, bucketSizeMinutes]); + + const getBucketData = ( + provider: ProviderAvailabilitySummary, + bucketStart: string + ): TimeBucketMetrics | null => { + return provider.timeBuckets.find((b) => b.bucketStart === bucketStart) || null; + }; + + if (providers.length === 0) { + return
{t("noData")}
; + } + + return ( + +
+ {/* Time labels header */} +
+
+
+ {timeLabels.map((label, i) => ( + + {label.label} + + ))} +
+
+
+ + {/* Provider lanes */} + {providers.map((provider) => { + const isHighVolume = provider.totalRequests >= HIGH_VOLUME_THRESHOLD; + + return ( +
onProviderClick?.(provider.providerId)} + > + {/* Provider info */} +
+
+ + + {provider.providerName} + +
+
+ + + {isHighVolume ? t("denseData") : t("sparseData")} + +
+
+ + {/* Lane visualization */} +
+ {/* Grid lines */} +
+ + {/* Data visualization */} +
+ {isHighVolume ? ( + // High volume: solid bars +
+ {unifiedBuckets.map((bucketStart) => { + const bucket = getBucketData(provider, bucketStart); + const hasData = bucket !== null && bucket.totalRequests > 0; + const score = hasData ? bucket.availabilityScore : 0; + const height = hasData + ? Math.max(20, Math.min(100, bucket.totalRequests / 2)) + : 0; + + return ( + + +
+ + + + + + ); + })} +
+ ) : ( + // Low volume: scatter dots +
+ {unifiedBuckets.map((bucketStart, index) => { + const bucket = getBucketData(provider, bucketStart); + const hasData = bucket !== null && bucket.totalRequests > 0; + if (!hasData) return null; + + const score = bucket.availabilityScore; + const size = Math.max(6, Math.min(12, bucket.totalRequests * 2)); + const position = (index / unifiedBuckets.length) * 100; + + return ( + + +
+ + + + + + ); + })} + {/* No data indicator */} + {provider.totalRequests === 0 && ( +
+
+
+ )} +
+ )} +
+
+ + {/* Summary stats */} +
+
+ {provider.currentStatus === "unknown" + ? t("noData") + : formatPercentage(provider.currentAvailability)} +
+
+ {provider.totalRequests > 0 + ? t("requests", { count: provider.totalRequests.toLocaleString() }) + : t("noRequests")} +
+
+
+ ); + })} +
+ + ); +} + +function BucketTooltip({ + bucketStart, + bucket, + bucketSizeMinutes, +}: { + bucketStart: string; + bucket: TimeBucketMetrics | null; + bucketSizeMinutes: number; +}) { + const t = useTranslations("dashboard.availability.laneChart"); + const hasData = bucket !== null && bucket.totalRequests > 0; + + return ( +
+
{formatBucketTime(bucketStart, bucketSizeMinutes)}
+ {hasData && bucket ? ( + <> +
{t("requests", { count: bucket.totalRequests })}
+
{t("availability", { value: formatPercentage(bucket.availabilityScore) })}
+
+ {t("latency")}: {formatLatency(bucket.avgLatencyMs)} +
+
+ OK: {bucket.greenCount} + ERR: {bucket.redCount} +
+ + ) : ( +
{t("noData")}
+ )} +
+ ); +} diff --git a/src/app/[locale]/dashboard/availability/_components/provider/latency-chart.tsx b/src/app/[locale]/dashboard/availability/_components/provider/latency-chart.tsx new file mode 100644 index 000000000..088ff7645 --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/provider/latency-chart.tsx @@ -0,0 +1,203 @@ +"use client"; + +import { useTranslations } from "next-intl"; +import { useMemo } from "react"; +import { Area, AreaChart, CartesianGrid, ResponsiveContainer, XAxis, YAxis } from "recharts"; +import { + type ChartConfig, + ChartContainer, + ChartTooltip, + ChartTooltipContent, +} from "@/components/ui/chart"; +import type { ProviderAvailabilitySummary } from "@/lib/availability"; +import { cn } from "@/lib/utils"; + +interface LatencyChartProps { + providers: ProviderAvailabilitySummary[]; + className?: string; +} + +const chartConfig = { + p50: { + label: "P50", + color: "hsl(var(--chart-2))", + }, + p95: { + label: "P95", + color: "hsl(var(--chart-4))", + }, + p99: { + label: "P99", + color: "hsl(var(--chart-1))", + }, +} satisfies ChartConfig; + +export function LatencyChart({ providers, className }: LatencyChartProps) { + const t = useTranslations("dashboard.availability.latencyChart"); + + // Aggregate latency data across all providers + const chartData = useMemo(() => { + // Collect all unique bucket times + const bucketMap = new Map(); + + for (const provider of providers) { + for (const bucket of provider.timeBuckets) { + if (bucket.totalRequests === 0) continue; + + const existing = bucketMap.get(bucket.bucketStart) || { + p50: [], + p95: [], + p99: [], + }; + + existing.p50.push(bucket.p50LatencyMs); + existing.p95.push(bucket.p95LatencyMs); + existing.p99.push(bucket.p99LatencyMs); + + bucketMap.set(bucket.bucketStart, existing); + } + } + + // Calculate averages and format for chart + return Array.from(bucketMap.entries()) + .map(([time, values]) => ({ + time, + timestamp: new Date(time).getTime(), + p50: values.p50.length > 0 ? values.p50.reduce((a, b) => a + b, 0) / values.p50.length : 0, + p95: values.p95.length > 0 ? values.p95.reduce((a, b) => a + b, 0) / values.p95.length : 0, + p99: values.p99.length > 0 ? values.p99.reduce((a, b) => a + b, 0) / values.p99.length : 0, + })) + .sort((a, b) => a.timestamp - b.timestamp); + }, [providers]); + + if (chartData.length === 0) { + return ( +
+ {t("noData")} +
+ ); + } + + const formatTime = (time: string) => { + const date = new Date(time); + return date.toLocaleTimeString(undefined, { + hour: "2-digit", + minute: "2-digit", + }); + }; + + const formatLatency = (value: number) => { + if (value < 1000) return `${Math.round(value)}ms`; + return `${(value / 1000).toFixed(1)}s`; + }; + + return ( +
+
+

{t("title")}

+
+
+
+ {t("p50")} +
+
+
+ {t("p95")} +
+
+
+ {t("p99")} +
+
+
+ + + + + + + + + + + + + + + + + + + + + { + if (!active || !payload?.length) return null; + return ( +
+
{formatTime(label as string)}
+
+ {payload.map((item) => ( +
+
+ + {chartConfig[item.dataKey as keyof typeof chartConfig]?.label || + item.dataKey} + : + + {formatLatency(item.value as number)} +
+ ))} +
+
+ ); + }} + /> + + + + + +
+ ); +} diff --git a/src/app/[locale]/dashboard/availability/_components/provider/provider-tab.tsx b/src/app/[locale]/dashboard/availability/_components/provider/provider-tab.tsx new file mode 100644 index 000000000..7b2899d7e --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/provider/provider-tab.tsx @@ -0,0 +1,239 @@ +"use client"; + +import { RefreshCw } from "lucide-react"; +import { useTranslations } from "next-intl"; +import { useMemo, useState } from "react"; +import { Button } from "@/components/ui/button"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { Skeleton } from "@/components/ui/skeleton"; +import type { AvailabilityQueryResult } from "@/lib/availability"; +import { cn } from "@/lib/utils"; +import type { TimeRangeOption } from "../availability-dashboard"; +import { TimeRangeSelector } from "../shared/time-range-selector"; +import { LaneChart } from "./lane-chart"; +import { LatencyChart } from "./latency-chart"; + +interface ProviderTabProps { + data: AvailabilityQueryResult | null; + loading: boolean; + refreshing: boolean; + error: string | null; + timeRange: TimeRangeOption; + onTimeRangeChange: (value: TimeRangeOption) => void; + onRefresh: () => void; +} + +type SortOption = "availability" | "name" | "requests"; + +export function ProviderTab({ + data, + loading, + refreshing, + error, + timeRange, + onTimeRangeChange, + onRefresh, +}: ProviderTabProps) { + const t = useTranslations("dashboard.availability"); + const [sortBy, setSortBy] = useState("availability"); + + // Sort providers based on selected option + const sortedProviders = useMemo(() => { + if (!data?.providers) return []; + + return [...data.providers].sort((a, b) => { + switch (sortBy) { + case "availability": + if (a.currentStatus === "unknown" && b.currentStatus !== "unknown") return 1; + if (b.currentStatus === "unknown" && a.currentStatus !== "unknown") return -1; + return b.currentAvailability - a.currentAvailability; + case "name": + return a.providerName.localeCompare(b.providerName); + case "requests": + return b.totalRequests - a.totalRequests; + default: + return 0; + } + }); + }, [data?.providers, sortBy]); + + if (loading) { + return ( +
+ {/* Controls skeleton */} +
+
+ + +
+ +
+ + {/* Lane chart skeleton */} +
+ +
+ {[...Array(5)].map((_, i) => ( +
+ + + +
+ ))} +
+
+ + {/* Latency chart skeleton */} +
+ +
+
+ ); + } + + if (error) { + return ( +
+

{error}

+ +
+ ); + } + + return ( +
+ {/* Controls */} +
+
+ + +
+ +
+ + {/* Lane Chart */} +
+
+

{t("laneChart.title")}

+ {data && ( + + {t("heatmap.bucketSize")}: {data.bucketSizeMinutes} {t("heatmap.minutes")} + + )} +
+ {data && ( + + )} +
+ + {/* Latency Distribution Chart */} +
+ {data && } +
+ + {/* Legend */} +
+
+
+
+ {t("legend.green")} +
+
+
+ {t("legend.lime")} +
+
+
+ {t("legend.orange")} +
+
+
+ {t("legend.red")} +
+
+
+ {t("legend.noData")} +
+
+
+
+ ); +} diff --git a/src/app/[locale]/dashboard/availability/_components/shared/floating-probe-button.tsx b/src/app/[locale]/dashboard/availability/_components/shared/floating-probe-button.tsx new file mode 100644 index 000000000..b03626e1f --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/shared/floating-probe-button.tsx @@ -0,0 +1,67 @@ +"use client"; + +import { Radio } from "lucide-react"; +import { useTranslations } from "next-intl"; +import { useState } from "react"; +import { toast } from "sonner"; +import { cn } from "@/lib/utils"; + +interface FloatingProbeButtonProps { + onProbeComplete?: () => void; + className?: string; +} + +export function FloatingProbeButton({ onProbeComplete, className }: FloatingProbeButtonProps) { + const t = useTranslations("dashboard.availability.actions"); + const [isProbing, setIsProbing] = useState(false); + + const handleProbeAll = async () => { + if (isProbing) return; + + setIsProbing(true); + try { + // Trigger global probe via API + const res = await fetch("/api/availability/probe-all", { method: "POST" }); + if (!res.ok) { + throw new Error("Probe failed"); + } + toast.success(t("probeSuccess")); + onProbeComplete?.(); + } catch (error) { + console.error("Probe all failed:", error); + toast.error(t("probeFailed")); + } finally { + setIsProbing(false); + } + }; + + return ( + + ); +} diff --git a/src/app/[locale]/dashboard/availability/_components/shared/time-range-selector.tsx b/src/app/[locale]/dashboard/availability/_components/shared/time-range-selector.tsx new file mode 100644 index 000000000..7857f2da3 --- /dev/null +++ b/src/app/[locale]/dashboard/availability/_components/shared/time-range-selector.tsx @@ -0,0 +1,37 @@ +"use client"; + +import { useTranslations } from "next-intl"; +import { cn } from "@/lib/utils"; +import type { TimeRangeOption } from "../availability-dashboard"; + +interface TimeRangeSelectorProps { + value: TimeRangeOption; + onChange: (value: TimeRangeOption) => void; + className?: string; +} + +const TIME_RANGE_OPTIONS: TimeRangeOption[] = ["15min", "1h", "6h", "24h", "7d"]; + +export function TimeRangeSelector({ value, onChange, className }: TimeRangeSelectorProps) { + const t = useTranslations("dashboard.availability.timeRange"); + + return ( +
+ {TIME_RANGE_OPTIONS.map((option) => ( + + ))} +
+ ); +} diff --git a/src/app/[locale]/dashboard/availability/page.tsx b/src/app/[locale]/dashboard/availability/page.tsx index 8cdba43f6..f6603fd50 100644 --- a/src/app/[locale]/dashboard/availability/page.tsx +++ b/src/app/[locale]/dashboard/availability/page.tsx @@ -5,8 +5,8 @@ import { Section } from "@/components/section"; import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert"; import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; import { getSession } from "@/lib/auth"; -import { AvailabilityViewSkeleton } from "./_components/availability-skeleton"; -import { AvailabilityView } from "./_components/availability-view"; +import { AvailabilityDashboard } from "./_components/availability-dashboard"; +import { AvailabilityDashboardSkeleton } from "./_components/availability-skeleton"; export const dynamic = "force-dynamic"; @@ -44,8 +44,8 @@ export default async function AvailabilityPage() { return (
- }> - + }> +
diff --git a/src/app/[locale]/settings/providers/_components/forms/provider-form/sections/network-section.tsx b/src/app/[locale]/settings/providers/_components/forms/provider-form/sections/network-section.tsx index 2659c432b..bf2a7e709 100644 --- a/src/app/[locale]/settings/providers/_components/forms/provider-form/sections/network-section.tsx +++ b/src/app/[locale]/settings/providers/_components/forms/provider-form/sections/network-section.tsx @@ -246,7 +246,7 @@ export function NetworkSection() { dispatch({ type: "SET_REQUEST_TIMEOUT_NON_STREAMING", payload: value }) } disabled={state.ui.isPending} - min="60" + min="0" max="1200" icon={Clock} isCore={true} diff --git a/src/app/v1/_lib/proxy/errors.ts b/src/app/v1/_lib/proxy/errors.ts index d789f6fd3..e12d6e7ff 100644 --- a/src/app/v1/_lib/proxy/errors.ts +++ b/src/app/v1/_lib/proxy/errors.ts @@ -870,6 +870,85 @@ export function isHttp2Error(error: Error): boolean { return HTTP2_ERROR_PATTERNS.some((pattern) => errorString.includes(pattern.toUpperCase())); } +/** + * SSL/TLS Certificate Error Detection Patterns + * + * Covers common SSL certificate validation errors: + * - Certificate hostname mismatch (altname validation) + * - Self-signed certificates + * - Expired certificates + * - Certificate chain validation failures + * - Unable to verify issuer + */ +const SSL_ERROR_PATTERNS = [ + "certificate", + "ssl", + "tls", + "cert_", + "unable to verify", + "self signed", + "hostname mismatch", + "unable_to_get_issuer_cert", + "cert_has_expired", + "depth_zero_self_signed_cert", + "unable_to_verify_leaf_signature", + "err_tls_cert_altname_invalid", + "cert_untrusted", + "altnames", +]; + +/** + * Detect if an error is an SSL/TLS certificate error + * + * SSL certificate errors occur during TLS handshake when: + * - Certificate hostname doesn't match the requested domain + * - Certificate is self-signed and not trusted + * - Certificate has expired + * - Certificate chain cannot be verified + * + * Detection logic: + * 1. Check error name + * 2. Check error message + * 3. Check error code (Node.js style) + * 4. Check nested cause (for wrapped errors) + * + * @param error - Error object to check + * @returns true if the error is an SSL certificate error + * + * @example + * // Certificate hostname mismatch + * isSSLCertificateError(new Error('ERR_TLS_CERT_ALTNAME_INVALID')) // true + * + * // Self-signed certificate + * isSSLCertificateError(new Error('self signed certificate')) // true + * + * // Non-SSL error + * isSSLCertificateError(new Error('Connection refused')) // false + */ +export function isSSLCertificateError(error: unknown): boolean { + // Handle non-Error objects + if (!(error instanceof Error)) { + return false; + } + + // Combine error information for detection + const errorString = [error.name, error.message, (error as NodeJS.ErrnoException).code ?? 
""] + .join(" ") + .toLowerCase(); + + // Check if any SSL pattern matches + if (SSL_ERROR_PATTERNS.some((pattern) => errorString.includes(pattern.toLowerCase()))) { + return true; + } + + // Check nested cause (for wrapped errors like "Request failed" with SSL cause) + if (error.cause instanceof Error) { + return isSSLCertificateError(error.cause); + } + + return false; +} + const SENSITIVE_HEADERS = new Set([ "authorization", "proxy-authorization", // 代理认证 diff --git a/src/app/v1/_lib/proxy/forwarder.ts b/src/app/v1/_lib/proxy/forwarder.ts index 30d693c0a..87e8d02c6 100644 --- a/src/app/v1/_lib/proxy/forwarder.ts +++ b/src/app/v1/_lib/proxy/forwarder.ts @@ -16,7 +16,11 @@ import { PROVIDER_DEFAULTS, PROVIDER_LIMITS } from "@/lib/constants/provider.con import { recordEndpointFailure, recordEndpointSuccess } from "@/lib/endpoint-circuit-breaker"; import { logger } from "@/lib/logger"; import { getPreferredProviderEndpoints } from "@/lib/provider-endpoints/endpoint-selector"; -import { createProxyAgentForProvider } from "@/lib/proxy-agent"; +import { + getGlobalAgentPool, + getProxyAgentForProvider, + type ProxyConfigWithCacheKey, +} from "@/lib/proxy-agent"; import { SessionManager } from "@/lib/session-manager"; import { CONTEXT_1M_BETA_HEADER, shouldApplyContext1m } from "@/lib/special-attributes"; import { @@ -41,6 +45,7 @@ import { isClientAbortError, isEmptyResponseError, isHttp2Error, + isSSLCertificateError, ProxyError, sanitizeUrl, } from "./errors"; @@ -252,7 +257,21 @@ export class ProxyForwarder { endpointCandidates.push({ endpointId: null, baseUrl: currentProvider.url }); } - maxAttemptsPerProvider = Math.max(maxAttemptsPerProvider, endpointCandidates.length); + // Truncate endpoints to maxRetryAttempts count + // Ensures only the N lowest-latency endpoints are used (N = maxRetryAttempts) + // Note: getPreferredProviderEndpoints already returns endpoints sorted by latency (ascending) + if (endpointCandidates.length > maxAttemptsPerProvider) { + const originalCount = endpointCandidates.length; + endpointCandidates.length = maxAttemptsPerProvider; + + logger.debug("ProxyForwarder: Truncated endpoint candidates to match maxRetryAttempts", { + providerId: currentProvider.id, + providerName: currentProvider.name, + originalEndpointCount: originalCount, + truncatedTo: maxAttemptsPerProvider, + selectedEndpointIds: endpointCandidates.map((e) => e.endpointId), + }); + } let endpointAttemptsEvaluated = 0; let allEndpointAttemptsTimedOut = true; @@ -262,6 +281,13 @@ export class ProxyForwarder { providerName: currentProvider.name, totalProvidersAttempted, maxRetryAttempts: maxAttemptsPerProvider, + endpointCount: endpointCandidates.length, + endpointSelectionCriteria: "latency_ascending", + selectedEndpoints: endpointCandidates.map((e, idx) => ({ + index: idx, + endpointId: e.endpointId, + baseUrl: sanitizeUrl(e.baseUrl), + })), }); if ( @@ -1550,8 +1576,11 @@ export class ProxyForwarder { // ⭐ 获取 HTTP/2 全局开关设置 const enableHttp2 = await isHttp2Enabled(); - // ⭐ 应用代理配置(如果配置了) - const proxyConfig = createProxyAgentForProvider(provider, proxyUrl, enableHttp2); + // ⭐ 应用代理配置(如果配置了)- 使用 Agent Pool 缓存连接 + const proxyConfig = await getProxyAgentForProvider(provider, proxyUrl, enableHttp2); + // 用于直连场景的 cacheKey(SSL 错误时标记不健康) + let directConnectionCacheKey: string | null = null; + if (proxyConfig) { init.dispatcher = proxyConfig.agent; logger.info("ProxyForwarder: Using proxy", { @@ -1563,11 +1592,19 @@ export class ProxyForwarder { http2Enabled: proxyConfig.http2Enabled, }); } else if 
(enableHttp2) { - // 直连场景:创建支持 HTTP/2 的 Agent - init.dispatcher = new Agent({ allowH2: true }); - logger.debug("ProxyForwarder: Using HTTP/2 Agent for direct connection", { + // 直连场景:使用 Agent Pool 获取缓存的 HTTP/2 Agent(避免内存泄漏) + const pool = getGlobalAgentPool(); + const { agent, cacheKey } = await pool.getAgent({ + endpointUrl: proxyUrl, + proxyUrl: null, + enableHttp2: true, + }); + init.dispatcher = agent; + directConnectionCacheKey = cacheKey; + logger.debug("ProxyForwarder: Using cached HTTP/2 Agent for direct connection", { providerId: provider.id, providerName: provider.name, + cacheKey, }); } @@ -1620,6 +1657,21 @@ export class ProxyForwarder { syscall?: string; // 系统调用:如 'getaddrinfo'、'connect'、'read'、'write' }; + // ⭐ SSL 证书错误检测:标记 Agent 为不健康,下次请求将创建新 Agent + const sslErrorCacheKey = proxyConfig?.cacheKey ?? directConnectionCacheKey; + if (isSSLCertificateError(err) && sslErrorCacheKey) { + const pool = getGlobalAgentPool(); + pool.markUnhealthy(sslErrorCacheKey, err.message); + logger.warn("ProxyForwarder: SSL certificate error detected, marked agent as unhealthy", { + providerId: provider.id, + providerName: provider.name, + cacheKey: sslErrorCacheKey, + connectionType: proxyConfig ? "proxy" : "direct", + errorMessage: err.message, + errorCode: err.code, + }); + } + // ⭐ 超时错误检测(优先级:response > client) if (responseController.signal.aborted && !session.clientAbortSignal?.aborted) { @@ -1764,9 +1816,21 @@ export class ProxyForwarder { const http1FallbackInit = { ...init }; delete http1FallbackInit.dispatcher; + // ⭐ 标记 HTTP/2 Agent 为不健康,避免后续请求重复失败 + const http2CacheKey = proxyConfig?.cacheKey ?? directConnectionCacheKey; + if (http2CacheKey) { + const pool = getGlobalAgentPool(); + pool.markUnhealthy(http2CacheKey, `HTTP/2 protocol error: ${err.message}`); + logger.debug("ProxyForwarder: Marked HTTP/2 agent as unhealthy due to protocol error", { + providerId: provider.id, + providerName: provider.name, + cacheKey: http2CacheKey, + }); + } + // 如果使用了代理,创建不支持 HTTP/2 的代理 Agent if (proxyConfig) { - const http1ProxyConfig = createProxyAgentForProvider(provider, proxyUrl, false); + const http1ProxyConfig = await getProxyAgentForProvider(provider, proxyUrl, false); if (http1ProxyConfig) { http1FallbackInit.dispatcher = http1ProxyConfig.agent; } diff --git a/src/app/v1/_lib/proxy/thinking-signature-rectifier.test.ts b/src/app/v1/_lib/proxy/thinking-signature-rectifier.test.ts index ef1403c97..622d72dc7 100644 --- a/src/app/v1/_lib/proxy/thinking-signature-rectifier.test.ts +++ b/src/app/v1/_lib/proxy/thinking-signature-rectifier.test.ts @@ -66,6 +66,28 @@ describe("thinking-signature-rectifier", () => { "invalid_signature_in_thinking_block" ); }); + + test("应命中:signature Extra inputs are not permitted(上游不支持 signature 字段)", () => { + // 从 Anthropic 官方渠道切换到第三方渠道时,历史 content block 包含 signature 字段 + // 第三方渠道不支持该字段,返回 "Extra inputs are not permitted" 错误 + expect( + detectThinkingSignatureRectifierTrigger( + "content.1.tool_use.signature: Extra inputs are not permitted" + ) + ).toBe("invalid_signature_in_thinking_block"); + + // 完整错误消息格式(含 request id) + expect( + detectThinkingSignatureRectifierTrigger( + '{"error":{"type":"","message":"***.***content.1.tool_use.signature: Extra inputs are not permitted (request id: 20260122042750493345237oUastQMk)"}}' + ) + ).toBe("invalid_signature_in_thinking_block"); + + // 大小写混合 + expect( + detectThinkingSignatureRectifierTrigger("Signature: EXTRA INPUTS ARE NOT PERMITTED") + ).toBe("invalid_signature_in_thinking_block"); + }); }); 
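  // Editor's note (not part of the patch): the mixed-case assertion above passes because the
  // trigger detection lowercases the upstream error message before substring matching. The
  // corresponding check added in thinking-signature-rectifier.ts (shown further below) is,
  // in essence:
  //
  //   const lower = errorMessage.toLowerCase();
  //   const looksLikeExtraSignatureField =
  //     lower.includes("signature") && lower.includes("extra inputs are not permitted");
  //
  // so both "content.1.tool_use.signature: Extra inputs are not permitted" and the
  // all-caps variant map to "invalid_signature_in_thinking_block".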
describe("rectifyAnthropicRequestMessage", () => { diff --git a/src/app/v1/_lib/proxy/thinking-signature-rectifier.ts b/src/app/v1/_lib/proxy/thinking-signature-rectifier.ts index a2596c406..813d8ac45 100644 --- a/src/app/v1/_lib/proxy/thinking-signature-rectifier.ts +++ b/src/app/v1/_lib/proxy/thinking-signature-rectifier.ts @@ -60,6 +60,16 @@ export function detectThinkingSignatureRectifierTrigger( return "invalid_signature_in_thinking_block"; // 复用现有触发类型,整流逻辑相同 } + // 检测:signature 字段不被接受(上游 API 返回 "xxx.signature: Extra inputs are not permitted") + // 场景:请求体中存在 signature 字段但上游 API 不支持(如非 Anthropic 官方 API) + // 常见于从 Anthropic 官方渠道切换到第三方渠道时,历史 content block 包含 signature 字段 + const looksLikeExtraSignatureField = + lower.includes("signature") && lower.includes("extra inputs are not permitted"); + + if (looksLikeExtraSignatureField) { + return "invalid_signature_in_thinking_block"; // 复用现有触发类型,整流逻辑相同 + } + // 与默认错误规则保持一致(Issue #432 / Rule 6) if (/非法请求|illegal request|invalid request/i.test(errorMessage)) { return "invalid_request"; diff --git a/src/components/ui/chart.tsx b/src/components/ui/chart.tsx index ca82f9b5b..6e9cbc01f 100644 --- a/src/components/ui/chart.tsx +++ b/src/components/ui/chart.tsx @@ -63,7 +63,7 @@ function ChartContainer({ {children} diff --git a/src/components/ui/tag-input.tsx b/src/components/ui/tag-input.tsx index 9a39d7bd2..fd381c0b6 100644 --- a/src/components/ui/tag-input.tsx +++ b/src/components/ui/tag-input.tsx @@ -2,6 +2,7 @@ import { X } from "lucide-react"; import * as React from "react"; +import { createPortal } from "react-dom"; import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip"; import { cn } from "@/lib/utils"; import { Badge } from "./badge"; @@ -66,8 +67,14 @@ export function TagInput({ const [inputValue, setInputValue] = React.useState(""); const [showSuggestions, setShowSuggestions] = React.useState(false); const [highlightedIndex, setHighlightedIndex] = React.useState(-1); + const [dropdownPosition, setDropdownPosition] = React.useState<{ + top: number; + left: number; + width: number; + } | null>(null); const inputRef = React.useRef(null); const containerRef = React.useRef(null); + const dropdownRef = React.useRef(null); const normalizedMaxVisible = React.useMemo(() => { if (maxVisibleTags === undefined) return undefined; @@ -93,6 +100,64 @@ export function TagInput({ previousShowSuggestions.current = showSuggestions; }, [showSuggestions, onSuggestionsClose]); + // Calculate dropdown position when showing suggestions + // Using fixed positioning, so use viewport coordinates directly (no scroll offset) + React.useEffect(() => { + if (showSuggestions && containerRef.current) { + const rect = containerRef.current.getBoundingClientRect(); + setDropdownPosition({ + top: rect.bottom + 4, + left: rect.left, + width: rect.width, + }); + } + }, [showSuggestions]); + + // Update position on scroll/resize (recalculate viewport coords) + React.useEffect(() => { + if (!showSuggestions) return; + + const updatePosition = () => { + if (containerRef.current) { + const rect = containerRef.current.getBoundingClientRect(); + setDropdownPosition({ + top: rect.bottom + 4, + left: rect.left, + width: rect.width, + }); + } + }; + + window.addEventListener("scroll", updatePosition, true); + window.addEventListener("resize", updatePosition); + + return () => { + window.removeEventListener("scroll", updatePosition, true); + window.removeEventListener("resize", updatePosition); + }; + }, [showSuggestions]); + + // Close dropdown when clicking 
outside + React.useEffect(() => { + if (!showSuggestions) return; + + const handleClickOutside = (e: MouseEvent) => { + const target = e.target as Node; + if ( + containerRef.current && + !containerRef.current.contains(target) && + dropdownRef.current && + !dropdownRef.current.contains(target) + ) { + setShowSuggestions(false); + setHighlightedIndex(-1); + } + }; + + document.addEventListener("mousedown", handleClickOutside); + return () => document.removeEventListener("mousedown", handleClickOutside); + }, [showSuggestions]); + const inputMinWidthClass = normalizedMaxVisible === undefined ? "min-w-[120px]" : "min-w-[60px]"; // Normalize suggestions so callers can provide either strings or { value, label } objects. @@ -404,27 +469,40 @@ export function TagInput({ )} {/* 建议下拉列表 */} - {showSuggestions && filteredSuggestions.length > 0 && ( -
- {filteredSuggestions.map((suggestion, index) => ( - - ))} -
- )} + {showSuggestions && + filteredSuggestions.length > 0 && + dropdownPosition && + typeof document !== "undefined" && + createPortal( +
+ {filteredSuggestions.map((suggestion, index) => ( + + ))} +
, + document.body + )}
); } diff --git a/src/lib/proxy-agent.ts b/src/lib/proxy-agent.ts index e3d5787e7..23a73dbc6 100644 --- a/src/lib/proxy-agent.ts +++ b/src/lib/proxy-agent.ts @@ -1,5 +1,6 @@ import { socksDispatcher } from "fetch-socks"; import { Agent, type Dispatcher, ProxyAgent, setGlobalDispatcher } from "undici"; +import { getGlobalAgentPool as getPool } from "@/lib/proxy-agent/agent-pool"; import type { Provider } from "@/types/provider"; import { getEnvConfig } from "./config/env.schema"; import { logger } from "./logger"; @@ -239,3 +240,71 @@ export function isValidProxyUrl(proxyUrl: string): boolean { return false; } } + +// Re-export from agent-pool module +export { + type AgentPool, + type AgentPoolConfig, + type AgentPoolStats, + generateAgentCacheKey, + getGlobalAgentPool, + resetGlobalAgentPool, +} from "./proxy-agent/agent-pool"; + +/** + * Extended ProxyConfig with cache key for health management + */ +export interface ProxyConfigWithCacheKey extends ProxyConfig { + /** Cache key for marking agent as unhealthy on SSL errors */ + cacheKey: string; +} + +/** + * Get proxy agent for provider using the global Agent Pool + * + * This is the recommended way to get a proxy agent as it: + * 1. Reuses agents across requests to the same endpoint + * 2. Isolates connections between different endpoints + * 3. Supports health management (mark unhealthy on SSL errors) + * + * @param provider Provider configuration + * @param targetUrl Target request URL + * @param enableHttp2 Whether to enable HTTP/2 (default: false) + * @returns ProxyConfig with cacheKey, or null if no proxy configured + */ +export async function getProxyAgentForProvider( + provider: Provider | ProviderProxyConfig, + targetUrl: string, + enableHttp2 = false +): Promise { + // No proxy configured + if (!provider.proxyUrl) { + return null; + } + + const proxyUrl = provider.proxyUrl.trim(); + if (!proxyUrl) { + return null; + } + + const pool = getPool(); + + const { agent, cacheKey } = await pool.getAgent({ + endpointUrl: targetUrl, + proxyUrl, + enableHttp2, + }); + + // Determine actual HTTP/2 status (SOCKS doesn't support HTTP/2) + const parsedProxy = new URL(proxyUrl); + const isSocks = parsedProxy.protocol === "socks5:" || parsedProxy.protocol === "socks4:"; + const actualHttp2Enabled = isSocks ? false : enableHttp2; + + return { + agent, + fallbackToDirect: provider.proxyFallbackToDirect ?? false, + proxyUrl: maskProxyUrl(proxyUrl), + http2Enabled: actualHttp2Enabled, + cacheKey, + }; +} diff --git a/src/lib/proxy-agent/agent-pool.ts b/src/lib/proxy-agent/agent-pool.ts new file mode 100644 index 000000000..c1ff28d8c --- /dev/null +++ b/src/lib/proxy-agent/agent-pool.ts @@ -0,0 +1,450 @@ +/** + * Agent Pool - Connection caching for HTTP/HTTPS requests + * + * Provides Agent caching per endpoint to: + * 1. Reuse connections across requests to the same endpoint + * 2. Isolate connections between different endpoints (prevents SSL certificate issues) + * 3. Support health management (mark unhealthy on SSL errors) + * 4. 
Implement TTL-based expiration and LRU eviction + */ +import { socksDispatcher } from "fetch-socks"; +import { Agent, type Dispatcher, ProxyAgent } from "undici"; +import { getEnvConfig } from "@/lib/config/env.schema"; +import { logger } from "@/lib/logger"; + +/** + * Agent Pool Configuration + */ +export interface AgentPoolConfig { + /** Maximum total number of cached agents (default: 100) */ + maxTotalAgents: number; + /** Agent TTL in milliseconds (default: 300000 = 5 minutes) */ + agentTtlMs: number; + /** Connection idle timeout in milliseconds (default: 60000 = 1 minute) */ + connectionIdleTimeoutMs: number; + /** Cleanup interval in milliseconds (default: 30000 = 30 seconds) */ + cleanupIntervalMs: number; +} + +/** + * Cached Agent entry + */ +interface CachedAgent { + agent: Dispatcher; + endpointKey: string; + createdAt: number; + lastUsedAt: number; + requestCount: number; + healthy: boolean; +} + +/** + * Agent Pool Statistics + */ +export interface AgentPoolStats { + cacheSize: number; + totalRequests: number; + cacheHits: number; + cacheMisses: number; + hitRate: number; + unhealthyAgents: number; + evictedAgents: number; +} + +/** + * Get Agent parameters + */ +export interface GetAgentParams { + endpointUrl: string; + proxyUrl: string | null; + enableHttp2: boolean; +} + +/** + * Get Agent result + */ +export interface GetAgentResult { + agent: Dispatcher; + isNew: boolean; + cacheKey: string; +} + +/** + * Agent Pool interface + */ +export interface AgentPool { + /** + * Get or create an Agent for the given parameters + */ + getAgent(params: GetAgentParams): Promise; + + /** + * Mark an Agent as unhealthy (will be replaced on next getAgent call) + */ + markUnhealthy(cacheKey: string, reason: string): void; + + /** + * Evict all Agents for a specific endpoint + */ + evictEndpoint(endpointKey: string): Promise; + + /** + * Get pool statistics + */ + getPoolStats(): AgentPoolStats; + + /** + * Cleanup expired Agents + * @returns Number of agents cleaned up + */ + cleanup(): Promise; + + /** + * Shutdown the pool and close all agents + */ + shutdown(): Promise; +} + +/** + * Generate cache key for Agent lookup + * + * Format: "${endpointOrigin}|${proxyOrigin || 'direct'}|${h2 ? 'h2' : 'h1'}" + * Note: Only uses proxy origin (without credentials) to avoid exposing sensitive data in logs/metrics + */ +export function generateAgentCacheKey(params: GetAgentParams): string { + const url = new URL(params.endpointUrl); + const origin = url.origin; + let proxy = "direct"; + if (params.proxyUrl) { + const proxyUrl = new URL(params.proxyUrl); + // Use only origin (protocol + host + port) to avoid exposing credentials + proxy = proxyUrl.origin; + } + const protocol = params.enableHttp2 ? 
"h2" : "h1"; + return `${origin}|${proxy}|${protocol}`; +} + +/** + * Default Agent Pool configuration + */ +const DEFAULT_CONFIG: AgentPoolConfig = { + maxTotalAgents: 100, + agentTtlMs: 300000, // 5 minutes + connectionIdleTimeoutMs: 60000, // 1 minute + cleanupIntervalMs: 30000, // 30 seconds +}; + +/** + * Agent Pool Implementation + */ +export class AgentPoolImpl implements AgentPool { + private cache: Map = new Map(); + private unhealthyKeys: Set = new Set(); + private cleanupTimer: ReturnType | null = null; + private config: AgentPoolConfig; + private stats = { + totalRequests: 0, + cacheHits: 0, + cacheMisses: 0, + evictedAgents: 0, + }; + /** Pending agent creation promises to prevent race conditions */ + private pendingCreations: Map> = new Map(); + + constructor(config: Partial = {}) { + this.config = { ...DEFAULT_CONFIG, ...config }; + this.startCleanupTimer(); + } + + private startCleanupTimer(): void { + if (this.cleanupTimer) { + clearInterval(this.cleanupTimer); + } + this.cleanupTimer = setInterval(() => { + void this.cleanup(); + }, this.config.cleanupIntervalMs); + // Allow process to exit gracefully without waiting for cleanup timer + this.cleanupTimer.unref(); + } + + async getAgent(params: GetAgentParams): Promise { + const cacheKey = generateAgentCacheKey(params); + this.stats.totalRequests++; + + // Check if marked as unhealthy + if (this.unhealthyKeys.has(cacheKey)) { + this.unhealthyKeys.delete(cacheKey); + await this.evictByKey(cacheKey); + } + + // Try to get from cache + const cached = this.cache.get(cacheKey); + if (cached && !this.isExpired(cached)) { + cached.lastUsedAt = Date.now(); + cached.requestCount++; + this.stats.cacheHits++; + return { agent: cached.agent, isNew: false, cacheKey }; + } + + // Check if there's a pending creation for this key (race condition prevention) + const pending = this.pendingCreations.get(cacheKey); + if (pending) { + // Wait for the pending creation and return its result + const result = await pending; + // Count as cache hit - we're reusing the pending result, not creating a new agent + // Note: Don't decrement cacheMisses here since we never incremented it for this request + this.stats.cacheHits++; + return { ...result, isNew: false }; + } + + // Cache miss - create new agent with race condition protection + this.stats.cacheMisses++; + + // Create the agent creation promise and store it + const creationPromise = this.createAgentWithCache(params, cacheKey, cached); + this.pendingCreations.set(cacheKey, creationPromise); + + try { + return await creationPromise; + } finally { + // Clean up pending creation + this.pendingCreations.delete(cacheKey); + } + } + + /** + * Internal method to create agent and update cache + * Separated to enable race condition protection via Promise caching + */ + private async createAgentWithCache( + params: GetAgentParams, + cacheKey: string, + existingCached: CachedAgent | undefined + ): Promise { + // Evict old entry if exists + if (existingCached) { + await this.evictByKey(cacheKey); + } + + // Create new agent + const agent = await this.createAgent(params); + const url = new URL(params.endpointUrl); + + const newCached: CachedAgent = { + agent, + endpointKey: url.origin, + createdAt: Date.now(), + lastUsedAt: Date.now(), + requestCount: 1, + healthy: true, + }; + + this.cache.set(cacheKey, newCached); + + // Enforce max size (LRU eviction) + await this.enforceMaxSize(); + + return { agent, isNew: true, cacheKey }; + } + + markUnhealthy(cacheKey: string, reason: string): void { + 
this.unhealthyKeys.add(cacheKey); + logger.warn("AgentPool: Agent marked as unhealthy", { + cacheKey, + reason, + }); + } + + async evictEndpoint(endpointKey: string): Promise { + const keysToEvict: string[] = []; + + for (const [key, cached] of this.cache.entries()) { + if (cached.endpointKey === endpointKey) { + keysToEvict.push(key); + } + } + + for (const key of keysToEvict) { + await this.evictByKey(key); + } + } + + getPoolStats(): AgentPoolStats { + const unhealthyCount = this.unhealthyKeys.size; + const hitRate = + this.stats.totalRequests > 0 ? this.stats.cacheHits / this.stats.totalRequests : 0; + + return { + cacheSize: this.cache.size, + totalRequests: this.stats.totalRequests, + cacheHits: this.stats.cacheHits, + cacheMisses: this.stats.cacheMisses, + hitRate, + unhealthyAgents: unhealthyCount, + evictedAgents: this.stats.evictedAgents, + }; + } + + async cleanup(): Promise { + const now = Date.now(); + const keysToCleanup: string[] = []; + + for (const [key, cached] of this.cache.entries()) { + if (this.isExpired(cached, now)) { + keysToCleanup.push(key); + } + } + + for (const key of keysToCleanup) { + await this.evictByKey(key); + } + + if (keysToCleanup.length > 0) { + logger.debug("AgentPool: Cleaned up expired agents", { + count: keysToCleanup.length, + }); + } + + return keysToCleanup.length; + } + + async shutdown(): Promise { + if (this.cleanupTimer) { + clearInterval(this.cleanupTimer); + this.cleanupTimer = null; + } + + const closePromises: Promise[] = []; + + for (const [key, cached] of this.cache.entries()) { + closePromises.push(this.closeAgent(cached.agent, key)); + } + + await Promise.all(closePromises); + this.cache.clear(); + this.unhealthyKeys.clear(); + + logger.info("AgentPool: Shutdown complete"); + } + + private isExpired(cached: CachedAgent, now: number = Date.now()): boolean { + return now - cached.lastUsedAt > this.config.agentTtlMs; + } + + private async evictByKey(key: string): Promise { + const cached = this.cache.get(key); + if (cached) { + await this.closeAgent(cached.agent, key); + this.cache.delete(key); + this.stats.evictedAgents++; + } + } + + private async closeAgent(agent: Dispatcher, key: string): Promise { + try { + if (typeof agent.close === "function") { + await agent.close(); + } else if (typeof agent.destroy === "function") { + await agent.destroy(); + } + } catch (error) { + logger.warn("AgentPool: Error closing agent", { + key, + error: error instanceof Error ? 
error.message : String(error), + }); + } + } + + private async enforceMaxSize(): Promise { + if (this.cache.size <= this.config.maxTotalAgents) { + return; + } + + // Sort by lastUsedAt (oldest first) for LRU eviction + const entries = Array.from(this.cache.entries()).sort( + ([, a], [, b]) => a.lastUsedAt - b.lastUsedAt + ); + + const toEvict = entries.slice(0, this.cache.size - this.config.maxTotalAgents); + + for (const [key] of toEvict) { + await this.evictByKey(key); + } + } + + private async createAgent(params: GetAgentParams): Promise { + const { + FETCH_CONNECT_TIMEOUT: connectTimeout, + FETCH_HEADERS_TIMEOUT: headersTimeout, + FETCH_BODY_TIMEOUT: bodyTimeout, + } = getEnvConfig(); + + // No proxy - create direct Agent + if (!params.proxyUrl) { + return new Agent({ + connectTimeout, + headersTimeout, + bodyTimeout, + allowH2: params.enableHttp2, + }); + } + + const proxyUrl = params.proxyUrl.trim(); + const parsedProxy = new URL(proxyUrl); + + // SOCKS proxy + if (parsedProxy.protocol === "socks5:" || parsedProxy.protocol === "socks4:") { + return socksDispatcher( + { + type: parsedProxy.protocol === "socks5:" ? 5 : 4, + host: parsedProxy.hostname, + port: parseInt(parsedProxy.port, 10) || 1080, + userId: parsedProxy.username || undefined, + password: parsedProxy.password || undefined, + }, + { + connect: { + timeout: connectTimeout, + }, + } + ); + } + + // HTTP/HTTPS proxy + if (parsedProxy.protocol === "http:" || parsedProxy.protocol === "https:") { + return new ProxyAgent({ + uri: proxyUrl, + allowH2: params.enableHttp2, + connectTimeout, + headersTimeout, + bodyTimeout, + }); + } + + throw new Error(`Unsupported proxy protocol: ${parsedProxy.protocol}`); + } +} + +// Global singleton instance +let globalAgentPool: AgentPool | null = null; + +/** + * Get the global Agent Pool singleton + */ +export function getGlobalAgentPool(): AgentPool { + if (!globalAgentPool) { + globalAgentPool = new AgentPoolImpl(); + logger.info("AgentPool: Global instance created"); + } + return globalAgentPool; +} + +/** + * Reset the global Agent Pool (for testing) + */ +export async function resetGlobalAgentPool(): Promise { + if (globalAgentPool) { + await globalAgentPool.shutdown(); + globalAgentPool = null; + } +} diff --git a/src/lib/session-status.ts b/src/lib/session-status.ts new file mode 100644 index 000000000..a03896c2c --- /dev/null +++ b/src/lib/session-status.ts @@ -0,0 +1,93 @@ +import { logger } from "@/lib/logger"; + +/** + * Session Display Status Constants + * English uppercase abbreviations (no i18n for status labels) + */ +export const SESSION_DISPLAY_STATUS = { + IN_PROGRESS: "IN_PROGRESS", + IDLE: "IDLE", + INITIALIZING: "INITIALIZING", +} as const; + +export type SessionDisplayStatus = + (typeof SESSION_DISPLAY_STATUS)[keyof typeof SESSION_DISPLAY_STATUS]; + +/** + * Session Status Info for UI rendering + */ +export interface SessionStatusInfo { + status: SessionDisplayStatus; + label: string; + tooltipKey: string; + color: string; + pulse: boolean; +} + +/** + * Input type for session status calculation + */ +export interface SessionStatusInput { + concurrentCount?: number; + requestCount?: number; + status?: "in_progress" | "completed" | "error"; +} + +/** + * Determine session display status based on request state + * + * Logic: + * - IN_PROGRESS: concurrentCount > 0 AND requestCount > 1 (has active requests, not first) + * - INITIALIZING: requestCount <= 1 AND concurrentCount > 0 (first request still running) + * - IDLE: concurrentCount === 0 (all requests completed) + * + 
* @param session - Session data with concurrent and request counts + * @returns SessionStatusInfo for UI rendering + */ +export function getSessionDisplayStatus(session: SessionStatusInput): SessionStatusInfo { + const { concurrentCount = 0, requestCount = 0, status } = session; + + logger.trace("getSessionDisplayStatus", { concurrentCount, requestCount, status }); + + // Error status takes priority + if (status === "error") { + return { + status: SESSION_DISPLAY_STATUS.IN_PROGRESS, + label: "ERROR", + tooltipKey: "status.errorTooltip", + color: "text-rose-500 dark:text-rose-400", + pulse: true, + }; + } + + // INITIALIZING: first request still running + if (requestCount <= 1 && concurrentCount > 0) { + return { + status: SESSION_DISPLAY_STATUS.INITIALIZING, + label: SESSION_DISPLAY_STATUS.INITIALIZING, + tooltipKey: "status.initializingTooltip", + color: "text-amber-500 dark:text-amber-400", + pulse: true, + }; + } + + // IN_PROGRESS: has active requests + if (concurrentCount > 0) { + return { + status: SESSION_DISPLAY_STATUS.IN_PROGRESS, + label: SESSION_DISPLAY_STATUS.IN_PROGRESS, + tooltipKey: "status.inProgressTooltip", + color: "text-emerald-500 dark:text-emerald-400", + pulse: true, + }; + } + + // IDLE: no active requests + return { + status: SESSION_DISPLAY_STATUS.IDLE, + label: SESSION_DISPLAY_STATUS.IDLE, + tooltipKey: "status.idleTooltip", + color: "text-muted-foreground/50", + pulse: false, + }; +} diff --git a/src/lib/session-tracker.ts b/src/lib/session-tracker.ts index 72c2d2874..da1f1d241 100644 --- a/src/lib/session-tracker.ts +++ b/src/lib/session-tracker.ts @@ -550,6 +550,62 @@ export class SessionTracker { } } + /** + * 批量获取多个 session 的并发计数 + * 用于 dashboard 显示优化,避免 N+1 查询 + * + * @param sessionIds - Session ID 数组 + * @returns Map + */ + static async getConcurrentCountBatch(sessionIds: string[]): Promise> { + const result = new Map(); + + if (sessionIds.length === 0) { + return result; + } + + const redis = getRedisClient(); + if (!redis || redis.status !== "ready") { + for (const id of sessionIds) { + result.set(id, 0); + } + return result; + } + + try { + const pipeline = redis.pipeline(); + for (const sessionId of sessionIds) { + pipeline.get(`session:${sessionId}:concurrent_count`); + } + + const results = await pipeline.exec(); + if (!results) { + for (const id of sessionIds) { + result.set(id, 0); + } + return result; + } + + for (let i = 0; i < sessionIds.length; i++) { + const [err, count] = results[i]; + result.set(sessionIds[i], !err && count ? 
parseInt(count as string, 10) : 0); + } + + logger.trace("SessionTracker: Got concurrent count batch", { + count: sessionIds.length, + nonZero: Array.from(result.values()).filter((v) => v > 0).length, + }); + + return result; + } catch (error) { + logger.error("SessionTracker: Failed to get concurrent count batch", { error }); + for (const id of sessionIds) { + result.set(id, 0); + } + return result; + } + } + /** * Get the current concurrent count for a session * diff --git a/src/types/session.ts b/src/types/session.ts index a89b4a34c..10e5ed9ea 100644 --- a/src/types/session.ts +++ b/src/types/session.ts @@ -37,6 +37,7 @@ export interface ActiveSessionInfo { // Derived fields durationMs?: number; // Total duration requestCount?: number; // Request count + concurrentCount?: number; // Concurrent request count (used for real-time status calculation) } /** diff --git a/tests/unit/dashboard/availability/availability-dashboard.test.tsx b/tests/unit/dashboard/availability/availability-dashboard.test.tsx new file mode 100644 index 000000000..debe497c3 --- /dev/null +++ b/tests/unit/dashboard/availability/availability-dashboard.test.tsx @@ -0,0 +1,326 @@ +/** + * @vitest-environment happy-dom + */ + +import { describe, expect, test } from "vitest"; + +// Test the time range calculation logic from AvailabilityDashboard + +describe("AvailabilityDashboard - time range calculations", () => { + type TimeRangeOption = "15min" | "1h" | "6h" | "24h" | "7d"; + + const TIME_RANGE_MAP: Record<TimeRangeOption, number> = { + "15min": 15 * 60 * 1000, + "1h": 60 * 60 * 1000, + "6h": 6 * 60 * 60 * 1000, + "24h": 24 * 60 * 60 * 1000, + "7d": 7 * 24 * 60 * 60 * 1000, + }; + + const TARGET_BUCKETS = 60; + + function calculateBucketSize(timeRangeMs: number): number { + const bucketSizeMs = timeRangeMs / TARGET_BUCKETS; + const bucketSizeMinutes = bucketSizeMs / (60 * 1000); + return Math.max(0.25, Math.round(bucketSizeMinutes * 4) / 4); + } + + describe("TIME_RANGE_MAP values", () => { + test("15min should be 15 minutes in milliseconds", () => { + expect(TIME_RANGE_MAP["15min"]).toBe(15 * 60 * 1000); + expect(TIME_RANGE_MAP["15min"]).toBe(900000); + }); + + test("1h should be 1 hour in milliseconds", () => { + expect(TIME_RANGE_MAP["1h"]).toBe(60 * 60 * 1000); + expect(TIME_RANGE_MAP["1h"]).toBe(3600000); + }); + + test("6h should be 6 hours in milliseconds", () => { + expect(TIME_RANGE_MAP["6h"]).toBe(6 * 60 * 60 * 1000); + expect(TIME_RANGE_MAP["6h"]).toBe(21600000); + }); + + test("24h should be 24 hours in milliseconds", () => { + expect(TIME_RANGE_MAP["24h"]).toBe(24 * 60 * 60 * 1000); + expect(TIME_RANGE_MAP["24h"]).toBe(86400000); + }); + + test("7d should be 7 days in milliseconds", () => { + expect(TIME_RANGE_MAP["7d"]).toBe(7 * 24 * 60 * 60 * 1000); + expect(TIME_RANGE_MAP["7d"]).toBe(604800000); + }); + }); + + describe("calculateBucketSize", () => { + test("should calculate bucket size for 15min range", () => { + const bucketSize = calculateBucketSize(TIME_RANGE_MAP["15min"]); + // 15min / 60 buckets = 0.25 minutes per bucket + expect(bucketSize).toBe(0.25); + }); + + test("should calculate bucket size for 1h range", () => { + const bucketSize = calculateBucketSize(TIME_RANGE_MAP["1h"]); + // 60min / 60 buckets = 1 minute per bucket + expect(bucketSize).toBe(1); + }); + + test("should calculate bucket size for 6h range", () => { + const bucketSize = calculateBucketSize(TIME_RANGE_MAP["6h"]); + // 360min / 60 buckets = 6 minutes per bucket + expect(bucketSize).toBe(6); + }); + + test("should calculate bucket size for 24h range", () => { + const bucketSize = calculateBucketSize(TIME_RANGE_MAP["24h"]); + // 1440min / 60 buckets = 24
minutes per bucket + expect(bucketSize).toBe(24); + }); + + test("should calculate bucket size for 7d range", () => { + const bucketSize = calculateBucketSize(TIME_RANGE_MAP["7d"]); + // 10080min / 60 buckets = 168 minutes per bucket + expect(bucketSize).toBe(168); + }); + + test("should enforce minimum bucket size of 0.25 minutes", () => { + // Very small time range + const bucketSize = calculateBucketSize(1000); // 1 second + expect(bucketSize).toBe(0.25); + }); + + test("should round to nearest 0.25 minutes", () => { + // Test rounding behavior + const testCases = [ + { input: 60 * 60 * 1000 * 1.1, expected: 1 }, // ~1.1 min -> 1 + { input: 60 * 60 * 1000 * 1.3, expected: 1.25 }, // ~1.3 min -> 1.25 + { input: 60 * 60 * 1000 * 1.6, expected: 1.5 }, // ~1.6 min -> 1.5 + { input: 60 * 60 * 1000 * 1.9, expected: 2 }, // ~1.9 min -> 2 + ]; + + for (const { input, expected } of testCases) { + const result = calculateBucketSize(input); + expect(result).toBeCloseTo(expected, 1); + } + }); + }); + + describe("time range date calculations", () => { + test("should calculate correct start time for each range", () => { + const now = new Date("2024-01-15T12:00:00Z"); + + for (const [range, ms] of Object.entries(TIME_RANGE_MAP)) { + const startTime = new Date(now.getTime() - ms); + const diff = now.getTime() - startTime.getTime(); + expect(diff).toBe(ms); + } + }); + + test("15min range should go back 15 minutes", () => { + const now = new Date("2024-01-15T12:00:00Z"); + const startTime = new Date(now.getTime() - TIME_RANGE_MAP["15min"]); + expect(startTime.toISOString()).toBe("2024-01-15T11:45:00.000Z"); + }); + + test("24h range should go back 24 hours", () => { + const now = new Date("2024-01-15T12:00:00Z"); + const startTime = new Date(now.getTime() - TIME_RANGE_MAP["24h"]); + expect(startTime.toISOString()).toBe("2024-01-14T12:00:00.000Z"); + }); + + test("7d range should go back 7 days", () => { + const now = new Date("2024-01-15T12:00:00Z"); + const startTime = new Date(now.getTime() - TIME_RANGE_MAP["7d"]); + expect(startTime.toISOString()).toBe("2024-01-08T12:00:00.000Z"); + }); + }); +}); + +describe("AvailabilityDashboard - overview metrics calculations", () => { + interface TimeBucket { + avgLatencyMs: number; + redCount: number; + totalRequests: number; + } + + interface Provider { + timeBuckets: TimeBucket[]; + totalRequests: number; + currentStatus: "green" | "yellow" | "red" | "unknown"; + } + + function calculateAvgLatency(providers: Provider[]): number { + if (providers.length === 0) return 0; + + const providersWithLatency = providers.filter((p) => + p.timeBuckets.some((b) => b.avgLatencyMs > 0) + ); + + if (providersWithLatency.length === 0) return 0; + + const totalLatency = providersWithLatency.reduce((sum, p) => { + const latencies = p.timeBuckets.filter((b) => b.avgLatencyMs > 0).map((b) => b.avgLatencyMs); + if (latencies.length === 0) return sum; + return sum + latencies.reduce((a, b) => a + b, 0) / latencies.length; + }, 0); + + return totalLatency / providersWithLatency.length; + } + + function calculateErrorRate(providers: Provider[]): number { + if (providers.length === 0) return 0; + + const totalErrorRate = providers.reduce((sum, p) => { + const total = p.totalRequests; + const errors = p.timeBuckets.reduce((s, b) => s + b.redCount, 0); + return sum + (total > 0 ? 
errors / total : 0); + }, 0); + + return totalErrorRate / providers.length; + } + + function countByStatus(providers: Provider[], status: string): number { + return providers.filter((p) => p.currentStatus === status).length; + } + + describe("calculateAvgLatency", () => { + test("should return 0 for empty providers", () => { + expect(calculateAvgLatency([])).toBe(0); + }); + + test("should calculate average latency across providers", () => { + const providers: Provider[] = [ + { + timeBuckets: [{ avgLatencyMs: 100, redCount: 0, totalRequests: 10 }], + totalRequests: 10, + currentStatus: "green", + }, + { + timeBuckets: [{ avgLatencyMs: 200, redCount: 0, totalRequests: 10 }], + totalRequests: 10, + currentStatus: "green", + }, + ]; + expect(calculateAvgLatency(providers)).toBe(150); + }); + + test("should ignore providers with no latency data", () => { + const providers: Provider[] = [ + { + timeBuckets: [{ avgLatencyMs: 100, redCount: 0, totalRequests: 10 }], + totalRequests: 10, + currentStatus: "green", + }, + { + timeBuckets: [{ avgLatencyMs: 0, redCount: 0, totalRequests: 0 }], + totalRequests: 0, + currentStatus: "unknown", + }, + ]; + expect(calculateAvgLatency(providers)).toBe(100); + }); + + test("should average multiple buckets within a provider", () => { + const providers: Provider[] = [ + { + timeBuckets: [ + { avgLatencyMs: 100, redCount: 0, totalRequests: 10 }, + { avgLatencyMs: 200, redCount: 0, totalRequests: 10 }, + { avgLatencyMs: 300, redCount: 0, totalRequests: 10 }, + ], + totalRequests: 30, + currentStatus: "green", + }, + ]; + expect(calculateAvgLatency(providers)).toBe(200); + }); + }); + + describe("calculateErrorRate", () => { + test("should return 0 for empty providers", () => { + expect(calculateErrorRate([])).toBe(0); + }); + + test("should calculate error rate correctly", () => { + const providers: Provider[] = [ + { + timeBuckets: [{ avgLatencyMs: 100, redCount: 10, totalRequests: 100 }], + totalRequests: 100, + currentStatus: "green", + }, + ]; + expect(calculateErrorRate(providers)).toBe(0.1); // 10% + }); + + test("should average error rates across providers", () => { + const providers: Provider[] = [ + { + timeBuckets: [{ avgLatencyMs: 100, redCount: 10, totalRequests: 100 }], + totalRequests: 100, + currentStatus: "green", + }, + { + timeBuckets: [{ avgLatencyMs: 100, redCount: 20, totalRequests: 100 }], + totalRequests: 100, + currentStatus: "yellow", + }, + ]; + expect(calculateErrorRate(providers)).toBeCloseTo(0.15, 10); // (10% + 20%) / 2 + }); + + test("should handle providers with zero requests", () => { + const providers: Provider[] = [ + { + timeBuckets: [{ avgLatencyMs: 0, redCount: 0, totalRequests: 0 }], + totalRequests: 0, + currentStatus: "unknown", + }, + ]; + expect(calculateErrorRate(providers)).toBe(0); + }); + }); + + describe("countByStatus", () => { + const providers: Provider[] = [ + { timeBuckets: [], totalRequests: 100, currentStatus: "green" }, + { timeBuckets: [], totalRequests: 100, currentStatus: "green" }, + { timeBuckets: [], totalRequests: 50, currentStatus: "yellow" }, + { timeBuckets: [], totalRequests: 10, currentStatus: "red" }, + { timeBuckets: [], totalRequests: 0, currentStatus: "unknown" }, + ]; + + test("should count green providers", () => { + expect(countByStatus(providers, "green")).toBe(2); + }); + + test("should count yellow providers", () => { + expect(countByStatus(providers, "yellow")).toBe(1); + }); + + test("should count red providers", () => { + expect(countByStatus(providers, "red")).toBe(1); + }); + + 
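Note that the calculateErrorRate helper tested above averages each provider's own error rate and then averages across providers (a macro average); it does not divide total errors by total requests (a micro average). The two diverge when providers handle very different request volumes. A minimal sketch of the difference, reusing the same bucket shape as the tests (helper names here are illustrative only):

interface Bucket { redCount: number; totalRequests: number }
interface ProviderStats { totalRequests: number; timeBuckets: Bucket[] }

// Macro average: mean of per-provider error rates (what calculateErrorRate computes).
function macroErrorRate(providers: ProviderStats[]): number {
  if (providers.length === 0) return 0;
  const sum = providers.reduce((acc, p) => {
    const errors = p.timeBuckets.reduce((s, b) => s + b.redCount, 0);
    return acc + (p.totalRequests > 0 ? errors / p.totalRequests : 0);
  }, 0);
  return sum / providers.length;
}

// Micro average: total errors over total requests (weights busy providers more heavily).
function microErrorRate(providers: ProviderStats[]): number {
  const errors = providers.reduce(
    (s, p) => s + p.timeBuckets.reduce((a, b) => a + b.redCount, 0),
    0
  );
  const total = providers.reduce((s, p) => s + p.totalRequests, 0);
  return total > 0 ? errors / total : 0;
}

const sample: ProviderStats[] = [
  { totalRequests: 100, timeBuckets: [{ redCount: 10, totalRequests: 100 }] }, // 10% error rate
  { totalRequests: 10, timeBuckets: [{ redCount: 5, totalRequests: 10 }] },    // 50% error rate
];
console.log(macroErrorRate(sample)); // 0.3
console.log(microErrorRate(sample)); // 15 / 110 ≈ 0.136

So the dashboard's "Error Rate" figure treats every provider equally regardless of traffic, which is the behavior the tests below lock in.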
test("should count unknown providers", () => { + expect(countByStatus(providers, "unknown")).toBe(1); + }); + + test("should return 0 for non-existent status", () => { + expect(countByStatus(providers, "nonexistent")).toBe(0); + }); + }); +}); + +describe("AvailabilityDashboard - auto-refresh intervals", () => { + test("provider tab should use 30 second interval", () => { + const activeTab = "provider"; + const interval = activeTab === "provider" ? 30000 : 10000; + expect(interval).toBe(30000); + }); + + test("endpoint tab should use 10 second interval", () => { + const activeTab = "endpoint"; + const interval = activeTab === "provider" ? 30000 : 10000; + expect(interval).toBe(10000); + }); +}); diff --git a/tests/unit/dashboard/availability/confidence-badge.test.tsx b/tests/unit/dashboard/availability/confidence-badge.test.tsx new file mode 100644 index 000000000..bc15ed4d1 --- /dev/null +++ b/tests/unit/dashboard/availability/confidence-badge.test.tsx @@ -0,0 +1,136 @@ +/** + * @vitest-environment happy-dom + */ + +import { describe, expect, test } from "vitest"; + +// Test the pure functions extracted from ConfidenceBadge component +// These determine confidence levels based on request counts + +describe("ConfidenceBadge - getConfidenceLevel logic", () => { + type ConfidenceLevel = "low" | "medium" | "high"; + + function getConfidenceLevel( + count: number, + thresholds: { low: number; medium: number; high: number } + ): ConfidenceLevel { + if (count >= thresholds.high) return "high"; + if (count >= thresholds.medium) return "medium"; + return "low"; + } + + describe("with default thresholds (low: 10, medium: 50, high: 200)", () => { + const thresholds = { low: 10, medium: 50, high: 200 }; + + test("should return low confidence for counts < medium threshold", () => { + expect(getConfidenceLevel(0, thresholds)).toBe("low"); + expect(getConfidenceLevel(5, thresholds)).toBe("low"); + expect(getConfidenceLevel(10, thresholds)).toBe("low"); + expect(getConfidenceLevel(49, thresholds)).toBe("low"); + }); + + test("should return medium confidence for counts >= medium and < high", () => { + expect(getConfidenceLevel(50, thresholds)).toBe("medium"); + expect(getConfidenceLevel(100, thresholds)).toBe("medium"); + expect(getConfidenceLevel(199, thresholds)).toBe("medium"); + }); + + test("should return high confidence for counts >= high threshold", () => { + expect(getConfidenceLevel(200, thresholds)).toBe("high"); + expect(getConfidenceLevel(500, thresholds)).toBe("high"); + expect(getConfidenceLevel(1000, thresholds)).toBe("high"); + }); + }); + + describe("with custom thresholds", () => { + const customThresholds = { low: 5, medium: 20, high: 100 }; + + test("should respect custom thresholds", () => { + expect(getConfidenceLevel(4, customThresholds)).toBe("low"); + expect(getConfidenceLevel(5, customThresholds)).toBe("low"); + expect(getConfidenceLevel(19, customThresholds)).toBe("low"); + expect(getConfidenceLevel(20, customThresholds)).toBe("medium"); + expect(getConfidenceLevel(99, customThresholds)).toBe("medium"); + expect(getConfidenceLevel(100, customThresholds)).toBe("high"); + }); + }); + + describe("edge cases", () => { + const thresholds = { low: 10, medium: 50, high: 200 }; + + test("should handle zero requests", () => { + expect(getConfidenceLevel(0, thresholds)).toBe("low"); + }); + + test("should handle negative values (treat as low)", () => { + expect(getConfidenceLevel(-1, thresholds)).toBe("low"); + expect(getConfidenceLevel(-100, thresholds)).toBe("low"); + }); + + 
test("should handle very large values", () => { + expect(getConfidenceLevel(1000000, thresholds)).toBe("high"); + }); + + test("should handle exact threshold boundaries", () => { + // At exactly medium threshold + expect(getConfidenceLevel(50, thresholds)).toBe("medium"); + // At exactly high threshold + expect(getConfidenceLevel(200, thresholds)).toBe("high"); + }); + }); +}); + +describe("ConfidenceBadge - visual configuration", () => { + const confidenceConfig = { + low: { + bars: 1, + color: "bg-slate-400", + bgColor: "bg-slate-400/10", + borderStyle: "border-dashed border-slate-400/50", + }, + medium: { + bars: 2, + color: "bg-amber-500", + bgColor: "bg-amber-500/10", + borderStyle: "border-solid border-amber-500/50", + }, + high: { + bars: 3, + color: "bg-emerald-500", + bgColor: "bg-emerald-500/10", + borderStyle: "border-solid border-emerald-500/50", + }, + }; + + test("low confidence should show 1 bar with dashed border", () => { + expect(confidenceConfig.low.bars).toBe(1); + expect(confidenceConfig.low.borderStyle).toContain("border-dashed"); + }); + + test("medium confidence should show 2 bars with solid border", () => { + expect(confidenceConfig.medium.bars).toBe(2); + expect(confidenceConfig.medium.borderStyle).toContain("border-solid"); + }); + + test("high confidence should show 3 bars with solid border", () => { + expect(confidenceConfig.high.bars).toBe(3); + expect(confidenceConfig.high.borderStyle).toContain("border-solid"); + }); + + test("each level should have distinct colors", () => { + expect(confidenceConfig.low.color).not.toBe(confidenceConfig.medium.color); + expect(confidenceConfig.medium.color).not.toBe(confidenceConfig.high.color); + expect(confidenceConfig.low.color).not.toBe(confidenceConfig.high.color); + }); + + test("bar heights should increase progressively", () => { + // Bar heights are calculated as bar * 4px + const lowMaxHeight = confidenceConfig.low.bars * 4; + const mediumMaxHeight = confidenceConfig.medium.bars * 4; + const highMaxHeight = confidenceConfig.high.bars * 4; + + expect(lowMaxHeight).toBe(4); + expect(mediumMaxHeight).toBe(8); + expect(highMaxHeight).toBe(12); + }); +}); diff --git a/tests/unit/dashboard/availability/gauge-card.test.tsx b/tests/unit/dashboard/availability/gauge-card.test.tsx new file mode 100644 index 000000000..8a2479b5a --- /dev/null +++ b/tests/unit/dashboard/availability/gauge-card.test.tsx @@ -0,0 +1,174 @@ +/** + * @vitest-environment happy-dom + */ + +import { describe, expect, test } from "vitest"; + +// Test the pure functions extracted from GaugeCard component +// These are the core logic that determines gauge colors and trend indicators + +describe("GaugeCard - getGaugeColor logic", () => { + // Replicate the getGaugeColor function logic for testing + function getGaugeColor( + value: number, + thresholds: { warning: number; critical: number }, + invertColors: boolean + ): string { + if (invertColors) { + // For metrics where lower is better (error rate, latency) + if (value <= thresholds.critical) return "text-emerald-500"; + if (value <= thresholds.warning) return "text-amber-500"; + return "text-rose-500"; + } + // For metrics where higher is better (availability) + if (value >= thresholds.warning) return "text-emerald-500"; + if (value >= thresholds.critical) return "text-amber-500"; + return "text-rose-500"; + } + + describe("normal metrics (higher is better)", () => { + const thresholds = { warning: 80, critical: 50 }; + const invertColors = false; + + test("should return green for values >= warning 
threshold", () => { + expect(getGaugeColor(100, thresholds, invertColors)).toBe("text-emerald-500"); + expect(getGaugeColor(95, thresholds, invertColors)).toBe("text-emerald-500"); + expect(getGaugeColor(80, thresholds, invertColors)).toBe("text-emerald-500"); + }); + + test("should return amber for values between critical and warning", () => { + expect(getGaugeColor(79, thresholds, invertColors)).toBe("text-amber-500"); + expect(getGaugeColor(65, thresholds, invertColors)).toBe("text-amber-500"); + expect(getGaugeColor(50, thresholds, invertColors)).toBe("text-amber-500"); + }); + + test("should return red for values < critical threshold", () => { + expect(getGaugeColor(49, thresholds, invertColors)).toBe("text-rose-500"); + expect(getGaugeColor(25, thresholds, invertColors)).toBe("text-rose-500"); + expect(getGaugeColor(0, thresholds, invertColors)).toBe("text-rose-500"); + }); + }); + + describe("inverted metrics (lower is better)", () => { + const thresholds = { warning: 10, critical: 5 }; + const invertColors = true; + + test("should return green for values <= critical threshold", () => { + expect(getGaugeColor(0, thresholds, invertColors)).toBe("text-emerald-500"); + expect(getGaugeColor(3, thresholds, invertColors)).toBe("text-emerald-500"); + expect(getGaugeColor(5, thresholds, invertColors)).toBe("text-emerald-500"); + }); + + test("should return amber for values between critical and warning", () => { + expect(getGaugeColor(6, thresholds, invertColors)).toBe("text-amber-500"); + expect(getGaugeColor(8, thresholds, invertColors)).toBe("text-amber-500"); + expect(getGaugeColor(10, thresholds, invertColors)).toBe("text-amber-500"); + }); + + test("should return red for values > warning threshold", () => { + expect(getGaugeColor(11, thresholds, invertColors)).toBe("text-rose-500"); + expect(getGaugeColor(50, thresholds, invertColors)).toBe("text-rose-500"); + expect(getGaugeColor(100, thresholds, invertColors)).toBe("text-rose-500"); + }); + }); +}); + +describe("GaugeCard - getTrendColor logic", () => { + function getTrendColor(direction: "up" | "down" | "stable", invertColors: boolean) { + if (direction === "stable") return "text-muted-foreground bg-muted/50"; + if (invertColors) { + // For inverted metrics, down is good + return direction === "down" + ? "text-emerald-500 bg-emerald-500/10" + : "text-rose-500 bg-rose-500/10"; + } + // For normal metrics, up is good + return direction === "up" + ? 
"text-emerald-500 bg-emerald-500/10" + : "text-rose-500 bg-rose-500/10"; + } + + describe("normal metrics", () => { + const invertColors = false; + + test("should return green for upward trend", () => { + expect(getTrendColor("up", invertColors)).toBe("text-emerald-500 bg-emerald-500/10"); + }); + + test("should return red for downward trend", () => { + expect(getTrendColor("down", invertColors)).toBe("text-rose-500 bg-rose-500/10"); + }); + + test("should return muted for stable trend", () => { + expect(getTrendColor("stable", invertColors)).toBe("text-muted-foreground bg-muted/50"); + }); + }); + + describe("inverted metrics", () => { + const invertColors = true; + + test("should return green for downward trend (lower is better)", () => { + expect(getTrendColor("down", invertColors)).toBe("text-emerald-500 bg-emerald-500/10"); + }); + + test("should return red for upward trend (higher is worse)", () => { + expect(getTrendColor("up", invertColors)).toBe("text-rose-500 bg-rose-500/10"); + }); + + test("should return muted for stable trend", () => { + expect(getTrendColor("stable", invertColors)).toBe("text-muted-foreground bg-muted/50"); + }); + }); +}); + +describe("GaugeCard - SVG calculations", () => { + const sizeConfig = { + sm: { gauge: 64, stroke: 4 }, + md: { gauge: 80, stroke: 5 }, + lg: { gauge: 96, stroke: 6 }, + }; + + test("should calculate correct radius for each size", () => { + for (const [size, config] of Object.entries(sizeConfig)) { + const radius = (config.gauge - config.stroke) / 2; + expect(radius).toBeGreaterThan(0); + // Radius should be less than half the gauge size + expect(radius).toBeLessThan(config.gauge / 2); + } + }); + + test("should calculate correct circumference", () => { + const config = sizeConfig.md; + const radius = (config.gauge - config.stroke) / 2; + const circumference = 2 * Math.PI * radius; + expect(circumference).toBeCloseTo(2 * Math.PI * 37.5, 2); + }); + + test("should calculate correct offset for different values", () => { + const config = sizeConfig.md; + const radius = (config.gauge - config.stroke) / 2; + const circumference = 2 * Math.PI * radius; + + // 0% should have full offset (empty gauge) + const offset0 = circumference - (0 / 100) * circumference; + expect(offset0).toBe(circumference); + + // 100% should have zero offset (full gauge) + const offset100 = circumference - (100 / 100) * circumference; + expect(offset100).toBe(0); + + // 50% should have half offset + const offset50 = circumference - (50 / 100) * circumference; + expect(offset50).toBeCloseTo(circumference / 2, 2); + }); + + test("should clamp values between 0 and 100", () => { + const normalizeValue = (value: number) => Math.min(Math.max(value, 0), 100); + + expect(normalizeValue(-10)).toBe(0); + expect(normalizeValue(0)).toBe(0); + expect(normalizeValue(50)).toBe(50); + expect(normalizeValue(100)).toBe(100); + expect(normalizeValue(150)).toBe(100); + }); +}); diff --git a/tests/unit/dashboard/availability/probe-terminal.test.tsx b/tests/unit/dashboard/availability/probe-terminal.test.tsx new file mode 100644 index 000000000..e2d1f8d18 --- /dev/null +++ b/tests/unit/dashboard/availability/probe-terminal.test.tsx @@ -0,0 +1,247 @@ +/** + * @vitest-environment happy-dom + */ + +import { describe, expect, test } from "vitest"; + +// Test the pure functions extracted from ProbeTerminal component +// These handle log formatting, filtering, and status determination + +describe("ProbeTerminal - formatTime", () => { + function formatTime(date: Date | string): string { + const d 
= typeof date === "string" ? new Date(date) : date; + return d.toLocaleTimeString(undefined, { + hour: "2-digit", + minute: "2-digit", + second: "2-digit", + }); + } + + test("should format Date object correctly", () => { + const date = new Date("2024-01-15T10:30:45Z"); + const result = formatTime(date); + // Result format depends on locale, but should contain time components + expect(result).toMatch(/\d{1,2}:\d{2}:\d{2}/); + }); + + test("should format ISO string correctly", () => { + const result = formatTime("2024-01-15T10:30:45Z"); + expect(result).toMatch(/\d{1,2}:\d{2}:\d{2}/); + }); + + test("should handle different date formats", () => { + const result1 = formatTime("2024-01-15T00:00:00Z"); + const result2 = formatTime("2024-12-31T23:59:59Z"); + expect(result1).toMatch(/\d{1,2}:\d{2}:\d{2}/); + expect(result2).toMatch(/\d{1,2}:\d{2}:\d{2}/); + }); +}); + +describe("ProbeTerminal - formatLatency", () => { + function formatLatency(ms: number | null): string { + if (ms === null) return "-"; + if (ms < 1000) return `${Math.round(ms)}ms`; + return `${(ms / 1000).toFixed(2)}s`; + } + + test("should return dash for null values", () => { + expect(formatLatency(null)).toBe("-"); + }); + + test("should format milliseconds for values < 1000", () => { + expect(formatLatency(0)).toBe("0ms"); + expect(formatLatency(100)).toBe("100ms"); + expect(formatLatency(500)).toBe("500ms"); + expect(formatLatency(999)).toBe("999ms"); + }); + + test("should format seconds for values >= 1000", () => { + expect(formatLatency(1000)).toBe("1.00s"); + expect(formatLatency(1500)).toBe("1.50s"); + expect(formatLatency(2345)).toBe("2.35s"); + expect(formatLatency(10000)).toBe("10.00s"); + }); + + test("should round milliseconds to nearest integer", () => { + expect(formatLatency(100.4)).toBe("100ms"); + expect(formatLatency(100.5)).toBe("101ms"); + expect(formatLatency(100.9)).toBe("101ms"); + }); +}); + +describe("ProbeTerminal - getLogLevel", () => { + type LogLevel = "success" | "error" | "warn"; + + interface ProbeLog { + ok: boolean; + errorType: string | null; + } + + function getLogLevel(log: ProbeLog): LogLevel { + if (log.ok) return "success"; + if (log.errorType === "timeout") return "warn"; + return "error"; + } + + test("should return success for ok logs", () => { + expect(getLogLevel({ ok: true, errorType: null })).toBe("success"); + expect(getLogLevel({ ok: true, errorType: "timeout" })).toBe("success"); + }); + + test("should return warn for timeout errors", () => { + expect(getLogLevel({ ok: false, errorType: "timeout" })).toBe("warn"); + }); + + test("should return error for other failures", () => { + expect(getLogLevel({ ok: false, errorType: null })).toBe("error"); + expect(getLogLevel({ ok: false, errorType: "connection_refused" })).toBe("error"); + expect(getLogLevel({ ok: false, errorType: "ssl_error" })).toBe("error"); + expect(getLogLevel({ ok: false, errorType: "dns_error" })).toBe("error"); + }); +}); + +describe("ProbeTerminal - log filtering", () => { + interface MockLog { + id: number; + errorMessage: string | null; + errorType: string | null; + statusCode: number | null; + } + + function filterLogs(logs: MockLog[], filter: string): MockLog[] { + if (!filter) return logs; + const searchLower = filter.toLowerCase(); + return logs.filter((log) => { + return ( + log.errorMessage?.toLowerCase().includes(searchLower) || + log.errorType?.toLowerCase().includes(searchLower) || + log.statusCode?.toString().includes(searchLower) + ); + }); + } + + const mockLogs: MockLog[] = [ + { id: 1, 
errorMessage: "Connection refused", errorType: "connection_error", statusCode: null }, + { id: 2, errorMessage: null, errorType: "timeout", statusCode: null }, + { id: 3, errorMessage: "SSL certificate error", errorType: "ssl_error", statusCode: null }, + { id: 4, errorMessage: null, errorType: null, statusCode: 200 }, + { id: 5, errorMessage: null, errorType: null, statusCode: 500 }, + { id: 6, errorMessage: "Bad Gateway", errorType: "http_error", statusCode: 502 }, + ]; + + test("should return all logs when filter is empty", () => { + expect(filterLogs(mockLogs, "")).toHaveLength(6); + expect(filterLogs(mockLogs, "")).toEqual(mockLogs); + }); + + test("should filter by error message", () => { + const result = filterLogs(mockLogs, "connection"); + expect(result).toHaveLength(1); + expect(result[0].id).toBe(1); + }); + + test("should filter by error type", () => { + const result = filterLogs(mockLogs, "timeout"); + expect(result).toHaveLength(1); + expect(result[0].id).toBe(2); + }); + + test("should filter by status code", () => { + const result = filterLogs(mockLogs, "500"); + expect(result).toHaveLength(1); + expect(result[0].id).toBe(5); + }); + + test("should be case insensitive", () => { + const result1 = filterLogs(mockLogs, "SSL"); + const result2 = filterLogs(mockLogs, "ssl"); + expect(result1).toHaveLength(1); + expect(result2).toHaveLength(1); + expect(result1[0].id).toBe(result2[0].id); + }); + + test("should match partial strings", () => { + const result = filterLogs(mockLogs, "error"); + // Should match: connection_error, ssl_error, http_error, and "SSL certificate error" + expect(result.length).toBeGreaterThan(0); + }); + + test("should return empty array when no matches", () => { + const result = filterLogs(mockLogs, "nonexistent"); + expect(result).toHaveLength(0); + }); +}); + +describe("ProbeTerminal - levelConfig", () => { + const levelConfig = { + success: { + label: "OK", + color: "text-emerald-500", + bgColor: "bg-emerald-500/5", + borderColor: "border-l-emerald-500", + }, + error: { + label: "FAIL", + color: "text-rose-500", + bgColor: "bg-rose-500/5", + borderColor: "border-l-rose-500", + }, + warn: { + label: "WARN", + color: "text-amber-500", + bgColor: "bg-amber-500/5", + borderColor: "border-l-amber-500", + }, + }; + + test("success level should have green colors", () => { + expect(levelConfig.success.color).toContain("emerald"); + expect(levelConfig.success.bgColor).toContain("emerald"); + expect(levelConfig.success.borderColor).toContain("emerald"); + }); + + test("error level should have red colors", () => { + expect(levelConfig.error.color).toContain("rose"); + expect(levelConfig.error.bgColor).toContain("rose"); + expect(levelConfig.error.borderColor).toContain("rose"); + }); + + test("warn level should have amber colors", () => { + expect(levelConfig.warn.color).toContain("amber"); + expect(levelConfig.warn.bgColor).toContain("amber"); + expect(levelConfig.warn.borderColor).toContain("amber"); + }); + + test("each level should have distinct labels", () => { + expect(levelConfig.success.label).toBe("OK"); + expect(levelConfig.error.label).toBe("FAIL"); + expect(levelConfig.warn.label).toBe("WARN"); + }); +}); + +describe("ProbeTerminal - maxLines slicing", () => { + function sliceLogs(logs: T[], maxLines: number): T[] { + return logs.slice(-maxLines); + } + + test("should return all logs when count <= maxLines", () => { + const logs = [1, 2, 3, 4, 5]; + expect(sliceLogs(logs, 10)).toEqual([1, 2, 3, 4, 5]); + expect(sliceLogs(logs, 5)).toEqual([1, 2, 3, 4, 
5]); + }); + + test("should return last N logs when count > maxLines", () => { + const logs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + expect(sliceLogs(logs, 5)).toEqual([6, 7, 8, 9, 10]); + expect(sliceLogs(logs, 3)).toEqual([8, 9, 10]); + }); + + test("should handle empty array", () => { + expect(sliceLogs([], 10)).toEqual([]); + }); + + test("should handle maxLines of 1", () => { + const logs = [1, 2, 3]; + expect(sliceLogs(logs, 1)).toEqual([3]); + }); +}); diff --git a/tests/unit/dashboard/live-sessions-panel-dynamic-items.test.tsx b/tests/unit/dashboard/live-sessions-panel-dynamic-items.test.tsx new file mode 100644 index 000000000..f690469d6 --- /dev/null +++ b/tests/unit/dashboard/live-sessions-panel-dynamic-items.test.tsx @@ -0,0 +1,253 @@ +/** + * @vitest-environment happy-dom + */ + +import fs from "node:fs"; +import path from "node:path"; +import type { ReactNode } from "react"; +import { act } from "react"; +import { createRoot } from "react-dom/client"; +import { NextIntlClientProvider } from "next-intl"; +import { afterEach, beforeEach, describe, expect, test, vi } from "vitest"; +import { LiveSessionsPanel } from "@/app/[locale]/dashboard/_components/bento/live-sessions-panel"; +import type { ActiveSessionInfo } from "@/types/session"; + +vi.mock("next/navigation", () => ({ + useRouter: () => ({ + push: vi.fn(), + }), +})); + +const customsMessages = JSON.parse( + fs.readFileSync(path.join(process.cwd(), "messages/en/customs.json"), "utf8") +); + +const SESSION_ITEM_HEIGHT = 36; +const HEADER_HEIGHT = 48; +const FOOTER_HEIGHT = 36; + +function createMockSession(id: number): ActiveSessionInfo & { lastActivityAt?: number } { + return { + sessionId: `session_${id}`, + userName: `User ${id}`, + keyName: `key_${id}`, + model: "claude-sonnet-4-5-20250929", + providerName: "anthropic", + status: "in_progress", + startTime: Date.now() - 1000, + inputTokens: 100, + outputTokens: 50, + costUsd: 0.01, + lastActivityAt: Date.now(), + }; +} + +function renderWithIntl(node: ReactNode) { + const container = document.createElement("div"); + document.body.appendChild(container); + const root = createRoot(container); + + act(() => { + root.render( + + {node} + + ); + }); + + return { + container, + unmount: () => { + act(() => root.unmount()); + container.remove(); + }, + }; +} + +describe("LiveSessionsPanel - dynamic maxItems calculation", () => { + let resizeCallback: ResizeObserverCallback | null = null; + let observedElement: Element | null = null; + + beforeEach(() => { + resizeCallback = null; + observedElement = null; + + vi.stubGlobal( + "ResizeObserver", + class MockResizeObserver { + constructor(callback: ResizeObserverCallback) { + resizeCallback = callback; + } + observe(element: Element) { + observedElement = element; + } + unobserve() {} + disconnect() {} + } + ); + }); + + afterEach(() => { + vi.unstubAllGlobals(); + }); + + test("should calculate maxItems based on container height when maxItems prop is not provided", () => { + const sessions = Array.from({ length: 20 }, (_, i) => createMockSession(i + 1)); + + const { container, unmount } = renderWithIntl( + + ); + + const bentoCard = container.querySelector("[class*='flex-col']"); + expect(bentoCard).toBeTruthy(); + + // Simulate container height that can fit 5 items + // Available height = containerHeight - HEADER_HEIGHT - FOOTER_HEIGHT + // Items = floor(availableHeight / SESSION_ITEM_HEIGHT) + // For 5 items: availableHeight = 5 * 36 = 180, containerHeight = 180 + 48 + 36 = 264 + const containerHeight = 264; + + if 
(observedElement && resizeCallback) { + Object.defineProperty(observedElement, "clientHeight", { + value: containerHeight, + configurable: true, + }); + + act(() => { + resizeCallback!([{ target: observedElement } as ResizeObserverEntry], {} as ResizeObserver); + }); + } + + // Count rendered session items (buttons with session info) + const sessionButtons = container.querySelectorAll("button[class*='flex items-center gap-3']"); + expect(sessionButtons.length).toBe(5); + + unmount(); + }); + + test("should show all sessions when container is large enough", () => { + const sessions = Array.from({ length: 3 }, (_, i) => createMockSession(i + 1)); + + const { container, unmount } = renderWithIntl( + + ); + + // Container height for 10 items (more than we have) + const containerHeight = 10 * SESSION_ITEM_HEIGHT + HEADER_HEIGHT + FOOTER_HEIGHT; + + if (observedElement && resizeCallback) { + Object.defineProperty(observedElement, "clientHeight", { + value: containerHeight, + configurable: true, + }); + + act(() => { + resizeCallback!([{ target: observedElement } as ResizeObserverEntry], {} as ResizeObserver); + }); + } + + const sessionButtons = container.querySelectorAll("button[class*='flex items-center gap-3']"); + expect(sessionButtons.length).toBe(3); + + unmount(); + }); + + test("should update displayed items when container resizes", () => { + const sessions = Array.from({ length: 15 }, (_, i) => createMockSession(i + 1)); + + const { container, unmount } = renderWithIntl( + + ); + + // Initial: container fits 4 items + let containerHeight = 4 * SESSION_ITEM_HEIGHT + HEADER_HEIGHT + FOOTER_HEIGHT; + + if (observedElement && resizeCallback) { + Object.defineProperty(observedElement, "clientHeight", { + value: containerHeight, + configurable: true, + }); + + act(() => { + resizeCallback!([{ target: observedElement } as ResizeObserverEntry], {} as ResizeObserver); + }); + } + + let sessionButtons = container.querySelectorAll("button[class*='flex items-center gap-3']"); + expect(sessionButtons.length).toBe(4); + + // Resize: container now fits 8 items + containerHeight = 8 * SESSION_ITEM_HEIGHT + HEADER_HEIGHT + FOOTER_HEIGHT; + + if (observedElement && resizeCallback) { + Object.defineProperty(observedElement, "clientHeight", { + value: containerHeight, + configurable: true, + }); + + act(() => { + resizeCallback!([{ target: observedElement } as ResizeObserverEntry], {} as ResizeObserver); + }); + } + + sessionButtons = container.querySelectorAll("button[class*='flex items-center gap-3']"); + expect(sessionButtons.length).toBe(8); + + unmount(); + }); + + test("should respect maxItems prop as upper limit when provided", () => { + const sessions = Array.from({ length: 20 }, (_, i) => createMockSession(i + 1)); + + const { container, unmount } = renderWithIntl( + + ); + + // Container can fit 10 items, but maxItems is 5 + const containerHeight = 10 * SESSION_ITEM_HEIGHT + HEADER_HEIGHT + FOOTER_HEIGHT; + + if (observedElement && resizeCallback) { + Object.defineProperty(observedElement, "clientHeight", { + value: containerHeight, + configurable: true, + }); + + act(() => { + resizeCallback!([{ target: observedElement } as ResizeObserverEntry], {} as ResizeObserver); + }); + } + + const sessionButtons = container.querySelectorAll("button[class*='flex items-center gap-3']"); + expect(sessionButtons.length).toBe(5); + + unmount(); + }); + + test("should show View All button with correct count", () => { + const sessions = Array.from({ length: 12 }, (_, i) => createMockSession(i + 1)); + + const { 
container, unmount } = renderWithIntl( + + ); + + // Container fits 6 items + const containerHeight = 6 * SESSION_ITEM_HEIGHT + HEADER_HEIGHT + FOOTER_HEIGHT; + + if (observedElement && resizeCallback) { + Object.defineProperty(observedElement, "clientHeight", { + value: containerHeight, + configurable: true, + }); + + act(() => { + resizeCallback!([{ target: observedElement } as ResizeObserverEntry], {} as ResizeObserver); + }); + } + + // Footer should show total count + expect(container.textContent).toContain("View All"); + expect(container.textContent).toContain("(12)"); + + unmount(); + }); +}); diff --git a/tests/unit/lib/proxy-agent/agent-pool.test.ts b/tests/unit/lib/proxy-agent/agent-pool.test.ts new file mode 100644 index 000000000..335057ace --- /dev/null +++ b/tests/unit/lib/proxy-agent/agent-pool.test.ts @@ -0,0 +1,467 @@ +/** + * Agent Pool Tests + * + * TDD: Tests written first, implementation follows + */ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +// Mock undici before importing agent-pool +vi.mock("undici", () => ({ + Agent: vi.fn().mockImplementation((options) => ({ + options, + close: vi.fn().mockResolvedValue(undefined), + destroy: vi.fn().mockResolvedValue(undefined), + })), + ProxyAgent: vi.fn().mockImplementation((options) => ({ + options, + close: vi.fn().mockResolvedValue(undefined), + destroy: vi.fn().mockResolvedValue(undefined), + })), +})); + +vi.mock("fetch-socks", () => ({ + socksDispatcher: vi.fn().mockImplementation((proxy, options) => ({ + proxy, + options, + close: vi.fn().mockResolvedValue(undefined), + destroy: vi.fn().mockResolvedValue(undefined), + })), +})); + +import { + type AgentPool, + AgentPoolImpl, + generateAgentCacheKey, + getGlobalAgentPool, + resetGlobalAgentPool, + type AgentPoolConfig, +} from "@/lib/proxy-agent/agent-pool"; + +describe("generateAgentCacheKey", () => { + it("should generate correct cache key for direct connection", () => { + const key = generateAgentCacheKey({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }); + expect(key).toBe("https://api.anthropic.com|direct|h1"); + }); + + it("should generate correct cache key with proxy", () => { + const key = generateAgentCacheKey({ + endpointUrl: "https://api.openai.com/v1/chat/completions", + proxyUrl: "http://proxy.example.com:8080", + enableHttp2: false, + }); + expect(key).toBe("https://api.openai.com|http://proxy.example.com:8080|h1"); + }); + + it("should generate correct cache key with HTTP/2 enabled", () => { + const key = generateAgentCacheKey({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: true, + }); + expect(key).toBe("https://api.anthropic.com|direct|h2"); + }); + + it("should generate correct cache key with proxy and HTTP/2", () => { + const key = generateAgentCacheKey({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: "https://secure-proxy.example.com:443", + enableHttp2: true, + }); + expect(key).toBe("https://api.anthropic.com|https://secure-proxy.example.com:443|h2"); + }); + + it("should use origin only (strip path and query)", () => { + const key = generateAgentCacheKey({ + endpointUrl: "https://api.anthropic.com/v1/messages?key=value", + proxyUrl: null, + enableHttp2: false, + }); + expect(key).toBe("https://api.anthropic.com|direct|h1"); + }); + + it("should handle different ports", () => { + const key = generateAgentCacheKey({ + endpointUrl: "https://api.example.com:8443/v1/messages", + proxyUrl: null, + 
enableHttp2: false, + }); + expect(key).toBe("https://api.example.com:8443|direct|h1"); + }); + + it("should differentiate HTTP and HTTPS", () => { + const httpKey = generateAgentCacheKey({ + endpointUrl: "http://api.example.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }); + const httpsKey = generateAgentCacheKey({ + endpointUrl: "https://api.example.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }); + expect(httpKey).not.toBe(httpsKey); + expect(httpKey).toBe("http://api.example.com|direct|h1"); + expect(httpsKey).toBe("https://api.example.com|direct|h1"); + }); +}); + +describe("AgentPool", () => { + let pool: AgentPool; + const defaultConfig: AgentPoolConfig = { + maxTotalAgents: 10, + agentTtlMs: 300000, // 5 minutes + connectionIdleTimeoutMs: 60000, // 1 minute + cleanupIntervalMs: 30000, // 30 seconds + }; + + beforeEach(() => { + vi.useFakeTimers(); + pool = new AgentPoolImpl(defaultConfig); + }); + + afterEach(async () => { + await pool.shutdown(); + vi.useRealTimers(); + vi.clearAllMocks(); + }); + + describe("caching behavior", () => { + it("should reuse Agent for same endpoint", async () => { + const params = { + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }; + + const result1 = await pool.getAgent(params); + const result2 = await pool.getAgent(params); + + expect(result1.cacheKey).toBe(result2.cacheKey); + expect(result1.agent).toBe(result2.agent); + expect(result1.isNew).toBe(true); + expect(result2.isNew).toBe(false); + }); + + it("should create different Agent for different endpoints", async () => { + const result1 = await pool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }); + + const result2 = await pool.getAgent({ + endpointUrl: "https://api.openai.com/v1/chat/completions", + proxyUrl: null, + enableHttp2: false, + }); + + expect(result1.cacheKey).not.toBe(result2.cacheKey); + expect(result1.agent).not.toBe(result2.agent); + expect(result1.isNew).toBe(true); + expect(result2.isNew).toBe(true); + }); + + it("should create different Agent for different proxy configs", async () => { + const result1 = await pool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }); + + const result2 = await pool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: "http://proxy.example.com:8080", + enableHttp2: false, + }); + + expect(result1.cacheKey).not.toBe(result2.cacheKey); + expect(result1.agent).not.toBe(result2.agent); + }); + + it("should create different Agent for HTTP/2 vs HTTP/1.1", async () => { + const result1 = await pool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }); + + const result2 = await pool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: true, + }); + + expect(result1.cacheKey).not.toBe(result2.cacheKey); + expect(result1.agent).not.toBe(result2.agent); + }); + + it("should track request count", async () => { + const params = { + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }; + + await pool.getAgent(params); + await pool.getAgent(params); + await pool.getAgent(params); + + const stats = pool.getPoolStats(); + expect(stats.totalRequests).toBe(3); + expect(stats.cacheHits).toBe(2); + expect(stats.cacheMisses).toBe(1); + }); + }); + + describe("health management", () => { + 
it("should create new Agent after marking unhealthy", async () => { + const params = { + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }; + + const result1 = await pool.getAgent(params); + pool.markUnhealthy(result1.cacheKey, "SSL certificate error"); + + const result2 = await pool.getAgent(params); + + expect(result2.isNew).toBe(true); + expect(result2.agent).not.toBe(result1.agent); + }); + + it("should track unhealthy agents in stats", async () => { + const params = { + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }; + + const result = await pool.getAgent(params); + pool.markUnhealthy(result.cacheKey, "SSL certificate error"); + + const stats = pool.getPoolStats(); + expect(stats.unhealthyAgents).toBe(1); + }); + + it("should evict all Agents for endpoint on evictEndpoint", async () => { + // Create agents for same endpoint with different configs + await pool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }); + await pool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: true, + }); + await pool.getAgent({ + endpointUrl: "https://api.openai.com/v1/chat/completions", + proxyUrl: null, + enableHttp2: false, + }); + + const statsBefore = pool.getPoolStats(); + expect(statsBefore.cacheSize).toBe(3); + + await pool.evictEndpoint("https://api.anthropic.com"); + + const statsAfter = pool.getPoolStats(); + expect(statsAfter.cacheSize).toBe(1); + expect(statsAfter.evictedAgents).toBe(2); + }); + }); + + describe("expiration cleanup", () => { + it("should cleanup expired Agents", async () => { + const shortTtlPool = new AgentPoolImpl({ + ...defaultConfig, + agentTtlMs: 1000, // 1 second TTL + }); + + await shortTtlPool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }); + + expect(shortTtlPool.getPoolStats().cacheSize).toBe(1); + + // Advance time past TTL + vi.advanceTimersByTime(2000); + + const cleaned = await shortTtlPool.cleanup(); + expect(cleaned).toBe(1); + expect(shortTtlPool.getPoolStats().cacheSize).toBe(0); + + await shortTtlPool.shutdown(); + }); + + it("should not cleanup recently used Agents", async () => { + const shortTtlPool = new AgentPoolImpl({ + ...defaultConfig, + agentTtlMs: 1000, + }); + + const params = { + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }; + + await shortTtlPool.getAgent(params); + + // Advance time but not past TTL + vi.advanceTimersByTime(500); + + // Use the agent again (updates lastUsedAt) + await shortTtlPool.getAgent(params); + + // Advance time again + vi.advanceTimersByTime(500); + + const cleaned = await shortTtlPool.cleanup(); + expect(cleaned).toBe(0); + expect(shortTtlPool.getPoolStats().cacheSize).toBe(1); + + await shortTtlPool.shutdown(); + }); + + it("should implement LRU eviction when max size reached", async () => { + const smallPool = new AgentPoolImpl({ + ...defaultConfig, + maxTotalAgents: 2, + }); + + // Create 3 agents (exceeds max of 2) + await smallPool.getAgent({ + endpointUrl: "https://api1.example.com/v1", + proxyUrl: null, + enableHttp2: false, + }); + + vi.advanceTimersByTime(100); + + await smallPool.getAgent({ + endpointUrl: "https://api2.example.com/v1", + proxyUrl: null, + enableHttp2: false, + }); + + vi.advanceTimersByTime(100); + + await smallPool.getAgent({ + endpointUrl: 
"https://api3.example.com/v1", + proxyUrl: null, + enableHttp2: false, + }); + + // Should have evicted the oldest (LRU) + const stats = smallPool.getPoolStats(); + expect(stats.cacheSize).toBeLessThanOrEqual(2); + + await smallPool.shutdown(); + }); + }); + + describe("proxy support", () => { + it("should create ProxyAgent for HTTP proxy", async () => { + const result = await pool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: "http://proxy.example.com:8080", + enableHttp2: false, + }); + + expect(result.isNew).toBe(true); + expect(result.cacheKey).toContain("http://proxy.example.com:8080"); + }); + + it("should create SOCKS dispatcher for SOCKS proxy", async () => { + const result = await pool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: "socks5://proxy.example.com:1080", + enableHttp2: false, + }); + + expect(result.isNew).toBe(true); + expect(result.cacheKey).toContain("socks5://proxy.example.com:1080"); + }); + }); + + describe("pool stats", () => { + it("should return accurate pool statistics", async () => { + await pool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }); + + await pool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }); + + await pool.getAgent({ + endpointUrl: "https://api.openai.com/v1/chat/completions", + proxyUrl: null, + enableHttp2: false, + }); + + const stats = pool.getPoolStats(); + + expect(stats.cacheSize).toBe(2); + expect(stats.totalRequests).toBe(3); + expect(stats.cacheHits).toBe(1); + expect(stats.cacheMisses).toBe(2); + expect(stats.hitRate).toBeCloseTo(1 / 3, 2); + }); + }); + + describe("shutdown", () => { + it("should close all agents on shutdown", async () => { + await pool.getAgent({ + endpointUrl: "https://api.anthropic.com/v1/messages", + proxyUrl: null, + enableHttp2: false, + }); + + await pool.getAgent({ + endpointUrl: "https://api.openai.com/v1/chat/completions", + proxyUrl: null, + enableHttp2: false, + }); + + await pool.shutdown(); + + const stats = pool.getPoolStats(); + expect(stats.cacheSize).toBe(0); + }); + }); +}); + +describe("getGlobalAgentPool", () => { + afterEach(async () => { + await resetGlobalAgentPool(); + }); + + it("should return singleton instance", () => { + const pool1 = getGlobalAgentPool(); + const pool2 = getGlobalAgentPool(); + + expect(pool1).toBe(pool2); + }); + + it("should create new instance after reset", async () => { + const pool1 = getGlobalAgentPool(); + await resetGlobalAgentPool(); + const pool2 = getGlobalAgentPool(); + + expect(pool1).not.toBe(pool2); + }); +}); diff --git a/tests/unit/lib/proxy-agent/get-proxy-agent.test.ts b/tests/unit/lib/proxy-agent/get-proxy-agent.test.ts new file mode 100644 index 000000000..3015cab2a --- /dev/null +++ b/tests/unit/lib/proxy-agent/get-proxy-agent.test.ts @@ -0,0 +1,346 @@ +/** + * getProxyAgentForProvider Tests + * + * TDD: Tests written first, implementation follows + */ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import type { Provider } from "@/types/provider"; + +// Create mock objects outside the mock factory +const mockAgent = { + close: vi.fn().mockResolvedValue(undefined), + destroy: vi.fn().mockResolvedValue(undefined), +}; + +const mockPool = { + getAgent: vi.fn().mockResolvedValue({ + agent: mockAgent, + isNew: true, + cacheKey: "https://api.anthropic.com|direct|h1", + }), + markUnhealthy: vi.fn(), + evictEndpoint: 
vi.fn().mockResolvedValue(undefined),
+  getPoolStats: vi.fn().mockReturnValue({
+    cacheSize: 1,
+    totalRequests: 1,
+    cacheHits: 0,
+    cacheMisses: 1,
+    hitRate: 0,
+    unhealthyAgents: 0,
+    evictedAgents: 0,
+  }),
+  cleanup: vi.fn().mockResolvedValue(0),
+  shutdown: vi.fn().mockResolvedValue(undefined),
+};
+
+// Mock the agent pool module
+vi.mock("@/lib/proxy-agent/agent-pool", () => ({
+  getGlobalAgentPool: vi.fn(() => mockPool),
+  resetGlobalAgentPool: vi.fn().mockResolvedValue(undefined),
+  generateAgentCacheKey: vi.fn().mockImplementation((params) => {
+    const url = new URL(params.endpointUrl);
+    const proxy = params.proxyUrl || "direct";
+    const protocol = params.enableHttp2 ? "h2" : "h1";
+    return `${url.origin}|${proxy}|${protocol}`;
+  }),
+  AgentPoolImpl: vi.fn(),
+}));
+
+// Import after mock setup
+import { getProxyAgentForProvider, type ProxyConfigWithCacheKey } from "@/lib/proxy-agent";
+
+describe("getProxyAgentForProvider", () => {
+  beforeEach(() => {
+    vi.clearAllMocks();
+    // Reset default mock return value
+    mockPool.getAgent.mockResolvedValue({
+      agent: mockAgent,
+      isNew: true,
+      cacheKey: "https://api.anthropic.com|direct|h1",
+    });
+  });
+
+  afterEach(() => {
+    vi.clearAllMocks();
+  });
+
+  describe("direct connection (no proxy)", () => {
+    it("should return null when provider has no proxy configured", async () => {
+      const provider: Partial<Provider> = {
+        id: 1,
+        name: "Test Provider",
+        proxyUrl: null,
+        proxyFallbackToDirect: false,
+      };
+
+      const result = await getProxyAgentForProvider(
+        provider as Provider,
+        "https://api.anthropic.com/v1/messages",
+        false
+      );
+
+      expect(result).toBeNull();
+      expect(mockPool.getAgent).not.toHaveBeenCalled();
+    });
+
+    it("should return null when proxyUrl is empty string", async () => {
+      const provider: Partial<Provider> = {
+        id: 1,
+        name: "Test Provider",
+        proxyUrl: "",
+        proxyFallbackToDirect: false,
+      };
+
+      const result = await getProxyAgentForProvider(
+        provider as Provider,
+        "https://api.anthropic.com/v1/messages",
+        false
+      );
+
+      expect(result).toBeNull();
+    });
+
+    it("should return null when proxyUrl is whitespace only", async () => {
+      const provider: Partial<Provider> = {
+        id: 1,
+        name: "Test Provider",
+        proxyUrl: "   ",
+        proxyFallbackToDirect: false,
+      };
+
+      const result = await getProxyAgentForProvider(
+        provider as Provider,
+        "https://api.anthropic.com/v1/messages",
+        false
+      );
+
+      expect(result).toBeNull();
+    });
+  });
+
+  describe("with proxy configured", () => {
+    it("should return ProxyConfig with cacheKey for HTTP proxy", async () => {
+      const provider: Partial<Provider> = {
+        id: 1,
+        name: "Test Provider",
+        proxyUrl: "http://proxy.example.com:8080",
+        proxyFallbackToDirect: false,
+      };
+
+      mockPool.getAgent.mockResolvedValueOnce({
+        agent: mockAgent,
+        isNew: true,
+        cacheKey: "https://api.anthropic.com|http://proxy.example.com:8080|h1",
+      });
+
+      const result = await getProxyAgentForProvider(
+        provider as Provider,
+        "https://api.anthropic.com/v1/messages",
+        false
+      );
+
+      expect(result).not.toBeNull();
+      expect(result?.cacheKey).toBe("https://api.anthropic.com|http://proxy.example.com:8080|h1");
+      expect(result?.fallbackToDirect).toBe(false);
+      expect(result?.http2Enabled).toBe(false);
+      expect(mockPool.getAgent).toHaveBeenCalledWith({
+        endpointUrl: "https://api.anthropic.com/v1/messages",
+        proxyUrl: "http://proxy.example.com:8080",
+        enableHttp2: false,
+      });
+    });
+
+    it("should return ProxyConfig with HTTP/2 enabled", async () => {
+      const provider: Partial<Provider> = {
+        id: 1,
+        name: "Test Provider",
+        proxyUrl: "http://proxy.example.com:8080",
+        proxyFallbackToDirect: true,
+      };
+
+      mockPool.getAgent.mockResolvedValueOnce({
+        agent: mockAgent,
+        isNew: true,
+        cacheKey: "https://api.anthropic.com|http://proxy.example.com:8080|h2",
+      });
+
+      const result = await getProxyAgentForProvider(
+        provider as Provider,
+        "https://api.anthropic.com/v1/messages",
+        true
+      );
+
+      expect(result).not.toBeNull();
+      expect(result?.http2Enabled).toBe(true);
+      expect(result?.fallbackToDirect).toBe(true);
+      expect(mockPool.getAgent).toHaveBeenCalledWith({
+        endpointUrl: "https://api.anthropic.com/v1/messages",
+        proxyUrl: "http://proxy.example.com:8080",
+        enableHttp2: true,
+      });
+    });
+
+    it("should handle SOCKS proxy", async () => {
+      const provider: Partial<Provider> = {
+        id: 1,
+        name: "Test Provider",
+        proxyUrl: "socks5://proxy.example.com:1080",
+        proxyFallbackToDirect: false,
+      };
+
+      mockPool.getAgent.mockResolvedValueOnce({
+        agent: mockAgent,
+        isNew: true,
+        cacheKey: "https://api.anthropic.com|socks5://proxy.example.com:1080|h1",
+      });
+
+      const result = await getProxyAgentForProvider(
+        provider as Provider,
+        "https://api.anthropic.com/v1/messages",
+        false
+      );
+
+      expect(result).not.toBeNull();
+      expect(result?.cacheKey).toContain("socks5://");
+    });
+
+    it("should disable HTTP/2 for SOCKS proxy even when requested", async () => {
+      const provider: Partial<Provider> = {
+        id: 1,
+        name: "Test Provider",
+        proxyUrl: "socks5://proxy.example.com:1080",
+        proxyFallbackToDirect: false,
+      };
+
+      mockPool.getAgent.mockResolvedValueOnce({
+        agent: mockAgent,
+        isNew: true,
+        cacheKey: "https://api.anthropic.com|socks5://proxy.example.com:1080|h1",
+      });
+
+      const result = await getProxyAgentForProvider(
+        provider as Provider,
+        "https://api.anthropic.com/v1/messages",
+        true // Request HTTP/2
+      );
+
+      expect(result).not.toBeNull();
+      expect(result?.http2Enabled).toBe(false); // Should be false for SOCKS
+    });
+
+    it("should mask proxy URL in result", async () => {
+      const provider: Partial<Provider> = {
+        id: 1,
+        name: "Test Provider",
+        proxyUrl: "http://user:password@proxy.example.com:8080",
+        proxyFallbackToDirect: false,
+      };
+
+      mockPool.getAgent.mockResolvedValueOnce({
+        agent: mockAgent,
+        isNew: true,
+        cacheKey: "https://api.anthropic.com|http://user:password@proxy.example.com:8080|h1",
+      });
+
+      const result = await getProxyAgentForProvider(
+        provider as Provider,
+        "https://api.anthropic.com/v1/messages",
+        false
+      );
+
+      expect(result).not.toBeNull();
+      // proxyUrl should be masked (password hidden)
+      expect(result?.proxyUrl).not.toContain("password");
+      expect(result?.proxyUrl).toContain("***");
+    });
+  });
+
+  describe("ProviderProxyConfig interface", () => {
+    it("should work with minimal ProviderProxyConfig", async () => {
+      const config = {
+        id: 1,
+        proxyUrl: "http://proxy.example.com:8080",
+        proxyFallbackToDirect: false,
+      };
+
+      mockPool.getAgent.mockResolvedValueOnce({
+        agent: mockAgent,
+        isNew: true,
+        cacheKey: "https://api.anthropic.com|http://proxy.example.com:8080|h1",
+      });
+
+      const result = await getProxyAgentForProvider(
+        config,
+        "https://api.anthropic.com/v1/messages",
+        false
+      );
+
+      expect(result).not.toBeNull();
+      expect(result?.cacheKey).toBeDefined();
+    });
+
+    it("should work with optional name field", async () => {
+      const config = {
+        id: 1,
+        name: "My Proxy",
+        proxyUrl: "http://proxy.example.com:8080",
+        proxyFallbackToDirect: true,
+      };
+
+      mockPool.getAgent.mockResolvedValueOnce({
+        agent: mockAgent,
+        isNew: true,
+        cacheKey: "https://api.anthropic.com|http://proxy.example.com:8080|h1",
+      });
+
+      const result = await getProxyAgentForProvider(
+        config,
+        "https://api.anthropic.com/v1/messages",
+        false
+      );
+
+      expect(result).not.toBeNull();
+      expect(result?.fallbackToDirect).toBe(true);
+    });
+  });
+
+  describe("error handling", () => {
+    it("should throw on invalid proxy URL", async () => {
+      const provider: Partial<Provider> = {
+        id: 1,
+        name: "Test Provider",
+        proxyUrl: "not-a-valid-url",
+        proxyFallbackToDirect: false,
+      };
+
+      mockPool.getAgent.mockRejectedValueOnce(new Error("Invalid URL"));
+
+      await expect(
+        getProxyAgentForProvider(
+          provider as Provider,
+          "https://api.anthropic.com/v1/messages",
+          false
+        )
+      ).rejects.toThrow();
+    });
+
+    it("should throw on unsupported proxy protocol", async () => {
+      const provider: Partial<Provider> = {
+        id: 1,
+        name: "Test Provider",
+        proxyUrl: "ftp://proxy.example.com:21",
+        proxyFallbackToDirect: false,
+      };
+
+      mockPool.getAgent.mockRejectedValueOnce(new Error("Unsupported proxy protocol"));
+
+      await expect(
+        getProxyAgentForProvider(
+          provider as Provider,
+          "https://api.anthropic.com/v1/messages",
+          false
+        )
+      ).rejects.toThrow();
+    });
+  });
+});
diff --git a/tests/unit/lib/session-status.test.ts b/tests/unit/lib/session-status.test.ts
new file mode 100644
index 000000000..1c5fa7e68
--- /dev/null
+++ b/tests/unit/lib/session-status.test.ts
@@ -0,0 +1,255 @@
+import { describe, expect, test, vi } from "vitest";
+
+vi.mock("@/lib/logger", () => ({
+  logger: {
+    trace: vi.fn(),
+    debug: vi.fn(),
+    info: vi.fn(),
+    warn: vi.fn(),
+    error: vi.fn(),
+  },
+}));
+
+import {
+  getSessionDisplayStatus,
+  SESSION_DISPLAY_STATUS,
+  type SessionStatusInput,
+} from "@/lib/session-status";
+
+describe("Session Status Logic", () => {
+  describe("getSessionDisplayStatus", () => {
+    test("IDLE: concurrentCount is 0 with no requests", () => {
+      const input: SessionStatusInput = {
+        concurrentCount: 0,
+        requestCount: 0,
+        status: "completed",
+      };
+
+      const result = getSessionDisplayStatus(input);
+
+      expect(result.status).toBe(SESSION_DISPLAY_STATUS.IDLE);
+      expect(result.label).toBe("IDLE");
+      expect(result.pulse).toBe(false);
+      expect(result.tooltipKey).toBe("status.idleTooltip");
+    });
+
+    test("IDLE: concurrentCount is 0 with completed requests", () => {
+      const input: SessionStatusInput = {
+        concurrentCount: 0,
+        requestCount: 5,
+        status: "completed",
+      };
+
+      const result = getSessionDisplayStatus(input);
+
+      expect(result.status).toBe(SESSION_DISPLAY_STATUS.IDLE);
+      expect(result.label).toBe("IDLE");
+      expect(result.pulse).toBe(false);
+    });
+
+    test("INITIALIZING: first request still running (requestCount=0, concurrentCount>0)", () => {
+      const input: SessionStatusInput = {
+        concurrentCount: 1,
+        requestCount: 0,
+        status: "in_progress",
+      };
+
+      const result = getSessionDisplayStatus(input);
+
+      expect(result.status).toBe(SESSION_DISPLAY_STATUS.INITIALIZING);
+      expect(result.label).toBe("INITIALIZING");
+      expect(result.pulse).toBe(true);
+      expect(result.tooltipKey).toBe("status.initializingTooltip");
+      expect(result.color).toContain("amber");
+    });
+
+    test("INITIALIZING: first request still running (requestCount=1, concurrentCount>0)", () => {
+      const input: SessionStatusInput = {
+        concurrentCount: 1,
+        requestCount: 1,
+        status: "in_progress",
+      };
+
+      const result = getSessionDisplayStatus(input);
+
+      expect(result.status).toBe(SESSION_DISPLAY_STATUS.INITIALIZING);
+      expect(result.label).toBe("INITIALIZING");
+      expect(result.pulse).toBe(true);
+    });
+
test("IN_PROGRESS: has active requests after first (requestCount>1, concurrentCount>0)", () => { + const input: SessionStatusInput = { + concurrentCount: 2, + requestCount: 5, + status: "in_progress", + }; + + const result = getSessionDisplayStatus(input); + + expect(result.status).toBe(SESSION_DISPLAY_STATUS.IN_PROGRESS); + expect(result.label).toBe("IN_PROGRESS"); + expect(result.pulse).toBe(true); + expect(result.tooltipKey).toBe("status.inProgressTooltip"); + expect(result.color).toContain("emerald"); + }); + + test("IN_PROGRESS: single active request after first completed", () => { + const input: SessionStatusInput = { + concurrentCount: 1, + requestCount: 2, + status: "in_progress", + }; + + const result = getSessionDisplayStatus(input); + + expect(result.status).toBe(SESSION_DISPLAY_STATUS.IN_PROGRESS); + expect(result.label).toBe("IN_PROGRESS"); + expect(result.pulse).toBe(true); + }); + + test("ERROR: status is error takes priority", () => { + const input: SessionStatusInput = { + concurrentCount: 1, + requestCount: 3, + status: "error", + }; + + const result = getSessionDisplayStatus(input); + + expect(result.status).toBe(SESSION_DISPLAY_STATUS.IN_PROGRESS); + expect(result.label).toBe("ERROR"); + expect(result.pulse).toBe(true); + expect(result.tooltipKey).toBe("status.errorTooltip"); + expect(result.color).toContain("rose"); + }); + + test("ERROR: status is error even with no concurrent requests", () => { + const input: SessionStatusInput = { + concurrentCount: 0, + requestCount: 5, + status: "error", + }; + + const result = getSessionDisplayStatus(input); + + expect(result.label).toBe("ERROR"); + expect(result.pulse).toBe(true); + }); + + test("handles undefined values with defaults", () => { + const input: SessionStatusInput = {}; + + const result = getSessionDisplayStatus(input); + + expect(result.status).toBe(SESSION_DISPLAY_STATUS.IDLE); + expect(result.label).toBe("IDLE"); + expect(result.pulse).toBe(false); + }); + + test("handles partial input with only concurrentCount", () => { + const input: SessionStatusInput = { + concurrentCount: 1, + }; + + const result = getSessionDisplayStatus(input); + + expect(result.status).toBe(SESSION_DISPLAY_STATUS.INITIALIZING); + expect(result.label).toBe("INITIALIZING"); + }); + + test("handles partial input with only requestCount", () => { + const input: SessionStatusInput = { + requestCount: 10, + }; + + const result = getSessionDisplayStatus(input); + + expect(result.status).toBe(SESSION_DISPLAY_STATUS.IDLE); + expect(result.label).toBe("IDLE"); + }); + + test("high concurrency scenario", () => { + const input: SessionStatusInput = { + concurrentCount: 50, + requestCount: 100, + status: "in_progress", + }; + + const result = getSessionDisplayStatus(input); + + expect(result.status).toBe(SESSION_DISPLAY_STATUS.IN_PROGRESS); + expect(result.label).toBe("IN_PROGRESS"); + expect(result.pulse).toBe(true); + }); + }); + + describe("SESSION_DISPLAY_STATUS constants", () => { + test("constants are uppercase strings", () => { + expect(SESSION_DISPLAY_STATUS.IN_PROGRESS).toBe("IN_PROGRESS"); + expect(SESSION_DISPLAY_STATUS.IDLE).toBe("IDLE"); + expect(SESSION_DISPLAY_STATUS.INITIALIZING).toBe("INITIALIZING"); + }); + + test("constants are readonly", () => { + expect(Object.isFrozen(SESSION_DISPLAY_STATUS)).toBe(false); + expect(typeof SESSION_DISPLAY_STATUS).toBe("object"); + }); + }); + + describe("status transition scenarios", () => { + test("session lifecycle: new -> initializing -> in_progress -> idle", () => { + // New session, no 
requests yet + const newSession: SessionStatusInput = { + concurrentCount: 0, + requestCount: 0, + }; + expect(getSessionDisplayStatus(newSession).status).toBe(SESSION_DISPLAY_STATUS.IDLE); + + // First request starts + const initializing: SessionStatusInput = { + concurrentCount: 1, + requestCount: 0, + }; + expect(getSessionDisplayStatus(initializing).status).toBe( + SESSION_DISPLAY_STATUS.INITIALIZING + ); + + // First request completes, second starts + const inProgress: SessionStatusInput = { + concurrentCount: 1, + requestCount: 2, + }; + expect(getSessionDisplayStatus(inProgress).status).toBe(SESSION_DISPLAY_STATUS.IN_PROGRESS); + + // All requests complete + const idle: SessionStatusInput = { + concurrentCount: 0, + requestCount: 10, + }; + expect(getSessionDisplayStatus(idle).status).toBe(SESSION_DISPLAY_STATUS.IDLE); + }); + + test("error can occur at any stage", () => { + const errorDuringInit: SessionStatusInput = { + concurrentCount: 1, + requestCount: 0, + status: "error", + }; + expect(getSessionDisplayStatus(errorDuringInit).label).toBe("ERROR"); + + const errorDuringProgress: SessionStatusInput = { + concurrentCount: 3, + requestCount: 10, + status: "error", + }; + expect(getSessionDisplayStatus(errorDuringProgress).label).toBe("ERROR"); + + const errorAfterComplete: SessionStatusInput = { + concurrentCount: 0, + requestCount: 5, + status: "error", + }; + expect(getSessionDisplayStatus(errorAfterComplete).label).toBe("ERROR"); + }); + }); +}); diff --git a/tests/unit/proxy/proxy-forwarder-retry-limit.test.ts b/tests/unit/proxy/proxy-forwarder-retry-limit.test.ts new file mode 100644 index 000000000..b3276cc55 --- /dev/null +++ b/tests/unit/proxy/proxy-forwarder-retry-limit.test.ts @@ -0,0 +1,626 @@ +import { beforeEach, describe, expect, test, vi } from "vitest"; + +const mocks = vi.hoisted(() => { + return { + getPreferredProviderEndpoints: vi.fn(), + recordEndpointSuccess: vi.fn(async () => {}), + recordEndpointFailure: vi.fn(async () => {}), + recordSuccess: vi.fn(), + recordFailure: vi.fn(async () => {}), + getCircuitState: vi.fn(() => "closed"), + getProviderHealthInfo: vi.fn(async () => ({ + health: { failureCount: 0 }, + config: { failureThreshold: 3 }, + })), + isVendorTypeCircuitOpen: vi.fn(async () => false), + recordVendorTypeAllEndpointsTimeout: vi.fn(async () => {}), + findAllProviders: vi.fn(async () => []), + getCachedProviders: vi.fn(async () => []), + }; +}); + +vi.mock("@/lib/logger", () => ({ + logger: { + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + trace: vi.fn(), + error: vi.fn(), + fatal: vi.fn(), + }, +})); + +vi.mock("@/lib/provider-endpoints/endpoint-selector", () => ({ + getPreferredProviderEndpoints: mocks.getPreferredProviderEndpoints, +})); + +vi.mock("@/lib/endpoint-circuit-breaker", () => ({ + recordEndpointSuccess: mocks.recordEndpointSuccess, + recordEndpointFailure: mocks.recordEndpointFailure, +})); + +vi.mock("@/lib/circuit-breaker", () => ({ + getCircuitState: mocks.getCircuitState, + getProviderHealthInfo: mocks.getProviderHealthInfo, + recordSuccess: mocks.recordSuccess, + recordFailure: mocks.recordFailure, +})); + +vi.mock("@/lib/vendor-type-circuit-breaker", () => ({ + isVendorTypeCircuitOpen: mocks.isVendorTypeCircuitOpen, + recordVendorTypeAllEndpointsTimeout: mocks.recordVendorTypeAllEndpointsTimeout, +})); + +vi.mock("@/repository/provider", () => ({ + findAllProviders: mocks.findAllProviders, +})); + +vi.mock("@/lib/cache/provider-cache", () => ({ + getCachedProviders: mocks.getCachedProviders, +})); + 
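+// Note: categorizeErrorAsync below is stubbed to always return PROVIDER_ERROR, so every simulated failure is treated as a retryable provider error in these tests.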
+vi.mock("@/app/v1/_lib/proxy/errors", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + categorizeErrorAsync: vi.fn(async () => actual.ErrorCategory.PROVIDER_ERROR), + }; +}); + +import { ProxyForwarder } from "@/app/v1/_lib/proxy/forwarder"; +import { ProxyError } from "@/app/v1/_lib/proxy/errors"; +import { ProxySession } from "@/app/v1/_lib/proxy/session"; +import type { Provider, ProviderEndpoint, ProviderType } from "@/types/provider"; + +function makeEndpoint(input: { + id: number; + vendorId: number; + providerType: ProviderType; + url: string; + lastProbeLatencyMs?: number | null; +}): ProviderEndpoint { + const now = new Date("2026-01-01T00:00:00.000Z"); + return { + id: input.id, + vendorId: input.vendorId, + providerType: input.providerType, + url: input.url, + label: null, + sortOrder: 0, + isEnabled: true, + lastProbedAt: null, + lastProbeOk: true, + lastProbeStatusCode: 200, + lastProbeLatencyMs: input.lastProbeLatencyMs ?? null, + lastProbeErrorType: null, + lastProbeErrorMessage: null, + createdAt: now, + updatedAt: now, + deletedAt: null, + }; +} + +function createProvider(overrides: Partial = {}): Provider { + return { + id: 1, + name: "test-provider", + url: "https://provider.example.com", + key: "test-key", + providerVendorId: 123, + isEnabled: true, + weight: 1, + priority: 0, + costMultiplier: 1, + groupTag: null, + providerType: "claude", + preserveClientIp: false, + modelRedirects: null, + allowedModels: null, + joinClaudePool: false, + codexInstructionsStrategy: "auto", + mcpPassthroughType: "none", + mcpPassthroughUrl: null, + limit5hUsd: null, + limitDailyUsd: null, + dailyResetMode: "fixed", + dailyResetTime: "00:00", + limitWeeklyUsd: null, + limitMonthlyUsd: null, + limitTotalUsd: null, + totalCostResetAt: null, + limitConcurrentSessions: 0, + maxRetryAttempts: null, + circuitBreakerFailureThreshold: 5, + circuitBreakerOpenDuration: 1_800_000, + circuitBreakerHalfOpenSuccessThreshold: 2, + proxyUrl: null, + proxyFallbackToDirect: false, + firstByteTimeoutStreamingMs: 30_000, + streamingIdleTimeoutMs: 10_000, + requestTimeoutNonStreamingMs: 600_000, + websiteUrl: null, + faviconUrl: null, + cacheTtlPreference: null, + context1mPreference: null, + codexReasoningEffortPreference: null, + codexReasoningSummaryPreference: null, + codexTextVerbosityPreference: null, + codexParallelToolCallsPreference: null, + tpm: 0, + rpm: 0, + rpd: 0, + cc: 0, + createdAt: new Date(), + updatedAt: new Date(), + deletedAt: null, + ...overrides, + }; +} + +function createSession(requestUrl: URL = new URL("https://example.com/v1/messages")): ProxySession { + const headers = new Headers(); + const session = Object.create(ProxySession.prototype); + + Object.assign(session, { + startTime: Date.now(), + method: "POST", + requestUrl, + headers, + originalHeaders: new Headers(headers), + headerLog: JSON.stringify(Object.fromEntries(headers.entries())), + request: { + model: "claude-3-opus", + log: "(test)", + message: { + model: "claude-3-opus", + messages: [{ role: "user", content: "hello" }], + }, + }, + userAgent: null, + context: null, + clientAbortSignal: null, + userName: "test-user", + authState: { success: true, user: null, key: null, apiKey: null }, + provider: null, + messageContext: null, + sessionId: null, + requestSequence: 1, + originalFormat: "claude", + providerType: null, + originalModelName: null, + originalUrlPathname: null, + providerChain: [], + cacheTtlResolved: null, + context1mApplied: false, + specialSettings: [], + 
cachedPriceData: undefined, + cachedBillingModelSource: undefined, + isHeaderModified: () => false, + }); + + return session as ProxySession; +} + +describe("ProxyForwarder - retry limit enforcement", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + test("endpoints > maxRetry: should only use top N lowest-latency endpoints", async () => { + vi.useFakeTimers(); + + try { + const session = createSession(); + // Configure provider with maxRetryAttempts=2 but 4 endpoints available + const provider = createProvider({ + providerType: "claude", + providerVendorId: 123, + maxRetryAttempts: 2, + }); + session.setProvider(provider); + + // Return 4 endpoints sorted by latency (lowest first) + mocks.getPreferredProviderEndpoints.mockResolvedValue([ + makeEndpoint({ + id: 1, + vendorId: 123, + providerType: "claude", + url: "https://ep1.example.com", + lastProbeLatencyMs: 100, + }), + makeEndpoint({ + id: 2, + vendorId: 123, + providerType: "claude", + url: "https://ep2.example.com", + lastProbeLatencyMs: 200, + }), + makeEndpoint({ + id: 3, + vendorId: 123, + providerType: "claude", + url: "https://ep3.example.com", + lastProbeLatencyMs: 300, + }), + makeEndpoint({ + id: 4, + vendorId: 123, + providerType: "claude", + url: "https://ep4.example.com", + lastProbeLatencyMs: 400, + }), + ]); + + const doForward = vi.spyOn( + ProxyForwarder as unknown as { doForward: (...args: unknown[]) => unknown }, + "doForward" + ); + + // First attempt fails, second succeeds + doForward.mockImplementationOnce(async () => { + throw new ProxyError("endpoint 1 failed", 500); + }); + doForward.mockResolvedValueOnce( + new Response("{}", { + status: 200, + headers: { "content-type": "application/json", "content-length": "2" }, + }) + ); + + const sendPromise = ProxyForwarder.send(session); + await vi.advanceTimersByTimeAsync(100); + const response = await sendPromise; + + expect(response.status).toBe(200); + // Should only call doForward twice (maxRetryAttempts=2) + expect(doForward).toHaveBeenCalledTimes(2); + + const chain = session.getProviderChain(); + expect(chain).toHaveLength(2); + + // First attempt should use endpoint 1 (lowest latency) + expect(chain[0].endpointId).toBe(1); + expect(chain[0].attemptNumber).toBe(1); + + // Second attempt should use endpoint 2 (second lowest latency) + expect(chain[1].endpointId).toBe(2); + expect(chain[1].attemptNumber).toBe(2); + + // Endpoints 3 and 4 should NOT be used + } finally { + vi.useRealTimers(); + } + }); + + test("endpoints < maxRetry: should cycle through all endpoints up to maxRetry times", async () => { + vi.useFakeTimers(); + + try { + const session = createSession(); + // Configure provider with maxRetryAttempts=5 but only 2 endpoints + const provider = createProvider({ + providerType: "claude", + providerVendorId: 123, + maxRetryAttempts: 5, + }); + session.setProvider(provider); + + mocks.getPreferredProviderEndpoints.mockResolvedValue([ + makeEndpoint({ + id: 1, + vendorId: 123, + providerType: "claude", + url: "https://ep1.example.com", + lastProbeLatencyMs: 100, + }), + makeEndpoint({ + id: 2, + vendorId: 123, + providerType: "claude", + url: "https://ep2.example.com", + lastProbeLatencyMs: 200, + }), + ]); + + const doForward = vi.spyOn( + ProxyForwarder as unknown as { doForward: (...args: unknown[]) => unknown }, + "doForward" + ); + + // All attempts fail except the last one + doForward.mockImplementation(async () => { + throw new ProxyError("failed", 500); + }); + // 5th attempt succeeds + doForward.mockImplementationOnce(async () => { + 
throw new ProxyError("failed", 500); + }); + doForward.mockImplementationOnce(async () => { + throw new ProxyError("failed", 500); + }); + doForward.mockImplementationOnce(async () => { + throw new ProxyError("failed", 500); + }); + doForward.mockImplementationOnce(async () => { + throw new ProxyError("failed", 500); + }); + doForward.mockResolvedValueOnce( + new Response("{}", { + status: 200, + headers: { "content-type": "application/json", "content-length": "2" }, + }) + ); + + const sendPromise = ProxyForwarder.send(session); + await vi.advanceTimersByTimeAsync(500); + const response = await sendPromise; + + expect(response.status).toBe(200); + // Should call doForward 5 times (maxRetryAttempts=5) + expect(doForward).toHaveBeenCalledTimes(5); + + const chain = session.getProviderChain(); + expect(chain).toHaveLength(5); + + // Verify cycling pattern: 1, 2, 1, 2, 1 + expect(chain[0].endpointId).toBe(1); + expect(chain[1].endpointId).toBe(2); + expect(chain[2].endpointId).toBe(1); + expect(chain[3].endpointId).toBe(2); + expect(chain[4].endpointId).toBe(1); + } finally { + vi.useRealTimers(); + } + }); + + test("endpoints = maxRetry: each endpoint should be tried exactly once", async () => { + vi.useFakeTimers(); + + try { + const session = createSession(); + // Configure provider with maxRetryAttempts=3 and 3 endpoints + const provider = createProvider({ + providerType: "claude", + providerVendorId: 123, + maxRetryAttempts: 3, + }); + session.setProvider(provider); + + mocks.getPreferredProviderEndpoints.mockResolvedValue([ + makeEndpoint({ + id: 1, + vendorId: 123, + providerType: "claude", + url: "https://ep1.example.com", + lastProbeLatencyMs: 100, + }), + makeEndpoint({ + id: 2, + vendorId: 123, + providerType: "claude", + url: "https://ep2.example.com", + lastProbeLatencyMs: 200, + }), + makeEndpoint({ + id: 3, + vendorId: 123, + providerType: "claude", + url: "https://ep3.example.com", + lastProbeLatencyMs: 300, + }), + ]); + + const doForward = vi.spyOn( + ProxyForwarder as unknown as { doForward: (...args: unknown[]) => unknown }, + "doForward" + ); + + // First two fail, third succeeds + doForward.mockImplementationOnce(async () => { + throw new ProxyError("failed", 500); + }); + doForward.mockImplementationOnce(async () => { + throw new ProxyError("failed", 500); + }); + doForward.mockResolvedValueOnce( + new Response("{}", { + status: 200, + headers: { "content-type": "application/json", "content-length": "2" }, + }) + ); + + const sendPromise = ProxyForwarder.send(session); + await vi.advanceTimersByTimeAsync(300); + const response = await sendPromise; + + expect(response.status).toBe(200); + expect(doForward).toHaveBeenCalledTimes(3); + + const chain = session.getProviderChain(); + expect(chain).toHaveLength(3); + + // Each endpoint tried exactly once + expect(chain[0].endpointId).toBe(1); + expect(chain[1].endpointId).toBe(2); + expect(chain[2].endpointId).toBe(3); + } finally { + vi.useRealTimers(); + } + }); + + test("MCP request: should use provider.url only, ignore vendor endpoints", async () => { + const session = createSession(new URL("https://example.com/mcp/custom-endpoint")); + const provider = createProvider({ + providerType: "claude", + providerVendorId: 123, + maxRetryAttempts: 2, + url: "https://provider.example.com/mcp", + }); + session.setProvider(provider); + + // Even if endpoints are available, MCP should not use them + mocks.getPreferredProviderEndpoints.mockResolvedValue([ + makeEndpoint({ + id: 1, + vendorId: 123, + providerType: "claude", + url: 
"https://ep1.example.com", + }), + makeEndpoint({ + id: 2, + vendorId: 123, + providerType: "claude", + url: "https://ep2.example.com", + }), + ]); + + const doForward = vi.spyOn( + ProxyForwarder as unknown as { doForward: (...args: unknown[]) => unknown }, + "doForward" + ); + doForward.mockResolvedValueOnce( + new Response("{}", { + status: 200, + headers: { "content-type": "application/json", "content-length": "2" }, + }) + ); + + const response = await ProxyForwarder.send(session); + expect(response.status).toBe(200); + + // getPreferredProviderEndpoints should NOT be called for MCP requests + expect(mocks.getPreferredProviderEndpoints).not.toHaveBeenCalled(); + + const chain = session.getProviderChain(); + expect(chain).toHaveLength(1); + // endpointId should be null (using provider.url) + expect(chain[0].endpointId).toBeNull(); + }); + + test("no vendor endpoints: should use provider.url with configured maxRetry", async () => { + vi.useFakeTimers(); + + try { + const session = createSession(); + // Provider without vendorId + const provider = createProvider({ + providerType: "claude", + providerVendorId: null as unknown as number, + maxRetryAttempts: 3, + url: "https://provider.example.com", + }); + session.setProvider(provider); + + const doForward = vi.spyOn( + ProxyForwarder as unknown as { doForward: (...args: unknown[]) => unknown }, + "doForward" + ); + + // First two fail, third succeeds + doForward.mockImplementationOnce(async () => { + throw new ProxyError("failed", 500); + }); + doForward.mockImplementationOnce(async () => { + throw new ProxyError("failed", 500); + }); + doForward.mockResolvedValueOnce( + new Response("{}", { + status: 200, + headers: { "content-type": "application/json", "content-length": "2" }, + }) + ); + + const sendPromise = ProxyForwarder.send(session); + await vi.advanceTimersByTimeAsync(300); + const response = await sendPromise; + + expect(response.status).toBe(200); + // Should retry up to maxRetryAttempts times + expect(doForward).toHaveBeenCalledTimes(3); + + // getPreferredProviderEndpoints should NOT be called (no vendorId) + expect(mocks.getPreferredProviderEndpoints).not.toHaveBeenCalled(); + + const chain = session.getProviderChain(); + expect(chain).toHaveLength(3); + // All attempts should use provider.url (endpointId=null) + expect(chain[0].endpointId).toBeNull(); + expect(chain[1].endpointId).toBeNull(); + expect(chain[2].endpointId).toBeNull(); + } finally { + vi.useRealTimers(); + } + }); + + test("all retries exhausted: should not exceed maxRetryAttempts", async () => { + vi.useFakeTimers(); + + try { + const session = createSession(); + const provider = createProvider({ + providerType: "claude", + providerVendorId: 123, + maxRetryAttempts: 2, + }); + session.setProvider(provider); + + // 4 endpoints available but maxRetry=2 + mocks.getPreferredProviderEndpoints.mockResolvedValue([ + makeEndpoint({ + id: 1, + vendorId: 123, + providerType: "claude", + url: "https://ep1.example.com", + lastProbeLatencyMs: 100, + }), + makeEndpoint({ + id: 2, + vendorId: 123, + providerType: "claude", + url: "https://ep2.example.com", + lastProbeLatencyMs: 200, + }), + makeEndpoint({ + id: 3, + vendorId: 123, + providerType: "claude", + url: "https://ep3.example.com", + lastProbeLatencyMs: 300, + }), + makeEndpoint({ + id: 4, + vendorId: 123, + providerType: "claude", + url: "https://ep4.example.com", + lastProbeLatencyMs: 400, + }), + ]); + + const doForward = vi.spyOn( + ProxyForwarder as unknown as { doForward: (...args: unknown[]) => unknown }, + 
"doForward" + ); + + // All attempts fail + doForward.mockImplementation(async () => { + throw new ProxyError("failed", 500); + }); + + const sendPromise = ProxyForwarder.send(session); + await vi.advanceTimersByTimeAsync(200); + + await expect(sendPromise).rejects.toThrow(); + + // Should only call doForward twice (maxRetryAttempts=2), NOT 4 times + expect(doForward).toHaveBeenCalledTimes(2); + + const chain = session.getProviderChain(); + // Only 2 attempts recorded + expect(chain).toHaveLength(2); + expect(chain[0].endpointId).toBe(1); + expect(chain[1].endpointId).toBe(2); + } finally { + vi.useRealTimers(); + } + }); +}); diff --git a/tests/unit/proxy/proxy-forwarder-thinking-signature-rectifier.test.ts b/tests/unit/proxy/proxy-forwarder-thinking-signature-rectifier.test.ts index d4a0e763b..d73d26238 100644 --- a/tests/unit/proxy/proxy-forwarder-thinking-signature-rectifier.test.ts +++ b/tests/unit/proxy/proxy-forwarder-thinking-signature-rectifier.test.ts @@ -376,4 +376,68 @@ describe("ProxyForwarder - thinking signature rectifier", () => { expect(special).not.toBeNull(); expect(JSON.stringify(special)).toContain("thinking_signature_rectifier"); }); + + test("命中 signature Extra inputs not permitted 错误时应整流并对同供应商重试一次", async () => { + const session = createSession(); + session.setProvider(createAnthropicProvider()); + + // 模拟包含 signature 字段的 tool_use content block + const msg = session.request.message as any; + msg.messages = [ + { + role: "assistant", + content: [ + { type: "text", text: "hello" }, + { + type: "tool_use", + id: "toolu_1", + name: "WebSearch", + input: { query: "q" }, + signature: "sig_tool_should_remove", + }, + ], + }, + ]; + + const doForward = vi.spyOn(ProxyForwarder as any, "doForward"); + + doForward.mockImplementationOnce(async () => { + throw new ProxyError("content.1.tool_use.signature: Extra inputs are not permitted", 400, { + body: "", + providerId: 1, + providerName: "anthropic-1", + }); + }); + + doForward.mockImplementationOnce(async (s: ProxySession) => { + const bodyMsg = s.request.message as any; + const blocks = bodyMsg.messages[0].content as any[]; + + // 验证 signature 字段已被移除 + expect(blocks.some((b: any) => "signature" in b)).toBe(false); + + const body = JSON.stringify({ + type: "message", + content: [{ type: "text", text: "ok" }], + }); + + return new Response(body, { + status: 200, + headers: { + "content-type": "application/json", + "content-length": String(body.length), + }, + }); + }); + + const response = await ProxyForwarder.send(session); + + expect(response.status).toBe(200); + expect(doForward).toHaveBeenCalledTimes(2); + expect(mocks.updateMessageRequestDetails).toHaveBeenCalledTimes(1); + + const special = session.getSpecialSettings(); + expect(special).not.toBeNull(); + expect(JSON.stringify(special)).toContain("thinking_signature_rectifier"); + }); }); diff --git a/tests/unit/proxy/ssl-error-detection.test.ts b/tests/unit/proxy/ssl-error-detection.test.ts new file mode 100644 index 000000000..b00d9e736 --- /dev/null +++ b/tests/unit/proxy/ssl-error-detection.test.ts @@ -0,0 +1,172 @@ +/** + * SSL Certificate Error Detection Tests + * + * TDD: Tests written first, implementation follows + */ +import { describe, expect, it } from "vitest"; +import { isSSLCertificateError } from "@/app/v1/_lib/proxy/errors"; + +describe("isSSLCertificateError", () => { + describe("should detect SSL certificate errors", () => { + it("should detect certificate hostname mismatch", () => { + const error = new Error("Hostname/IP does not match certificate's 
altnames"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect ERR_TLS_CERT_ALTNAME_INVALID", () => { + const error = new Error("ERR_TLS_CERT_ALTNAME_INVALID"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect self-signed certificate error", () => { + const error = new Error("self signed certificate"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect depth_zero_self_signed_cert", () => { + const error = new Error("depth_zero_self_signed_cert"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect expired certificate error", () => { + const error = new Error("certificate has expired"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect cert_has_expired", () => { + const error = new Error("cert_has_expired"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect unable to verify certificate", () => { + const error = new Error("unable to verify the first certificate"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect unable_to_verify_leaf_signature", () => { + const error = new Error("unable_to_verify_leaf_signature"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect unable_to_get_issuer_cert", () => { + const error = new Error("unable_to_get_issuer_cert"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect SSL handshake error", () => { + const error = new Error("SSL handshake failed"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect TLS error", () => { + const error = new Error("TLS connection failed"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect certificate chain error", () => { + const error = new Error("certificate chain is invalid"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect CERT_UNTRUSTED", () => { + const error = new Error("CERT_UNTRUSTED"); + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect error with code property", () => { + const error = new Error("Connection failed") as NodeJS.ErrnoException; + error.code = "UNABLE_TO_VERIFY_LEAF_SIGNATURE"; + expect(isSSLCertificateError(error)).toBe(true); + }); + + it("should detect error with name containing SSL", () => { + const error = new Error("Connection failed"); + error.name = "SSLError"; + expect(isSSLCertificateError(error)).toBe(true); + }); + }); + + describe("should not match non-SSL errors", () => { + it("should not match connection refused error", () => { + const error = new Error("Connection refused"); + expect(isSSLCertificateError(error)).toBe(false); + }); + + it("should not match timeout error", () => { + const error = new Error("Request timeout"); + expect(isSSLCertificateError(error)).toBe(false); + }); + + it("should not match DNS error", () => { + const error = new Error("getaddrinfo ENOTFOUND api.example.com"); + expect(isSSLCertificateError(error)).toBe(false); + }); + + it("should not match network unreachable error", () => { + const error = new Error("Network is unreachable"); + expect(isSSLCertificateError(error)).toBe(false); + }); + + it("should not match HTTP error", () => { + const error = new Error("HTTP 500 Internal Server Error"); + expect(isSSLCertificateError(error)).toBe(false); + }); + + it("should not match abort error", () => { + const error = new Error("The operation was aborted"); + error.name = "AbortError"; + 
expect(isSSLCertificateError(error)).toBe(false); + }); + + it("should not match ECONNRESET", () => { + const error = new Error("Connection reset by peer") as NodeJS.ErrnoException; + error.code = "ECONNRESET"; + expect(isSSLCertificateError(error)).toBe(false); + }); + + it("should not match ETIMEDOUT", () => { + const error = new Error("Connection timed out") as NodeJS.ErrnoException; + error.code = "ETIMEDOUT"; + expect(isSSLCertificateError(error)).toBe(false); + }); + + it("should not match generic error", () => { + const error = new Error("Something went wrong"); + expect(isSSLCertificateError(error)).toBe(false); + }); + + it("should not match empty error message", () => { + const error = new Error(""); + expect(isSSLCertificateError(error)).toBe(false); + }); + }); + + describe("edge cases", () => { + it("should handle non-Error objects", () => { + expect(isSSLCertificateError("certificate error")).toBe(false); + expect(isSSLCertificateError(null)).toBe(false); + expect(isSSLCertificateError(undefined)).toBe(false); + expect(isSSLCertificateError(123)).toBe(false); + expect(isSSLCertificateError({})).toBe(false); + }); + + it("should handle Error with undefined message", () => { + const error = new Error(); + expect(isSSLCertificateError(error)).toBe(false); + }); + + it("should be case insensitive", () => { + expect(isSSLCertificateError(new Error("CERTIFICATE ERROR"))).toBe(true); + expect(isSSLCertificateError(new Error("Certificate Error"))).toBe(true); + expect(isSSLCertificateError(new Error("SSL_ERROR"))).toBe(true); + expect(isSSLCertificateError(new Error("Ssl_Error"))).toBe(true); + }); + + it("should detect SSL error in nested cause", () => { + const cause = new Error("self signed certificate"); + const error = new Error("Request failed", { cause }); + // Note: This test documents expected behavior - implementation may need to check cause + expect(isSSLCertificateError(error)).toBe(true); + }); + }); +});