diff --git a/packages/types/src/codebase-index.ts b/packages/types/src/codebase-index.ts index be7778f5387..8ad66cbb68b 100644 --- a/packages/types/src/codebase-index.ts +++ b/packages/types/src/codebase-index.ts @@ -22,7 +22,7 @@ export const codebaseIndexConfigSchema = z.object({ codebaseIndexEnabled: z.boolean().optional(), codebaseIndexQdrantUrl: z.string().optional(), codebaseIndexEmbedderProvider: z - .enum(["openai", "ollama", "openai-compatible", "gemini", "mistral", "vercel-ai-gateway"]) + .enum(["openai", "ollama", "openai-compatible", "gemini", "mistral", "vercel-ai-gateway", "openrouter"]) .optional(), codebaseIndexEmbedderBaseUrl: z.string().optional(), codebaseIndexEmbedderModelId: z.string().optional(), @@ -51,6 +51,7 @@ export const codebaseIndexModelsSchema = z.object({ gemini: z.record(z.string(), z.object({ dimension: z.number() })).optional(), mistral: z.record(z.string(), z.object({ dimension: z.number() })).optional(), "vercel-ai-gateway": z.record(z.string(), z.object({ dimension: z.number() })).optional(), + openrouter: z.record(z.string(), z.object({ dimension: z.number() })).optional(), }) export type CodebaseIndexModels = z.infer @@ -68,6 +69,7 @@ export const codebaseIndexProviderSchema = z.object({ codebaseIndexGeminiApiKey: z.string().optional(), codebaseIndexMistralApiKey: z.string().optional(), codebaseIndexVercelAiGatewayApiKey: z.string().optional(), + codebaseIndexOpenRouterApiKey: z.string().optional(), }) export type CodebaseIndexProvider = z.infer diff --git a/packages/types/src/global-settings.ts b/packages/types/src/global-settings.ts index 1e17fb4c3cc..579356ae2d4 100644 --- a/packages/types/src/global-settings.ts +++ b/packages/types/src/global-settings.ts @@ -232,6 +232,7 @@ export const SECRET_STATE_KEYS = [ "codebaseIndexGeminiApiKey", "codebaseIndexMistralApiKey", "codebaseIndexVercelAiGatewayApiKey", + "codebaseIndexOpenRouterApiKey", "huggingFaceApiKey", "sambaNovaApiKey", "zaiApiKey", diff --git 
a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts index c409f15a65d..04b7aea635b 100644 --- a/src/core/webview/webviewMessageHandler.ts +++ b/src/core/webview/webviewMessageHandler.ts @@ -2550,6 +2550,12 @@ export const webviewMessageHandler = async ( settings.codebaseIndexVercelAiGatewayApiKey, ) } + if (settings.codebaseIndexOpenRouterApiKey !== undefined) { + await provider.contextProxy.storeSecret( + "codebaseIndexOpenRouterApiKey", + settings.codebaseIndexOpenRouterApiKey, + ) + } // Send success response first - settings are saved regardless of validation await provider.postMessageToWebview({ @@ -2687,6 +2693,7 @@ export const webviewMessageHandler = async ( const hasVercelAiGatewayApiKey = !!(await provider.context.secrets.get( "codebaseIndexVercelAiGatewayApiKey", )) + const hasOpenRouterApiKey = !!(await provider.context.secrets.get("codebaseIndexOpenRouterApiKey")) provider.postMessageToWebview({ type: "codeIndexSecretStatus", @@ -2697,6 +2704,7 @@ export const webviewMessageHandler = async ( hasGeminiApiKey, hasMistralApiKey, hasVercelAiGatewayApiKey, + hasOpenRouterApiKey, }, }) break diff --git a/src/i18n/locales/ca/embeddings.json b/src/i18n/locales/ca/embeddings.json index 782f92cddfd..c00e336ee55 100644 --- a/src/i18n/locales/ca/embeddings.json +++ b/src/i18n/locales/ca/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Falta la configuració compatible amb OpenAI per crear l'embedder", "geminiConfigMissing": "Falta la configuració de Gemini per crear l'embedder", "mistralConfigMissing": "Falta la configuració de Mistral per crear l'embedder", + "openRouterConfigMissing": "Falta la configuració d'OpenRouter per crear l'embedder", "vercelAiGatewayConfigMissing": "Falta la configuració de Vercel AI Gateway per crear l'embedder", "invalidEmbedderType": "Tipus d'embedder configurat no vàlid: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "No s'ha pogut determinar la dimensió 
del vector per al model '{{modelId}}' amb el proveïdor '{{provider}}'. Assegura't que la 'Dimensió d'incrustació' estigui configurada correctament als paràmetres del proveïdor compatible amb OpenAI.", diff --git a/src/i18n/locales/de/embeddings.json b/src/i18n/locales/de/embeddings.json index 239d5d3c8a0..e0c50e0a3d6 100644 --- a/src/i18n/locales/de/embeddings.json +++ b/src/i18n/locales/de/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "OpenAI-kompatible Konfiguration fehlt für die Erstellung des Embedders", "geminiConfigMissing": "Gemini-Konfiguration fehlt für die Erstellung des Embedders", "mistralConfigMissing": "Mistral-Konfiguration fehlt für die Erstellung des Embedders", + "openRouterConfigMissing": "OpenRouter-Konfiguration fehlt für die Erstellung des Embedders", "vercelAiGatewayConfigMissing": "Vercel AI Gateway-Konfiguration fehlt für die Erstellung des Embedders", "invalidEmbedderType": "Ungültiger Embedder-Typ konfiguriert: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Konnte die Vektordimension für Modell '{{modelId}}' mit Anbieter '{{provider}}' nicht bestimmen. 
Stelle sicher, dass die 'Embedding-Dimension' in den OpenAI-kompatiblen Anbietereinstellungen korrekt eingestellt ist.", diff --git a/src/i18n/locales/en/embeddings.json b/src/i18n/locales/en/embeddings.json index fc902cadc16..5cf0322584f 100644 --- a/src/i18n/locales/en/embeddings.json +++ b/src/i18n/locales/en/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "OpenAI Compatible configuration missing for embedder creation", "geminiConfigMissing": "Gemini configuration missing for embedder creation", "mistralConfigMissing": "Mistral configuration missing for embedder creation", + "openRouterConfigMissing": "OpenRouter configuration missing for embedder creation", "vercelAiGatewayConfigMissing": "Vercel AI Gateway configuration missing for embedder creation", "invalidEmbedderType": "Invalid embedder type configured: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Could not determine vector dimension for model '{{modelId}}' with provider '{{provider}}'. 
Please ensure the 'Embedding Dimension' is correctly set in the OpenAI-Compatible provider settings.", diff --git a/src/i18n/locales/es/embeddings.json b/src/i18n/locales/es/embeddings.json index ac7522ab014..76cd5cf53ad 100644 --- a/src/i18n/locales/es/embeddings.json +++ b/src/i18n/locales/es/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Falta la configuración compatible con OpenAI para crear el incrustador", "geminiConfigMissing": "Falta la configuración de Gemini para crear el incrustador", "mistralConfigMissing": "Falta la configuración de Mistral para la creación del incrustador", + "openRouterConfigMissing": "Falta la configuración de OpenRouter para la creación del incrustador", "vercelAiGatewayConfigMissing": "Falta la configuración de Vercel AI Gateway para la creación del incrustador", "invalidEmbedderType": "Tipo de incrustador configurado inválido: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "No se pudo determinar la dimensión del vector para el modelo '{{modelId}}' con el proveedor '{{provider}}'. 
Asegúrate de que la 'Dimensión de incrustación' esté configurada correctamente en los ajustes del proveedor compatible con OpenAI.", diff --git a/src/i18n/locales/fr/embeddings.json b/src/i18n/locales/fr/embeddings.json index e4dff280129..8bb97735a85 100644 --- a/src/i18n/locales/fr/embeddings.json +++ b/src/i18n/locales/fr/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Configuration compatible OpenAI manquante pour la création de l'embedder", "geminiConfigMissing": "Configuration Gemini manquante pour la création de l'embedder", "mistralConfigMissing": "Configuration Mistral manquante pour la création de l'embedder", + "openRouterConfigMissing": "Configuration OpenRouter manquante pour la création de l'embedder", "vercelAiGatewayConfigMissing": "Configuration Vercel AI Gateway manquante pour la création de l'embedder", "invalidEmbedderType": "Type d'embedder configuré invalide : {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Impossible de déterminer la dimension du vecteur pour le modèle '{{modelId}}' avec le fournisseur '{{provider}}'. 
Assure-toi que la 'Dimension d'embedding' est correctement définie dans les paramètres du fournisseur compatible OpenAI.", diff --git a/src/i18n/locales/hi/embeddings.json b/src/i18n/locales/hi/embeddings.json index 108d1263730..26f9326e302 100644 --- a/src/i18n/locales/hi/embeddings.json +++ b/src/i18n/locales/hi/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "एम्बेडर बनाने के लिए OpenAI संगत कॉन्फ़िगरेशन गायब है", "geminiConfigMissing": "एम्बेडर बनाने के लिए Gemini कॉन्फ़िगरेशन गायब है", "mistralConfigMissing": "एम्बेडर निर्माण के लिए मिस्ट्रल कॉन्फ़िगरेशन गायब है", + "openRouterConfigMissing": "एम्बेडर निर्माण के लिए OpenRouter कॉन्फ़िगरेशन गायब है", "vercelAiGatewayConfigMissing": "एम्बेडर निर्माण के लिए Vercel AI Gateway कॉन्फ़िगरेशन गायब है", "invalidEmbedderType": "अमान्य एम्बेडर प्रकार कॉन्फ़िगर किया गया: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "प्रदाता '{{provider}}' के साथ मॉडल '{{modelId}}' के लिए वेक्टर आयाम निर्धारित नहीं कर सका। कृपया सुनिश्चित करें कि OpenAI-संगत प्रदाता सेटिंग्स में 'एम्बेडिंग आयाम' सही तरीके से सेट है।", diff --git a/src/i18n/locales/id/embeddings.json b/src/i18n/locales/id/embeddings.json index 8c0fdd490fa..b7cbf968514 100644 --- a/src/i18n/locales/id/embeddings.json +++ b/src/i18n/locales/id/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Konfigurasi yang kompatibel dengan OpenAI tidak ada untuk membuat embedder", "geminiConfigMissing": "Konfigurasi Gemini tidak ada untuk membuat embedder", "mistralConfigMissing": "Konfigurasi Mistral hilang untuk pembuatan embedder", + "openRouterConfigMissing": "Konfigurasi OpenRouter hilang untuk pembuatan embedder", "vercelAiGatewayConfigMissing": "Konfigurasi Vercel AI Gateway hilang untuk pembuatan embedder", "invalidEmbedderType": "Tipe embedder yang dikonfigurasi tidak valid: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Tidak dapat menentukan dimensi vektor untuk model '{{modelId}}' dengan 
penyedia '{{provider}}'. Pastikan 'Dimensi Embedding' diatur dengan benar di pengaturan penyedia yang kompatibel dengan OpenAI.", diff --git a/src/i18n/locales/it/embeddings.json b/src/i18n/locales/it/embeddings.json index 0ca32f906ea..220b902f2cb 100644 --- a/src/i18n/locales/it/embeddings.json +++ b/src/i18n/locales/it/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Configurazione compatibile con OpenAI mancante per la creazione dell'embedder", "geminiConfigMissing": "Configurazione Gemini mancante per la creazione dell'embedder", "mistralConfigMissing": "Configurazione di Mistral mancante per la creazione dell'embedder", + "openRouterConfigMissing": "Configurazione di OpenRouter mancante per la creazione dell'embedder", "vercelAiGatewayConfigMissing": "Configurazione di Vercel AI Gateway mancante per la creazione dell'embedder", "invalidEmbedderType": "Tipo di embedder configurato non valido: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Impossibile determinare la dimensione del vettore per il modello '{{modelId}}' con il provider '{{provider}}'. 
Assicurati che la 'Dimensione di embedding' sia impostata correttamente nelle impostazioni del provider compatibile con OpenAI.", diff --git a/src/i18n/locales/ja/embeddings.json b/src/i18n/locales/ja/embeddings.json index a1a37ebffa3..e74fef4138e 100644 --- a/src/i18n/locales/ja/embeddings.json +++ b/src/i18n/locales/ja/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "エンベッダー作成のためのOpenAI互換設定がありません", "geminiConfigMissing": "エンベッダー作成のためのGemini設定がありません", "mistralConfigMissing": "エンベッダー作成のためのMistral設定がありません", + "openRouterConfigMissing": "エンベッダー作成のためのOpenRouter設定がありません", "vercelAiGatewayConfigMissing": "エンベッダー作成のためのVercel AI Gateway設定がありません", "invalidEmbedderType": "無効なエンベッダータイプが設定されています: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "プロバイダー '{{provider}}' のモデル '{{modelId}}' の埋め込み次元を決定できませんでした。OpenAI互換プロバイダー設定で「埋め込み次元」が正しく設定されていることを確認してください。", diff --git a/src/i18n/locales/ko/embeddings.json b/src/i18n/locales/ko/embeddings.json index 6c81c9d7d75..31c73fa5f26 100644 --- a/src/i18n/locales/ko/embeddings.json +++ b/src/i18n/locales/ko/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "임베더 생성을 위한 OpenAI 호환 구성이 누락되었습니다", "geminiConfigMissing": "임베더 생성을 위한 Gemini 구성이 누락되었습니다", "mistralConfigMissing": "임베더 생성을 위한 Mistral 구성이 없습니다", + "openRouterConfigMissing": "임베더 생성을 위한 OpenRouter 구성이 없습니다", "vercelAiGatewayConfigMissing": "임베더 생성을 위한 Vercel AI Gateway 구성이 없습니다", "invalidEmbedderType": "잘못된 임베더 유형이 구성되었습니다: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "프로바이더 '{{provider}}'의 모델 '{{modelId}}'에 대한 벡터 차원을 결정할 수 없습니다. 
OpenAI 호환 프로바이더 설정에서 '임베딩 차원'이 올바르게 설정되어 있는지 확인하세요.", diff --git a/src/i18n/locales/nl/embeddings.json b/src/i18n/locales/nl/embeddings.json index 7ae59ae02a6..aa6d242f1f1 100644 --- a/src/i18n/locales/nl/embeddings.json +++ b/src/i18n/locales/nl/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "OpenAI-compatibele configuratie ontbreekt voor het maken van embedder", "geminiConfigMissing": "Gemini-configuratie ontbreekt voor het maken van embedder", "mistralConfigMissing": "Mistral-configuratie ontbreekt voor het maken van de embedder", + "openRouterConfigMissing": "OpenRouter-configuratie ontbreekt voor het maken van de embedder", "vercelAiGatewayConfigMissing": "Vercel AI Gateway-configuratie ontbreekt voor het maken van de embedder", "invalidEmbedderType": "Ongeldig embedder-type geconfigureerd: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Kan de vectordimensie voor model '{{modelId}}' met provider '{{provider}}' niet bepalen. Zorg ervoor dat de 'Embedding Dimensie' correct is ingesteld in de OpenAI-compatibele provider-instellingen.", diff --git a/src/i18n/locales/pl/embeddings.json b/src/i18n/locales/pl/embeddings.json index 8f75d00af88..88543ede38c 100644 --- a/src/i18n/locales/pl/embeddings.json +++ b/src/i18n/locales/pl/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Brak konfiguracji kompatybilnej z OpenAI do utworzenia embeddera", "geminiConfigMissing": "Brak konfiguracji Gemini do utworzenia embeddera", "mistralConfigMissing": "Brak konfiguracji Mistral do utworzenia embeddera", + "openRouterConfigMissing": "Brak konfiguracji OpenRouter do utworzenia embeddera", "vercelAiGatewayConfigMissing": "Brak konfiguracji Vercel AI Gateway do utworzenia embeddera", "invalidEmbedderType": "Skonfigurowano nieprawidłowy typ embeddera: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Nie można określić wymiaru wektora dla modelu '{{modelId}}' z dostawcą '{{provider}}'. 
Upewnij się, że 'Wymiar osadzania' jest poprawnie ustawiony w ustawieniach dostawcy kompatybilnego z OpenAI.", diff --git a/src/i18n/locales/pt-BR/embeddings.json b/src/i18n/locales/pt-BR/embeddings.json index ee135ed8b52..c67d0df686b 100644 --- a/src/i18n/locales/pt-BR/embeddings.json +++ b/src/i18n/locales/pt-BR/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Configuração compatível com OpenAI ausente para criação do embedder", "geminiConfigMissing": "Configuração do Gemini ausente para criação do embedder", "mistralConfigMissing": "Configuração do Mistral ausente para a criação do embedder", + "openRouterConfigMissing": "Configuração do OpenRouter ausente para a criação do embedder", "vercelAiGatewayConfigMissing": "Configuração do Vercel AI Gateway ausente para a criação do embedder", "invalidEmbedderType": "Tipo de embedder configurado inválido: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Não foi possível determinar a dimensão do vetor para o modelo '{{modelId}}' com o provedor '{{provider}}'. 
Certifique-se de que a 'Dimensão de Embedding' esteja configurada corretamente nas configurações do provedor compatível com OpenAI.", diff --git a/src/i18n/locales/ru/embeddings.json b/src/i18n/locales/ru/embeddings.json index 97301b32097..7e48af3d59e 100644 --- a/src/i18n/locales/ru/embeddings.json +++ b/src/i18n/locales/ru/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Отсутствует конфигурация, совместимая с OpenAI, для создания эмбеддера", "geminiConfigMissing": "Отсутствует конфигурация Gemini для создания эмбеддера", "mistralConfigMissing": "Конфигурация Mistral отсутствует для создания эмбеддера", + "openRouterConfigMissing": "Конфигурация OpenRouter отсутствует для создания эмбеддера", "vercelAiGatewayConfigMissing": "Конфигурация Vercel AI Gateway отсутствует для создания эмбеддера", "invalidEmbedderType": "Настроен недопустимый тип эмбеддера: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Не удалось определить размерность вектора для модели '{{modelId}}' с провайдером '{{provider}}'. 
Убедитесь, что 'Размерность эмбеддинга' правильно установлена в настройках провайдера, совместимого с OpenAI.", diff --git a/src/i18n/locales/tr/embeddings.json b/src/i18n/locales/tr/embeddings.json index 279fa97516a..36efc466e3d 100644 --- a/src/i18n/locales/tr/embeddings.json +++ b/src/i18n/locales/tr/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Gömücü oluşturmak için OpenAI uyumlu yapılandırması eksik", "geminiConfigMissing": "Gömücü oluşturmak için Gemini yapılandırması eksik", "mistralConfigMissing": "Gömücü oluşturmak için Mistral yapılandırması eksik", + "openRouterConfigMissing": "Gömücü oluşturmak için OpenRouter yapılandırması eksik", "vercelAiGatewayConfigMissing": "Gömücü oluşturmak için Vercel AI Gateway yapılandırması eksik", "invalidEmbedderType": "Geçersiz gömücü türü yapılandırıldı: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "'{{provider}}' sağlayıcısı ile '{{modelId}}' modeli için vektör boyutu belirlenemedi. OpenAI uyumlu sağlayıcı ayarlarında 'Gömme Boyutu'nun doğru ayarlandığından emin ol.", diff --git a/src/i18n/locales/vi/embeddings.json b/src/i18n/locales/vi/embeddings.json index 3e941aafb66..96496083caa 100644 --- a/src/i18n/locales/vi/embeddings.json +++ b/src/i18n/locales/vi/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Thiếu cấu hình tương thích OpenAI để tạo embedder", "geminiConfigMissing": "Thiếu cấu hình Gemini để tạo embedder", "mistralConfigMissing": "Thiếu cấu hình Mistral để tạo trình nhúng", + "openRouterConfigMissing": "Thiếu cấu hình OpenRouter để tạo trình nhúng", "vercelAiGatewayConfigMissing": "Thiếu cấu hình Vercel AI Gateway để tạo trình nhúng", "invalidEmbedderType": "Loại embedder được cấu hình không hợp lệ: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Không thể xác định kích thước vector cho mô hình '{{modelId}}' với nhà cung cấp '{{provider}}'. 
Hãy đảm bảo 'Kích thước Embedding' được cài đặt đúng trong cài đặt nhà cung cấp tương thích OpenAI.", diff --git a/src/i18n/locales/zh-CN/embeddings.json b/src/i18n/locales/zh-CN/embeddings.json index d16f7df32b2..dfc591391e5 100644 --- a/src/i18n/locales/zh-CN/embeddings.json +++ b/src/i18n/locales/zh-CN/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "创建嵌入器缺少 OpenAI 兼容配置", "geminiConfigMissing": "创建嵌入器缺少 Gemini 配置", "mistralConfigMissing": "创建嵌入器时缺少 Mistral 配置", + "openRouterConfigMissing": "创建嵌入器时缺少 OpenRouter 配置", "vercelAiGatewayConfigMissing": "创建嵌入器时缺少 Vercel AI Gateway 配置", "invalidEmbedderType": "配置的嵌入器类型无效:{{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "无法确定提供商 '{{provider}}' 的模型 '{{modelId}}' 的向量维度。请确保在 OpenAI 兼容提供商设置中正确设置了「嵌入维度」。", diff --git a/src/i18n/locales/zh-TW/embeddings.json b/src/i18n/locales/zh-TW/embeddings.json index 044de1dac22..24ed5190967 100644 --- a/src/i18n/locales/zh-TW/embeddings.json +++ b/src/i18n/locales/zh-TW/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "建立嵌入器缺少 OpenAI 相容設定", "geminiConfigMissing": "建立嵌入器缺少 Gemini 設定", "mistralConfigMissing": "建立嵌入器時缺少 Mistral 設定", + "openRouterConfigMissing": "建立嵌入器時缺少 OpenRouter 設定", "vercelAiGatewayConfigMissing": "建立嵌入器時缺少 Vercel AI Gateway 設定", "invalidEmbedderType": "設定的嵌入器類型無效:{{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "無法確定提供商 '{{provider}}' 的模型 '{{modelId}}' 的向量維度。請確保在 OpenAI 相容提供商設定中正確設定了「嵌入維度」。", diff --git a/src/services/code-index/config-manager.ts b/src/services/code-index/config-manager.ts index 2c0e8bb5c9e..5bc00b6ce35 100644 --- a/src/services/code-index/config-manager.ts +++ b/src/services/code-index/config-manager.ts @@ -20,6 +20,7 @@ export class CodeIndexConfigManager { private geminiOptions?: { apiKey: string } private mistralOptions?: { apiKey: string } private vercelAiGatewayOptions?: { apiKey: string } + private openRouterOptions?: { apiKey: string } private qdrantUrl?: string = 
"http://localhost:6333" private qdrantApiKey?: string private searchMinScore?: number @@ -71,6 +72,7 @@ export class CodeIndexConfigManager { const geminiApiKey = this.contextProxy?.getSecret("codebaseIndexGeminiApiKey") ?? "" const mistralApiKey = this.contextProxy?.getSecret("codebaseIndexMistralApiKey") ?? "" const vercelAiGatewayApiKey = this.contextProxy?.getSecret("codebaseIndexVercelAiGatewayApiKey") ?? "" + const openRouterApiKey = this.contextProxy?.getSecret("codebaseIndexOpenRouterApiKey") ?? "" // Update instance variables with configuration this.codebaseIndexEnabled = codebaseIndexEnabled ?? true @@ -108,6 +110,8 @@ export class CodeIndexConfigManager { this.embedderProvider = "mistral" } else if (codebaseIndexEmbedderProvider === "vercel-ai-gateway") { this.embedderProvider = "vercel-ai-gateway" + } else if (codebaseIndexEmbedderProvider === "openrouter") { + this.embedderProvider = "openrouter" } else { this.embedderProvider = "openai" } @@ -129,6 +133,7 @@ export class CodeIndexConfigManager { this.geminiOptions = geminiApiKey ? { apiKey: geminiApiKey } : undefined this.mistralOptions = mistralApiKey ? { apiKey: mistralApiKey } : undefined this.vercelAiGatewayOptions = vercelAiGatewayApiKey ? { apiKey: vercelAiGatewayApiKey } : undefined + this.openRouterOptions = openRouterApiKey ? { apiKey: openRouterApiKey } : undefined } /** @@ -147,6 +152,7 @@ export class CodeIndexConfigManager { geminiOptions?: { apiKey: string } mistralOptions?: { apiKey: string } vercelAiGatewayOptions?: { apiKey: string } + openRouterOptions?: { apiKey: string } qdrantUrl?: string qdrantApiKey?: string searchMinScore?: number @@ -167,6 +173,7 @@ export class CodeIndexConfigManager { geminiApiKey: this.geminiOptions?.apiKey ?? "", mistralApiKey: this.mistralOptions?.apiKey ?? "", vercelAiGatewayApiKey: this.vercelAiGatewayOptions?.apiKey ?? "", + openRouterApiKey: this.openRouterOptions?.apiKey ?? "", qdrantUrl: this.qdrantUrl ?? "", qdrantApiKey: this.qdrantApiKey ?? 
"", } @@ -192,6 +199,7 @@ export class CodeIndexConfigManager { geminiOptions: this.geminiOptions, mistralOptions: this.mistralOptions, vercelAiGatewayOptions: this.vercelAiGatewayOptions, + openRouterOptions: this.openRouterOptions, qdrantUrl: this.qdrantUrl, qdrantApiKey: this.qdrantApiKey, searchMinScore: this.currentSearchMinScore, @@ -234,6 +242,11 @@ export class CodeIndexConfigManager { const qdrantUrl = this.qdrantUrl const isConfigured = !!(apiKey && qdrantUrl) return isConfigured + } else if (this.embedderProvider === "openrouter") { + const apiKey = this.openRouterOptions?.apiKey + const qdrantUrl = this.qdrantUrl + const isConfigured = !!(apiKey && qdrantUrl) + return isConfigured } return false // Should not happen if embedderProvider is always set correctly } @@ -269,6 +282,7 @@ export class CodeIndexConfigManager { const prevGeminiApiKey = prev?.geminiApiKey ?? "" const prevMistralApiKey = prev?.mistralApiKey ?? "" const prevVercelAiGatewayApiKey = prev?.vercelAiGatewayApiKey ?? "" + const prevOpenRouterApiKey = prev?.openRouterApiKey ?? "" const prevQdrantUrl = prev?.qdrantUrl ?? "" const prevQdrantApiKey = prev?.qdrantApiKey ?? "" @@ -307,6 +321,7 @@ export class CodeIndexConfigManager { const currentGeminiApiKey = this.geminiOptions?.apiKey ?? "" const currentMistralApiKey = this.mistralOptions?.apiKey ?? "" const currentVercelAiGatewayApiKey = this.vercelAiGatewayOptions?.apiKey ?? "" + const currentOpenRouterApiKey = this.openRouterOptions?.apiKey ?? "" const currentQdrantUrl = this.qdrantUrl ?? "" const currentQdrantApiKey = this.qdrantApiKey ?? 
"" @@ -337,6 +352,10 @@ export class CodeIndexConfigManager { return true } + if (prevOpenRouterApiKey !== currentOpenRouterApiKey) { + return true + } + // Check for model dimension changes (generic for all providers) if (prevModelDimension !== currentModelDimension) { return true @@ -395,6 +414,7 @@ export class CodeIndexConfigManager { geminiOptions: this.geminiOptions, mistralOptions: this.mistralOptions, vercelAiGatewayOptions: this.vercelAiGatewayOptions, + openRouterOptions: this.openRouterOptions, qdrantUrl: this.qdrantUrl, qdrantApiKey: this.qdrantApiKey, searchMinScore: this.currentSearchMinScore, diff --git a/src/services/code-index/embedders/__tests__/openrouter.spec.ts b/src/services/code-index/embedders/__tests__/openrouter.spec.ts new file mode 100644 index 00000000000..fe9f9d59dd3 --- /dev/null +++ b/src/services/code-index/embedders/__tests__/openrouter.spec.ts @@ -0,0 +1,289 @@ +import type { MockedClass, MockedFunction } from "vitest" +import { describe, it, expect, beforeEach, vi } from "vitest" +import { OpenAI } from "openai" +import { OpenRouterEmbedder } from "../openrouter" +import { getModelDimension, getDefaultModelId } from "../../../../shared/embeddingModels" + +// Mock the OpenAI SDK +vi.mock("openai") + +// Mock TelemetryService +vi.mock("@roo-code/telemetry", () => ({ + TelemetryService: { + instance: { + captureEvent: vi.fn(), + }, + }, + TelemetryEventName: {}, +})) + +// Mock i18n +vi.mock("../../../../i18n", () => ({ + t: (key: string, params?: Record) => { + const translations: Record = { + "embeddings:validation.apiKeyRequired": "validation.apiKeyRequired", + "embeddings:authenticationFailed": + "Failed to create embeddings: Authentication failed. 
Please check your OpenRouter API key.", + "embeddings:failedWithStatus": `Failed to create embeddings after ${params?.attempts} attempts: HTTP ${params?.statusCode} - ${params?.errorMessage}`, + "embeddings:failedWithError": `Failed to create embeddings after ${params?.attempts} attempts: ${params?.errorMessage}`, + "embeddings:failedMaxAttempts": `Failed to create embeddings after ${params?.attempts} attempts`, + "embeddings:textExceedsTokenLimit": `Text at index ${params?.index} exceeds maximum token limit (${params?.itemTokens} > ${params?.maxTokens}). Skipping.`, + "embeddings:rateLimitRetry": `Rate limit hit, retrying in ${params?.delayMs}ms (attempt ${params?.attempt}/${params?.maxRetries})`, + } + return translations[key] || key + }, +})) + +const MockedOpenAI = OpenAI as MockedClass + +describe("OpenRouterEmbedder", () => { + const mockApiKey = "test-api-key" + let mockEmbeddingsCreate: MockedFunction + let mockOpenAIInstance: any + + beforeEach(() => { + vi.clearAllMocks() + vi.spyOn(console, "warn").mockImplementation(() => {}) + vi.spyOn(console, "error").mockImplementation(() => {}) + + // Setup mock OpenAI instance + mockEmbeddingsCreate = vi.fn() + mockOpenAIInstance = { + embeddings: { + create: mockEmbeddingsCreate, + }, + } + + MockedOpenAI.mockImplementation(() => mockOpenAIInstance) + }) + + afterEach(() => { + vi.restoreAllMocks() + }) + + describe("constructor", () => { + it("should create an instance with valid API key", () => { + const embedder = new OpenRouterEmbedder(mockApiKey) + expect(embedder).toBeInstanceOf(OpenRouterEmbedder) + }) + + it("should throw error with empty API key", () => { + expect(() => new OpenRouterEmbedder("")).toThrow("validation.apiKeyRequired") + }) + + it("should use default model when none specified", () => { + const embedder = new OpenRouterEmbedder(mockApiKey) + const expectedDefault = getDefaultModelId("openrouter") + expect(embedder.embedderInfo.name).toBe("openrouter") + }) + + it("should use custom model 
when specified", () => { + const customModel = "openai/text-embedding-3-small" + const embedder = new OpenRouterEmbedder(mockApiKey, customModel) + expect(embedder.embedderInfo.name).toBe("openrouter") + }) + + it("should initialize OpenAI client with correct headers", () => { + new OpenRouterEmbedder(mockApiKey) + + expect(MockedOpenAI).toHaveBeenCalledWith({ + baseURL: "https://openrouter.ai/api/v1", + apiKey: mockApiKey, + defaultHeaders: { + "HTTP-Referer": "https://github.com/RooCodeInc/Roo-Code", + "X-Title": "Roo Code", + }, + }) + }) + }) + + describe("embedderInfo", () => { + it("should return correct embedder info", () => { + const embedder = new OpenRouterEmbedder(mockApiKey) + expect(embedder.embedderInfo).toEqual({ + name: "openrouter", + }) + }) + }) + + describe("createEmbeddings", () => { + let embedder: OpenRouterEmbedder + + beforeEach(() => { + embedder = new OpenRouterEmbedder(mockApiKey) + }) + + it("should create embeddings successfully", async () => { + // Create base64 encoded embedding with values that can be exactly represented in Float32 + const testEmbedding = new Float32Array([0.25, 0.5, 0.75]) + const base64String = Buffer.from(testEmbedding.buffer).toString("base64") + + const mockResponse = { + data: [ + { + embedding: base64String, + }, + ], + usage: { + prompt_tokens: 5, + total_tokens: 5, + }, + } + + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + const result = await embedder.createEmbeddings(["test text"]) + + expect(mockEmbeddingsCreate).toHaveBeenCalledWith({ + input: ["test text"], + model: "openai/text-embedding-3-large", + encoding_format: "base64", + }) + expect(result.embeddings).toHaveLength(1) + expect(result.embeddings[0]).toEqual([0.25, 0.5, 0.75]) + expect(result.usage?.promptTokens).toBe(5) + expect(result.usage?.totalTokens).toBe(5) + }) + + it("should handle multiple texts", async () => { + const embedding1 = new Float32Array([0.25, 0.5]) + const embedding2 = new Float32Array([0.75, 1.0]) + const 
base64String1 = Buffer.from(embedding1.buffer).toString("base64") + const base64String2 = Buffer.from(embedding2.buffer).toString("base64") + + const mockResponse = { + data: [ + { + embedding: base64String1, + }, + { + embedding: base64String2, + }, + ], + usage: { + prompt_tokens: 10, + total_tokens: 10, + }, + } + + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + const result = await embedder.createEmbeddings(["text1", "text2"]) + + expect(result.embeddings).toHaveLength(2) + expect(result.embeddings[0]).toEqual([0.25, 0.5]) + expect(result.embeddings[1]).toEqual([0.75, 1.0]) + }) + + it("should use custom model when provided", async () => { + const customModel = "mistralai/mistral-embed-2312" + const embedderWithCustomModel = new OpenRouterEmbedder(mockApiKey, customModel) + + const testEmbedding = new Float32Array([0.25, 0.5]) + const base64String = Buffer.from(testEmbedding.buffer).toString("base64") + + const mockResponse = { + data: [ + { + embedding: base64String, + }, + ], + usage: { + prompt_tokens: 5, + total_tokens: 5, + }, + } + + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + await embedderWithCustomModel.createEmbeddings(["test"]) + + // Verify the embeddings.create was called with the custom model + expect(mockEmbeddingsCreate).toHaveBeenCalledWith({ + input: ["test"], + model: customModel, + encoding_format: "base64", + }) + }) + }) + + describe("validateConfiguration", () => { + let embedder: OpenRouterEmbedder + + beforeEach(() => { + embedder = new OpenRouterEmbedder(mockApiKey) + }) + + it("should validate configuration successfully", async () => { + const testEmbedding = new Float32Array([0.25, 0.5]) + const base64String = Buffer.from(testEmbedding.buffer).toString("base64") + + const mockResponse = { + data: [ + { + embedding: base64String, + }, + ], + usage: { + prompt_tokens: 1, + total_tokens: 1, + }, + } + + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + const result = await embedder.validateConfiguration() + 
+ expect(result.valid).toBe(true) + expect(result.error).toBeUndefined() + expect(mockEmbeddingsCreate).toHaveBeenCalledWith({ + input: ["test"], + model: "openai/text-embedding-3-large", + encoding_format: "base64", + }) + }) + + it("should handle validation failure", async () => { + const authError = new Error("Invalid API key") + ;(authError as any).status = 401 + + mockEmbeddingsCreate.mockRejectedValue(authError) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("embeddings:validation.authenticationFailed") + }) + }) + + describe("integration with shared models", () => { + it("should work with defined OpenRouter models", () => { + const openRouterModels = [ + "openai/text-embedding-3-small", + "openai/text-embedding-3-large", + "openai/text-embedding-ada-002", + "google/gemini-embedding-001", + "mistralai/mistral-embed-2312", + "mistralai/codestral-embed-2505", + "qwen/qwen3-embedding-8b", + ] + + openRouterModels.forEach((model) => { + const dimension = getModelDimension("openrouter", model) + expect(dimension).toBeDefined() + expect(dimension).toBeGreaterThan(0) + + const embedder = new OpenRouterEmbedder(mockApiKey, model) + expect(embedder.embedderInfo.name).toBe("openrouter") + }) + }) + + it("should use correct default model", () => { + const defaultModel = getDefaultModelId("openrouter") + expect(defaultModel).toBe("openai/text-embedding-3-large") + + const dimension = getModelDimension("openrouter", defaultModel) + expect(dimension).toBe(3072) + }) + }) +}) diff --git a/src/services/code-index/embedders/openrouter.ts b/src/services/code-index/embedders/openrouter.ts new file mode 100644 index 00000000000..a455489d527 --- /dev/null +++ b/src/services/code-index/embedders/openrouter.ts @@ -0,0 +1,396 @@ +import { OpenAI } from "openai" +import { IEmbedder, EmbeddingResponse, EmbedderInfo } from "../interfaces/embedder" +import { + MAX_BATCH_TOKENS, + MAX_ITEM_TOKENS, + 
	MAX_BATCH_RETRIES as MAX_RETRIES,
	INITIAL_RETRY_DELAY_MS as INITIAL_DELAY_MS,
} from "../constants"
import { getDefaultModelId, getModelQueryPrefix } from "../../../shared/embeddingModels"
import { t } from "../../../i18n"
import { withValidationErrorHandling, HttpError, formatEmbeddingError } from "../shared/validation-helpers"
import { TelemetryEventName } from "@roo-code/types"
import { TelemetryService } from "@roo-code/telemetry"
import { Mutex } from "async-mutex"
import { handleOpenAIError } from "../../../api/providers/utils/openai-error-handler"

// Shape of a single embedding entry returned by the API.
// `embedding` is a base64 string when `encoding_format: "base64"` is requested,
// or a numeric array otherwise.
interface EmbeddingItem {
	embedding: string | number[]
	[key: string]: any
}

// Minimal response shape we rely on; extra fields from the API are ignored.
interface OpenRouterEmbeddingResponse {
	data: EmbeddingItem[]
	usage?: {
		prompt_tokens?: number
		total_tokens?: number
	}
}

/**
 * OpenRouter implementation of the embedder interface with batching and rate limiting.
 * OpenRouter provides an OpenAI-compatible API that gives access to hundreds of models
 * through a single endpoint, automatically handling fallbacks and cost optimization.
 */
export class OpenRouterEmbedder implements IEmbedder {
	private embeddingsClient: OpenAI
	private readonly defaultModelId: string
	private readonly apiKey: string
	private readonly maxItemTokens: number
	private readonly baseUrl: string = "https://openrouter.ai/api/v1"

	// Global rate limiting state shared across all instances
	private static globalRateLimitState = {
		isRateLimited: false,
		rateLimitResetTime: 0,
		consecutiveRateLimitErrors: 0,
		lastRateLimitError: 0,
		// Mutex to ensure thread-safe access to rate limit state
		mutex: new Mutex(),
	}

	/**
	 * Creates a new OpenRouter embedder
	 * @param apiKey The API key for authentication
	 * @param modelId Optional model identifier (defaults to "openai/text-embedding-3-large")
	 * @param maxItemTokens Optional maximum tokens per item (defaults to MAX_ITEM_TOKENS)
	 * @throws Error (localized "apiKeyRequired") when apiKey is empty
	 */
	constructor(apiKey: string, modelId?: string, maxItemTokens?: number) {
		if (!apiKey) {
			throw new Error(t("embeddings:validation.apiKeyRequired"))
		}

		this.apiKey = apiKey

		// Wrap OpenAI client creation to handle invalid API key characters
		try {
			this.embeddingsClient = new OpenAI({
				baseURL: this.baseUrl,
				apiKey: apiKey,
				// App-attribution headers used by OpenRouter for rankings/analytics
				defaultHeaders: {
					"HTTP-Referer": "https://github.com/RooCodeInc/Roo-Code",
					"X-Title": "Roo Code",
				},
			})
		} catch (error) {
			// Use the error handler to transform ByteString conversion errors
			throw handleOpenAIError(error, "OpenRouter")
		}

		this.defaultModelId = modelId || getDefaultModelId("openrouter")
		this.maxItemTokens = maxItemTokens || MAX_ITEM_TOKENS
	}

	/**
	 * Creates embeddings for the given texts with batching and rate limiting
	 * @param texts Array of text strings to embed
	 * @param model Optional model identifier
	 * @returns Promise resolving to embedding response
	 */
	async createEmbeddings(texts: string[], model?: string): Promise<EmbeddingResponse> {
		const modelToUse = model || this.defaultModelId

		// Apply model-specific query prefix if required
		const queryPrefix = getModelQueryPrefix("openrouter", modelToUse)
		const processedTexts = queryPrefix
			? texts.map((text, index) => {
					// Prevent double-prefixing
					if (text.startsWith(queryPrefix)) {
						return text
					}
					const prefixedText = `${queryPrefix}${text}`
					// NOTE(review): token counts here are estimated as chars/4, not tokenized — confirm acceptable drift
					const estimatedTokens = Math.ceil(prefixedText.length / 4)
					if (estimatedTokens > MAX_ITEM_TOKENS) {
						console.warn(
							t("embeddings:textWithPrefixExceedsTokenLimit", {
								index,
								estimatedTokens,
								maxTokens: MAX_ITEM_TOKENS,
							}),
						)
						// Return original text if adding prefix would exceed limit
						return text
					}
					return prefixedText
				})
			: texts

		const allEmbeddings: number[][] = []
		const usage = { promptTokens: 0, totalTokens: 0 }
		const remainingTexts = [...processedTexts]

		// Greedily pack texts into batches bounded by MAX_BATCH_TOKENS;
		// over-limit items are warned about and dropped (no embedding emitted for them).
		while (remainingTexts.length > 0) {
			const currentBatch: string[] = []
			let currentBatchTokens = 0
			const processedIndices: number[] = []

			for (let i = 0; i < remainingTexts.length; i++) {
				const text = remainingTexts[i]
				const itemTokens = Math.ceil(text.length / 4)

				if (itemTokens > this.maxItemTokens) {
					console.warn(
						t("embeddings:textExceedsTokenLimit", {
							index: i,
							itemTokens,
							maxTokens: this.maxItemTokens,
						}),
					)
					processedIndices.push(i)
					continue
				}

				if (currentBatchTokens + itemTokens <= MAX_BATCH_TOKENS) {
					currentBatch.push(text)
					currentBatchTokens += itemTokens
					processedIndices.push(i)
				} else {
					break
				}
			}

			// Remove processed items from remainingTexts (in reverse order to maintain correct indices)
			for (let i = processedIndices.length - 1; i >= 0; i--) {
				remainingTexts.splice(processedIndices[i], 1)
			}

			if (currentBatch.length > 0) {
				const batchResult = await this._embedBatchWithRetries(currentBatch, modelToUse)
				allEmbeddings.push(...batchResult.embeddings)
				usage.promptTokens += batchResult.usage.promptTokens
				usage.totalTokens += batchResult.usage.totalTokens
			}
		}

		return { embeddings: allEmbeddings, usage }
	}

	/**
	 * Helper method to handle batch embedding with retries and exponential backoff
	 * @param batchTexts Array of texts to embed in this batch
	 * @param model Model identifier to use
	 * @returns Promise resolving to embeddings and usage statistics
	 * @throws formatted embedding error after exhausting retries, or immediately for non-429 failures
	 */
	private async _embedBatchWithRetries(
		batchTexts: string[],
		model: string,
	): Promise<{ embeddings: number[][]; usage: { promptTokens: number; totalTokens: number } }> {
		for (let attempts = 0; attempts < MAX_RETRIES; attempts++) {
			// Check global rate limit before attempting request
			await this.waitForGlobalRateLimit()

			try {
				const response = (await this.embeddingsClient.embeddings.create({
					input: batchTexts,
					model: model,
					// OpenAI package (as of v4.78.1) has a parsing issue that truncates embedding dimensions to 256
					// when processing numeric arrays, which breaks compatibility with models using larger dimensions.
					// By requesting base64 encoding, we bypass the package's parser and handle decoding ourselves.
					encoding_format: "base64",
				})) as OpenRouterEmbeddingResponse

				// Convert base64 embeddings to float32 arrays
				const processedEmbeddings = response.data.map((item: EmbeddingItem) => {
					if (typeof item.embedding === "string") {
						const buffer = Buffer.from(item.embedding, "base64")

						// Create Float32Array view over the buffer
						// NOTE(review): assumes the decoded payload is little-endian float32 with
						// byteLength divisible by 4 — the constructor throws otherwise; confirm API guarantee
						const float32Array = new Float32Array(buffer.buffer, buffer.byteOffset, buffer.byteLength / 4)

						return {
							...item,
							embedding: Array.from(float32Array),
						}
					}
					return item
				})

				// Replace the original data with processed embeddings
				response.data = processedEmbeddings

				const embeddings = response.data.map((item) => item.embedding as number[])

				return {
					embeddings: embeddings,
					usage: {
						promptTokens: response.usage?.prompt_tokens || 0,
						totalTokens: response.usage?.total_tokens || 0,
					},
				}
			} catch (error) {
				// Capture telemetry before error is reformatted
				TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, {
					error: error instanceof Error ? error.message : String(error),
					stack: error instanceof Error ? error.stack : undefined,
					location: "OpenRouterEmbedder:_embedBatchWithRetries",
					attempt: attempts + 1,
				})

				const hasMoreAttempts = attempts < MAX_RETRIES - 1

				// Check if it's a rate limit error; only 429s are retried — any other
				// error (including transient network failures) is thrown on first occurrence
				const httpError = error as HttpError
				if (httpError?.status === 429) {
					// Update global rate limit state
					await this.updateGlobalRateLimitState(httpError)

					if (hasMoreAttempts) {
						// Calculate delay based on global rate limit state
						const baseDelay = INITIAL_DELAY_MS * Math.pow(2, attempts)
						const globalDelay = await this.getGlobalRateLimitDelay()
						const delayMs = Math.max(baseDelay, globalDelay)

						console.warn(
							t("embeddings:rateLimitRetry", {
								delayMs,
								attempt: attempts + 1,
								maxRetries: MAX_RETRIES,
							}),
						)
						await new Promise((resolve) => setTimeout(resolve, delayMs))
						continue
					}
				}

				// Log the error for debugging
				console.error(`OpenRouter embedder error (attempt ${attempts + 1}/${MAX_RETRIES}):`, error)

				// Format and throw the error
				throw formatEmbeddingError(error, MAX_RETRIES)
			}
		}

		throw new Error(t("embeddings:failedMaxAttempts", { attempts: MAX_RETRIES }))
	}

	/**
	 * Validates the OpenRouter embedder configuration by testing API connectivity
	 * @returns Promise resolving to validation result with success status and optional error message
	 */
	async validateConfiguration(): Promise<{ valid: boolean; error?: string }> {
		return withValidationErrorHandling(async () => {
			try {
				// Test with a minimal embedding request
				const testTexts = ["test"]
				const modelToUse = this.defaultModelId

				const response = (await this.embeddingsClient.embeddings.create({
					input: testTexts,
					model: modelToUse,
					encoding_format: "base64",
				})) as OpenRouterEmbeddingResponse

				// Check if we got a valid response
				if (!response?.data || response.data.length === 0) {
					return {
						valid: false,
						error: "embeddings:validation.invalidResponse",
					}
				}

				return { valid: true }
			} catch (error) {
				// Capture telemetry for validation errors
				TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, {
					error: error instanceof Error ? error.message : String(error),
					stack: error instanceof Error ? error.stack : undefined,
					location: "OpenRouterEmbedder:validateConfiguration",
				})
				throw error
			}
		}, "openrouter")
	}

	/**
	 * Returns information about this embedder
	 */
	get embedderInfo(): EmbedderInfo {
		return {
			name: "openrouter",
		}
	}

	/**
	 * Waits if there's an active global rate limit.
	 * The mutex is released BEFORE sleeping so other instances are not blocked
	 * for the whole wait; the sleeping caller does not re-check state on wake-up.
	 */
	private async waitForGlobalRateLimit(): Promise<void> {
		const release = await OpenRouterEmbedder.globalRateLimitState.mutex.acquire()
		let mutexReleased = false

		try {
			const state = OpenRouterEmbedder.globalRateLimitState

			if (state.isRateLimited && state.rateLimitResetTime > Date.now()) {
				const waitTime = state.rateLimitResetTime - Date.now()
				// Silent wait - no logging to prevent flooding
				release()
				mutexReleased = true
				await new Promise((resolve) => setTimeout(resolve, waitTime))
				return
			}

			// Reset rate limit if time has passed
			if (state.isRateLimited && state.rateLimitResetTime <= Date.now()) {
				state.isRateLimited = false
				state.consecutiveRateLimitErrors = 0
			}
		} finally {
			// Only release if we haven't already
			if (!mutexReleased) {
				release()
			}
		}
	}

	/**
	 * Updates global rate limit state when a 429 error occurs.
	 * Consecutive 429s within one minute escalate the backoff exponentially
	 * (5s base, capped at 5 minutes).
	 */
	private async updateGlobalRateLimitState(error: HttpError): Promise<void> {
		const release = await OpenRouterEmbedder.globalRateLimitState.mutex.acquire()
		try {
			const state = OpenRouterEmbedder.globalRateLimitState
			const now = Date.now()

			// Increment consecutive rate limit errors
			if (now - state.lastRateLimitError < 60000) {
				// Within 1 minute
				state.consecutiveRateLimitErrors++
			} else {
				state.consecutiveRateLimitErrors = 1
			}

			state.lastRateLimitError = now

			// Calculate exponential backoff based on consecutive errors
			const baseDelay = 5000 // 5 seconds base
			const maxDelay = 300000 // 5 minutes max
			const exponentialDelay = Math.min(baseDelay * Math.pow(2, state.consecutiveRateLimitErrors - 1), maxDelay)

			// Set global rate limit
			state.isRateLimited = true
			state.rateLimitResetTime = now + exponentialDelay

			// Silent rate limit activation - no logging to prevent flooding
		} finally {
			release()
		}
	}

	/**
	 * Gets the current global rate limit delay
	 * @returns remaining wait in milliseconds, or 0 when no limit is active
	 */
	private async getGlobalRateLimitDelay(): Promise<number> {
		const release = await OpenRouterEmbedder.globalRateLimitState.mutex.acquire()
		try {
			const state = OpenRouterEmbedder.globalRateLimitState

			if (state.isRateLimited && state.rateLimitResetTime > Date.now()) {
				return state.rateLimitResetTime - Date.now()
			}

			return 0
		} finally {
			release()
		}
	}
}
"openai-compatible" | "gemini" | "mistral" | "vercel-ai-gateway" +export type AvailableEmbedders = + | "openai" + | "ollama" + | "openai-compatible" + | "gemini" + | "mistral" + | "vercel-ai-gateway" + | "openrouter" export interface EmbedderInfo { name: AvailableEmbedders diff --git a/src/services/code-index/interfaces/manager.ts b/src/services/code-index/interfaces/manager.ts index 527900f6d1c..9a6e4031ab1 100644 --- a/src/services/code-index/interfaces/manager.ts +++ b/src/services/code-index/interfaces/manager.ts @@ -70,7 +70,14 @@ export interface ICodeIndexManager { } export type IndexingState = "Standby" | "Indexing" | "Indexed" | "Error" -export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini" | "mistral" | "vercel-ai-gateway" +export type EmbedderProvider = + | "openai" + | "ollama" + | "openai-compatible" + | "gemini" + | "mistral" + | "vercel-ai-gateway" + | "openrouter" export interface IndexProgressUpdate { systemStatus: IndexingState diff --git a/src/services/code-index/service-factory.ts b/src/services/code-index/service-factory.ts index 6d69e1f0b6c..56ee1cff9f9 100644 --- a/src/services/code-index/service-factory.ts +++ b/src/services/code-index/service-factory.ts @@ -5,6 +5,7 @@ import { OpenAICompatibleEmbedder } from "./embedders/openai-compatible" import { GeminiEmbedder } from "./embedders/gemini" import { MistralEmbedder } from "./embedders/mistral" import { VercelAiGatewayEmbedder } from "./embedders/vercel-ai-gateway" +import { OpenRouterEmbedder } from "./embedders/openrouter" import { EmbedderProvider, getDefaultModelId, getModelDimension } from "../../shared/embeddingModels" import { QdrantVectorStore } from "./vector-store/qdrant-client" import { codeParser, DirectoryScanner, FileWatcher } from "./processors" @@ -79,6 +80,11 @@ export class CodeIndexServiceFactory { throw new Error(t("embeddings:serviceFactory.vercelAiGatewayConfigMissing")) } return new 
VercelAiGatewayEmbedder(config.vercelAiGatewayOptions.apiKey, config.modelId) + } else if (provider === "openrouter") { + if (!config.openRouterOptions?.apiKey) { + throw new Error(t("embeddings:serviceFactory.openRouterConfigMissing")) + } + return new OpenRouterEmbedder(config.openRouterOptions.apiKey, config.modelId) } throw new Error( diff --git a/src/shared/WebviewMessage.ts b/src/shared/WebviewMessage.ts index 9c475186288..f10808cd428 100644 --- a/src/shared/WebviewMessage.ts +++ b/src/shared/WebviewMessage.ts @@ -292,6 +292,7 @@ export interface WebviewMessage { | "gemini" | "mistral" | "vercel-ai-gateway" + | "openrouter" codebaseIndexEmbedderBaseUrl?: string codebaseIndexEmbedderModelId: string codebaseIndexEmbedderModelDimension?: number // Generic dimension for all providers @@ -306,6 +307,7 @@ export interface WebviewMessage { codebaseIndexGeminiApiKey?: string codebaseIndexMistralApiKey?: string codebaseIndexVercelAiGatewayApiKey?: string + codebaseIndexOpenRouterApiKey?: string } } diff --git a/src/shared/embeddingModels.ts b/src/shared/embeddingModels.ts index 80c51a6b455..8c2f8fd44c7 100644 --- a/src/shared/embeddingModels.ts +++ b/src/shared/embeddingModels.ts @@ -2,7 +2,14 @@ * Defines profiles for different embedding models, including their dimensions. 
*/ -export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini" | "mistral" | "vercel-ai-gateway" // Add other providers as needed +export type EmbedderProvider = + | "openai" + | "ollama" + | "openai-compatible" + | "gemini" + | "mistral" + | "vercel-ai-gateway" + | "openrouter" // Add other providers as needed export interface EmbeddingModelProfile { dimension: number @@ -70,6 +77,19 @@ export const EMBEDDING_MODEL_PROFILES: EmbeddingModelProfiles = { "mistral/codestral-embed": { dimension: 1536, scoreThreshold: 0.4 }, "mistral/mistral-embed": { dimension: 1024, scoreThreshold: 0.4 }, }, + openrouter: { + // OpenAI models via OpenRouter + "openai/text-embedding-3-small": { dimension: 1536, scoreThreshold: 0.4 }, + "openai/text-embedding-3-large": { dimension: 3072, scoreThreshold: 0.4 }, + "openai/text-embedding-ada-002": { dimension: 1536, scoreThreshold: 0.4 }, + // Google models via OpenRouter + "google/gemini-embedding-001": { dimension: 3072, scoreThreshold: 0.4 }, + // Mistral models via OpenRouter + "mistralai/mistral-embed-2312": { dimension: 1024, scoreThreshold: 0.4 }, + "mistralai/codestral-embed-2505": { dimension: 3072, scoreThreshold: 0.4 }, + // Qwen models via OpenRouter + "qwen/qwen3-embedding-8b": { dimension: 4096, scoreThreshold: 0.4 }, + }, } /** @@ -163,6 +183,9 @@ export function getDefaultModelId(provider: EmbedderProvider): string { case "vercel-ai-gateway": return "openai/text-embedding-3-large" + case "openrouter": + return "openai/text-embedding-3-large" + default: // Fallback for unknown providers console.warn(`Unknown provider for default model ID: ${provider}. 
Falling back to OpenAI default.`) diff --git a/webview-ui/src/components/chat/CodeIndexPopover.tsx b/webview-ui/src/components/chat/CodeIndexPopover.tsx index 45bf4224a12..42fe9498e70 100644 --- a/webview-ui/src/components/chat/CodeIndexPopover.tsx +++ b/webview-ui/src/components/chat/CodeIndexPopover.tsx @@ -73,6 +73,7 @@ interface LocalCodeIndexSettings { codebaseIndexGeminiApiKey?: string codebaseIndexMistralApiKey?: string codebaseIndexVercelAiGatewayApiKey?: string + codebaseIndexOpenRouterApiKey?: string } // Validation schema for codebase index settings @@ -149,6 +150,16 @@ const createValidationSchema = (provider: EmbedderProvider, t: any) => { .min(1, t("settings:codeIndex.validation.modelSelectionRequired")), }) + case "openrouter": + return baseSchema.extend({ + codebaseIndexOpenRouterApiKey: z + .string() + .min(1, t("settings:codeIndex.validation.openRouterApiKeyRequired")), + codebaseIndexEmbedderModelId: z + .string() + .min(1, t("settings:codeIndex.validation.modelSelectionRequired")), + }) + default: return baseSchema } @@ -194,6 +205,7 @@ export const CodeIndexPopover: React.FC = ({ codebaseIndexGeminiApiKey: "", codebaseIndexMistralApiKey: "", codebaseIndexVercelAiGatewayApiKey: "", + codebaseIndexOpenRouterApiKey: "", }) // Initial settings state - stores the settings when popover opens @@ -229,6 +241,7 @@ export const CodeIndexPopover: React.FC = ({ codebaseIndexGeminiApiKey: "", codebaseIndexMistralApiKey: "", codebaseIndexVercelAiGatewayApiKey: "", + codebaseIndexOpenRouterApiKey: "", } setInitialSettings(settings) setCurrentSettings(settings) @@ -345,6 +358,14 @@ export const CodeIndexPopover: React.FC = ({ ? SECRET_PLACEHOLDER : "" } + if ( + !prev.codebaseIndexOpenRouterApiKey || + prev.codebaseIndexOpenRouterApiKey === SECRET_PLACEHOLDER + ) { + updated.codebaseIndexOpenRouterApiKey = secretStatus.hasOpenRouterApiKey + ? 
SECRET_PLACEHOLDER + : "" + } return updated } @@ -418,7 +439,8 @@ export const CodeIndexPopover: React.FC = ({ key === "codebaseIndexOpenAiCompatibleApiKey" || key === "codebaseIndexGeminiApiKey" || key === "codebaseIndexMistralApiKey" || - key === "codebaseIndexVercelAiGatewayApiKey" + key === "codebaseIndexVercelAiGatewayApiKey" || + key === "codebaseIndexOpenRouterApiKey" ) { dataToValidate[key] = "placeholder-valid" } @@ -669,6 +691,9 @@ export const CodeIndexPopover: React.FC = ({ {t("settings:codeIndex.vercelAiGatewayProvider")} + + {t("settings:codeIndex.openRouterProvider")} + @@ -1131,6 +1156,71 @@ export const CodeIndexPopover: React.FC = ({ )} + {currentSettings.codebaseIndexEmbedderProvider === "openrouter" && ( + <> +
+ + + updateSetting("codebaseIndexOpenRouterApiKey", e.target.value) + } + placeholder={t("settings:codeIndex.openRouterApiKeyPlaceholder")} + className={cn("w-full", { + "border-red-500": formErrors.codebaseIndexOpenRouterApiKey, + })} + /> + {formErrors.codebaseIndexOpenRouterApiKey && ( +

+ {formErrors.codebaseIndexOpenRouterApiKey} +

+ )} +
+ +
+ + + updateSetting("codebaseIndexEmbedderModelId", e.target.value) + } + className={cn("w-full", { + "border-red-500": formErrors.codebaseIndexEmbedderModelId, + })}> + + {t("settings:codeIndex.selectModel")} + + {getAvailableModels().map((modelId) => { + const model = + codebaseIndexModels?.[ + currentSettings.codebaseIndexEmbedderProvider + ]?.[modelId] + return ( + + {modelId}{" "} + {model + ? t("settings:codeIndex.modelDimensions", { + dimension: model.dimension, + }) + : ""} + + ) + })} + + {formErrors.codebaseIndexEmbedderModelId && ( +

+ {formErrors.codebaseIndexEmbedderModelId} +

+ )} +
+ + )} + {/* Qdrant Settings */}