diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 34ac70a6cfd..16e0c17e7ab 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -81,7 +81,6 @@ body:
         - DeepSeek
         - Featherless AI
         - Fireworks AI
-        - Glama
         - Google Gemini
         - Google Vertex AI
         - Groq
diff --git a/packages/types/src/global-settings.ts b/packages/types/src/global-settings.ts
index 6e61c3950f5..1e6e621b697 100644
--- a/packages/types/src/global-settings.ts
+++ b/packages/types/src/global-settings.ts
@@ -210,7 +210,6 @@ export type RooCodeSettings = GlobalSettings & ProviderSettings
  */
 export const SECRET_STATE_KEYS = [
 	"apiKey",
-	"glamaApiKey",
 	"openRouterApiKey",
 	"awsAccessKey",
 	"awsApiKey",
diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts
index d713a47d6b4..20a2eceef26 100644
--- a/packages/types/src/provider-settings.ts
+++ b/packages/types/src/provider-settings.ts
@@ -48,7 +48,6 @@ export const dynamicProviders = [
 	"io-intelligence",
 	"requesty",
 	"unbound",
-	"glama",
 	"roo",
 	"chutes",
 ] as const
@@ -206,11 +205,6 @@ const claudeCodeSchema = apiModelIdProviderModelSchema.extend({
 	claudeCodeMaxOutputTokens: z.number().int().min(1).max(200000).optional(),
 })
 
-const glamaSchema = baseProviderSettingsSchema.extend({
-	glamaModelId: z.string().optional(),
-	glamaApiKey: z.string().optional(),
-})
-
 const openRouterSchema = baseProviderSettingsSchema.extend({
 	openRouterApiKey: z.string().optional(),
 	openRouterModelId: z.string().optional(),
@@ -437,7 +431,6 @@ const defaultSchema = z.object({
 export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProvider", [
 	anthropicSchema.merge(z.object({ apiProvider: z.literal("anthropic") })),
 	claudeCodeSchema.merge(z.object({ apiProvider: z.literal("claude-code") })),
-	glamaSchema.merge(z.object({ apiProvider: z.literal("glama") })),
 	openRouterSchema.merge(z.object({ apiProvider: z.literal("openrouter") })),
 	bedrockSchema.merge(z.object({ apiProvider: z.literal("bedrock") })),
 	vertexSchema.merge(z.object({ apiProvider: z.literal("vertex") })),
@@ -480,7 +473,6 @@ export const providerSettingsSchema = z.object({
 	apiProvider: providerNamesSchema.optional(),
 	...anthropicSchema.shape,
 	...claudeCodeSchema.shape,
-	...glamaSchema.shape,
 	...openRouterSchema.shape,
 	...bedrockSchema.shape,
 	...vertexSchema.shape,
@@ -537,7 +529,6 @@ export const PROVIDER_SETTINGS_KEYS = providerSettingsSchema.keyof().options
 
 export const modelIdKeys = [
 	"apiModelId",
-	"glamaModelId",
 	"openRouterModelId",
 	"openAiModelId",
 	"ollamaModelId",
@@ -571,7 +562,6 @@ export const isTypicalProvider = (key: unknown): key is TypicalProvider =>
 export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
 	anthropic: "apiModelId",
 	"claude-code": "apiModelId",
-	glama: "glamaModelId",
 	openrouter: "openRouterModelId",
 	bedrock: "apiModelId",
 	vertex: "apiModelId",
@@ -727,7 +717,6 @@ export const MODELS_BY_PROVIDER: Record<
 	baseten: { id: "baseten", label: "Baseten", models: Object.keys(basetenModels) },
 
 	// Dynamic providers; models pulled from remote APIs.
- glama: { id: "glama", label: "Glama", models: [] }, huggingface: { id: "huggingface", label: "Hugging Face", models: [] }, litellm: { id: "litellm", label: "LiteLLM", models: [] }, openrouter: { id: "openrouter", label: "OpenRouter", models: [] }, diff --git a/packages/types/src/providers/glama.ts b/packages/types/src/providers/glama.ts deleted file mode 100644 index 98aedc831b7..00000000000 --- a/packages/types/src/providers/glama.ts +++ /dev/null @@ -1,19 +0,0 @@ -import type { ModelInfo } from "../model.js" - -// https://glama.ai/models -export const glamaDefaultModelId = "anthropic/claude-3-7-sonnet" - -export const glamaDefaultModelInfo: ModelInfo = { - maxTokens: 8192, - contextWindow: 200_000, - supportsImages: true, - supportsPromptCache: true, - inputPrice: 3.0, - outputPrice: 15.0, - cacheWritesPrice: 3.75, - cacheReadsPrice: 0.3, - description: - "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)", -} - -export const GLAMA_DEFAULT_TEMPERATURE = 0 diff --git a/packages/types/src/providers/index.ts b/packages/types/src/providers/index.ts index 5a0e577eab7..ecd56bd41a4 100644 --- a/packages/types/src/providers/index.ts +++ b/packages/types/src/providers/index.ts @@ -9,7 +9,6 @@ export * from "./doubao.js" export * from "./featherless.js" export * from "./fireworks.js" export * from "./gemini.js" -export * from "./glama.js" export * from "./groq.js" export * from "./huggingface.js" export * from "./io-intelligence.js" @@ -44,7 +43,6 @@ import { doubaoDefaultModelId } from "./doubao.js" import { featherlessDefaultModelId } from "./featherless.js" import { fireworksDefaultModelId } from "./fireworks.js" import { geminiDefaultModelId } from "./gemini.js" -import { glamaDefaultModelId } from "./glama.js" import { groqDefaultModelId } from "./groq.js" import { ioIntelligenceDefaultModelId } from "./io-intelligence.js" import { litellmDefaultModelId } from "./lite-llm.js" @@ -81,8 +79,6 @@ export function getProviderDefaultModelId( return openRouterDefaultModelId case "requesty": return requestyDefaultModelId - case "glama": - return glamaDefaultModelId case "unbound": return unboundDefaultModelId case "litellm": diff --git a/src/activate/handleUri.ts b/src/activate/handleUri.ts index 75345e3f314..c29b66e21af 100644 --- a/src/activate/handleUri.ts +++ b/src/activate/handleUri.ts @@ -14,13 +14,6 @@ export const handleUri = async (uri: vscode.Uri) => { } switch (path) { - case "/glama": { - const code = query.get("code") - if (code) { - await visibleProvider.handleGlamaCallback(code) - } - break - } case "/openrouter": { const code = query.get("code") if (code) { diff --git a/src/api/index.ts b/src/api/index.ts index 0eba26c52a6..b1bfc582548 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -6,7 +6,6 @@ import type { ProviderSettings, ModelInfo, ToolProtocol } from "@roo-code/types" 
 import { ApiStream } from "./transform/stream"
 
 import {
-	GlamaHandler,
 	AnthropicHandler,
 	AwsBedrockHandler,
 	CerebrasHandler,
@@ -126,8 +125,6 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
 			return new AnthropicHandler(options)
 		case "claude-code":
 			return new ClaudeCodeHandler(options)
-		case "glama":
-			return new GlamaHandler(options)
 		case "openrouter":
 			return new OpenRouterHandler(options)
 		case "bedrock":
diff --git a/src/api/providers/__tests__/glama.spec.ts b/src/api/providers/__tests__/glama.spec.ts
deleted file mode 100644
index 9f82cad3ba4..00000000000
--- a/src/api/providers/__tests__/glama.spec.ts
+++ /dev/null
@@ -1,232 +0,0 @@
-// npx vitest run src/api/providers/__tests__/glama.spec.ts
-
-import { Anthropic } from "@anthropic-ai/sdk"
-
-import { GlamaHandler } from "../glama"
-import { ApiHandlerOptions } from "../../../shared/api"
-
-// Mock dependencies
-vitest.mock("../fetchers/modelCache", () => ({
-	getModels: vitest.fn().mockImplementation(() => {
-		return Promise.resolve({
-			"anthropic/claude-3-7-sonnet": {
-				maxTokens: 8192,
-				contextWindow: 200000,
-				supportsImages: true,
-				supportsPromptCache: true,
-				inputPrice: 3,
-				outputPrice: 15,
-				cacheWritesPrice: 3.75,
-				cacheReadsPrice: 0.3,
-				description: "Claude 3.7 Sonnet",
-				thinking: false,
-			},
-			"openai/gpt-4o": {
-				maxTokens: 4096,
-				contextWindow: 128000,
-				supportsImages: true,
-				supportsPromptCache: false,
-				inputPrice: 5,
-				outputPrice: 15,
-				description: "GPT-4o",
-			},
-		})
-	}),
-}))
-
-// Mock OpenAI client
-const mockCreate = vitest.fn()
-const mockWithResponse = vitest.fn()
-
-vitest.mock("openai", () => {
-	return {
-		__esModule: true,
-		default: vitest.fn().mockImplementation(() => ({
-			chat: {
-				completions: {
-					create: (...args: any[]) => {
-						const stream = {
-							[Symbol.asyncIterator]: async function* () {
-								yield {
-									choices: [{ delta: { content: "Test response" }, index: 0 }],
-									usage: null,
-								}
-								yield {
-									choices: [{ delta: {}, index: 0 }],
-									usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
-								}
-							},
-						}
-
-						const result = mockCreate(...args)
-
-						if (args[0].stream) {
-							mockWithResponse.mockReturnValue(
-								Promise.resolve({
-									data: stream,
-									response: {
-										headers: {
-											get: (name: string) =>
-												name === "x-completion-request-id" ? "test-request-id" : null,
-										},
-									},
-								}),
-							)
-							result.withResponse = mockWithResponse
-						}
-
-						return result
-					},
-				},
-			},
-		})),
-	}
-})
-
-describe("GlamaHandler", () => {
-	let handler: GlamaHandler
-	let mockOptions: ApiHandlerOptions
-
-	beforeEach(() => {
-		mockOptions = {
-			glamaApiKey: "test-api-key",
-			glamaModelId: "anthropic/claude-3-7-sonnet",
-		}
-
-		handler = new GlamaHandler(mockOptions)
-		mockCreate.mockClear()
-		mockWithResponse.mockClear()
-
-		// Default mock implementation for non-streaming responses
-		mockCreate.mockResolvedValue({
-			id: "test-completion",
-			choices: [
-				{
-					message: { role: "assistant", content: "Test response" },
-					finish_reason: "stop",
-					index: 0,
-				},
-			],
-			usage: {
-				prompt_tokens: 10,
-				completion_tokens: 5,
-				total_tokens: 15,
-			},
-		})
-	})
-
-	describe("constructor", () => {
-		it("should initialize with provided options", () => {
-			expect(handler).toBeInstanceOf(GlamaHandler)
-			expect(handler.getModel().id).toBe(mockOptions.glamaModelId)
-		})
-	})
-
-	describe("createMessage", () => {
-		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
-			{
-				role: "user",
-				content: "Hello!",
-			},
-		]
-
-		it("should handle streaming responses", async () => {
-			const stream = handler.createMessage(systemPrompt, messages)
-			const chunks: any[] = []
-
-			for await (const chunk of stream) {
-				chunks.push(chunk)
-			}
-
-			expect(chunks.length).toBe(1)
-			expect(chunks[0]).toEqual({ type: "text", text: "Test response" })
-		})
-
-		it("should handle API errors", async () => {
-			mockCreate.mockImplementationOnce(() => {
-				throw new Error("API Error")
-			})
-
-			const stream = handler.createMessage(systemPrompt, messages)
-			const chunks = []
-
-			try {
-				for await (const chunk of stream) {
-					chunks.push(chunk)
-				}
-				expect.fail("Expected error to be thrown")
-			} catch (error) {
-				expect(error).toBeInstanceOf(Error)
-				expect(error.message).toBe("API Error")
-			}
-		})
-	})
-
-	describe("completePrompt", () => {
-		it("should complete prompt successfully", async () => {
-			const result = await handler.completePrompt("Test prompt")
-			expect(result).toBe("Test response")
-			expect(mockCreate).toHaveBeenCalledWith(
-				expect.objectContaining({
-					model: mockOptions.glamaModelId,
-					messages: [{ role: "user", content: "Test prompt" }],
-					temperature: 0,
-					max_tokens: 8192,
-				}),
-			)
-		})
-
-		it("should handle API errors", async () => {
-			mockCreate.mockRejectedValueOnce(new Error("API Error"))
-			await expect(handler.completePrompt("Test prompt")).rejects.toThrow("Glama completion error: API Error")
-		})
-
-		it("should handle empty response", async () => {
-			mockCreate.mockResolvedValueOnce({
-				choices: [{ message: { content: "" } }],
-			})
-			const result = await handler.completePrompt("Test prompt")
-			expect(result).toBe("")
-		})
-
-		it("should not set max_tokens for non-Anthropic models", async () => {
-			// Reset mock to clear any previous calls
-			mockCreate.mockClear()
-
-			const nonAnthropicOptions = {
-				glamaApiKey: "test-key",
-				glamaModelId: "openai/gpt-4o",
-			}
-
-			const nonAnthropicHandler = new GlamaHandler(nonAnthropicOptions)
-
-			await nonAnthropicHandler.completePrompt("Test prompt")
-			expect(mockCreate).toHaveBeenCalledWith(
-				expect.objectContaining({
-					model: "openai/gpt-4o",
-					messages: [{ role: "user", content: "Test prompt" }],
-					temperature: 0,
-				}),
-			)
-			expect(mockCreate.mock.calls[0][0]).not.toHaveProperty("max_tokens")
-		})
-	})
-
-	describe("fetchModel", () => {
-		it("should return model info", async () => {
-			const modelInfo = await handler.fetchModel()
-			expect(modelInfo.id).toBe(mockOptions.glamaModelId)
-			expect(modelInfo.info).toBeDefined()
-			expect(modelInfo.info.maxTokens).toBe(8192)
-			expect(modelInfo.info.contextWindow).toBe(200_000)
-		})
-
-		it("should return default model when invalid model provided", async () => {
-			const handlerWithInvalidModel = new GlamaHandler({ ...mockOptions, glamaModelId: "invalid/model" })
-			const modelInfo = await handlerWithInvalidModel.fetchModel()
-			expect(modelInfo.id).toBe("anthropic/claude-3-7-sonnet")
-			expect(modelInfo.info).toBeDefined()
-		})
-	})
-})
diff --git a/src/api/providers/fetchers/__tests__/modelCache.spec.ts b/src/api/providers/fetchers/__tests__/modelCache.spec.ts
index a3df3e1e49c..3c73b2a2725 100644
--- a/src/api/providers/fetchers/__tests__/modelCache.spec.ts
+++ b/src/api/providers/fetchers/__tests__/modelCache.spec.ts
@@ -41,7 +41,6 @@ vi.mock("fs", () => ({
 vi.mock("../litellm")
 vi.mock("../openrouter")
 vi.mock("../requesty")
-vi.mock("../glama")
 vi.mock("../unbound")
 vi.mock("../io-intelligence")
 
@@ -64,14 +63,12 @@ import { getModels, getModelsFromCache } from "../modelCache"
 import { getLiteLLMModels } from "../litellm"
 import { getOpenRouterModels } from "../openrouter"
 import { getRequestyModels } from "../requesty"
-import { getGlamaModels } from "../glama"
 import { getUnboundModels } from "../unbound"
 import { getIOIntelligenceModels } from "../io-intelligence"
 
 const mockGetLiteLLMModels = getLiteLLMModels as Mock
 const mockGetOpenRouterModels = getOpenRouterModels as Mock
 const mockGetRequestyModels = getRequestyModels as Mock
-const mockGetGlamaModels = getGlamaModels as Mock
 const mockGetUnboundModels = getUnboundModels as Mock
 const mockGetIOIntelligenceModels = getIOIntelligenceModels as Mock
 
@@ -139,23 +136,6 @@ describe("getModels with new GetModelsOptions", () => {
 		expect(result).toEqual(mockModels)
 	})
 
-	it("calls getGlamaModels for glama provider", async () => {
-		const mockModels = {
-			"glama/model": {
-				maxTokens: 4096,
-				contextWindow: 8192,
-				supportsPromptCache: false,
-				description: "Glama model",
-			},
-		}
-		mockGetGlamaModels.mockResolvedValue(mockModels)
-
-		const result = await getModels({ provider: "glama" })
-
-		expect(mockGetGlamaModels).toHaveBeenCalled()
-		expect(result).toEqual(mockModels)
-	})
-
 	it("calls getUnboundModels with optional API key", async () => {
 		const mockModels = {
 			"unbound/model": {
@@ -302,7 +282,7 @@ describe("getModelsFromCache disk fallback", () => {
 
 		const consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {})
 
-		const result = getModelsFromCache("glama")
+		const result = getModelsFromCache("openrouter")
 
 		expect(result).toBeUndefined()
 		expect(consoleErrorSpy).toHaveBeenCalled()
diff --git a/src/api/providers/fetchers/glama.ts b/src/api/providers/fetchers/glama.ts
deleted file mode 100644
index ae36c751fb8..00000000000
--- a/src/api/providers/fetchers/glama.ts
+++ /dev/null
@@ -1,42 +0,0 @@
-import axios from "axios"
-
-import type { ModelInfo } from "@roo-code/types"
-
-import { parseApiPrice } from "../../../shared/cost"
-
-export async function getGlamaModels(): Promise<Record<string, ModelInfo>> {
-	const models: Record<string, ModelInfo> = {}
-
-	try {
-		const response = await axios.get("https://glama.ai/api/gateway/v1/models")
-		const rawModels = response.data
-
-		for (const rawModel of rawModels) {
-			const modelInfo: ModelInfo = {
-				maxTokens: rawModel.maxTokensOutput,
-				contextWindow: rawModel.maxTokensInput,
-				supportsImages: rawModel.capabilities?.includes("input:image"),
-				supportsPromptCache: rawModel.capabilities?.includes("caching"),
-				inputPrice: parseApiPrice(rawModel.pricePerToken?.input),
-				outputPrice: parseApiPrice(rawModel.pricePerToken?.output),
-				description: undefined,
-				cacheWritesPrice: parseApiPrice(rawModel.pricePerToken?.cacheWrite),
-				cacheReadsPrice: parseApiPrice(rawModel.pricePerToken?.cacheRead),
-			}
-
-			switch (rawModel.id) {
-				case rawModel.id.startsWith("anthropic/"):
-					modelInfo.maxTokens = 8192
-					break
-				default:
-					break
-			}
-
-			models[rawModel.id] = modelInfo
-		}
-	} catch (error) {
-		console.error(`Error fetching Glama models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`)
-	}
-
-	return models
-}
diff --git a/src/api/providers/fetchers/modelCache.ts b/src/api/providers/fetchers/modelCache.ts
index 3a69121b2d5..6bf31b64c1f 100644
--- a/src/api/providers/fetchers/modelCache.ts
+++ b/src/api/providers/fetchers/modelCache.ts
@@ -19,7 +19,6 @@ import { fileExistsAtPath } from "../../../utils/fs"
 import { getOpenRouterModels } from "./openrouter"
 import { getVercelAiGatewayModels } from "./vercel-ai-gateway"
 import { getRequestyModels } from "./requesty"
-import { getGlamaModels } from "./glama"
 import { getUnboundModels } from "./unbound"
 import { getLiteLLMModels } from "./litellm"
 import { GetModelsOptions } from "../../../shared/api"
@@ -74,9 +73,6 @@ async function fetchModelsFromProvider(options: GetModelsOptions): Promise<ModelRecord> {
 	// Providers that work without API keys
 	const publicProviders: Array<{ provider: RouterName; options: GetModelsOptions }> = [
 		{ provider: "openrouter", options: { provider: "openrouter" } },
-		{ provider: "glama", options: { provider: "glama" } },
 		{ provider: "vercel-ai-gateway", options: { provider: "vercel-ai-gateway" } },
 		{ provider: "chutes", options: { provider: "chutes" } },
 	]
diff --git a/src/api/providers/glama.ts b/src/api/providers/glama.ts
deleted file mode 100644
index 774d6157097..00000000000
--- a/src/api/providers/glama.ts
+++ /dev/null
@@ -1,146 +0,0 @@
-import { Anthropic } from "@anthropic-ai/sdk"
-import axios from "axios"
-import OpenAI from "openai"
-
-import { glamaDefaultModelId, glamaDefaultModelInfo, GLAMA_DEFAULT_TEMPERATURE } from "@roo-code/types"
-
-import { Package } from "../../shared/package"
-import { ApiHandlerOptions } from "../../shared/api"
-
-import { ApiStream } from "../transform/stream"
-import { convertToOpenAiMessages } from "../transform/openai-format"
-import { addCacheBreakpoints } from "../transform/caching/anthropic"
-
-import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
-import { RouterProvider } from "./router-provider"
-
-const DEFAULT_HEADERS = {
-	"X-Glama-Metadata": JSON.stringify({
-		labels: [{ key: "app", value: `vscode.${Package.publisher}.${Package.name}` }],
-	}),
-}
-
-export class GlamaHandler extends RouterProvider implements SingleCompletionHandler {
-	constructor(options: ApiHandlerOptions) {
-		super({
-			options,
-			name: "glama",
-			baseURL: "https://glama.ai/api/gateway/openai/v1",
-			apiKey: options.glamaApiKey,
-			modelId: options.glamaModelId,
-			defaultModelId: glamaDefaultModelId,
-			defaultModelInfo: glamaDefaultModelInfo,
-		})
-	}
-
-	override async *createMessage(
-		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
-		metadata?: ApiHandlerCreateMessageMetadata,
-	): ApiStream {
-		const { id: modelId, info } = await this.fetchModel()
-
-		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
-			{ role: "system", content: systemPrompt },
-			...convertToOpenAiMessages(messages),
-		]
-
-		if (modelId.startsWith("anthropic/claude-3")) {
-			addCacheBreakpoints(systemPrompt, openAiMessages)
-		}
-
-		// Required by Anthropic; other providers default to max tokens allowed.
-		let maxTokens: number | undefined
-
-		if (modelId.startsWith("anthropic/")) {
-			maxTokens = info.maxTokens ?? undefined
-		}
-
-		const requestOptions: OpenAI.Chat.ChatCompletionCreateParams = {
-			model: modelId,
-			max_tokens: maxTokens,
-			messages: openAiMessages,
-			stream: true,
-		}
-
-		if (this.supportsTemperature(modelId)) {
-			requestOptions.temperature = this.options.modelTemperature ?? GLAMA_DEFAULT_TEMPERATURE
-		}
-
-		const { data: completion, response } = await this.client.chat.completions
-			.create(requestOptions, { headers: DEFAULT_HEADERS })
-			.withResponse()
-
-		const completionRequestId = response.headers.get("x-completion-request-id")
-
-		for await (const chunk of completion) {
-			const delta = chunk.choices[0]?.delta
-
-			if (delta?.content) {
-				yield { type: "text", text: delta.content }
-			}
-		}
-
-		try {
-			let attempt = 0
-
-			const delay = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms))
-
-			while (attempt++ < 10) {
-				// In case of an interrupted request, we need to wait for the upstream API to finish processing the request
-				// before we can fetch information about the token usage and cost.
-				const response = await axios.get(
-					`https://glama.ai/api/gateway/v1/completion-requests/${completionRequestId}`,
-					{ headers: { Authorization: `Bearer ${this.options.glamaApiKey}` } },
-				)
-
-				const completionRequest = response.data
-
-				if (completionRequest.tokenUsage && completionRequest.totalCostUsd) {
-					yield {
-						type: "usage",
-						cacheWriteTokens: completionRequest.tokenUsage.cacheCreationInputTokens,
-						cacheReadTokens: completionRequest.tokenUsage.cacheReadInputTokens,
-						inputTokens: completionRequest.tokenUsage.promptTokens,
-						outputTokens: completionRequest.tokenUsage.completionTokens,
-						totalCost: parseFloat(completionRequest.totalCostUsd),
-					}
-
-					break
-				}
-
-				await delay(200)
-			}
-		} catch (error) {
-			console.error("Error fetching Glama completion details", error)
-		}
-	}
-
-	async completePrompt(prompt: string): Promise<string> {
-		const { id: modelId, info } = await this.fetchModel()
-
-		try {
-			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
-				model: modelId,
-				messages: [{ role: "user", content: prompt }],
-			}
-
-			if (this.supportsTemperature(modelId)) {
-				requestOptions.temperature = this.options.modelTemperature ?? GLAMA_DEFAULT_TEMPERATURE
-			}
-
-			if (modelId.startsWith("anthropic/")) {
-				requestOptions.max_tokens = info.maxTokens
-			}
-
-			const response = await this.client.chat.completions.create(requestOptions)
-			return response.choices[0]?.message.content || ""
-		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`Glama completion error: ${error.message}`)
-			}
-
-			throw error
-		}
-	}
-}
diff --git a/src/api/providers/index.ts b/src/api/providers/index.ts
index 0df764967fe..23ef40ee133 100644
--- a/src/api/providers/index.ts
+++ b/src/api/providers/index.ts
@@ -9,7 +9,6 @@ export { DoubaoHandler } from "./doubao"
 export { MoonshotHandler } from "./moonshot"
 export { FakeAIHandler } from "./fake-ai"
 export { GeminiHandler } from "./gemini"
-export { GlamaHandler } from "./glama"
 export { GroqHandler } from "./groq"
 export { HuggingFaceHandler } from "./huggingface"
 export { HumanRelayHandler } from "./human-relay"
diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts
index 77e2dbc2ebe..835d6cfd6f1 100644
--- a/src/core/webview/ClineProvider.ts
+++ b/src/core/webview/ClineProvider.ts
@@ -36,7 +36,6 @@ import {
 	RooCodeEventName,
 	requestyDefaultModelId,
 	openRouterDefaultModelId,
-	glamaDefaultModelId,
 	DEFAULT_TERMINAL_OUTPUT_CHARACTER_LIMIT,
 	DEFAULT_WRITE_DELAY_MS,
 	ORGANIZATION_ALLOW_ALL,
@@ -1525,39 +1524,6 @@ export class ClineProvider
 		await this.upsertProviderProfile(currentApiConfigName, newConfiguration)
 	}
 
-	// Glama
-
-	async handleGlamaCallback(code: string) {
-		let apiKey: string
-
-		try {
-			const response = await axios.post("https://glama.ai/api/gateway/v1/auth/exchange-code", { code })
-
-			if (response.data && response.data.apiKey) {
-				apiKey = response.data.apiKey
-			} else {
-				throw new Error("Invalid response from Glama API")
-			}
-		} catch (error) {
-			this.log(
-				`Error exchanging code for API key: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`,
-			)
-
-			throw error
-		}
-
-		const { apiConfiguration, currentApiConfigName = "default" } = await this.getState()
-
-		const newConfiguration: ProviderSettings = {
-			...apiConfiguration,
-			apiProvider: "glama",
-			glamaApiKey: apiKey,
-			glamaModelId: apiConfiguration?.glamaModelId || glamaDefaultModelId,
-		}
-
-		await this.upsertProviderProfile(currentApiConfigName, newConfiguration)
-	}
-
 	// Requesty
 
 	async handleRequestyCallback(code: string, baseUrl: string | null) {
diff --git a/src/core/webview/__tests__/ClineProvider.apiHandlerRebuild.spec.ts b/src/core/webview/__tests__/ClineProvider.apiHandlerRebuild.spec.ts
index 3671019f5db..04f5d577929 100644
--- a/src/core/webview/__tests__/ClineProvider.apiHandlerRebuild.spec.ts
+++ b/src/core/webview/__tests__/ClineProvider.apiHandlerRebuild.spec.ts
@@ -567,7 +567,6 @@ describe("ClineProvider - API Handler Rebuild Guard", () => {
 			"claude-3-5-sonnet-20241022",
 		)
 		expect(getModelId({ apiProvider: "openai", openAiModelId: "gpt-4-turbo" })).toBe("gpt-4-turbo")
-		expect(getModelId({ apiProvider: "glama", glamaModelId: "some-model" })).toBe("some-model")
 		expect(getModelId({ apiProvider: "bedrock", apiModelId: "anthropic.claude-v2" })).toBe(
 			"anthropic.claude-v2",
 		)
diff --git a/src/core/webview/__tests__/ClineProvider.spec.ts b/src/core/webview/__tests__/ClineProvider.spec.ts
index c3a515b9de2..09b333c37c6 100644
--- a/src/core/webview/__tests__/ClineProvider.spec.ts
+++ b/src/core/webview/__tests__/ClineProvider.spec.ts
@@ -2645,7 +2645,6 @@ describe("ClineProvider - Router Models", () => {
 			apiConfiguration: {
 				openRouterApiKey: "openrouter-key",
requestyApiKey: "requesty-key", - glamaApiKey: "glama-key", unboundApiKey: "unbound-key", litellmApiKey: "litellm-key", litellmBaseUrl: "http://localhost:4000", @@ -2675,7 +2674,6 @@ describe("ClineProvider - Router Models", () => { // Verify getModels was called for each provider with correct options expect(getModels).toHaveBeenCalledWith({ provider: "openrouter" }) expect(getModels).toHaveBeenCalledWith({ provider: "requesty", apiKey: "requesty-key" }) - expect(getModels).toHaveBeenCalledWith({ provider: "glama" }) expect(getModels).toHaveBeenCalledWith({ provider: "unbound", apiKey: "unbound-key" }) expect(getModels).toHaveBeenCalledWith({ provider: "vercel-ai-gateway" }) expect(getModels).toHaveBeenCalledWith({ provider: "deepinfra" }) @@ -2699,7 +2697,6 @@ describe("ClineProvider - Router Models", () => { deepinfra: mockModels, openrouter: mockModels, requesty: mockModels, - glama: mockModels, unbound: mockModels, roo: mockModels, chutes: mockModels, @@ -2722,7 +2719,6 @@ describe("ClineProvider - Router Models", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - glamaApiKey: "glama-key", unboundApiKey: "unbound-key", litellmApiKey: "litellm-key", litellmBaseUrl: "http://localhost:4000", @@ -2738,7 +2734,6 @@ describe("ClineProvider - Router Models", () => { vi.mocked(getModels) .mockResolvedValueOnce(mockModels) // openrouter success .mockRejectedValueOnce(new Error("Requesty API error")) // requesty fail - .mockResolvedValueOnce(mockModels) // glama success .mockRejectedValueOnce(new Error("Unbound API error")) // unbound fail .mockResolvedValueOnce(mockModels) // vercel-ai-gateway success .mockResolvedValueOnce(mockModels) // deepinfra success @@ -2755,7 +2750,6 @@ describe("ClineProvider - Router Models", () => { deepinfra: mockModels, openrouter: mockModels, requesty: {}, - glama: mockModels, unbound: {}, roo: mockModels, chutes: {}, @@ -2815,7 +2809,6 @@ describe("ClineProvider - Router Models", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - glamaApiKey: "glama-key", unboundApiKey: "unbound-key", // No litellm config }, @@ -2851,7 +2844,6 @@ describe("ClineProvider - Router Models", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - glamaApiKey: "glama-key", unboundApiKey: "unbound-key", // No litellm config }, @@ -2879,7 +2871,6 @@ describe("ClineProvider - Router Models", () => { deepinfra: mockModels, openrouter: mockModels, requesty: mockModels, - glama: mockModels, unbound: mockModels, roo: mockModels, chutes: mockModels, diff --git a/src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts b/src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts index 12ffb025883..7a69adbdde6 100644 --- a/src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts +++ b/src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts @@ -76,8 +76,6 @@ describe("webviewMessageHandler - requestRouterModels provider filter", () => { return { "requesty/model": { contextWindow: 8192, supportsPromptCache: false } } case "deepinfra": return { "deepinfra/model": { contextWindow: 8192, supportsPromptCache: false } } - case "glama": - return { "glama/model": { contextWindow: 8192, supportsPromptCache: false } } case "unbound": return { "unbound/model": { contextWindow: 8192, supportsPromptCache: false } } case "vercel-ai-gateway": diff --git a/src/core/webview/__tests__/webviewMessageHandler.spec.ts 
b/src/core/webview/__tests__/webviewMessageHandler.spec.ts index c436f160147..f724558ae6a 100644 --- a/src/core/webview/__tests__/webviewMessageHandler.spec.ts +++ b/src/core/webview/__tests__/webviewMessageHandler.spec.ts @@ -187,7 +187,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - glamaApiKey: "glama-key", unboundApiKey: "unbound-key", litellmApiKey: "litellm-key", litellmBaseUrl: "http://localhost:4000", @@ -220,7 +219,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { // Verify getModels was called for each provider expect(mockGetModels).toHaveBeenCalledWith({ provider: "openrouter" }) expect(mockGetModels).toHaveBeenCalledWith({ provider: "requesty", apiKey: "requesty-key" }) - expect(mockGetModels).toHaveBeenCalledWith({ provider: "glama" }) expect(mockGetModels).toHaveBeenCalledWith({ provider: "unbound", apiKey: "unbound-key" }) expect(mockGetModels).toHaveBeenCalledWith({ provider: "vercel-ai-gateway" }) expect(mockGetModels).toHaveBeenCalledWith({ provider: "deepinfra" }) @@ -245,7 +243,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { deepinfra: mockModels, openrouter: mockModels, requesty: mockModels, - glama: mockModels, unbound: mockModels, litellm: mockModels, roo: mockModels, @@ -265,7 +262,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - glamaApiKey: "glama-key", unboundApiKey: "unbound-key", // Missing litellm config }, @@ -303,7 +299,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - glamaApiKey: "glama-key", unboundApiKey: "unbound-key", // Missing litellm config }, @@ -339,7 +334,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { deepinfra: mockModels, openrouter: mockModels, requesty: mockModels, - glama: mockModels, unbound: mockModels, roo: mockModels, chutes: mockModels, @@ -368,7 +362,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { mockGetModels .mockResolvedValueOnce(mockModels) // openrouter .mockRejectedValueOnce(new Error("Requesty API error")) // requesty - .mockResolvedValueOnce(mockModels) // glama .mockRejectedValueOnce(new Error("Unbound API error")) // unbound .mockResolvedValueOnce(mockModels) // vercel-ai-gateway .mockResolvedValueOnce(mockModels) // deepinfra @@ -416,7 +409,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { deepinfra: mockModels, openrouter: mockModels, requesty: {}, - glama: mockModels, unbound: {}, roo: mockModels, chutes: {}, @@ -436,7 +428,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { mockGetModels .mockRejectedValueOnce(new Error("Structured error message")) // openrouter .mockRejectedValueOnce(new Error("Requesty API error")) // requesty - .mockRejectedValueOnce(new Error("Glama API error")) // glama .mockRejectedValueOnce(new Error("Unbound API error")) // unbound .mockRejectedValueOnce(new Error("Vercel AI Gateway error")) // vercel-ai-gateway .mockRejectedValueOnce(new Error("DeepInfra API error")) // deepinfra @@ -463,13 +454,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { values: { provider: "requesty" }, }) - expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ - type: "singleRouterModelFetchResponse", - success: false, - error: "Glama API 
error", - values: { provider: "glama" }, - }) - expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", success: false, diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts index c1c8e6aa204..e8aa9d99b21 100644 --- a/src/core/webview/webviewMessageHandler.ts +++ b/src/core/webview/webviewMessageHandler.ts @@ -874,7 +874,6 @@ export const webviewMessageHandler = async ( "io-intelligence": {}, requesty: {}, unbound: {}, - glama: {}, ollama: {}, lmstudio: {}, roo: {}, @@ -905,7 +904,6 @@ export const webviewMessageHandler = async ( baseUrl: apiConfiguration.requestyBaseUrl, }, }, - { key: "glama", options: { provider: "glama" } }, { key: "unbound", options: { provider: "unbound", apiKey: apiConfiguration.unboundApiKey } }, { key: "vercel-ai-gateway", options: { provider: "vercel-ai-gateway" } }, { diff --git a/src/shared/ProfileValidator.ts b/src/shared/ProfileValidator.ts index 78ff6ed9fe1..c8a2c243c00 100644 --- a/src/shared/ProfileValidator.ts +++ b/src/shared/ProfileValidator.ts @@ -82,8 +82,6 @@ export class ProfileValidator { return profile.vsCodeLmModelSelector?.id case "openrouter": return profile.openRouterModelId - case "glama": - return profile.glamaModelId case "ollama": return profile.ollamaModelId case "requesty": diff --git a/src/shared/__tests__/ProfileValidator.spec.ts b/src/shared/__tests__/ProfileValidator.spec.ts index 5cfe7a720bf..d604cd95234 100644 --- a/src/shared/__tests__/ProfileValidator.spec.ts +++ b/src/shared/__tests__/ProfileValidator.spec.ts @@ -306,21 +306,6 @@ describe("ProfileValidator", () => { expect(ProfileValidator.isProfileAllowed(profile, allowList)).toBe(true) }) - it("should extract glamaModelId for glama provider", () => { - const allowList: OrganizationAllowList = { - allowAll: false, - providers: { - glama: { allowAll: false, models: ["glama-model"] }, - }, - } - const profile: ProviderSettings = { - apiProvider: "glama", - glamaModelId: "glama-model", - } - - expect(ProfileValidator.isProfileAllowed(profile, allowList)).toBe(true) - }) - it("should extract requestyModelId for requesty provider", () => { const allowList: OrganizationAllowList = { allowAll: false, diff --git a/src/shared/__tests__/checkExistApiConfig.spec.ts b/src/shared/__tests__/checkExistApiConfig.spec.ts index 7696f00cc0c..58ea3bccbbd 100644 --- a/src/shared/__tests__/checkExistApiConfig.spec.ts +++ b/src/shared/__tests__/checkExistApiConfig.spec.ts @@ -24,7 +24,6 @@ describe("checkExistKey", () => { it("should return true when multiple keys are defined", () => { const config: ProviderSettings = { apiKey: "test-key", - glamaApiKey: "glama-key", openRouterApiKey: "openrouter-key", } expect(checkExistKey(config)).toBe(true) @@ -43,7 +42,6 @@ describe("checkExistKey", () => { it("should return false when all key fields are undefined", () => { const config: ProviderSettings = { apiKey: undefined, - glamaApiKey: undefined, openRouterApiKey: undefined, awsRegion: undefined, vertexProjectId: undefined, diff --git a/src/shared/api.ts b/src/shared/api.ts index 4f4c8a4ae9a..ffb42a8ca44 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -189,7 +189,6 @@ const dynamicProviderExtras = { "io-intelligence": {} as { apiKey: string }, requesty: {} as { apiKey?: string; baseUrl?: string }, unbound: {} as { apiKey?: string }, - glama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type ollama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type 
 	lmstudio: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
 	roo: {} as { apiKey?: string; baseUrl?: string },
diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx
index e60c32e6c54..653c9181f1d 100644
--- a/webview-ui/src/components/settings/ApiOptions.tsx
+++ b/webview-ui/src/components/settings/ApiOptions.tsx
@@ -10,7 +10,6 @@ import {
 	DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
 	openRouterDefaultModelId,
 	requestyDefaultModelId,
-	glamaDefaultModelId,
 	unboundDefaultModelId,
 	litellmDefaultModelId,
 	openAiNativeDefaultModelId,
@@ -76,7 +75,6 @@ import {
 	DeepSeek,
 	Doubao,
 	Gemini,
-	Glama,
 	Groq,
 	HuggingFace,
 	IOIntelligence,
@@ -307,7 +305,7 @@ const ApiOptions = ({
 	// It would be much easier to have a single attribute that stores
 	// the modelId, but we have a separate attribute for each of
-	// OpenRouter, Glama, Unbound, and Requesty.
+	// OpenRouter, Unbound, and Requesty.
 	// If you switch to one of these providers and the corresponding
 	// modelId is not set then you immediately end up in an error state.
 	// To address that we set the modelId to the default value for th
@@ -341,7 +339,6 @@
 	> = {
 		deepinfra: { field: "deepInfraModelId", default: deepInfraDefaultModelId },
 		openrouter: { field: "openRouterModelId", default: openRouterDefaultModelId },
-		glama: { field: "glamaModelId", default: glamaDefaultModelId },
 		unbound: { field: "unboundModelId", default: unboundDefaultModelId },
 		requesty: { field: "requestyModelId", default: requestyDefaultModelId },
 		litellm: { field: "litellmModelId", default: litellmDefaultModelId },
@@ -534,18 +531,6 @@
 				/>
 			)}
 
-			{selectedProvider === "glama" && (
-				<Glama
-					apiConfiguration={apiConfiguration}
-					setApiConfigurationField={setApiConfigurationField}
-					routerModels={routerModels}
-					uriScheme={uriScheme}
-					organizationAllowList={organizationAllowList}
-					modelValidationError={modelValidationError}
-					simplifySettings={simplifySettings}
-				/>
-			)}
-
 			{selectedProvider === "unbound" && (
diff --git a/webview-ui/src/components/settings/__tests__/ApiOptions.spec.tsx b/webview-ui/src/components/settings/__tests__/ApiOptions.spec.tsx
--- a/webview-ui/src/components/settings/__tests__/ApiOptions.spec.tsx
+++ b/webview-ui/src/components/settings/__tests__/ApiOptions.spec.tsx
 		expect(providerValues).toContain("ollama")
 		expect(providerValues).toContain("lmstudio")
 		expect(providerValues).toContain("litellm")
-		expect(providerValues).toContain("glama")
 		expect(providerValues).toContain("unbound")
 		expect(providerValues).toContain("requesty")
 		expect(providerValues).toContain("io-intelligence")
diff --git a/webview-ui/src/components/settings/__tests__/ModelPicker.spec.tsx b/webview-ui/src/components/settings/__tests__/ModelPicker.spec.tsx
index 05cb871a960..081a0a94b99 100644
--- a/webview-ui/src/components/settings/__tests__/ModelPicker.spec.tsx
+++ b/webview-ui/src/components/settings/__tests__/ModelPicker.spec.tsx
@@ -36,7 +36,7 @@ describe("ModelPicker", () => {
 	const defaultProps = {
 		apiConfiguration: {},
 		defaultModelId: "model1",
-		modelIdKey: "glamaModelId" as const,
+		modelIdKey: "openRouterModelId" as const,
 		serviceName: "Test Service",
 		serviceUrl: "https://test.service",
 		recommendedModel: "recommended-model",
diff --git a/webview-ui/src/components/settings/constants.ts b/webview-ui/src/components/settings/constants.ts
index e5c13e136a0..bab4f440ede 100644
--- a/webview-ui/src/components/settings/constants.ts
+++ b/webview-ui/src/components/settings/constants.ts
@@ -61,7 +61,6 @@ export const PROVIDERS = [
 	{ value: "qwen-code", label: "Qwen Code" },
 	{ value: "vertex", label: "GCP Vertex AI" },
 	{ value: "bedrock", label: "Amazon Bedrock" },
-	{ value: "glama", label: "Glama" },
 	{ value: "vscode-lm", label: "VS Code LM API" },
 	{ value: "mistral", label: "Mistral" },
 	{ value: "lmstudio", label: "LM Studio" },
diff --git a/webview-ui/src/components/settings/providers/Glama.tsx b/webview-ui/src/components/settings/providers/Glama.tsx
deleted file mode 100644
index 36b13a4a6d6..00000000000
--- a/webview-ui/src/components/settings/providers/Glama.tsx
+++ /dev/null
@@ -1,79 +0,0 @@
-import { useCallback } from "react"
-import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
-
-import { type ProviderSettings, type OrganizationAllowList, glamaDefaultModelId } from "@roo-code/types"
-
-import type { RouterModels } from "@roo/api"
-
-import { useAppTranslation } from "@src/i18n/TranslationContext"
-import { getGlamaAuthUrl } from "@src/oauth/urls"
-import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink"
-
-import { inputEventTransform } from "../transforms"
-import { ModelPicker } from "../ModelPicker"
-
-type GlamaProps = {
-	apiConfiguration: ProviderSettings
-	setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void
-	routerModels?: RouterModels
-	uriScheme?: string
-	organizationAllowList: OrganizationAllowList
-	modelValidationError?: string
-	simplifySettings?: boolean
-}
-
-export const Glama = ({
-	apiConfiguration,
-	setApiConfigurationField,
-	routerModels,
-	uriScheme,
-	organizationAllowList,
-	modelValidationError,
-	simplifySettings,
-}: GlamaProps) => {
-	const { t } = useAppTranslation()
-
-	const handleInputChange = useCallback(
-		<K extends keyof ProviderSettings, E>(
-			field: K,
-			transform: (event: E) => ProviderSettings[K] = inputEventTransform,
-		) =>
-		(event: E | Event) => {
-			setApiConfigurationField(field, transform(event as E))
-		},
-		[setApiConfigurationField],
-	)
-
-	return (
-		<>
-			<VSCodeTextField
-				value={apiConfiguration?.glamaApiKey || ""}
-				type="password"
-				onInput={handleInputChange("glamaApiKey")}
-				placeholder={t("settings:placeholders.apiKey")}
-				className="w-full">
-				<label className="block font-medium mb-1">{t("settings:providers.glamaApiKey")}</label>
-			</VSCodeTextField>
-			<div className="text-sm text-vscode-descriptionForeground -mt-2">
-				{t("settings:providers.apiKeyStorageNotice")}
-			</div>
-			{!apiConfiguration?.glamaApiKey && (
-				<VSCodeButtonLink href={getGlamaAuthUrl(uriScheme)} style={{ width: "100%" }} appearance="primary">
-					{t("settings:providers.getGlamaApiKey")}
-				</VSCodeButtonLink>
-			)}
-			<ModelPicker
-				apiConfiguration={apiConfiguration}
-				setApiConfigurationField={setApiConfigurationField}
-				defaultModelId={glamaDefaultModelId}
-				models={routerModels?.glama ?? {}}
-				modelIdKey="glamaModelId"
-				serviceName="Glama"
-				serviceUrl="https://glama.ai/models"
-				organizationAllowList={organizationAllowList}
-				errorMessage={modelValidationError}
-				simplifySettings={simplifySettings}
-			/>
-		</>
-	)
-}
diff --git a/webview-ui/src/components/settings/providers/index.ts b/webview-ui/src/components/settings/providers/index.ts
index fbd9bc4eb24..0dd722a5220 100644
--- a/webview-ui/src/components/settings/providers/index.ts
+++ b/webview-ui/src/components/settings/providers/index.ts
@@ -6,7 +6,6 @@ export { ClaudeCode } from "./ClaudeCode"
 export { DeepSeek } from "./DeepSeek"
 export { Doubao } from "./Doubao"
 export { Gemini } from "./Gemini"
-export { Glama } from "./Glama"
 export { Groq } from "./Groq"
 export { HuggingFace } from "./HuggingFace"
 export { IOIntelligence } from "./IOIntelligence"
diff --git a/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts b/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts
index 945b8aa63b2..bfbfdc424e4 100644
--- a/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts
+++ b/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts
@@ -55,7 +55,6 @@ describe("useSelectedModel", () => {
 				"test-model": baseModelInfo,
 			},
 			requesty: {},
-			glama: {},
 			unbound: {},
 			litellm: {},
 			"io-intelligence": {},
@@ -119,7 +118,6 @@ describe("useSelectedModel", () => {
 				},
 			},
 			requesty: {},
-			glama: {},
 			unbound: {},
 			litellm: {},
 			"io-intelligence": {},
@@ -187,7 +185,6 @@ describe("useSelectedModel", () => {
 				"test-model": baseModelInfo,
 			},
 			requesty: {},
-			glama: {},
 			unbound: {},
 			litellm: {},
 			"io-intelligence": {},
@@ -242,7 +239,6 @@ describe("useSelectedModel", () => {
 			data: {
 				openrouter: { "test-model": baseModelInfo },
 				requesty: {},
-				glama: {},
 				unbound: {},
 				litellm: {},
 				"io-intelligence": {},
@@ -286,7 +282,6 @@ describe("useSelectedModel", () => {
 				},
 			},
 			requesty: {},
-			glama: {},
 			unbound: {},
 			litellm: {},
 			"io-intelligence": {},
@@ -349,7 +344,7 @@ describe("useSelectedModel", () => {
 
 	it("should NOT set loading when openrouter provider metadata is loading but provider is static (anthropic)", () => {
 		mockUseRouterModels.mockReturnValue({
-			data: { openrouter: {}, requesty: {}, glama: {}, unbound: {}, litellm: {}, "io-intelligence": {} },
+			data: { openrouter: {}, requesty: {}, unbound: {}, litellm: {}, "io-intelligence": {} },
 			isLoading: false,
 			isError: false,
 		} as any)
@@ -417,7 +412,6 @@ describe("useSelectedModel", () => {
 			data: {
 				openrouter: {},
 				requesty: {},
-				glama: {},
 				unbound: {},
 				litellm: {},
 				"io-intelligence": {},
@@ -455,7 +449,6 @@ describe("useSelectedModel", () => {
 			data: {
 				openrouter: {},
 				requesty: {},
-				glama: {},
 				unbound: {},
 				litellm: {},
 				"io-intelligence": {},
@@ -490,7 +483,6 @@ describe("useSelectedModel", () => {
 			data: {
 				openrouter: {},
 				requesty: {},
-				glama: {},
 				unbound: {},
 				litellm: {},
 				"io-intelligence": {},
diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts
index 3df7236713c..79804165dc4 100644
--- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts
+++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts
@@ -157,11 +157,6 @@ function getSelectedModel({
 			const info = routerModels.requesty?.[id]
 			return { id, info }
 		}
-		case "glama": {
-			const id = getValidatedModelId(apiConfiguration.glamaModelId, routerModels.glama, defaultModelId)
-			const info = routerModels.glama?.[id]
-			return { id, info }
-		}
 		case "unbound": {
 			const id = getValidatedModelId(apiConfiguration.unboundModelId, routerModels.unbound, defaultModelId)
 			const info = routerModels.unbound?.[id]
diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json
index 1d2124b6fba..27aab976f00 100644
--- a/webview-ui/src/i18n/locales/ca/settings.json
+++ b/webview-ui/src/i18n/locales/ca/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Clau API de Vercel AI Gateway",
 	"getVercelAiGatewayApiKey": "Obtenir clau API de Vercel AI Gateway",
 	"apiKeyStorageNotice": "Les claus API s'emmagatzemen de forma segura a l'Emmagatzematge Secret de VSCode",
-	"glamaApiKey": "Clau API de Glama",
-	"getGlamaApiKey": "Obtenir clau API de Glama",
 	"useCustomBaseUrl": "Utilitzar URL base personalitzada",
 	"useReasoning": "Activar raonament",
 	"useHostHeader": "Utilitzar capçalera Host personalitzada",
diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json
index 53799baca6a..a56d929995c 100644
--- a/webview-ui/src/i18n/locales/de/settings.json
+++ b/webview-ui/src/i18n/locales/de/settings.json
@@ -285,8 +285,6 @@
 	"doubaoApiKey": "Doubao API-Schlüssel",
 	"getDoubaoApiKey": "Doubao API-Schlüssel erhalten",
 	"apiKeyStorageNotice": "API-Schlüssel werden sicher im VSCode Secret Storage gespeichert",
-	"glamaApiKey": "Glama API-Schlüssel",
-	"getGlamaApiKey": "Glama API-Schlüssel erhalten",
 	"useCustomBaseUrl": "Benutzerdefinierte Basis-URL verwenden",
 	"useReasoning": "Reasoning aktivieren",
 	"useHostHeader": "Benutzerdefinierten Host-Header verwenden",
diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json
index 6dd07ca4110..ce416075aa0 100644
--- a/webview-ui/src/i18n/locales/en/settings.json
+++ b/webview-ui/src/i18n/locales/en/settings.json
@@ -288,8 +288,6 @@
 	"vercelAiGatewayApiKey": "Vercel AI Gateway API Key",
 	"getVercelAiGatewayApiKey": "Get Vercel AI Gateway API Key",
 	"apiKeyStorageNotice": "API keys are stored securely in VSCode's Secret Storage",
-	"glamaApiKey": "Glama API Key",
-	"getGlamaApiKey": "Get Glama API Key",
 	"useCustomBaseUrl": "Use custom base URL",
 	"useReasoning": "Enable reasoning",
 	"useHostHeader": "Use custom Host header",
diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json
index 25a7ee2128f..2c24e9b37c1 100644
--- a/webview-ui/src/i18n/locales/es/settings.json
+++ b/webview-ui/src/i18n/locales/es/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Clave API de Vercel AI Gateway",
 	"getVercelAiGatewayApiKey": "Obtener clave API de Vercel AI Gateway",
 	"apiKeyStorageNotice": "Las claves API se almacenan de forma segura en el Almacenamiento Secreto de VSCode",
-	"glamaApiKey": "Clave API de Glama",
-	"getGlamaApiKey": "Obtener clave API de Glama",
 	"useCustomBaseUrl": "Usar URL base personalizada",
 	"useReasoning": "Habilitar razonamiento",
 	"useHostHeader": "Usar encabezado Host personalizado",
diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json
index f53715ec1e4..538540dd202 100644
--- a/webview-ui/src/i18n/locales/fr/settings.json
+++ b/webview-ui/src/i18n/locales/fr/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Clé API Vercel AI Gateway",
 	"getVercelAiGatewayApiKey": "Obtenir la clé API Vercel AI Gateway",
 	"apiKeyStorageNotice": "Les clés API sont stockées en toute sécurité dans le stockage sécurisé de VSCode",
-	"glamaApiKey": "Clé API Glama",
-	"getGlamaApiKey": "Obtenir la clé API Glama",
 	"useCustomBaseUrl": "Utiliser une URL de base personnalisée",
 	"useReasoning": "Activer le raisonnement",
 	"useHostHeader": "Utiliser un en-tête Host personnalisé",
diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json
index 3d6ab6969c4..82159f90048 100644
--- a/webview-ui/src/i18n/locales/hi/settings.json
+++ b/webview-ui/src/i18n/locales/hi/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Vercel AI Gateway API कुंजी",
 	"getVercelAiGatewayApiKey": "Vercel AI Gateway API कुंजी प्राप्त करें",
 	"apiKeyStorageNotice": "API कुंजियाँ VSCode के सुरक्षित स्टोरेज में सुरक्षित रूप से संग्रहीत हैं",
-	"glamaApiKey": "Glama API कुंजी",
-	"getGlamaApiKey": "Glama API कुंजी प्राप्त करें",
 	"useCustomBaseUrl": "कस्टम बेस URL का उपयोग करें",
 	"useReasoning": "तर्क सक्षम करें",
 	"useHostHeader": "कस्टम होस्ट हेडर का उपयोग करें",
diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json
index c519f0dccb9..a0d62a06941 100644
--- a/webview-ui/src/i18n/locales/id/settings.json
+++ b/webview-ui/src/i18n/locales/id/settings.json
@@ -287,8 +287,6 @@
 	"vercelAiGatewayApiKey": "Vercel AI Gateway API Key",
 	"getVercelAiGatewayApiKey": "Dapatkan Vercel AI Gateway API Key",
 	"apiKeyStorageNotice": "API key disimpan dengan aman di Secret Storage VSCode",
-	"glamaApiKey": "Glama API Key",
-	"getGlamaApiKey": "Dapatkan Glama API Key",
 	"useCustomBaseUrl": "Gunakan base URL kustom",
 	"useReasoning": "Aktifkan reasoning",
 	"useHostHeader": "Gunakan Host header kustom",
diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json
index 568cf19d47c..1b8a59e7c9c 100644
--- a/webview-ui/src/i18n/locales/it/settings.json
+++ b/webview-ui/src/i18n/locales/it/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Chiave API Vercel AI Gateway",
 	"getVercelAiGatewayApiKey": "Ottieni chiave API Vercel AI Gateway",
 	"apiKeyStorageNotice": "Le chiavi API sono memorizzate in modo sicuro nell'Archivio Segreto di VSCode",
-	"glamaApiKey": "Chiave API Glama",
-	"getGlamaApiKey": "Ottieni chiave API Glama",
 	"useCustomBaseUrl": "Usa URL base personalizzato",
 	"useReasoning": "Abilita ragionamento",
 	"useHostHeader": "Usa intestazione Host personalizzata",
diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json
index 00b45386301..dc0017621fb 100644
--- a/webview-ui/src/i18n/locales/ja/settings.json
+++ b/webview-ui/src/i18n/locales/ja/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Vercel AI Gateway APIキー",
 	"getVercelAiGatewayApiKey": "Vercel AI Gateway APIキーを取得",
 	"apiKeyStorageNotice": "APIキーはVSCodeのシークレットストレージに安全に保存されます",
-	"glamaApiKey": "Glama APIキー",
-	"getGlamaApiKey": "Glama APIキーを取得",
 	"useCustomBaseUrl": "カスタムベースURLを使用",
 	"useReasoning": "推論を有効化",
 	"useHostHeader": "カスタムHostヘッダーを使用",
diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json
index fcbda4acebe..b03516bcd95 100644
--- a/webview-ui/src/i18n/locales/ko/settings.json
+++ b/webview-ui/src/i18n/locales/ko/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Vercel AI Gateway API 키",
 	"getVercelAiGatewayApiKey": "Vercel AI Gateway API 키 받기",
 	"apiKeyStorageNotice": "API 키는 VSCode의 보안 저장소에 안전하게 저장됩니다",
-	"glamaApiKey": "Glama API 키",
-	"getGlamaApiKey": "Glama API 키 받기",
 	"useCustomBaseUrl": "사용자 정의 기본 URL 사용",
 	"useReasoning": "추론 활성화",
 	"useHostHeader": "사용자 정의 Host 헤더 사용",
diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json
index 404e35e0da1..19dd2075161 100644
--- a/webview-ui/src/i18n/locales/nl/settings.json
+++ b/webview-ui/src/i18n/locales/nl/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Vercel AI Gateway API-sleutel",
 	"getVercelAiGatewayApiKey": "Vercel AI Gateway API-sleutel ophalen",
 	"apiKeyStorageNotice": "API-sleutels worden veilig opgeslagen in de geheime opslag van VSCode",
-	"glamaApiKey": "Glama API-sleutel",
-	"getGlamaApiKey": "Glama API-sleutel ophalen",
 	"useCustomBaseUrl": "Aangepaste basis-URL gebruiken",
 	"useReasoning": "Redenering inschakelen",
 	"useHostHeader": "Aangepaste Host-header gebruiken",
diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json
index 08225847b92..0df28c49b78 100644
--- a/webview-ui/src/i18n/locales/pl/settings.json
+++ b/webview-ui/src/i18n/locales/pl/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Klucz API Vercel AI Gateway",
 	"getVercelAiGatewayApiKey": "Uzyskaj klucz API Vercel AI Gateway",
 	"apiKeyStorageNotice": "Klucze API są bezpiecznie przechowywane w Tajnym Magazynie VSCode",
-	"glamaApiKey": "Klucz API Glama",
-	"getGlamaApiKey": "Uzyskaj klucz API Glama",
 	"useCustomBaseUrl": "Użyj niestandardowego URL bazowego",
 	"useReasoning": "Włącz rozumowanie",
 	"useHostHeader": "Użyj niestandardowego nagłówka Host",
diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json
index a1f4155a879..365fdf252d5 100644
--- a/webview-ui/src/i18n/locales/pt-BR/settings.json
+++ b/webview-ui/src/i18n/locales/pt-BR/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Chave API do Vercel AI Gateway",
 	"getVercelAiGatewayApiKey": "Obter chave API do Vercel AI Gateway",
 	"apiKeyStorageNotice": "As chaves de API são armazenadas com segurança no Armazenamento Secreto do VSCode",
-	"glamaApiKey": "Chave de API Glama",
-	"getGlamaApiKey": "Obter chave de API Glama",
 	"useCustomBaseUrl": "Usar URL base personalizado",
 	"useReasoning": "Habilitar raciocínio",
 	"useHostHeader": "Usar cabeçalho Host personalizado",
diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json
index e71ddfd936b..67175ed71f1 100644
--- a/webview-ui/src/i18n/locales/ru/settings.json
+++ b/webview-ui/src/i18n/locales/ru/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Ключ API Vercel AI Gateway",
 	"getVercelAiGatewayApiKey": "Получить ключ API Vercel AI Gateway",
 	"apiKeyStorageNotice": "API-ключи хранятся безопасно в Secret Storage VSCode",
-	"glamaApiKey": "Glama API-ключ",
-	"getGlamaApiKey": "Получить Glama API-ключ",
 	"useCustomBaseUrl": "Использовать пользовательский базовый URL",
 	"useReasoning": "Включить рассуждения",
 	"useHostHeader": "Использовать пользовательский Host-заголовок",
diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json
index 0e17e1a6cc9..6e8936de45d 100644
--- a/webview-ui/src/i18n/locales/tr/settings.json
+++ b/webview-ui/src/i18n/locales/tr/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Vercel AI Gateway API Anahtarı",
 	"getVercelAiGatewayApiKey": "Vercel AI Gateway API Anahtarı Al",
 	"apiKeyStorageNotice": "API anahtarları VSCode'un Gizli Depolamasında güvenli bir şekilde saklanır",
-	"glamaApiKey": "Glama API Anahtarı",
-	"getGlamaApiKey": "Glama API Anahtarı Al",
 	"useCustomBaseUrl": "Özel temel URL kullan",
 	"useReasoning": "Akıl yürütmeyi etkinleştir",
 	"useHostHeader": "Özel Host başlığı kullan",
diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json
index f164db9ac90..bea163e0f61 100644
--- a/webview-ui/src/i18n/locales/vi/settings.json
+++ b/webview-ui/src/i18n/locales/vi/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Khóa API Vercel AI Gateway",
 	"getVercelAiGatewayApiKey": "Lấy khóa API Vercel AI Gateway",
 	"apiKeyStorageNotice": "Khóa API được lưu trữ an toàn trong Bộ lưu trữ bí mật của VSCode",
-	"glamaApiKey": "Khóa API Glama",
-	"getGlamaApiKey": "Lấy khóa API Glama",
 	"useCustomBaseUrl": "Sử dụng URL cơ sở tùy chỉnh",
 	"useReasoning": "Bật lý luận",
 	"useHostHeader": "Sử dụng tiêu đề Host tùy chỉnh",
diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json
index 2c9327c94db..d3d8ed0a7aa 100644
--- a/webview-ui/src/i18n/locales/zh-CN/settings.json
+++ b/webview-ui/src/i18n/locales/zh-CN/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Vercel AI Gateway API 密钥",
 	"getVercelAiGatewayApiKey": "获取 Vercel AI Gateway API 密钥",
 	"apiKeyStorageNotice": "API 密钥安全存储在 VSCode 的密钥存储中",
-	"glamaApiKey": "Glama API 密钥",
-	"getGlamaApiKey": "获取 Glama API 密钥",
 	"useCustomBaseUrl": "使用自定义基础 URL",
 	"useReasoning": "启用推理",
 	"useHostHeader": "使用自定义 Host 标头",
diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json
index 67957e87afb..76ee5b44d92 100644
--- a/webview-ui/src/i18n/locales/zh-TW/settings.json
+++ b/webview-ui/src/i18n/locales/zh-TW/settings.json
@@ -283,8 +283,6 @@
 	"vercelAiGatewayApiKey": "Vercel AI Gateway API 金鑰",
 	"getVercelAiGatewayApiKey": "取得 Vercel AI Gateway API 金鑰",
 	"apiKeyStorageNotice": "API 金鑰安全儲存於 VSCode 金鑰儲存中",
-	"glamaApiKey": "Glama API 金鑰",
-	"getGlamaApiKey": "取得 Glama API 金鑰",
 	"useCustomBaseUrl": "使用自訂基礎 URL",
 	"useReasoning": "啟用推理",
 	"useHostHeader": "使用自訂 Host 標頭",
diff --git a/webview-ui/src/oauth/urls.ts b/webview-ui/src/oauth/urls.ts
index 8abf0ca4340..38acb839c43 100644
--- a/webview-ui/src/oauth/urls.ts
+++ b/webview-ui/src/oauth/urls.ts
@@ -4,10 +4,6 @@ export function getCallbackUrl(provider: string, uriScheme?: string) {
 	return encodeURIComponent(`${uriScheme || "vscode"}://${Package.publisher}.${Package.name}/${provider}`)
 }
 
-export function getGlamaAuthUrl(uriScheme?: string) {
-	return `https://glama.ai/oauth/authorize?callback_url=${getCallbackUrl("glama", uriScheme)}`
-}
-
 export function getOpenRouterAuthUrl(uriScheme?: string) {
 	return `https://openrouter.ai/auth?callback_url=${getCallbackUrl("openrouter", uriScheme)}`
 }
diff --git a/webview-ui/src/utils/__tests__/validate.test.ts b/webview-ui/src/utils/__tests__/validate.test.ts
index 0bd7a15962b..46de15ea916 100644
--- a/webview-ui/src/utils/__tests__/validate.test.ts
+++ b/webview-ui/src/utils/__tests__/validate.test.ts
@@ -24,16 +24,6 @@ describe("Model Validation Functions", () => {
 				outputPrice: 5.0,
 			},
 		},
-		glama: {
-			"valid-model": {
-				maxTokens: 8192,
-				contextWindow: 200000,
-				supportsImages: true,
-				supportsPromptCache: false,
-				inputPrice: 3.0,
-				outputPrice: 15.0,
-			},
-		},
 		requesty: {},
 		unbound: {},
 		litellm: {},
@@ -93,26 +83,6 @@ describe("Model Validation Functions", () => {
 		expect(result).toContain("model")
 	})
 
-	it("returns undefined for valid Glama model", () => {
-		const config: ProviderSettings = {
-			apiProvider: "glama",
-			glamaModelId: "valid-model",
-		}
-
-		const result = getModelValidationError(config, mockRouterModels, allowAllOrganization)
-		expect(result).toBeUndefined()
-	})
-
-	it("returns error for invalid Glama model", () => {
-		const config: ProviderSettings = {
-			apiProvider: "glama",
-			glamaModelId: "invalid-model",
-		}
-
-		const result = getModelValidationError(config, mockRouterModels, allowAllOrganization)
-		expect(result).toBeUndefined()
-	})
-
 	it("returns undefined for OpenAI models when no router models provided", () => {
 		const config: ProviderSettings = {
 			apiProvider: "openai",
@@ -192,25 +162,5 @@ describe("Model Validation Functions", () => {
 		)
 		expect(result).toBeUndefined() // Should exclude model-specific org errors
 	})
-
-	it("returns undefined for valid IO Intelligence model", () => {
-		const config: ProviderSettings = {
-			apiProvider: "io-intelligence",
-			glamaModelId: "valid-model",
-		}
-
-		const result = getModelValidationError(config, mockRouterModels, allowAllOrganization)
-		expect(result).toBeUndefined()
-	})
-
-	it("returns error for invalid IO Intelligence model", () => {
-		const config: ProviderSettings = {
-			apiProvider: "io-intelligence",
-			glamaModelId: "invalid-model",
-		}
-
-		const result = getModelValidationError(config, mockRouterModels, allowAllOrganization)
-		expect(result).toBeUndefined()
-	})
 })
 })
diff --git a/webview-ui/src/utils/validate.ts b/webview-ui/src/utils/validate.ts
index 947b18ac3b0..bf49e03a4ba 100644
--- a/webview-ui/src/utils/validate.ts
+++ b/webview-ui/src/utils/validate.ts
@@ -43,11 +43,6 @@ function validateModelsAndKeysProvided(apiConfiguration: ProviderSettings): string | undefined {
 			if (!apiConfiguration.apiKey) {
 				return i18next.t("settings:validation.apiKey")
 			}
 			break
-		case "glama":
-			if (!apiConfiguration.glamaApiKey) {
-				return i18next.t("settings:validation.apiKey")
-			}
-			break
 		case "unbound":
 			if (!apiConfiguration.unboundApiKey) {
 				return i18next.t("settings:validation.apiKey")
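
A few implementation notes on the removal. First, provider settings in packages/types/src/provider-settings.ts are keyed off a Zod discriminated union, so a provider removal is only complete once both its schema and its union entry are gone. A minimal sketch of that mechanism, using simplified stand-in schemas rather than the real ones:

```ts
import { z } from "zod"

// Simplified stand-ins for the schemas in provider-settings.ts.
const baseProviderSettingsSchema = z.object({
	modelTemperature: z.number().nullish(),
})

const openRouterSchema = baseProviderSettingsSchema.extend({
	openRouterApiKey: z.string().optional(),
	openRouterModelId: z.string().optional(),
})

const anthropicSchema = baseProviderSettingsSchema.extend({
	apiKey: z.string().optional(),
	apiModelId: z.string().optional(),
})

// The union keys on `apiProvider`, like providerSettingsSchemaDiscriminated above.
const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProvider", [
	openRouterSchema.merge(z.object({ apiProvider: z.literal("openrouter") })),
	anthropicSchema.merge(z.object({ apiProvider: z.literal("anthropic") })),
])

// With the glama member removed from the union, a stored profile that still
// says `apiProvider: "glama"` no longer parses against it.
const stale = providerSettingsSchemaDiscriminated.safeParse({ apiProvider: "glama" })
console.log(stale.success) // false
```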
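Second, the deleted fetcher (src/api/providers/fetchers/glama.ts) carried a latent bug that this removal retires: `switch (rawModel.id)` compares the string id against the boolean returned by `rawModel.id.startsWith("anthropic/")`, so the case never matched and the 8192-token clamp for Anthropic models never ran. A sketch of what that guard was presumably meant to do, with the surrounding loop reduced to one helper (the helper name and the trimmed interface are illustrative, not from the original file):

```ts
// Shape reduced to the two fields the removed code actually read.
interface RawGlamaModel {
	id: string
	maxTokensOutput?: number
}

function resolveMaxTokens(rawModel: RawGlamaModel): number | undefined {
	// `switch (x) { case x.startsWith(...): }` tests `"anthropic/..." === true`,
	// which never holds; a plain conditional expresses the intended clamp.
	if (rawModel.id.startsWith("anthropic/")) {
		return 8192
	}
	return rawModel.maxTokensOutput
}

console.log(resolveMaxTokens({ id: "anthropic/claude-3-7-sonnet", maxTokensOutput: 64000 })) // 8192
console.log(resolveMaxTokens({ id: "openai/gpt-4o", maxTokensOutput: 4096 })) // 4096
```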
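Third, the deleted handler's usage accounting used a bounded-retry pattern worth keeping in mind: after the stream ended, it polled https://glama.ai/api/gateway/v1/completion-requests/&lt;id&gt; up to ten times, 200 ms apart, until tokenUsage and totalCostUsd appeared. Factored out of the Glama specifics, the loop looks roughly like this (`fetchOnce` is a stand-in for the axios GET; this is a sketch, not the deleted code verbatim):

```ts
// Generic bounded poll, mirroring the removed loop's 10 attempts x 200 ms schedule.
async function pollUntilReady<T>(
	fetchOnce: () => Promise<T | undefined>,
	attempts = 10,
	delayMs = 200,
): Promise<T | undefined> {
	const delay = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms))

	for (let attempt = 0; attempt < attempts; attempt++) {
		const result = await fetchOnce()
		if (result !== undefined) {
			return result // usage data became available upstream
		}
		await delay(delayMs) // give the interrupted request time to settle
	}

	return undefined // caller proceeds without usage/cost data, as the handler did
}
```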
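Finally, the removed OAuth pieces fit together across three files: webview-ui/src/oauth/urls.ts built the authorize URL with a vscode:// deep-link callback, src/activate/handleUri.ts routed the /glama callback path, and ClineProvider.handleGlamaCallback exchanged the one-time code for an API key before upserting the provider profile. The exchange step, condensed from the deleted method (same endpoint and response shape):

```ts
import axios from "axios"

// Condensed from the deleted ClineProvider.handleGlamaCallback.
async function exchangeCodeForApiKey(code: string): Promise<string> {
	const response = await axios.post("https://glama.ai/api/gateway/v1/auth/exchange-code", { code })

	if (response.data && response.data.apiKey) {
		return response.data.apiKey
	}

	throw new Error("Invalid response from Glama API")
}
```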