diff --git a/src/api/providers/__tests__/io-intelligence.spec.ts b/src/api/providers/__tests__/io-intelligence.spec.ts
index 99dfcefea4..2978ef856c 100644
--- a/src/api/providers/__tests__/io-intelligence.spec.ts
+++ b/src/api/providers/__tests__/io-intelligence.spec.ts
@@ -1,303 +1,197 @@
-import { Anthropic } from "@anthropic-ai/sdk"
-
-import { IOIntelligenceHandler } from "../io-intelligence"
-import type { ApiHandlerOptions } from "../../../shared/api"
+const { mockStreamText, mockGenerateText } = vi.hoisted(() => ({
+	mockStreamText: vi.fn(),
+	mockGenerateText: vi.fn(),
+}))
-const mockCreate = vi.fn()
+vi.mock("ai", async (importOriginal) => {
+	const actual = await importOriginal()
+	return {
+		...actual,
+		streamText: mockStreamText,
+		generateText: mockGenerateText,
+	}
+})
-// Mock OpenAI
-vi.mock("openai", () => ({
-	default: class MockOpenAI {
-		baseURL: string
-		apiKey: string
-		chat = {
-			completions: {
-				create: vi.fn(),
-			},
-		}
-		constructor(options: any) {
-			this.baseURL = options.baseURL
-			this.apiKey = options.apiKey
-			this.chat.completions.create = mockCreate
-		}
-	},
+vi.mock("@ai-sdk/openai-compatible", () => ({
+	createOpenAICompatible: vi.fn(() => {
+		return vi.fn(() => ({
+			modelId: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+			provider: "IO Intelligence",
+		}))
+	}),
 }))
-// Mock the fetcher functions
-vi.mock("../fetchers/io-intelligence", () => ({
-	getIOIntelligenceModels: vi.fn(),
-	getCachedIOIntelligenceModels: vi.fn(() => ({
-		"meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": {
-			maxTokens: 8192,
-			contextWindow: 430000,
-			description: "Llama 4 Maverick 17B model",
-			supportsImages: true,
-			supportsPromptCache: false,
-		},
-		"deepseek-ai/DeepSeek-R1-0528": {
-			maxTokens: 8192,
-			contextWindow: 128000,
-			supportsImages: false,
-			supportsPromptCache: false,
-			description: "DeepSeek R1 reasoning model",
-		},
-		"Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": {
-			maxTokens: 4096,
-			contextWindow: 106000,
-			supportsImages: false,
-			supportsPromptCache: false,
-			description: "Qwen3 Coder 480B specialized for coding",
-		},
-		"openai/gpt-oss-120b": {
-			maxTokens: 8192,
-			contextWindow: 131072,
-			supportsImages: false,
-			supportsPromptCache: false,
-			description: "OpenAI GPT-OSS 120B model",
-		},
-	})),
-}))
+import type { Anthropic } from "@anthropic-ai/sdk"
-// Mock constants
-vi.mock("../constants", () => ({
-	DEFAULT_HEADERS: { "User-Agent": "roo-cline" },
-}))
+import { ioIntelligenceDefaultModelId } from "@roo-code/types"
-// Mock transform functions
-vi.mock("../../transform/openai-format", () => ({
-	convertToOpenAiMessages: vi.fn((messages) => messages),
-}))
+import type { ApiHandlerOptions } from "../../../shared/api"
+
+import { IOIntelligenceHandler } from "../io-intelligence"
 describe("IOIntelligenceHandler", () => {
 	let handler: IOIntelligenceHandler
 	let mockOptions: ApiHandlerOptions
 	beforeEach(() => {
-		vi.clearAllMocks()
 		mockOptions = {
 			ioIntelligenceApiKey: "test-api-key",
-			apiModelId: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+			ioIntelligenceModelId: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
 			modelTemperature: 0.7,
-			includeMaxTokens: false,
 			modelMaxTokens: undefined,
 		} as ApiHandlerOptions
-
-		mockCreate.mockImplementation(async () => ({
-			[Symbol.asyncIterator]: async function* () {
-				yield {
-					choices: [
-						{
-							delta: { content: "Test response" },
-							index: 0,
-						},
-					],
-					usage: null,
-				}
-				yield {
-					choices: [
-						{
-							delta: {},
-							index: 0,
-						},
-					],
-					usage: {
-						prompt_tokens: 10,
-						completion_tokens: 5,
-						total_tokens: 15,
-					},
-				}
-			},
-		}))
 		handler = new IOIntelligenceHandler(mockOptions)
+		vi.clearAllMocks()
 	})
-	afterEach(() => {
-		vi.restoreAllMocks()
-	})
-
-	it("should create OpenAI client with correct configuration", () => {
-		const ioIntelligenceApiKey = "test-io-intelligence-api-key"
-		const handler = new IOIntelligenceHandler({ ioIntelligenceApiKey })
-		// Verify that the handler was created successfully
-		expect(handler).toBeInstanceOf(IOIntelligenceHandler)
-		expect(handler["client"]).toBeDefined()
-		// Verify the client has the expected properties
-		expect(handler["client"].baseURL).toBe("https://api.intelligence.io.solutions/api/v1")
-		expect(handler["client"].apiKey).toBe(ioIntelligenceApiKey)
-	})
+	describe("constructor", () => {
+		it("should initialize with provided options", () => {
+			expect(handler).toBeInstanceOf(IOIntelligenceHandler)
+			expect(handler.getModel().id).toBe("meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8")
+		})
-	it("should initialize with correct configuration", () => {
-		expect(handler).toBeInstanceOf(IOIntelligenceHandler)
-		expect(handler["client"]).toBeDefined()
-		expect(handler["options"]).toEqual({
-			...mockOptions,
-			apiKey: mockOptions.ioIntelligenceApiKey,
+		it("should use default model ID if not provided", () => {
+			const handlerWithoutModel = new IOIntelligenceHandler({
+				...mockOptions,
+				ioIntelligenceModelId: undefined,
+			} as ApiHandlerOptions)
+			expect(handlerWithoutModel.getModel().id).toBe(ioIntelligenceDefaultModelId)
 		})
-	})
-	it("should throw error when API key is missing", () => {
-		const optionsWithoutKey = { ...mockOptions }
-		delete optionsWithoutKey.ioIntelligenceApiKey
+		it("should throw error when API key is missing", () => {
+			const optionsWithoutKey = { ...mockOptions }
+			delete optionsWithoutKey.ioIntelligenceApiKey
-		expect(() => new IOIntelligenceHandler(optionsWithoutKey)).toThrow("IO Intelligence API key is required")
+			expect(() => new IOIntelligenceHandler(optionsWithoutKey)).toThrow("IO Intelligence API key is required")
+		})
 	})
-	it("should handle streaming response correctly", async () => {
-		const mockStream = [
-			{
-				choices: [{ delta: { content: "Hello" } }],
-				usage: null,
-			},
-			{
-				choices: [{ delta: { content: " world" } }],
-				usage: null,
-			},
-			{
-				choices: [{ delta: {} }],
-				usage: { prompt_tokens: 10, completion_tokens: 5 },
-			},
-		]
-
-		mockCreate.mockResolvedValue({
-			[Symbol.asyncIterator]: async function* () {
-				for (const chunk of mockStream) {
-					yield chunk
-				}
-			},
+	describe("getModel", () => {
+		it("should return model info for valid model ID", () => {
+			const model = handler.getModel()
+			expect(model.id).toBe("meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8")
+			expect(model.info).toBeDefined()
+			expect(model.info.maxTokens).toBe(8192)
+			expect(model.info.contextWindow).toBe(430000)
+			expect(model.info.supportsImages).toBe(true)
+			expect(model.info.supportsPromptCache).toBe(false)
 		})
-		const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
-
-		const stream = handler.createMessage("System prompt", messages)
-		const results = []
-
-		for await (const chunk of stream) {
-			results.push(chunk)
-		}
-
-		expect(results).toHaveLength(3)
-		expect(results[0]).toEqual({ type: "text", text: "Hello" })
-		expect(results[1]).toEqual({ type: "text", text: " world" })
-		expect(results[2]).toMatchObject({
-			type: "usage",
-			inputTokens: 10,
-			outputTokens: 5,
+		it("should return default model info for unknown model ID", () => {
+			const handlerWithUnknown = new IOIntelligenceHandler({
+				...mockOptions,
+				ioIntelligenceModelId: "unknown-model",
+			} as ApiHandlerOptions)
+			const model = handlerWithUnknown.getModel()
+			expect(model.id).toBe("unknown-model")
+			expect(model.info).toBeDefined()
+			expect(model.info.contextWindow).toBe(handler.getModel().info.contextWindow)
 		})
-	})
-	it("completePrompt method should return text from IO Intelligence API", async () => {
-		const expectedResponse = "This is a test response from IO Intelligence"
-		mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: expectedResponse } }] })
-		const result = await handler.completePrompt("test prompt")
-		expect(result).toBe(expectedResponse)
-	})
+		it("should return default model if no model ID is provided", () => {
+			const handlerWithoutModel = new IOIntelligenceHandler({
+				...mockOptions,
+				ioIntelligenceModelId: undefined,
+			} as ApiHandlerOptions)
+			const model = handlerWithoutModel.getModel()
+			expect(model.id).toBe(ioIntelligenceDefaultModelId)
+			expect(model.info).toBeDefined()
+		})
-	it("should handle errors in completePrompt", async () => {
-		const errorMessage = "IO Intelligence API error"
-		mockCreate.mockRejectedValueOnce(new Error(errorMessage))
-		await expect(handler.completePrompt("test prompt")).rejects.toThrow(
-			`IO Intelligence completion error: ${errorMessage}`,
-		)
+		it("should include model parameters from getModelParams", () => {
+			const model = handler.getModel()
+			expect(model).toHaveProperty("temperature")
+			expect(model).toHaveProperty("maxTokens")
+		})
 	})
-	it("createMessage should yield text content from stream", async () => {
-		const testContent = "This is test content from IO Intelligence stream"
+	describe("createMessage", () => {
+		const systemPrompt = "You are a helpful assistant."
+		const messages: Anthropic.Messages.MessageParam[] = [
+			{
+				role: "user",
+				content: [
+					{
+						type: "text" as const,
+						text: "Hello!",
+					},
+				],
+			},
+		]
-		mockCreate.mockImplementationOnce(() => {
-			return {
-				[Symbol.asyncIterator]: () => ({
-					next: vi
-						.fn()
-						.mockResolvedValueOnce({
-							done: false,
-							value: { choices: [{ delta: { content: testContent } }] },
-						})
-						.mockResolvedValueOnce({ done: true }),
-				}),
+		it("should handle streaming responses", async () => {
+			async function* mockFullStream() {
+				yield { type: "text-delta", text: "Test response" }
 			}
-		})
-
-		const stream = handler.createMessage("system prompt", [])
-		const firstChunk = await stream.next()
-
-		expect(firstChunk.done).toBe(false)
-		expect(firstChunk.value).toEqual({ type: "text", text: testContent })
-	})
-	it("createMessage should yield usage data from stream", async () => {
-		mockCreate.mockImplementationOnce(() => {
-			return {
-				[Symbol.asyncIterator]: () => ({
-					next: vi
-						.fn()
-						.mockResolvedValueOnce({
-							done: false,
-							value: { choices: [{ delta: {} }], usage: { prompt_tokens: 10, completion_tokens: 20 } },
-						})
-						.mockResolvedValueOnce({ done: true }),
-				}),
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+				details: {},
+				raw: {},
+			})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+			})
+
+			const stream = handler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
 			}
-		})
-
-		const stream = handler.createMessage("system prompt", [])
-		const firstChunk = await stream.next()
-
-		expect(firstChunk.done).toBe(false)
-		expect(firstChunk.value).toMatchObject({ type: "usage", inputTokens: 10, outputTokens: 20 })
-	})
-	it("should return model info from cache when available", () => {
-		const model = handler.getModel()
-		expect(model.id).toBe("meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8")
-		expect(model.info).toEqual({
-			maxTokens: 8192,
-			contextWindow: 430000,
-			description: "Llama 4 Maverick 17B model",
-			supportsImages: true,
-			supportsPromptCache: false,
+			expect(chunks.length).toBeGreaterThan(0)
+			const textChunks = chunks.filter((chunk) => chunk.type === "text")
+			expect(textChunks).toHaveLength(1)
+			expect(textChunks[0].text).toBe("Test response")
 		})
-	})
-	it("should return fallback model info when not in cache", () => {
-		const handlerWithUnknownModel = new IOIntelligenceHandler({
-			...mockOptions,
-			apiModelId: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-		})
-		const model = handlerWithUnknownModel.getModel()
-		expect(model.id).toBe("meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8")
-		expect(model.info).toEqual({
-			maxTokens: 8192,
-			contextWindow: 430000,
-			description: "Llama 4 Maverick 17B model",
-			supportsImages: true,
-			supportsPromptCache: false,
-		})
-	})
+		it("should include usage information", async () => {
+			async function* mockFullStream() {
+				yield { type: "text-delta", text: "Test response" }
+			}
+
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+				details: {},
+				raw: {},
+			})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+			})
+
+			const stream = handler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
-	it("should use default model when no model is specified", () => {
-		const handlerWithoutModel = new IOIntelligenceHandler({
-			...mockOptions,
-			apiModelId: undefined,
+			const usageChunks = chunks.filter((chunk) => chunk.type === "usage")
+			expect(usageChunks.length).toBeGreaterThan(0)
+			expect(usageChunks[0].inputTokens).toBe(10)
+			expect(usageChunks[0].outputTokens).toBe(5)
 		})
-		const model = handlerWithoutModel.getModel()
-		expect(model.id).toBe("meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8")
 	})
-	it("should handle empty response from completePrompt", async () => {
-		mockCreate.mockResolvedValueOnce({
-			choices: [{ message: { content: null } }],
-		})
+	describe("completePrompt", () => {
+		it("should complete a prompt using generateText", async () => {
+			mockGenerateText.mockResolvedValue({
+				text: "Test completion",
+			})
-		const result = await handler.completePrompt("Test prompt")
-		expect(result).toBe("")
-	})
+			const result = await handler.completePrompt("Test prompt")
-	it("should handle missing choices in completePrompt response", async () => {
-		mockCreate.mockResolvedValueOnce({
-			choices: [],
+			expect(result).toBe("Test completion")
+			expect(mockGenerateText).toHaveBeenCalledWith(
+				expect.objectContaining({
+					prompt: "Test prompt",
+				}),
+			)
 		})
-
-		const result = await handler.completePrompt("Test prompt")
-		expect(result).toBe("")
 	})
 })
diff --git a/src/api/providers/io-intelligence.ts b/src/api/providers/io-intelligence.ts
index ef1c60a6a2..11b8afe5c4 100644
--- a/src/api/providers/io-intelligence.ts
+++ b/src/api/providers/io-intelligence.ts
@@ -1,44 +1,62 @@
-import { ioIntelligenceDefaultModelId, ioIntelligenceModels, type IOIntelligenceModelId } from "@roo-code/types"
+import {
+	ioIntelligenceDefaultModelId,
+	ioIntelligenceModels,
+	type IOIntelligenceModelId,
+	type ModelInfo,
+} from "@roo-code/types"
 import type { ApiHandlerOptions } from "../../shared/api"
-import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider"
-export class IOIntelligenceHandler extends BaseOpenAiCompatibleProvider {
+import { getModelParams } from "../transform/model-params"
+
+import { OpenAICompatibleHandler, type OpenAICompatibleConfig } from "./openai-compatible"
+
+export class IOIntelligenceHandler extends OpenAICompatibleHandler {
 	constructor(options: ApiHandlerOptions) {
 		if (!options.ioIntelligenceApiKey) {
 			throw new Error("IO Intelligence API key is required")
 		}
-		super({
-			...options,
+		const modelId = options.ioIntelligenceModelId ?? ioIntelligenceDefaultModelId
+		const modelInfo: ModelInfo = ioIntelligenceModels[modelId as IOIntelligenceModelId] ??
+			ioIntelligenceModels[ioIntelligenceDefaultModelId] ?? {
+				maxTokens: 8192,
+				contextWindow: 128000,
+				supportsImages: false,
+				supportsPromptCache: false,
+			}
+
+		const config: OpenAICompatibleConfig = {
 			providerName: "IO Intelligence",
 			baseURL: "https://api.intelligence.io.solutions/api/v1",
-			defaultProviderModelId: ioIntelligenceDefaultModelId,
-			providerModels: ioIntelligenceModels,
-			defaultTemperature: 0.7,
 			apiKey: options.ioIntelligenceApiKey,
-		})
+			modelId,
+			modelInfo,
+			modelMaxTokens: options.modelMaxTokens ?? undefined,
+			temperature: options.modelTemperature ?? 0.7,
+		}
+
+		super(options, config)
 	}
 	override getModel() {
-		const modelId = this.options.ioIntelligenceModelId || (ioIntelligenceDefaultModelId as IOIntelligenceModelId)
-
-		const modelInfo =
-			this.providerModels[modelId as IOIntelligenceModelId] ?? this.providerModels[ioIntelligenceDefaultModelId]
-
-		if (modelInfo) {
-			return { id: modelId as IOIntelligenceModelId, info: modelInfo }
-		}
-
-		// Return the requested model ID even if not found, with fallback info.
-		return {
-			id: modelId as IOIntelligenceModelId,
-			info: {
+		const modelId = this.options.ioIntelligenceModelId ?? ioIntelligenceDefaultModelId
+		const modelInfo: ModelInfo = ioIntelligenceModels[modelId as IOIntelligenceModelId] ??
+			ioIntelligenceModels[ioIntelligenceDefaultModelId] ?? {
 				maxTokens: 8192,
 				contextWindow: 128000,
 				supportsImages: false,
 				supportsPromptCache: false,
-			},
-		}
+			}
+
+		const params = getModelParams({
+			format: "openai",
+			modelId,
+			model: modelInfo,
+			settings: this.options,
+			defaultTemperature: 0.7,
+		})
+
+		return { id: modelId, info: modelInfo, ...params }
 	}
 }