diff --git a/src/api/providers/fetchers/__tests__/ollama.test.ts b/src/api/providers/fetchers/__tests__/ollama.test.ts
index 59663bc4959..a50e0c28edd 100644
--- a/src/api/providers/fetchers/__tests__/ollama.test.ts
+++ b/src/api/providers/fetchers/__tests__/ollama.test.ts
@@ -167,10 +167,14 @@ describe("Ollama Fetcher", () => {
 			const result = await getOllamaModels(baseUrl)
 
 			expect(mockedAxios.get).toHaveBeenCalledTimes(1)
-			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {} })
+			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {}, timeout: 10_000 })
 
 			expect(mockedAxios.post).toHaveBeenCalledTimes(1)
-			expect(mockedAxios.post).toHaveBeenCalledWith(`${baseUrl}/api/show`, { model: modelName }, { headers: {} })
+			expect(mockedAxios.post).toHaveBeenCalledWith(
+				`${baseUrl}/api/show`,
+				{ model: modelName },
+				{ headers: {}, timeout: 10_000 },
+			)
 
 			expect(typeof result).toBe("object")
 			expect(result).not.toBeInstanceOf(Array)
@@ -235,36 +239,58 @@ describe("Ollama Fetcher", () => {
 			expect(result[modelName]).toBeUndefined()
 		})
 
-		it("should return an empty list if the initial /api/tags call fails", async () => {
+		it("should throw an error with descriptive message if the initial /api/tags call fails", async () => {
 			const baseUrl = "http://localhost:11434"
 			mockedAxios.get.mockRejectedValueOnce(new Error("Network error"))
-			const consoleInfoSpy = vi.spyOn(console, "error").mockImplementation(() => {}) // Spy and suppress output
 
-			const result = await getOllamaModels(baseUrl)
+			await expect(getOllamaModels(baseUrl)).rejects.toThrow(
+				`Failed to connect to Ollama at ${baseUrl}: Network error`,
+			)
 
 			expect(mockedAxios.get).toHaveBeenCalledTimes(1)
-			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {} })
+			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {}, timeout: 10_000 })
 			expect(mockedAxios.post).not.toHaveBeenCalled()
-			expect(result).toEqual({})
 		})
 
-		it("should log an info message and return an empty object on ECONNREFUSED", async () => {
+		it("should throw a user-friendly error on ECONNREFUSED", async () => {
 			const baseUrl = "http://localhost:11434"
-			const consoleInfoSpy = vi.spyOn(console, "warn").mockImplementation(() => {}) // Spy and suppress output
 
 			const econnrefusedError = new Error("Connection refused") as any
 			econnrefusedError.code = "ECONNREFUSED"
 			mockedAxios.get.mockRejectedValueOnce(econnrefusedError)
 
-			const result = await getOllamaModels(baseUrl)
+			await expect(getOllamaModels(baseUrl)).rejects.toThrow(
+				`Connection refused by Ollama at ${baseUrl}. Is the server running and accessible?`,
+			)
 
 			expect(mockedAxios.get).toHaveBeenCalledTimes(1)
-			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {} })
 			expect(mockedAxios.post).not.toHaveBeenCalled()
-			expect(consoleInfoSpy).toHaveBeenCalledWith(`Failed connecting to Ollama at ${baseUrl}`)
-			expect(result).toEqual({})
+		})
 
-			consoleInfoSpy.mockRestore() // Restore original console.info
+		it("should throw a user-friendly error on timeout", async () => {
+			const baseUrl = "http://10.3.4.5:11434"
+
+			const timeoutError = new Error("timeout of 10000ms exceeded") as any
+			timeoutError.code = "ECONNABORTED"
+			mockedAxios.get.mockRejectedValueOnce(timeoutError)
+
+			await expect(getOllamaModels(baseUrl)).rejects.toThrow(/timed out after 10s.*firewall/)
+		})
+
+		it("should throw an error for invalid URL", async () => {
+			await expect(getOllamaModels("not-a-valid-url")).rejects.toThrow(/Invalid Ollama URL/)
+		})
+
+		it("should throw a user-friendly error on ENOTFOUND", async () => {
+			const baseUrl = "http://nonexistent-host:11434"
+
+			const enotfoundError = new Error("getaddrinfo ENOTFOUND nonexistent-host") as any
+			enotfoundError.code = "ENOTFOUND"
+			mockedAxios.get.mockRejectedValueOnce(enotfoundError)
+
+			await expect(getOllamaModels(baseUrl)).rejects.toThrow(
+				`Could not resolve hostname for Ollama at ${baseUrl}. Check the URL.`,
+			)
 		})
 
 		it("should handle models with null families field in API response", async () => {
@@ -317,10 +343,14 @@ describe("Ollama Fetcher", () => {
 			const result = await getOllamaModels(baseUrl)
 
 			expect(mockedAxios.get).toHaveBeenCalledTimes(1)
-			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {} })
+			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {}, timeout: 10_000 })
 
 			expect(mockedAxios.post).toHaveBeenCalledTimes(1)
-			expect(mockedAxios.post).toHaveBeenCalledWith(`${baseUrl}/api/show`, { model: modelName }, { headers: {} })
+			expect(mockedAxios.post).toHaveBeenCalledWith(
+				`${baseUrl}/api/show`,
+				{ model: modelName },
+				{ headers: {}, timeout: 10_000 },
+			)
 
 			expect(typeof result).toBe("object")
 			expect(result).not.toBeInstanceOf(Array)
@@ -384,13 +414,16 @@ describe("Ollama Fetcher", () => {
 			const expectedHeaders = { Authorization: `Bearer ${apiKey}` }
 
 			expect(mockedAxios.get).toHaveBeenCalledTimes(1)
-			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: expectedHeaders })
+			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, {
+				headers: expectedHeaders,
+				timeout: 10_000,
+			})
 
 			expect(mockedAxios.post).toHaveBeenCalledTimes(1)
 			expect(mockedAxios.post).toHaveBeenCalledWith(
 				`${baseUrl}/api/show`,
 				{ model: modelName },
-				{ headers: expectedHeaders },
+				{ headers: expectedHeaders, timeout: 10_000 },
 			)
 
 			expect(typeof result).toBe("object")
diff --git a/src/api/providers/fetchers/ollama.ts b/src/api/providers/fetchers/ollama.ts
index ba5b1c1d5d9..1018dbc360f 100644
--- a/src/api/providers/fetchers/ollama.ts
+++ b/src/api/providers/fetchers/ollama.ts
@@ -2,6 +2,10 @@ import axios from "axios"
 import { ModelInfo, ollamaDefaultModelInfo } from "@roo-code/types"
 import { z } from "zod"
 
+// 10-second timeout for Ollama HTTP requests. This prevents indefinite hangs
+// when connecting to unreachable external servers (TCP default is ~30s).
+const OLLAMA_REQUEST_TIMEOUT_MS = 10_000
+
 const OllamaModelDetailsSchema = z.object({
 	family: z.string(),
 	families: z.array(z.string()).nullable().optional(),
@@ -68,18 +72,21 @@ export async function getOllamaModels(
 	// clearing the input can leave an empty string; use the default in that case
 	baseUrl = baseUrl === "" ? "http://localhost:11434" : baseUrl
 
-	try {
-		if (!URL.canParse(baseUrl)) {
-			return models
-		}
+	if (!URL.canParse(baseUrl)) {
+		throw new Error(`Invalid Ollama URL: ${baseUrl}`)
+	}
 
-		// Prepare headers with optional API key
-		const headers: Record<string, string> = {}
-		if (apiKey) {
-			headers["Authorization"] = `Bearer ${apiKey}`
-		}
+	// Prepare headers with optional API key
+	const headers: Record<string, string> = {}
+	if (apiKey) {
+		headers["Authorization"] = `Bearer ${apiKey}`
+	}
 
-		const response = await axios.get(`${baseUrl}/api/tags`, { headers })
+	try {
+		const response = await axios.get(`${baseUrl}/api/tags`, {
+			headers,
+			timeout: OLLAMA_REQUEST_TIMEOUT_MS,
+		})
 		const parsedResponse = OllamaModelsResponseSchema.safeParse(response.data)
 		let modelInfoPromises = []
 
@@ -92,9 +99,9 @@ export async function getOllamaModels(
 						{
 							model: ollamaModel.model,
 						},
-						{ headers },
+						{ headers, timeout: OLLAMA_REQUEST_TIMEOUT_MS },
 					)
-					.then((ollamaModelInfo) => {
+					.then((ollamaModelInfo: { data: OllamaModelInfoResponse }) => {
 						const modelInfo = parseOllamaModel(ollamaModelInfo.data)
 						// Only include models that support native tools
 						if (modelInfo) {
@@ -108,13 +115,23 @@ export async function getOllamaModels(
 		} else {
 			console.error(`Error parsing Ollama models response: ${JSON.stringify(parsedResponse.error, null, 2)}`)
 		}
-	} catch (error) {
-		if (error.code === "ECONNREFUSED") {
-			console.warn(`Failed connecting to Ollama at ${baseUrl}`)
-		} else {
-			console.error(
-				`Error fetching Ollama models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`,
+	} catch (error: any) {
+		// Build a user-friendly error message based on the failure type
+		const code = error?.code
+		const status = error?.response?.status
+
+		if (code === "ECONNREFUSED") {
+			throw new Error(`Connection refused by Ollama at ${baseUrl}. Is the server running and accessible?`)
+		} else if (code === "ECONNABORTED" || code === "ETIMEDOUT" || error?.message?.includes("timeout")) {
+			throw new Error(
+				`Connection to Ollama at ${baseUrl} timed out after ${OLLAMA_REQUEST_TIMEOUT_MS / 1000}s. The server may be unreachable (check firewall settings).`,
 			)
+		} else if (code === "ENOTFOUND") {
+			throw new Error(`Could not resolve hostname for Ollama at ${baseUrl}. Check the URL.`)
+		} else if (status) {
+			throw new Error(`Ollama at ${baseUrl} returned HTTP ${status}.`)
+		} else {
+			throw new Error(`Failed to connect to Ollama at ${baseUrl}: ${error?.message || String(error)}`)
 		}
 	}
 
diff --git a/src/core/webview/__tests__/webviewMessageHandler.spec.ts b/src/core/webview/__tests__/webviewMessageHandler.spec.ts
index faa8e926825..153b93edc0f 100644
--- a/src/core/webview/__tests__/webviewMessageHandler.spec.ts
+++ b/src/core/webview/__tests__/webviewMessageHandler.spec.ts
@@ -256,6 +256,35 @@ describe("webviewMessageHandler - requestOllamaModels", () => {
 			ollamaModels: mockModels,
 		})
 	})
+
+	it("always sends a response even when models list is empty", async () => {
+		mockGetModels.mockResolvedValue({})
+
+		await webviewMessageHandler(mockClineProvider, {
+			type: "requestOllamaModels",
+		})
+
+		expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({
+			type: "ollamaModels",
+			ollamaModels: {},
+		})
+	})
+
+	it("sends error message to webview when connection fails", async () => {
+		mockGetModels.mockRejectedValue(
+			new Error("Connection refused by Ollama at http://localhost:1234. Is the server running and accessible?"),
+		)
+
+		await webviewMessageHandler(mockClineProvider, {
+			type: "requestOllamaModels",
+		})
+
+		expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({
+			type: "ollamaModels",
+			ollamaModels: {},
+			error: "Connection refused by Ollama at http://localhost:1234. Is the server running and accessible?",
+		})
+	})
 })
 
 describe("webviewMessageHandler - requestRouterModels", () => {
diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts
index cc4b1a27e9e..e2748496fe1 100644
--- a/src/core/webview/webviewMessageHandler.ts
+++ b/src/core/webview/webviewMessageHandler.ts
@@ -1008,12 +1008,17 @@ export const webviewMessageHandler = async (
 
 				const ollamaModels = await getModels(ollamaOptions)
 
-				if (Object.keys(ollamaModels).length > 0) {
-					provider.postMessageToWebview({ type: "ollamaModels", ollamaModels: ollamaModels })
-				}
+				// Always send a response so the webview doesn't just timeout silently
+				provider.postMessageToWebview({ type: "ollamaModels", ollamaModels })
 			} catch (error) {
-				// Silently fail - user hasn't configured Ollama yet
-				console.debug("Ollama models fetch failed:", error)
+				const errorMessage = error instanceof Error ? error.message : String(error)
+				console.debug("Ollama models fetch failed:", errorMessage)
+				// Send empty models with error so the UI can display the connection failure
+				provider.postMessageToWebview({
+					type: "ollamaModels",
+					ollamaModels: {},
+					error: errorMessage,
+				})
 			}
 			break
 		}
diff --git a/webview-ui/src/components/settings/providers/Ollama.tsx b/webview-ui/src/components/settings/providers/Ollama.tsx
index 32f99c60255..402f7c13c3c 100644
--- a/webview-ui/src/components/settings/providers/Ollama.tsx
+++ b/webview-ui/src/components/settings/providers/Ollama.tsx
@@ -20,6 +20,7 @@ export const Ollama = ({ apiConfiguration, setApiConfigurationField }: OllamaPro
 	const { t } = useAppTranslation()
 
 	const [ollamaModels, setOllamaModels] = useState<Record<string, ModelInfo>>({})
+	const [connectionError, setConnectionError] = useState<string | undefined>()
 	const routerModels = useRouterModels()
 
 	const handleInputChange = useCallback(
@@ -41,6 +42,8 @@ export const Ollama = ({ apiConfiguration, setApiConfigurationField }: OllamaPro
 				{
 					const newModels = message.ollamaModels ?? {}
 					setOllamaModels(newModels)
+					// Surface connection errors from the backend
+					setConnectionError(message.error)
 				}
 				break
 			}
@@ -100,6 +103,7 @@ export const Ollama = ({ apiConfiguration, setApiConfigurationField }: OllamaPro
 				)}
+			{connectionError && <div>{connectionError}</div>}
 	const timeout = setTimeout(() => {
 		cleanup()
 		reject(new Error("Ollama models request timed out"))
-	}, 10000)
+	}, 15000) // Allow extra time for backend timeout (10s) + overhead
 
 	const handler = (event: MessageEvent) => {
 		const message: ExtensionMessage = event.data
@@ -22,7 +22,12 @@ const getOllamaModels = async () =>
 			clearTimeout(timeout)
 			cleanup()
 
-			if (message.ollamaModels) {
+			// The backend now always sends a response, even on error.
+			// If there's an error field, reject with the server's error message
+			// so callers get actionable diagnostics.
+			if (message.error) {
+				reject(new Error(message.error))
+			} else if (message.ollamaModels) {
 				resolve(message.ollamaModels)
 			} else {
 				reject(new Error("No Ollama models in response"))
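
The final two hunks above lost their file header in this patch. For orientation, here is a reassembly sketch of the webview-side helper they modify: it posts "requestOllamaModels" and resolves or rejects on the "ollamaModels" reply. The 15s timeout, the message shapes, and the error-field handling follow the hunks' own lines; the file name, the `vscode` bridge stub, the `OllamaModelsMessage` type, and the cleanup helper are assumptions beyond what the hunks show.

// Reassembly sketch — enclosing file unknown; only the hunk lines are verbatim.
declare const vscode: { postMessage(message: unknown): void } // assumed webview bridge

type OllamaModelsMessage = {
	type?: string
	ollamaModels?: Record<string, unknown>
	error?: string
}

const getOllamaModels = (): Promise<Record<string, unknown>> =>
	new Promise((resolve, reject) => {
		const cleanup = () => window.removeEventListener("message", handler)

		// Allow extra time for the backend's own 10s axios timeout, plus overhead.
		const timeout = setTimeout(() => {
			cleanup()
			reject(new Error("Ollama models request timed out"))
		}, 15000)

		const handler = (event: MessageEvent) => {
			const message: OllamaModelsMessage = event.data
			if (message.type !== "ollamaModels") return

			clearTimeout(timeout)
			cleanup()

			// The backend now always replies; an error field carries the
			// server-side diagnostic, so reject with it verbatim.
			if (message.error) {
				reject(new Error(message.error))
			} else if (message.ollamaModels) {
				resolve(message.ollamaModels)
			} else {
				reject(new Error("No Ollama models in response"))
			}
		}

		window.addEventListener("message", handler)
		vscode.postMessage({ type: "requestOllamaModels" })
	})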
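And a minimal sketch of how an extension-host caller might consume the now-throwing getOllamaModels(). The helper name `refreshOllamaModels`, the import path, and the (baseUrl, apiKey) parameter order are illustrative assumptions (the tests call it with baseUrl only); degrading to an empty model map on failure is just one possible policy.

// Consumption sketch — refreshOllamaModels is hypothetical, not project API.
import { getOllamaModels } from "./src/api/providers/fetchers/ollama" // path assumed

async function refreshOllamaModels(baseUrl: string, apiKey?: string) {
	try {
		// Resolves with the model map, or throws one of the descriptive errors
		// introduced above (connection refused, 10s timeout, DNS failure, HTTP status).
		return await getOllamaModels(baseUrl, apiKey)
	} catch (error) {
		const message = error instanceof Error ? error.message : String(error)
		console.error(message) // stand-in for surfacing the message in a UI
		return {} // degrade to an empty model list
	}
}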