69 changes: 51 additions & 18 deletions src/api/providers/fetchers/__tests__/ollama.test.ts
@@ -167,10 +167,14 @@ describe("Ollama Fetcher", () => {
const result = await getOllamaModels(baseUrl)

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {} })
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {}, timeout: 10_000 })

expect(mockedAxios.post).toHaveBeenCalledTimes(1)
expect(mockedAxios.post).toHaveBeenCalledWith(`${baseUrl}/api/show`, { model: modelName }, { headers: {} })
expect(mockedAxios.post).toHaveBeenCalledWith(
`${baseUrl}/api/show`,
{ model: modelName },
{ headers: {}, timeout: 10_000 },
)

expect(typeof result).toBe("object")
expect(result).not.toBeInstanceOf(Array)
@@ -235,36 +239,58 @@ describe("Ollama Fetcher", () => {
expect(result[modelName]).toBeUndefined()
})

it("should return an empty list if the initial /api/tags call fails", async () => {
it("should throw an error with descriptive message if the initial /api/tags call fails", async () => {
const baseUrl = "http://localhost:11434"
mockedAxios.get.mockRejectedValueOnce(new Error("Network error"))
const consoleInfoSpy = vi.spyOn(console, "error").mockImplementation(() => {}) // Spy and suppress output

const result = await getOllamaModels(baseUrl)
await expect(getOllamaModels(baseUrl)).rejects.toThrow(
`Failed to connect to Ollama at ${baseUrl}: Network error`,
)

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {} })
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {}, timeout: 10_000 })
expect(mockedAxios.post).not.toHaveBeenCalled()
expect(result).toEqual({})
})

it("should log an info message and return an empty object on ECONNREFUSED", async () => {
it("should throw a user-friendly error on ECONNREFUSED", async () => {
const baseUrl = "http://localhost:11434"
const consoleInfoSpy = vi.spyOn(console, "warn").mockImplementation(() => {}) // Spy and suppress output

const econnrefusedError = new Error("Connection refused") as any
econnrefusedError.code = "ECONNREFUSED"
mockedAxios.get.mockRejectedValueOnce(econnrefusedError)

const result = await getOllamaModels(baseUrl)
await expect(getOllamaModels(baseUrl)).rejects.toThrow(
`Connection refused by Ollama at ${baseUrl}. Is the server running and accessible?`,
)

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {} })
expect(mockedAxios.post).not.toHaveBeenCalled()
expect(consoleInfoSpy).toHaveBeenCalledWith(`Failed connecting to Ollama at ${baseUrl}`)
expect(result).toEqual({})
})

consoleInfoSpy.mockRestore() // Restore original console.info
it("should throw a user-friendly error on timeout", async () => {
const baseUrl = "http://10.3.4.5:11434"

const timeoutError = new Error("timeout of 10000ms exceeded") as any
timeoutError.code = "ECONNABORTED"
mockedAxios.get.mockRejectedValueOnce(timeoutError)

await expect(getOllamaModels(baseUrl)).rejects.toThrow(/timed out after 10s.*firewall/)
})

it("should throw an error for invalid URL", async () => {
await expect(getOllamaModels("not-a-valid-url")).rejects.toThrow(/Invalid Ollama URL/)
})

it("should throw a user-friendly error on ENOTFOUND", async () => {
const baseUrl = "http://nonexistent-host:11434"

const enotfoundError = new Error("getaddrinfo ENOTFOUND nonexistent-host") as any
enotfoundError.code = "ENOTFOUND"
mockedAxios.get.mockRejectedValueOnce(enotfoundError)

await expect(getOllamaModels(baseUrl)).rejects.toThrow(
`Could not resolve hostname for Ollama at ${baseUrl}. Check the URL.`,
)
})

it("should handle models with null families field in API response", async () => {
@@ -317,10 +343,14 @@ describe("Ollama Fetcher", () => {
const result = await getOllamaModels(baseUrl)

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {} })
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: {}, timeout: 10_000 })

expect(mockedAxios.post).toHaveBeenCalledTimes(1)
expect(mockedAxios.post).toHaveBeenCalledWith(`${baseUrl}/api/show`, { model: modelName }, { headers: {} })
expect(mockedAxios.post).toHaveBeenCalledWith(
`${baseUrl}/api/show`,
{ model: modelName },
{ headers: {}, timeout: 10_000 },
)

expect(typeof result).toBe("object")
expect(result).not.toBeInstanceOf(Array)
@@ -384,13 +414,16 @@ describe("Ollama Fetcher", () => {
const expectedHeaders = { Authorization: `Bearer ${apiKey}` }

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, { headers: expectedHeaders })
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`, {
headers: expectedHeaders,
timeout: 10_000,
})

expect(mockedAxios.post).toHaveBeenCalledTimes(1)
expect(mockedAxios.post).toHaveBeenCalledWith(
`${baseUrl}/api/show`,
{ model: modelName },
{ headers: expectedHeaders },
{ headers: expectedHeaders, timeout: 10_000 },
)

expect(typeof result).toBe("object")
53 changes: 35 additions & 18 deletions src/api/providers/fetchers/ollama.ts
@@ -2,6 +2,10 @@ import axios from "axios"
import { ModelInfo, ollamaDefaultModelInfo } from "@roo-code/types"
import { z } from "zod"

// 10-second timeout for Ollama HTTP requests. This prevents indefinite hangs
// when connecting to unreachable external servers (TCP default is ~30s).
const OLLAMA_REQUEST_TIMEOUT_MS = 10_000

const OllamaModelDetailsSchema = z.object({
family: z.string(),
families: z.array(z.string()).nullable().optional(),
@@ -68,18 +72,21 @@ export async function getOllamaModels(
// clearing the input can leave an empty string; use the default in that case
baseUrl = baseUrl === "" ? "http://localhost:11434" : baseUrl

try {
if (!URL.canParse(baseUrl)) {
return models
}
if (!URL.canParse(baseUrl)) {
throw new Error(`Invalid Ollama URL: ${baseUrl}`)
}

// Prepare headers with optional API key
const headers: Record<string, string> = {}
if (apiKey) {
headers["Authorization"] = `Bearer ${apiKey}`
}
// Prepare headers with optional API key
const headers: Record<string, string> = {}
if (apiKey) {
headers["Authorization"] = `Bearer ${apiKey}`
}

const response = await axios.get<OllamaModelsResponse>(`${baseUrl}/api/tags`, { headers })
try {
const response = await axios.get<OllamaModelsResponse>(`${baseUrl}/api/tags`, {
headers,
timeout: OLLAMA_REQUEST_TIMEOUT_MS,
})
const parsedResponse = OllamaModelsResponseSchema.safeParse(response.data)
let modelInfoPromises = []

@@ -92,9 +99,9 @@
{
model: ollamaModel.model,
},
{ headers },
{ headers, timeout: OLLAMA_REQUEST_TIMEOUT_MS },
)
.then((ollamaModelInfo) => {
.then((ollamaModelInfo: { data: OllamaModelInfoResponse }) => {
const modelInfo = parseOllamaModel(ollamaModelInfo.data)
// Only include models that support native tools
if (modelInfo) {
@@ -108,13 +115,23 @@
} else {
console.error(`Error parsing Ollama models response: ${JSON.stringify(parsedResponse.error, null, 2)}`)
}
} catch (error) {
if (error.code === "ECONNREFUSED") {
console.warn(`Failed connecting to Ollama at ${baseUrl}`)
} else {
console.error(
`Error fetching Ollama models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`,
} catch (error: any) {
// Build a user-friendly error message based on the failure type
const code = error?.code
const status = error?.response?.status

if (code === "ECONNREFUSED") {
throw new Error(`Connection refused by Ollama at ${baseUrl}. Is the server running and accessible?`)
} else if (code === "ECONNABORTED" || code === "ETIMEDOUT" || error?.message?.includes("timeout")) {
throw new Error(
`Connection to Ollama at ${baseUrl} timed out after ${OLLAMA_REQUEST_TIMEOUT_MS / 1000}s. The server may be unreachable (check firewall settings).`,
)
} else if (code === "ENOTFOUND") {
throw new Error(`Could not resolve hostname for Ollama at ${baseUrl}. Check the URL.`)
} else if (status) {
throw new Error(`Ollama at ${baseUrl} returned HTTP ${status}.`)
} else {
throw new Error(`Failed to connect to Ollama at ${baseUrl}: ${error?.message || String(error)}`)
}
}
Comment on lines +118 to 136

Contributor Author:

The try block starting at line 85 encompasses both the /api/tags request and the Promise.all of individual /api/show calls. If /api/tags succeeds but a single model's /api/show call fails (e.g., times out on a large model), Promise.all rejects and this catch block produces a connection-level error like "Connection to Ollama at ... timed out" -- which is misleading, since the server IS reachable. Consider wrapping individual /api/show calls with their own error handling (or using Promise.allSettled) so that one model's failure doesn't discard all successfully fetched models and produce a false connection error.
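A minimal sketch of the Promise.allSettled variant suggested above, not the PR's code: parseOllamaModel, OLLAMA_REQUEST_TIMEOUT_MS, OllamaModelInfoResponse, models, headers, and baseUrl are taken from the diff, while keying results by ollamaModel.name is an assumption, since that part of the original loop is collapsed in this view.

```ts
// Hypothetical rework of the /api/show fan-out using Promise.allSettled.
// One slow or failing model no longer rejects the whole fetch, so the outer
// catch can't misreport a reachable server as a connection failure.
const settled = await Promise.allSettled(
	parsedResponse.data.models.map((ollamaModel) =>
		axios
			.post<OllamaModelInfoResponse>(
				`${baseUrl}/api/show`,
				{ model: ollamaModel.model },
				{ headers, timeout: OLLAMA_REQUEST_TIMEOUT_MS },
			)
			// Assumption: results are keyed by the model's name field; the
			// original assignment is hidden in the collapsed diff lines.
			.then((res) => ({ name: ollamaModel.name, info: parseOllamaModel(res.data) })),
	),
)

for (const result of settled) {
	if (result.status === "fulfilled" && result.value.info) {
		models[result.value.name] = result.value.info
	} else if (result.status === "rejected") {
		// /api/tags already succeeded, so this is a per-model failure: log and skip.
		console.warn(`Skipping Ollama model details: ${result.reason}`)
	}
}
```

Because Promise.allSettled never rejects, connection-level error handling stays confined to the /api/tags request.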
29 changes: 29 additions & 0 deletions src/core/webview/__tests__/webviewMessageHandler.spec.ts
@@ -256,6 +256,35 @@ describe("webviewMessageHandler - requestOllamaModels", () => {
ollamaModels: mockModels,
})
})

it("always sends a response even when models list is empty", async () => {
mockGetModels.mockResolvedValue({})

await webviewMessageHandler(mockClineProvider, {
type: "requestOllamaModels",
})

expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({
type: "ollamaModels",
ollamaModels: {},
})
})

it("sends error message to webview when connection fails", async () => {
mockGetModels.mockRejectedValue(
new Error("Connection refused by Ollama at http://localhost:1234. Is the server running and accessible?"),
)

await webviewMessageHandler(mockClineProvider, {
type: "requestOllamaModels",
})

expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({
type: "ollamaModels",
ollamaModels: {},
error: "Connection refused by Ollama at http://localhost:1234. Is the server running and accessible?",
})
})
})

describe("webviewMessageHandler - requestRouterModels", () => {
15 changes: 10 additions & 5 deletions src/core/webview/webviewMessageHandler.ts
@@ -1008,12 +1008,17 @@ export const webviewMessageHandler = async (

const ollamaModels = await getModels(ollamaOptions)

if (Object.keys(ollamaModels).length > 0) {
provider.postMessageToWebview({ type: "ollamaModels", ollamaModels: ollamaModels })
}
// Always send a response so the webview doesn't just time out silently
provider.postMessageToWebview({ type: "ollamaModels", ollamaModels })
} catch (error) {
// Silently fail - user hasn't configured Ollama yet
console.debug("Ollama models fetch failed:", error)
const errorMessage = error instanceof Error ? error.message : String(error)
console.debug("Ollama models fetch failed:", errorMessage)
// Send empty models with error so the UI can display the connection failure
provider.postMessageToWebview({
type: "ollamaModels",
ollamaModels: {},
error: errorMessage,
})
}
break
}
6 changes: 5 additions & 1 deletion webview-ui/src/components/settings/providers/Ollama.tsx
@@ -20,6 +20,7 @@ export const Ollama = ({ apiConfiguration, setApiConfigurationField }: OllamaPro
const { t } = useAppTranslation()

const [ollamaModels, setOllamaModels] = useState<ModelRecord>({})
const [connectionError, setConnectionError] = useState<string | undefined>()
const routerModels = useRouterModels()

const handleInputChange = useCallback(
@@ -41,6 +42,8 @@
{
const newModels = message.ollamaModels ?? {}
setOllamaModels(newModels)
// Surface connection errors from the backend
setConnectionError(message.error)
}
break
}
@@ -100,6 +103,7 @@
</div>
</VSCodeTextField>
)}
{connectionError && <div className="text-sm text-vscode-errorForeground">{connectionError}</div>}
Contributor Author:

Duplicate error display: connectionError is rendered here as a standalone <div> and also passed to ModelPicker via errorMessage={connectionError ?? modelNotAvailableError} on line 115. When there is a connection error, the user sees the same message twice -- once in this div and once inside ModelPicker's ApiErrorMessage. Consider removing this standalone div and relying solely on the ModelPicker prop, or vice versa.
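A minimal sketch of the suggested fix, assuming ModelPicker displays whatever errorMessage it receives; props hidden by the collapsed diff lines are marked as elided rather than guessed.

```tsx
/* Standalone error <div> removed; ModelPicker is the single place the
   connection error is surfaced, so the message appears only once. */
<ModelPicker
	apiConfiguration={apiConfiguration}
	setApiConfigurationField={setApiConfigurationField}
	/* ...props elided in the collapsed diff lines... */
	modelIdKey="ollamaModelId"
	serviceName="Ollama"
	serviceUrl="https://ollama.ai"
	errorMessage={connectionError ?? modelNotAvailableError}
	hidePricing
/>
```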
<ModelPicker
apiConfiguration={apiConfiguration}
setApiConfigurationField={setApiConfigurationField}
@@ -108,7 +112,7 @@
modelIdKey="ollamaModelId"
serviceName="Ollama"
serviceUrl="https://ollama.ai"
errorMessage={modelNotAvailableError}
errorMessage={connectionError ?? modelNotAvailableError}
hidePricing
/>
<VSCodeTextField
9 changes: 7 additions & 2 deletions webview-ui/src/components/ui/hooks/useOllamaModels.ts
@@ -13,7 +13,7 @@ const getOllamaModels = async () =>
const timeout = setTimeout(() => {
cleanup()
reject(new Error("Ollama models request timed out"))
}, 10000)
}, 15000) // Allow extra time for backend timeout (10s) + overhead

const handler = (event: MessageEvent) => {
const message: ExtensionMessage = event.data
@@ -22,7 +22,12 @@
clearTimeout(timeout)
cleanup()

if (message.ollamaModels) {
// The backend now always sends a response, even on error.
// If there's an error field, reject with the server's error message
// so callers get actionable diagnostics.
if (message.error) {
reject(new Error(message.error))
} else if (message.ollamaModels) {
resolve(message.ollamaModels)
} else {
reject(new Error("No Ollama models in response"))