Skip to content

Commit 2dec78c

Browse files
authored
fix: refresh models button not flushing cache properly (#9870)
1 parent 3c05cae commit 2dec78c

14 files changed

+63
-34
lines changed

src/__tests__/extension.spec.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -384,7 +384,7 @@ describe("extension.ts", () => {
384384
})
385385

386386
// Verify flushModels was called to clear the cache on logout
387-
expect(flushModels).toHaveBeenCalledWith("roo", false)
387+
expect(flushModels).toHaveBeenCalledWith({ provider: "roo" }, false)
388388
})
389389
})
390390
})

src/api/providers/__tests__/deepinfra.spec.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ vitest.mock("../fetchers/modelCache", () => ({
2626
getModels: vitest.fn().mockResolvedValue({
2727
[deepInfraDefaultModelId]: deepInfraDefaultModelInfo,
2828
}),
29+
getModelsFromCache: vitest.fn().mockReturnValue(undefined),
2930
}))
3031

3132
import OpenAI from "openai"

src/api/providers/__tests__/lite-llm.spec.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@ vi.mock("../fetchers/modelCache", () => ({
4242
"gpt-4-turbo": { ...litellmDefaultModelInfo, maxTokens: 8192 },
4343
})
4444
}),
45+
getModelsFromCache: vi.fn().mockReturnValue(undefined),
4546
}))
4647

4748
describe("LiteLLMHandler", () => {

src/api/providers/__tests__/unbound.spec.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,7 @@ vitest.mock("../fetchers/modelCache", () => ({
7171
},
7272
})
7373
}),
74+
getModelsFromCache: vitest.fn().mockReturnValue(undefined),
7475
}))
7576

7677
// Mock OpenAI client

src/api/providers/__tests__/vercel-ai-gateway.spec.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@ vitest.mock("../fetchers/modelCache", () => ({
5151
},
5252
})
5353
}),
54+
getModelsFromCache: vitest.fn().mockReturnValue(undefined),
5455
}))
5556

5657
vitest.mock("../../transform/caching/vercel-ai-gateway", () => ({

src/api/providers/fetchers/lmstudio.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ export const forceFullModelDetailsLoad = async (baseUrl: string, modelId: string
1919
const client = new LMStudioClient({ baseUrl: lmsUrl })
2020
await client.llm.model(modelId)
2121
// Flush and refresh cache to get updated model details
22-
await flushModels("lmstudio", true)
22+
await flushModels({ provider: "lmstudio", baseUrl }, true)
2323

2424
// Mark this model as having full details loaded.
2525
modelsWithLoadedDetails.add(modelId)

src/api/providers/fetchers/modelCache.ts

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -267,20 +267,20 @@ export async function initializeModelCacheRefresh(): Promise<void> {
267267
/**
268268
* Flush models memory cache for a specific router.
269269
*
270-
* @param router - The router to flush models for.
270+
* @param options - The options for fetching models, including provider, apiKey, and baseUrl
271271
* @param refresh - If true, immediately fetch fresh data from API
272272
*/
273-
export const flushModels = async (router: RouterName, refresh: boolean = false): Promise<void> => {
273+
export const flushModels = async (options: GetModelsOptions, refresh: boolean = false): Promise<void> => {
274+
const { provider } = options
274275
if (refresh) {
275276
// Don't delete memory cache - let refreshModels atomically replace it
276277
// This prevents a race condition where getModels() might be called
277278
// before refresh completes, avoiding a gap in cache availability
278-
refreshModels({ provider: router } as GetModelsOptions).catch((error) => {
279-
console.error(`[flushModels] Refresh failed for ${router}:`, error)
280-
})
279+
// Await the refresh to ensure the cache is updated before returning
280+
await refreshModels(options)
281281
} else {
282282
// Only delete memory cache when not refreshing
283-
memoryCache.del(router)
283+
memoryCache.del(provider)
284284
}
285285
}
286286

src/api/providers/router-provider.ts

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ import type { ModelInfo } from "@roo-code/types"
55
import { ApiHandlerOptions, RouterName, ModelRecord } from "../../shared/api"
66

77
import { BaseProvider } from "./base-provider"
8-
import { getModels } from "./fetchers/modelCache"
8+
import { getModels, getModelsFromCache } from "./fetchers/modelCache"
99

1010
import { DEFAULT_HEADERS } from "./constants"
1111

@@ -63,9 +63,22 @@ export abstract class RouterProvider extends BaseProvider {
6363
override getModel(): { id: string; info: ModelInfo } {
6464
const id = this.modelId ?? this.defaultModelId
6565

66-
return this.models[id]
67-
? { id, info: this.models[id] }
68-
: { id: this.defaultModelId, info: this.defaultModelInfo }
66+
// First check instance models (populated by fetchModel)
67+
if (this.models[id]) {
68+
return { id, info: this.models[id] }
69+
}
70+
71+
// Fall back to global cache (synchronous disk/memory cache)
72+
// This ensures models are available before fetchModel() is called
73+
const cachedModels = getModelsFromCache(this.name)
74+
if (cachedModels?.[id]) {
75+
// Also populate instance models for future calls
76+
this.models = cachedModels
77+
return { id, info: cachedModels[id] }
78+
}
79+
80+
// Last resort: return default model
81+
return { id: this.defaultModelId, info: this.defaultModelInfo }
6982
}
7083

7184
protected supportsTemperature(modelId: string): boolean {

src/core/webview/__tests__/ClineProvider.spec.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -237,6 +237,7 @@ vi.mock("../../../integrations/misc/extract-text", () => ({
237237
vi.mock("../../../api/providers/fetchers/modelCache", () => ({
238238
getModels: vi.fn().mockResolvedValue({}),
239239
flushModels: vi.fn(),
240+
getModelsFromCache: vi.fn().mockReturnValue(undefined),
240241
}))
241242

242243
vi.mock("../../../shared/modes", () => ({
@@ -308,6 +309,7 @@ vi.mock("../../../integrations/misc/extract-text", () => ({
308309
vi.mock("../../../api/providers/fetchers/modelCache", () => ({
309310
getModels: vi.fn().mockResolvedValue({}),
310311
flushModels: vi.fn(),
312+
getModelsFromCache: vi.fn().mockReturnValue(undefined),
311313
}))
312314

313315
vi.mock("../diff/strategies/multi-search-replace", () => ({

src/core/webview/__tests__/ClineProvider.sticky-mode.spec.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -151,6 +151,7 @@ vi.mock("../../prompts/system", () => ({
151151
vi.mock("../../../api/providers/fetchers/modelCache", () => ({
152152
getModels: vi.fn().mockResolvedValue({}),
153153
flushModels: vi.fn(),
154+
getModelsFromCache: vi.fn().mockReturnValue(undefined),
154155
}))
155156

156157
vi.mock("../../../integrations/misc/extract-text", () => ({

0 commit comments

Comments (0)