diff --git a/.gitignore b/.gitignore
index fc175568df6..f4e89b70969 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,7 @@ ts-dist
 .turbo
 **/.serena
 .serena/
+opencode.json
 /result
 refs
 Session.vim
diff --git a/packages/opencode/src/cli/cmd/ollama.ts b/packages/opencode/src/cli/cmd/ollama.ts
new file mode 100644
index 00000000000..4477989caaa
--- /dev/null
+++ b/packages/opencode/src/cli/cmd/ollama.ts
@@ -0,0 +1,187 @@
+import { cmd } from "./cmd"
+import * as prompts from "@clack/prompts"
+import { UI } from "../ui"
+import { $ } from "bun"
+import fs from "fs/promises"
+import path from "path"
+import { mergeDeep } from "remeda"
+import { Global } from "../../global"
+
+/**
+ * Represents a single Ollama model
+ */
+export interface OllamaModel {
+  name: string
+}
+
+/**
+ * Collection of Ollama models indexed by model name
+ */
+export type OllamaModels = Record<string, OllamaModel>
+
+/**
+ * Ollama provider configuration structure
+ */
+export interface OllamaProviderConfig {
+  npm: string
+  name: string
+  options: {
+    baseURL: string
+  }
+  models: OllamaModels
+}
+
+/**
+ * Configuration object for Ollama
+ */
+export interface OllamaConfig {
+  $schema: string
+  provider: {
+    ollama: OllamaProviderConfig
+  }
+}
+
+/**
+ * Get the platform-appropriate command to detect ollama
+ * @param platform - The platform string (typically process.platform)
+ * @returns The command name to use ('where' for Windows, 'which' for Unix)
+ */
+export function getDetectionCommand(platform: string): string {
+  if (platform === "win32") {
+    return "where"
+  }
+  return "which"
+}
+
+/**
+ * Detect if ollama is installed in PATH using the platform-appropriate command
+ * @param platform - The platform string (typically process.platform)
+ * @returns Promise that resolves if ollama is found, rejects otherwise
+ */
+export async function detectOllama(platform: string = process.platform): Promise<void> {
+  const command = getDetectionCommand(platform)
+  await $`${command} ollama`.quiet()
+}
+
+export const OllamaCommand = cmd({
+  command: "ollama",
+  describe: "manage Ollama configuration",
+  builder: (yargs) => yargs.command(OllamaInitCommand).demandCommand(),
+  async handler() {},
+})
+
+export const OllamaInitCommand = cmd({
+  command: "init [output]",
+  describe: "generate Ollama provider configuration",
+  builder: (yargs) =>
+    yargs
+      .positional("output", {
+        describe: "output file path",
+        type: "string",
+      })
+      .option("base-url", {
+        describe: "Ollama base URL",
+        type: "string",
+        default: "http://localhost:11434/v1",
+      })
+      .option("global", {
+        alias: "g",
+        describe: "write to global config",
+        type: "boolean",
+        default: false,
+      })
+      .option("yes", {
+        alias: "y",
+        describe: "skip confirmation prompts",
+        type: "boolean",
+        default: false,
+      }),
+  async handler(args) {
+    UI.empty()
+    prompts.intro("Generate Ollama configuration")
+
+    const onError = () => {
+      prompts.log.error("Ollama not found in PATH")
+      prompts.log.info("Install Ollama from https://ollama.com or ensure it's in your PATH")
+      throw new UI.CancelledError()
+    }
+
+    // Check if ollama is installed (cross-platform)
+    await detectOllama().catch(onError)
+
+    // Get the list of models from ollama
+    const result = await $`ollama list`.text().catch(() => {
+      prompts.log.error("Failed to query ollama models")
+      prompts.log.info("Run 'ollama list' to check available models")
+      throw new UI.CancelledError()
+    })
+
+    const lines = result.split("\n").slice(1) // Skip header
+    const modelsList = lines.map((line) => line.trim().split(/\s+/)[0]).filter((name) => name && name.length > 0)
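+    // Each remaining `ollama list` row looks like "llama2:latest  abc123  3.8 GB  2 days ago";
+    // only the first whitespace-delimited column (the model name) is kept.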
+
+    if (modelsList.length === 0) {
+      prompts.log.warning("No models found from 'ollama list'")
+      prompts.log.info("Pull a model first: ollama pull llama2")
+      throw new UI.CancelledError()
+    }
+
+    prompts.log.success(`Found ${modelsList.length} model(s)`)
+
+    // Show preview of models
+    if (!args.yes) {
+      prompts.log.info(`Models: ${modelsList.join(", ")}`)
+
+      const confirm = await prompts.confirm({
+        message: `Create config with ${modelsList.length} model(s)?`,
+        initialValue: true,
+      })
+
+      if (prompts.isCancel(confirm) || !confirm) {
+        throw new UI.CancelledError()
+      }
+    }
+
+    // Build models object
+    const models: OllamaModels = {}
+    for (const modelName of modelsList) {
+      models[modelName] = { name: modelName }
+    }
+
+    // Determine output path
+    const outputPath = args.global
+      ? path.join(Global.Path.config, "opencode.json")
+      : path.resolve(args.output || "opencode.json")
+
+    // Read existing config if it exists
+    const existingConfig = await Bun.file(outputPath)
+      .json()
+      .catch(() => ({}))
+
+    // Build new config object
+    const newConfig: OllamaConfig = {
+      $schema: "https://opencode.ai/config.json",
+      provider: {
+        ollama: {
+          npm: "@ai-sdk/openai-compatible",
+          name: "Ollama (local)",
+          options: {
+            baseURL: args.baseUrl,
+          },
+          models,
+        },
+      },
+    }
+
+    // Merge with existing config
+    const mergedConfig = mergeDeep(existingConfig, newConfig)
+
+    // Write config file
+    await fs.writeFile(outputPath, JSON.stringify(mergedConfig, null, 2) + "\n").catch((error) => {
+      prompts.log.error(`Failed to write config file: ${error}`)
+      throw new UI.CancelledError()
+    })
+
+    prompts.log.success(`Wrote ${outputPath} with ${modelsList.length} model(s)`)
+    prompts.outro("Ollama configuration generated successfully")
+  },
+})
diff --git a/packages/opencode/src/index.ts b/packages/opencode/src/index.ts
index e1c2cbd1d51..c50b67e8fca 100644
--- a/packages/opencode/src/index.ts
+++ b/packages/opencode/src/index.ts
@@ -16,6 +16,7 @@ import { ServeCommand } from "./cli/cmd/serve"
 import { DebugCommand } from "./cli/cmd/debug"
 import { StatsCommand } from "./cli/cmd/stats"
 import { McpCommand } from "./cli/cmd/mcp"
+import { OllamaCommand } from "./cli/cmd/ollama"
 import { GithubCommand } from "./cli/cmd/github"
 import { ExportCommand } from "./cli/cmd/export"
 import { ImportCommand } from "./cli/cmd/import"
@@ -80,6 +81,7 @@ const cli = yargs(hideBin(process.argv))
   .completion("completion", "generate shell completion script")
   .command(AcpCommand)
   .command(McpCommand)
+  .command(OllamaCommand)
   .command(TuiThreadCommand)
   .command(TuiSpawnCommand)
   .command(AttachCommand)
diff --git a/packages/opencode/test/cli/cmd/ollama.test.ts b/packages/opencode/test/cli/cmd/ollama.test.ts
new file mode 100644
index 00000000000..6f775691d4d
--- /dev/null
+++ b/packages/opencode/test/cli/cmd/ollama.test.ts
@@ -0,0 +1,314 @@
+import { describe, expect, test, beforeAll, afterAll } from "bun:test"
+import { tmpdir } from "../../fixture/fixture"
+import path from "path"
+import fs from "fs/promises"
+import { $ } from "bun"
+
+describe("cli.ollama", () => {
+  let originalPath: string
+
+  beforeAll(() => {
+    originalPath = process.env.PATH || ""
+  })
+
+  afterAll(() => {
+    process.env.PATH = originalPath
+  })
+
+  test("generates config with models from ollama list", async () => {
+    await using tmp = await tmpdir({
+      init: async (dir) => {
+        // Create a mock ollama binary
+        const binDir = path.join(dir, "bin")
+        await fs.mkdir(binDir, { recursive: true })
+        const ollamaScript = path.join(binDir, "ollama")
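+        // The script below mimics `ollama list` output (header row plus one row per model),
+        // so the CLI can be exercised without a real Ollama install.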
"ollama") + + await Bun.write( + ollamaScript, + `#!/bin/bash +if [ "$1" = "list" ]; then + cat << EOF +NAME ID SIZE MODIFIED +llama2:latest abc123 3.8 GB 2 days ago +codellama:7b def456 3.8 GB 3 days ago +mistral:latest ghi789 4.1 GB 1 week ago +EOF +fi +`, + ) + await fs.chmod(ollamaScript, 0o755) + + // Add mock binary to PATH + process.env.PATH = `${binDir}:${originalPath}` + }, + }) + + // Run the ollama init command + const outputFile = path.join(tmp.path, "test-config.json") + await $`bun run ${path.join(__dirname, "../../../src/index.ts")} ollama init ${outputFile} -y`.cwd(tmp.path).quiet() + + // Verify the config file was created + const configContent = await Bun.file(outputFile).text() + const config = JSON.parse(configContent) + + expect(config.$schema).toBe("https://opencode.ai/config.json") + expect(config.provider.ollama).toBeDefined() + expect(config.provider.ollama.npm).toBe("@ai-sdk/openai-compatible") + expect(config.provider.ollama.name).toBe("Ollama (local)") + expect(config.provider.ollama.options.baseURL).toBe("http://localhost:11434/v1") + expect(config.provider.ollama.models).toBeDefined() + + // Verify models are present + expect(config.provider.ollama.models["llama2:latest"]).toEqual({ name: "llama2:latest" }) + expect(config.provider.ollama.models["codellama:7b"]).toEqual({ name: "codellama:7b" }) + expect(config.provider.ollama.models["mistral:latest"]).toEqual({ name: "mistral:latest" }) + }) + + test("uses custom base URL when provided", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + // Create a mock ollama binary + const binDir = path.join(dir, "bin") + await fs.mkdir(binDir, { recursive: true }) + const ollamaScript = path.join(binDir, "ollama") + + await Bun.write( + ollamaScript, + `#!/bin/bash +if [ "$1" = "list" ]; then + echo "NAME ID SIZE MODIFIED" + echo "llama2:latest abc123 3.8 GB 2 days ago" +fi +`, + ) + await fs.chmod(ollamaScript, 0o755) + process.env.PATH = `${binDir}:${originalPath}` + }, + }) + + const outputFile = path.join(tmp.path, "custom-url-config.json") + const customUrl = "http://192.168.1.100:11434/v1" + + await $`bun run ${path.join(__dirname, "../../../src/index.ts")} ollama init ${outputFile} --base-url ${customUrl} -y` + .cwd(tmp.path) + .quiet() + + const configContent = await Bun.file(outputFile).text() + const config = JSON.parse(configContent) + + expect(config.provider.ollama.options.baseURL).toBe(customUrl) + }) + + test("uses default filename when no output specified", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + // Create a mock ollama binary + const binDir = path.join(dir, "bin") + await fs.mkdir(binDir, { recursive: true }) + const ollamaScript = path.join(binDir, "ollama") + + await Bun.write( + ollamaScript, + `#!/bin/bash +if [ "$1" = "list" ]; then + echo "NAME ID SIZE MODIFIED" + echo "llama2:latest abc123 3.8 GB 2 days ago" +fi +`, + ) + await fs.chmod(ollamaScript, 0o755) + process.env.PATH = `${binDir}:${originalPath}` + }, + }) + + await $`bun run ${path.join(__dirname, "../../../src/index.ts")} ollama init -y`.cwd(tmp.path).quiet() + + // Verify default filename was used + const defaultFile = path.join(tmp.path, "opencode.json") + const exists = await Bun.file(defaultFile).exists() + expect(exists).toBe(true) + }) + + test("fails gracefully when ollama is not installed", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + // Set PATH to a directory without ollama + const binDir = path.join(dir, "bin") + await fs.mkdir(binDir, 
+        process.env.PATH = binDir
+      },
+    })
+
+    const outputFile = path.join(tmp.path, "should-not-exist.json")
+
+    const result = await $`bun run ${path.join(__dirname, "../../../src/index.ts")} ollama init ${outputFile} -y`
+      .cwd(tmp.path)
+      .nothrow()
+      .quiet()
+
+    expect(result.exitCode).not.toBe(0)
+
+    // Verify no config file was created
+    const exists = await Bun.file(outputFile).exists()
+    expect(exists).toBe(false)
+  })
+
+  test("fails gracefully when no models are available", async () => {
+    await using tmp = await tmpdir({
+      init: async (dir) => {
+        // Create a mock ollama binary that returns no models
+        const binDir = path.join(dir, "bin")
+        await fs.mkdir(binDir, { recursive: true })
+        const ollamaScript = path.join(binDir, "ollama")
+
+        await Bun.write(
+          ollamaScript,
+          `#!/bin/bash
+if [ "$1" = "list" ]; then
+  echo "NAME ID SIZE MODIFIED"
+fi
+`,
+        )
+        await fs.chmod(ollamaScript, 0o755)
+        process.env.PATH = `${binDir}:${originalPath}`
+      },
+    })
+
+    const outputFile = path.join(tmp.path, "no-models.json")
+
+    const result = await $`bun run ${path.join(__dirname, "../../../src/index.ts")} ollama init ${outputFile} -y`
+      .cwd(tmp.path)
+      .nothrow()
+      .quiet()
+
+    expect(result.exitCode).not.toBe(0)
+
+    // Verify no config file was created
+    const exists = await Bun.file(outputFile).exists()
+    expect(exists).toBe(false)
+  })
+
+  test("merges with existing config file", async () => {
+    await using tmp = await tmpdir({
+      init: async (dir) => {
+        // Create a mock ollama binary
+        const binDir = path.join(dir, "bin")
+        await fs.mkdir(binDir, { recursive: true })
+        const ollamaScript = path.join(binDir, "ollama")
+
+        await Bun.write(
+          ollamaScript,
+          `#!/bin/bash
+if [ "$1" = "list" ]; then
+  echo "NAME ID SIZE MODIFIED"
+  echo "llama2:latest abc123 3.8 GB 2 days ago"
+fi
+`,
+        )
+        await fs.chmod(ollamaScript, 0o755)
+        process.env.PATH = `${binDir}:${originalPath}`
+
+        // Create an existing config file with other providers
+        const outputFile = path.join(dir, "existing-config.json")
+        await Bun.write(
+          outputFile,
+          JSON.stringify({
+            $schema: "https://opencode.ai/config.json",
+            model: "anthropic/claude-sonnet-4",
+            provider: {
+              anthropic: {
+                models: {
+                  "claude-sonnet-4": {
+                    name: "Claude Sonnet 4",
+                  },
+                },
+              },
+            },
+          }),
+        )
+      },
+    })
+
+    const outputFile = path.join(tmp.path, "existing-config.json")
+
+    await $`bun run ${path.join(__dirname, "../../../src/index.ts")} ollama init ${outputFile} -y`.cwd(tmp.path).quiet()
+
+    const configContent = await Bun.file(outputFile).text()
+    const config = JSON.parse(configContent)
+
+    // Verify existing anthropic config is preserved
+    expect(config.model).toBe("anthropic/claude-sonnet-4")
+    expect(config.provider.anthropic).toBeDefined()
+    expect(config.provider.anthropic.models["claude-sonnet-4"]).toEqual({
+      name: "Claude Sonnet 4",
+    })
+
+    // Verify new ollama config was added
+    expect(config.provider.ollama).toBeDefined()
+    expect(config.provider.ollama.models["llama2:latest"]).toEqual({ name: "llama2:latest" })
+  })
+
+  test("writes to global config when --global flag is used", async () => {
+    await using tmp = await tmpdir({
+      init: async (dir) => {
+        // Create a mock ollama binary
+        const binDir = path.join(dir, "bin")
+        await fs.mkdir(binDir, { recursive: true })
+        const ollamaScript = path.join(binDir, "ollama")
+
+        await Bun.write(
+          ollamaScript,
+          `#!/bin/bash
+if [ "$1" = "list" ]; then
+  echo "NAME ID SIZE MODIFIED"
+  echo "llama2:latest abc123 3.8 GB 2 days ago"
+fi
+`,
+        )
+        await fs.chmod(ollamaScript, 0o755)
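+        // binDir is prepended to PATH below so the mock shadows any real ollama binary.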
+        process.env.PATH = `${binDir}:${originalPath}`
+      },
+    })
+
+    // Set XDG_CONFIG_HOME to tmp directory for testing
+    const originalConfigHome = process.env.XDG_CONFIG_HOME
+    const testConfigDir = path.join(tmp.path, ".config", "opencode")
+    await fs.mkdir(testConfigDir, { recursive: true })
+    process.env.XDG_CONFIG_HOME = path.join(tmp.path, ".config")
+
+    await $`bun run ${path.join(__dirname, "../../../src/index.ts")} ollama init --global -y`.cwd(tmp.path).quiet()
+
+    // Restore original XDG_CONFIG_HOME
+    if (originalConfigHome) {
+      process.env.XDG_CONFIG_HOME = originalConfigHome
+    } else {
+      delete process.env.XDG_CONFIG_HOME
+    }
+
+    // Verify config was written to global location
+    const globalConfigPath = path.join(testConfigDir, "opencode.json")
+    const exists = await Bun.file(globalConfigPath).exists()
+    expect(exists).toBe(true)
+
+    const configContent = await Bun.file(globalConfigPath).text()
+    const config = JSON.parse(configContent)
+
+    expect(config.provider.ollama).toBeDefined()
+    expect(config.provider.ollama.models["llama2:latest"]).toEqual({ name: "llama2:latest" })
+  })
+
+  test("uses 'which' command on Unix platforms", async () => {
+    const { getDetectionCommand } = await import("../../../src/cli/cmd/ollama")
+
+    expect(getDetectionCommand("linux")).toBe("which")
+    expect(getDetectionCommand("darwin")).toBe("which")
+    expect(getDetectionCommand("freebsd")).toBe("which")
+    expect(getDetectionCommand("openbsd")).toBe("which")
+  })
+
+  test("uses 'where' command on Windows platform", async () => {
+    const { getDetectionCommand } = await import("../../../src/cli/cmd/ollama")
+
+    expect(getDetectionCommand("win32")).toBe("where")
+  })
+})
diff --git a/packages/web/src/content/docs/providers.mdx b/packages/web/src/content/docs/providers.mdx
index 0e4539e122d..415cc9de215 100644
--- a/packages/web/src/content/docs/providers.mdx
+++ b/packages/web/src/content/docs/providers.mdx
@@ -996,6 +996,40 @@ To use Kimi K2 from Moonshot AI:
 
 You can configure opencode to use local models through Ollama.
 
+#### Quick Setup
+
+The easiest way to set up Ollama is to use the built-in config generator:
+
+```bash
+# Create config in current directory
+opencode ollama init
+
+# Or write to the global config
+opencode ollama init --global
+```
+
+This will automatically detect all your installed Ollama models and create an `opencode.json` configuration file.
+
+**Options:**
+
+```bash
+# Write to global config (~/.config/opencode/opencode.json)
+opencode ollama init -g
+
+# Specify custom output file
+opencode ollama init my-config.json
+
+# Use custom Ollama URL
+opencode ollama init --base-url http://192.168.1.100:11434/v1
+
+# Skip confirmation prompts
+opencode ollama init -y
+```
+
+#### Manual Configuration
+
+Alternatively, you can manually configure Ollama in your `opencode.json`:
+
 ```json title="opencode.json" "ollama" {5, 6, 8, 10-14}
 {
   "$schema": "https://opencode.ai/config.json",