diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..3b6e655f --- /dev/null +++ b/.dockerignore @@ -0,0 +1,18 @@ +.git +.github +node_modules +**/node_modules +.turbo +.cache +dist +apps/*/dist +packages/*/dist +apps/lander/public +*.log +*.sqlite +*.db* +tmp +.DS_Store +.env +.env.* +docs diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000..4674b8c6 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,31 @@ +# Repository Guidelines + +## Project Structure & Module Organization +- `apps/` hosts runnable surfaces: `server` (REST + proxy), `tui` (interactive console), and `lander` (marketing site screenshot assets). +- `packages/` contains reusable modules grouped by concern: telemetry + core logic (`core`, `core-di`, `load-balancer`), IO layers (`http-api`, `proxy`, `database`), shared UI kits (`dashboard-web`, `ui-common`, `ui-constants`), and tooling (`agents`, `cli-commands`). +- Docs and product briefs live in `docs/`; configs such as `tsconfig.json` and `biome.json` sit at the root for repo-wide tooling. + +## Build, Test & Development Commands +- `bun install` installs all workspace dependencies (Bun >= 1.2.8 required). +- `bun run dev:server` hot-reloads the API/proxy; `bun run dev:dashboard` serves the React dashboard with Bun’s HMR. +- `bun run tui` launches the interactive terminal UI; `bun run ccflare` builds then starts the TUI + server bundle. +- `bun run build` orchestrates `build:dashboard`, `build:tui`, and optional `build:lander` for release artifacts. +- `bun run typecheck`, `bun run lint`, and `bun run format` gate submissions (Biome auto-formats with tabs + double quotes). + +## Coding Style & Naming Conventions +- Follow TypeScript strictness; prefer ES modules and workspace-relative imports (`@ccflare/`). +- Biome enforces tab indentation, double-quoted strings, and organized imports—run `bun run format` before commits. 
+- Use descriptive PascalCase for React components/Providers, camelCase for functions/instances, SCREAMING_SNAKE_CASE for env vars. + +## Testing Guidelines +- The project is migrating to Bun’s built-in test runner; place specs beside source files as `.test.ts` and target observable behavior rather than mocks. +- Until coverage targets solidify, add tests for every bug fix plus high-risk flows (load balancing, account rotation, OAuth refresh). Use `bun test` (or `bun test --watch` once available) before pushing. + +## Commit & Pull Request Guidelines +- Match the existing Conventional Commit style (`feat:`, `fix:`, `chore:`). Scope optional but encouraged for packages (e.g., `fix(tui-core): guard null response`). +- Each PR should describe the change, include reproduction steps or screenshots for UI/TUI work, and link any GitHub issues. +- Ensure CI-critical commands (`typecheck`, `lint`, `build`) pass locally; note any skipped tests and justify in the PR description. + +## Security & Configuration Tips +- Keep sensitive credentials in the local `.env`; never commit API keys. Prefer the config modules under `packages/config` for defaults. +- When debugging proxy flows, set `ANTHROPIC_BASE_URL` and related credentials via `bun run server` env vars instead of hardcoding values. 
diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..09837f67 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,47 @@ +# syntax=docker/dockerfile:1.7 + +ARG BUN_VERSION=1.2.8 + +FROM oven/bun:${BUN_VERSION} AS builder +WORKDIR /app + +# Copy workspace manifests first for better layer caching +COPY package.json bun.lock tsconfig.json biome.json ./ +COPY apps ./apps +COPY packages ./packages + +RUN bun install --frozen-lockfile +RUN bun run build:dashboard + +FROM oven/bun:${BUN_VERSION} AS runner +WORKDIR /app + +ENV NODE_ENV=production \ + XDG_CONFIG_HOME=/data \ + ccflare_CONFIG_PATH=/data/config/ccflare.json \ + ccflare_DB_PATH=/data/storage/ccflare.db \ + PORT=8080 + +# System dependencies for health checks +RUN apt-get update \ + && apt-get install -y --no-install-recommends curl \ + && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /app/package.json ./package.json +COPY --from=builder /app/bun.lock ./bun.lock +COPY --from=builder /app/tsconfig.json ./tsconfig.json +COPY --from=builder /app/biome.json ./biome.json +COPY --from=builder /app/apps ./apps +COPY --from=builder /app/packages ./packages + +RUN bun install --frozen-lockfile --production + +RUN mkdir -p /data/config /data/storage + +EXPOSE 8080 +VOLUME ["/data"] + +HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ + CMD curl -fsS "http://127.0.0.1:${PORT}/api/stats" >/dev/null || exit 1 + +ENTRYPOINT ["bun", "run", "server"] diff --git a/README.md b/README.md index 44bee0df..fe9b138f 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,51 @@ bun run ccflare export ANTHROPIC_BASE_URL=http://localhost:8080 ``` +## Run with Docker + +```bash +# Build the image (ships with dashboard assets prebuilt) +docker build -t ccflare . 
+ +# Start the server inside Docker +docker run -d \ + --name ccflare \ + -p 8080:8080 \ + -v ccflare_data:/data \ + -e PORT=8080 \ + -e LB_STRATEGY=session \ + ccflare +``` + +The container stores configuration and the SQLite database under `/data` (mapped to +`ccflare_CONFIG_PATH=/data/config/ccflare.json` and `ccflare_DB_PATH=/data/storage/ccflare.db`). +Mount a volume there (shown above) to persist settings between restarts. See +[`docs/deployment.md`](docs/deployment.md#docker-deployment) for compose examples +and advanced options (health checks, reverse proxies, custom networks, etc.). + +## Agent Workspaces & Discovery + +ccflare loads agents from Markdown files inside `.claude/agents/` folders. To keep +your container in sync with projects scattered across Linux, WSL, or Windows: + +```bash +# One-time automation: scan, capture workspaces, restart container with minimal mounts +bun run agents:setup + +# Manual scan if you want to keep the container running +bun run agents:scan -- /host /mnt/c --max-depth 8 +``` + +- The setup script stops `ccflare-dev`, launches a helper container with wide + mounts, runs the scanner, and restarts `ccflare-dev` with only the discovered + bind mounts plus `/data`, while sharing a `ccflare-workspaces` volume that + persists `/root/.ccflare/workspaces.json` between restarts. +- The dashboard now includes a **Register Workspace Paths** card (Agents tab) so + you can add absolute paths on the fly. Behind the scenes it calls + `POST /api/workspaces` (documented in [`docs/api-http.md`](docs/api-http.md#post-apiworkspaces)). +- For more examples (mount tables, environment variables, troubleshooting) see + [`docs/agent-workspaces.md`](docs/agent-workspaces.md). 
+ ## Features ### 🎯 Intelligent Load Balancing diff --git a/apps/tui/src/main.ts b/apps/tui/src/main.ts index 4c7c89ea..46a3ef8c 100644 --- a/apps/tui/src/main.ts +++ b/apps/tui/src/main.ts @@ -182,26 +182,29 @@ Examples: return; } - if (parsed.setModel) { - const config = new Config(); - // Validate the model - const modelMap: Record = { - "opus-4": CLAUDE_MODEL_IDS.OPUS_4, - "sonnet-4": CLAUDE_MODEL_IDS.SONNET_4, - "opus-4.1": CLAUDE_MODEL_IDS.OPUS_4_1, - }; - - const fullModel = modelMap[parsed.setModel]; - if (!fullModel) { - console.error(`❌ Invalid model: ${parsed.setModel}`); - console.error("Valid models: opus-4, sonnet-4"); - process.exit(1); - } - - config.setDefaultAgentModel(fullModel); - console.log(`✅ Default agent model set to: ${fullModel}`); - return; - } + if (parsed.setModel) { + const config = new Config(); + // Validate the model + const modelMap: Record = { + "sonnet-4.5": CLAUDE_MODEL_IDS.SONNET_4_5, + "opus-4.1": CLAUDE_MODEL_IDS.OPUS_4_1, + "haiku-4.5": CLAUDE_MODEL_IDS.HAIKU_4_5, + "opus-plan": CLAUDE_MODEL_IDS.OPUS_PLAN_MODE, + }; + + const fullModel = modelMap[parsed.setModel]; + if (!fullModel) { + console.error(`❌ Invalid model: ${parsed.setModel}`); + console.error( + `Valid models: ${Object.keys(modelMap).join(", ")}`, + ); + process.exit(1); + } + + config.setDefaultAgentModel(fullModel); + console.log(`✅ Default agent model set to: ${fullModel}`); + return; + } // Default: Launch interactive TUI with auto-started server const config = new Config(); diff --git a/bun.lock b/bun.lock index c06a75ea..b1b1f953 100644 --- a/bun.lock +++ b/bun.lock @@ -105,7 +105,10 @@ "name": "@ccflare/dashboard-web", "version": "1.0.0", "dependencies": { + "@ccflare/core": "workspace:*", "@ccflare/errors": "workspace:*", + "@ccflare/http-common": "workspace:*", + "@ccflare/types": "workspace:*", "@ccflare/ui-common": "workspace:*", "@ccflare/ui-constants": "workspace:*", "@radix-ui/react-dialog": "^1.1.14", @@ -698,7 +701,7 @@ "yoga-layout": 
["yoga-layout@3.2.1", "", {}, "sha512-0LPOt3AxKqMdFBZA3HBAt/t/8vIKq7VaQYbuA8WxCgung+p9TVyKRYdpvCb80HcdTN2NkbIKbhNwKUfm3tQywQ=="], - "@ccflare/dashboard-web/@types/bun": ["@types/bun@1.2.20", "", { "dependencies": { "bun-types": "1.2.20" } }, "sha512-dX3RGzQ8+KgmMw7CsW4xT5ITBSCrSbfHc36SNT31EOUg/LA9JWq0VDdEXDRSe1InVWpd2yLUM1FUF/kEOyTzYA=="], + "@ccflare/dashboard-web/@types/bun": ["@types/bun@1.3.1", "", { "dependencies": { "bun-types": "1.3.1" } }, "sha512-4jNMk2/K9YJtfqwoAa28c8wK+T7nvJFOjxI4h/7sORWcypRNxBpr+TPNaCfVWq70tLCJsqoFwcf0oI0JU/fvMQ=="], "@ccflare/errors/@types/bun": ["@types/bun@1.1.15", "", { "dependencies": { "bun-types": "1.1.42" } }, "sha512-Fi7ND1jCq8O5iU3s9z3TKHggD0hidgpe7wSxyisviXpbMmY4B1KiokF3f/mmjOoDrEcf873tSpixgen7Wm9X0g=="], @@ -720,7 +723,7 @@ "slice-ansi/is-fullwidth-code-point": ["is-fullwidth-code-point@5.0.0", "", { "dependencies": { "get-east-asian-width": "^1.0.0" } }, "sha512-OVa3u9kkBbw7b8Xw5F9P+D/T9X+Z4+JruYVNapTjPYZYUznQ5YfWeFkOj606XYYW8yugTfC8Pj0hYqvi4ryAhA=="], - "@ccflare/dashboard-web/@types/bun/bun-types": ["bun-types@1.2.20", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-pxTnQYOrKvdOwyiyd/7sMt9yFOenN004Y6O4lCcCUoKVej48FS5cvTw9geRaEcB9TsDZaJKAxPTVvi8tFsVuXA=="], + "@ccflare/dashboard-web/@types/bun/bun-types": ["bun-types@1.3.1", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-NMrcy7smratanWJ2mMXdpatalovtxVggkj11bScuWuiOoXTiKIu2eVS1/7qbyI/4yHedtsn175n4Sm4JcdHLXw=="], "@ccflare/errors/@types/bun/bun-types": ["bun-types@1.1.42", "", { "dependencies": { "@types/node": "~20.12.8", "@types/ws": "~8.5.10" } }, "sha512-beMbnFqWbbBQHll/bn3phSwmoOQmnX2nt8NI9iOQKFbgR5Z6rlH3YuaMdlid8vp5XGct3/W4QVQBmhoOEoe4nw=="], diff --git a/docs/agent-workspaces.md b/docs/agent-workspaces.md new file mode 100644 index 00000000..9f8c3d07 --- /dev/null +++ b/docs/agent-workspaces.md @@ -0,0 +1,81 @@ +# Agent Workspace Discovery + +ccflare 
automatically surfaces agents that live in Markdown files under `.claude/agents/` folders. This guide covers every way to point the proxy at new projects—across Linux, WSL, and Windows—and how to keep the container mounts in sync. + +## Default Discovery Flow + +1. **Scan for `.claude/agents` directories.** The `packages/agents` scanner crawls the filesystem and writes the results to `~/.ccflare/workspaces.json`. +2. **Persist the workspace list.** Each entry stores the absolute path, a short name, and `lastSeen` timestamp. +3. **Load agents.** The dashboard/API reads every Markdown file and merges metadata with any per-agent model preferences stored in the database. + +You can trigger the scan manually (`bun run agents:scan`) or run the automation script described below. + +## Manual Scan (`bun run agents:scan`) + +``` +bun run agents:scan -- /host /mnt/c --max-depth 8 +``` + +- Without arguments the scanner walks the current directory, your home folder, and a platform-aware list of defaults (`/workspaces`, `/host`, `/mnt/`, `/host_mnt/`, etc.). +- Pass custom roots via CLI arguments or `AGENT_SCAN_ROOTS="/host,/mnt/c"`. Use `AGENT_SCAN_EXTRA_ROOTS` to append to the defaults. +- `AGENT_SCAN_MAX_DEPTH` controls recursion depth (default `8`). Set `AGENT_SCAN_INCLUDE_ROOT=true` if you truly want to traverse `/`. +- Windows paths such as `C:\Projects\Repo` are automatically normalized to `/mnt/c/Projects/Repo` when the scanner runs inside WSL/Linux. + +The command logs every discovered `.claude/agents` directory plus any warnings (e.g., unsupported `model: inherit`). + +## Automated Setup (`bun run agents:setup`) + +The orchestration script handles the full workflow: + +1. Stops the current `ccflare-dev` container and any previous helper containers. +2. Launches a temporary `ccflare-dev-scan` container with wide mounts (`/` → `/host`, `/mnt/*`). +3. Runs the scanner (`bun run agents:scan --max-depth 8 /host /mnt/c`). +4. 
Reads `~/.ccflare/workspaces.json` from the helper container. +5. Tears down the helper and restarts `ccflare-dev` with bind mounts only for the discovered directories plus `/data`. + +```bash +bun run agents:setup +``` + +Environment variables: + +| Variable | Description | +| --- | --- | +| `CCFLARE_CONTAINER` | Name for the runtime container (`ccflare-dev` by default). | +| `CCFLARE_IMAGE` | Image tag to run (`ccflare:latest`). | +| `CCFLARE_DATA_VOLUME` | Data volume name (`ccflare-data`). | +| `CCFLARE_WORKSPACES_VOLUME` | Named volume that persists `/root/.ccflare/workspaces.json` (`ccflare-workspaces`). | +| `AGENT_SCAN_ROOTS` | Extra roots to mount during the scan (comma/semicolon/newline separated). | +| `AGENT_SCAN_MAX_DEPTH` | Overrides traversal depth. | + +If no workspaces are found the script falls back to the wide mounts so you can diagnose manually. + +## Mounting Host Paths + +The scanner (and later the server) only sees directories that are mounted into the container. Examples: + +| Host | Sample `docker run` mounts | +| --- | --- | +| Native Linux | `-v /:/host` plus more targeted paths (`-v /srv/projects:/srv/projects`). | +| WSL2 | `-v /:/host -v /mnt/c:/mnt/c` so Windows drives are visible as `/mnt/c`. | +| Windows (Docker Desktop) | `-v C:\\Users\\me\\agents:/windows/agents` for each shared directory. | + +After adjusting mounts, rerun `bun run agents:setup` so the helper container refreshes both the bind mounts and the persisted `/root/.ccflare/workspaces.json` living inside the `ccflare-workspaces` volume. If you prefer to keep the container running, call `bun run agents:scan` followed by `POST /api/workspaces` (see below). + +## Registering Paths from the Dashboard/API + +- **Dashboard:** In the **Agents → Register Workspace Paths** card, paste absolute paths (e.g., `/opt/projects/app`, `/mnt/c/Users/me/tooling`). Paths are normalized and sent to the API; success/errors are shown inline. 
+- **API:** + ```bash + curl -X POST http://localhost:8080/api/workspaces \ + -H "Content-Type: application/json" \ + -d '{"paths":["/opt/projects/app","/mnt/c/Users/me/tooling"]}' + ``` + The handler checks each path, registers valid ones, and returns counts for `added`, `updated`, `skipped`, plus any `invalidPaths`. + +## Common Warnings + +- **“invalid model: inherit”** – The agent file specifies a shorthand model name that ccflare doesn’t recognize. The default agent model (configurable in the dashboard/API) is used instead. +- **Duplicate agents** – Multiple mounts often point to the same repo (e.g., `/mnt/c/...` and Docker Desktop’s bind-mount mirrors). ccflare logs the duplicates but keeps the first copy; no action is required unless you want to prune redundant mounts. + +Refer to [`docs/deployment.md`](docs/deployment.md#host-mounts-for-agent-discovery-linux-windows-wsl) for more mount examples and [`docs/api-http.md`](docs/api-http.md#post-apiworkspaces) for the full workspace API reference. diff --git a/docs/api-http.md b/docs/api-http.md index 20615b07..c1082d2a 100644 --- a/docs/api-http.md +++ b/docs/api-http.md @@ -686,6 +686,65 @@ List all available workspaces with agent counts. curl http://localhost:8080/api/workspaces ``` +#### POST /api/workspaces + +Register one or more workspace paths so ccflare can load agents from them. + +**Request:** +```json +{ + "paths": ["/opt/projects/my-app", "/mnt/c/Users/me/project"] +} +``` + +**Response:** +```json +{ + "success": true, + "added": 2, + "updated": 0, + "skipped": 0, + "invalidPaths": [], + "workspaces": [ + { + "name": "my-app", + "path": "/opt/projects/my-app", + "agentCount": 6 + } + ] +} +``` + +**Notes:** + +- Paths must exist inside the running container. Use `bun run agents:setup` (or add `-v` mounts manually) before calling this endpoint. +- You can send a single string with `path` or an array with `paths`. 
+- Duplicate entries simply refresh the `lastSeen` timestamp; they do not re-import files. + +#### One-Time Workspace Scan + +If you need to bulk-register every `.claude/agents` directory that already exists on disk (e.g., host-mounted Windows drives), run the bundled scanner once: + +```bash +bun run agents:scan -- /host /mnt/c +``` + +- With no arguments the scanner walks the current directory, your home directory, and a platform-aware list of defaults: on Linux/WSL it checks `/workspaces`, `/host`, `/data`, `/mnt/`, `/host_mnt/`, etc.; on Windows it automatically iterates every mounted drive letter (`C:\`, `D:\`, …) plus `%USERPROFILE%`. +- Pass custom roots explicitly (`bun run agents:scan -- /host /mnt/c /data/shared`) or set `AGENT_SCAN_ROOTS="/host,/mnt/c"` (comma/semicolon/newline separated). Use `AGENT_SCAN_EXTRA_ROOTS` to append to the defaults without replacing them. +- Windows-style paths such as `C:\Projects\Repo` are accepted everywhere; on Linux/WSL the scanner transparently normalizes them to `/mnt/c/Projects/Repo`. +- Control traversal fan-out via `--max-depth 10` or `AGENT_SCAN_MAX_DEPTH=10`. Set `AGENT_SCAN_INCLUDE_ROOT=true` if you truly want to walk `/`. +- Every discovered workspace is persisted to `~/.ccflare/workspaces.json`, so the dashboard immediately shows the agents after a single scan (no server restart required). + +Example commands: + +```powershell +# PowerShell on Windows – scan C: and D: +bun run agents:scan -- "C:\Users\me" "D:\labs" --max-depth 6 + +# WSL / Docker container with Windows drives mounted under /mnt +AGENT_SCAN_ROOTS="/host,/mnt/c" bun run agents:scan --max-depth 8 +``` + --- ### Logs @@ -867,4 +926,4 @@ The following strategy is available: 7. **Rate Limit Tracking**: Rate limit information is automatically extracted from responses and stored for each account, including reset times and remaining requests. -8. 
**Provider Filtering**: Accounts are automatically filtered by provider when selecting for requests, ensuring compatibility. \ No newline at end of file +8. **Provider Filtering**: Accounts are automatically filtered by provider when selecting for requests, ensuring compatibility. diff --git a/docs/deployment.md b/docs/deployment.md index 0d4be488..5febdf48 100644 --- a/docs/deployment.md +++ b/docs/deployment.md @@ -264,64 +264,92 @@ sudo systemctl start ccflare ## Docker Deployment -> **Important**: Docker files are not included in the repository. The configurations below are examples/templates that you can use as a starting point for creating your own Docker deployment. +ccflare now ships with a production-ready [`Dockerfile`](../Dockerfile) that runs the server inside +Docker without any manual wiring. The multi-stage build installs all workspace dependencies with +Bun 1.2.8, precompiles the React dashboard (`bun run build:dashboard`), and provisions a +runtime image that: -### Example Dockerfile +- exposes port 8080 via `bun run server` +- keeps configuration + SQLite data under `/data` by setting `ccflare_CONFIG_PATH=/data/config/ccflare.json` + and `ccflare_DB_PATH=/data/storage/ccflare.db` +- installs `curl` for the built-in Docker `HEALTHCHECK` (hits `http://127.0.0.1:${PORT}/api/stats`) +- declares `/data` as a volume so you can persist config when the container restarts. -```dockerfile -# Multi-stage build for optimal size -FROM oven/bun:1 AS builder +### Build the image -WORKDIR /app +```bash +docker build -t ccflare:latest . 
+``` -# Copy package files -COPY package.json bun.lockb ./ -COPY apps/ ./apps/ -COPY packages/ ./packages/ -COPY tsconfig.json ./ +### Run the container -# Install dependencies and build -RUN bun install --frozen-lockfile -RUN bun run build -RUN cd apps/server && bun build src/server.ts --compile --outfile dist/ccflare-server -RUN cd apps/cli && bun build src/cli.ts --compile --outfile dist/ccflare-cli +```bash +docker run -d \ + --name ccflare \ + -p 8080:8080 \ + -v $(pwd)/ccflare-data:/data \ + -e PORT=8080 \ + -e LB_STRATEGY=session \ + -e LOG_LEVEL=INFO \ + -e LOG_FORMAT=json \ + ccflare:latest +``` -# Runtime stage -FROM debian:bookworm-slim +The bind mount above creates `./ccflare-data` on the host so both the config file and SQLite +database survive upgrades. You can override the defaults by setting `ccflare_CONFIG_PATH` or +`ccflare_DB_PATH` to alternate locations, but `/data` will work out-of-the-box for most setups. -# Install runtime dependencies -RUN apt-get update && apt-get install -y \ - ca-certificates \ - && rm -rf /var/lib/apt/lists/* +### Host mounts for agent discovery (Linux, Windows, WSL) -# Create user -RUN useradd -r -s /bin/false ccflare +The agent scanner can only see paths that are mounted into the container. Use the pattern below that matches your environment, then run `docker exec ccflare bun run agents:scan`. -# Copy binary and dashboard -COPY --from=builder /app/apps/tui/dist/ccflare /usr/local/bin/ccflare -COPY --from=builder /app/packages/dashboard-web/dist /opt/ccflare/dashboard +| Host OS | Example `docker run` snippet | Notes | +|---------|-----------------------------|-------| +| Native Linux | `-v /:/host` | Exposes the full filesystem under `/host`; combine with more targeted mounts if you prefer (`-v /srv/workspaces:/workspaces`). | +| WSL2 (Docker Desktop) | `-v /:/host -v /mnt/c:/mnt/c` | `/mnt/c` maps to Windows’ `C:` drive so the scanner can normalize `C:\` paths. Add other drives (`/mnt/d`) as needed. 
| +| Windows (PowerShell) | `-v C:\\Users\\me\\agents:/windows/agents` | Docker Desktop for Windows can only mount directories you’ve shared; bind whichever folders contain `.claude/agents`. | -# Set permissions -RUN chmod +x /usr/local/bin/ccflare +After the container is running with the appropriate mounts: -# Create data directories -RUN mkdir -p /data /config && chown -R ccflare:ccflare /data /config +```bash +docker exec ccflare bun run agents:scan -- /host /mnt/c +``` -USER ccflare +Repeat the scan whenever you add new workspaces on the host. -# Environment -ENV PORT=8080 -ENV ccflare_CONFIG_PATH=/config/ccflare.json +#### Automated setup -EXPOSE 8080 +Instead of wiring these steps manually, run the orchestration script: -VOLUME ["/data", "/config"] +```bash +bun run agents:setup +``` -HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ - CMD ["/usr/local/bin/ccflare-server", "health"] || exit 1 +The script performs the following: -ENTRYPOINT ["/usr/local/bin/ccflare", "--serve"] -``` +1. Stops any existing `ccflare-dev` container. +2. Launches a temporary helper container with wide mounts (`/` → `/host`, `/mnt/*`). +3. Executes the agent scanner to discover every `.claude/agents` directory. +4. Reads the generated `~/.ccflare/workspaces.json` file (persisted inside the `ccflare-workspaces` volume) to determine the exact paths. +5. Restarts `ccflare-dev` with mounts only for the discovered workspaces, the `/data` volume, and the shared `/root/.ccflare` volume so the registry is immediately available on boot. + +If no workspaces are found, the script falls back to the wide mounts so you can run the scanner manually later. You can customize the behavior with environment variables: + +- `CCFLARE_CONTAINER`, `CCFLARE_IMAGE`, `CCFLARE_DATA_VOLUME` – override defaults. +- `CCFLARE_WORKSPACES_VOLUME` – change the named volume that backs `/root/.ccflare` (defaults to `ccflare-workspaces`). 
+- `AGENT_SCAN_ROOTS` – comma/semicolon separated list of additional roots to mount during the scan. +- `AGENT_SCAN_MAX_DEPTH` – change traversal depth (default `8`). + +### Runtime configuration + +All server environment variables continue to work in Docker. Common overrides include: + +- `PORT` – external HTTP port (remember to update `docker run -p` as well) +- `LB_STRATEGY` – currently `session` +- `LOG_LEVEL` / `LOG_FORMAT` – tune logging verbosity and output style +- `CLIENT_ID`, `SESSION_DURATION_MS`, `RETRY_*` – fine-tune runtime behavior +- `DEFAULT_AGENT_MODEL`, `DATA_RETENTION_DAYS`, `REQUEST_RETENTION_DAYS` – proxy defaults +- `ccflare_CONFIG_PATH` / `ccflare_DB_PATH` – explicit paths if you do not want to use `/data` ### Example Docker Compose @@ -331,29 +359,23 @@ version: '3.8' services: ccflare: build: . + image: ccflare:latest container_name: ccflare restart: unless-stopped ports: - "8080:8080" environment: - - PORT=8080 - - LB_STRATEGY=session - - LOG_LEVEL=INFO - - LOG_FORMAT=json - - CLIENT_ID=9d1c250a-e61b-44d9-88ed-5944d1962f5e - - SESSION_DURATION_MS=18000000 - - RETRY_ATTEMPTS=3 - - RETRY_DELAY_MS=1000 - - RETRY_BACKOFF=2 + PORT: 8080 + LB_STRATEGY: session + LOG_LEVEL: INFO + LOG_FORMAT: json + CLIENT_ID: 9d1c250a-e61b-44d9-88ed-5944d1962f5e + SESSION_DURATION_MS: 18000000 + RETRY_ATTEMPTS: 3 + RETRY_DELAY_MS: 1000 + RETRY_BACKOFF: 2 volumes: - - ./data:/data - - ./config:/config - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8080/health"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 40s + - ccflare_data:/data networks: - ccflare-net @@ -373,15 +395,22 @@ services: networks: - ccflare-net +volumes: + ccflare_data: + networks: ccflare-net: driver: bridge ``` +> **Tip**: The Dockerfile already defines a health check that uses `curl` to hit `/api/stats`. +> Compose/Swarm users can rely on that check or provide their own `healthcheck` stanza if they +> need a different endpoint. 
+ ### Building and Running ```bash -# Build the Docker image +# Build the Docker image (already configured in the repo) docker build -t ccflare:latest . # Run with Docker @@ -389,14 +418,12 @@ docker run -d \ --name ccflare \ -p 8080:8080 \ -v $(pwd)/data:/data \ - -v $(pwd)/config:/config \ -e LB_STRATEGY=session \ ccflare:latest # Or use Docker Compose docker-compose up -d ``` - ## Cloudflare Pages (Dashboard Only) Deploy the dashboard as a static site on Cloudflare Pages while running the API server elsewhere: @@ -1297,4 +1324,4 @@ export TUI_THEME=dark # dark or light export TUI_COMPACT_MODE=false # Compact display ``` -For support and updates, check the project repository and documentation. \ No newline at end of file +For support and updates, check the project repository and documentation. diff --git a/package.json b/package.json index 38293e23..c39877a4 100644 --- a/package.json +++ b/package.json @@ -19,7 +19,9 @@ "build:tui": "bun run --cwd apps/tui build", "build:lander": "bun run --cwd apps/lander build", "format": "bunx biome format --write .", - "lint": "bunx --bun biome check --write --unsafe ." 
+ "lint": "bunx --bun biome check --write --unsafe .", + "agents:scan": "bun packages/agents/src/scripts/scan-workspaces.ts", + "agents:setup": "bun scripts/setup-agents.ts" }, "engines": { "bun": ">=1.2.8" diff --git a/packages/agents/src/discovery.ts b/packages/agents/src/discovery.ts index a685c814..934af6fe 100644 --- a/packages/agents/src/discovery.ts +++ b/packages/agents/src/discovery.ts @@ -2,6 +2,7 @@ import { existsSync } from "node:fs"; import { readdir, readFile, writeFile } from "node:fs/promises"; import { basename, join, resolve } from "node:path"; import { Config } from "@ccflare/config"; +import { CLAUDE_MODEL_IDS } from "@ccflare/core"; import { Logger } from "@ccflare/logger"; import { type Agent, @@ -23,6 +24,13 @@ const DEFAULT_COLOR = "gray"; const log = new Logger("AgentRegistry"); +const MODEL_ALIASES: Record = { + opus: CLAUDE_MODEL_IDS.OPUS_4_1, + "opus-plan": CLAUDE_MODEL_IDS.OPUS_PLAN_MODE, + sonnet: CLAUDE_MODEL_IDS.SONNET_4_5, + haiku: CLAUDE_MODEL_IDS.HAIKU_4_5, +}; + export class AgentRegistry { private cache: AgentCache | null = null; private workspaces: Map = new Map(); @@ -123,10 +131,9 @@ export class AgentRegistry { // Handle shorthand model names if (data.model) { const modelLower = data.model.toLowerCase(); - if (modelLower === "opus") { - model = ALLOWED_MODELS[0]; // claude-opus-4-20250514 - } else if (modelLower === "sonnet") { - model = ALLOWED_MODELS[1]; // claude-sonnet-4-20250514 + const aliasMatch = MODEL_ALIASES[modelLower]; + if (aliasMatch) { + model = aliasMatch; } else if (this.isValidModel(data.model)) { model = data.model as AllowedModel; } else { @@ -292,37 +299,61 @@ export class AgentRegistry { // Register a workspace async registerWorkspace(workspacePath: string): Promise { - const normalizedPath = resolve(workspacePath); - - // Check if this workspace is already registered - if (this.workspaces.has(normalizedPath)) { - // Update last seen time - const workspace = this.workspaces.get(normalizedPath); - if 
(workspace) { - workspace.lastSeen = Date.now(); + await this.registerWorkspacesBulk([workspacePath]); + } + + async registerWorkspacesBulk(workspacePaths: string[]): Promise<{ + added: number; + updated: number; + skipped: number; + }> { + const normalizedNow = Date.now(); + let added = 0; + let updated = 0; + let skipped = 0; + const uniquePaths = new Set(); + + for (const rawPath of workspacePaths) { + if (!rawPath) { + skipped++; + continue; + } + + const normalizedPath = resolve(rawPath); + if (uniquePaths.has(normalizedPath)) { + continue; + } + uniquePaths.add(normalizedPath); + + if (this.workspaces.has(normalizedPath)) { + const workspace = this.workspaces.get(normalizedPath); + if (workspace) { + workspace.lastSeen = normalizedNow; + updated++; + } + continue; } - return; - } - // Extract workspace name from path - const pathParts = normalizedPath.split("/"); - const workspaceName = pathParts[pathParts.length - 1] || "workspace"; + const pathParts = normalizedPath.split("/"); + const workspaceName = pathParts[pathParts.length - 1] || "workspace"; - // Create new workspace entry - const workspace: AgentWorkspace = { - path: normalizedPath, - name: workspaceName, - lastSeen: Date.now(), - }; + const workspace: AgentWorkspace = { + path: normalizedPath, + name: workspaceName, + lastSeen: normalizedNow, + }; - this.workspaces.set(normalizedPath, workspace); - log.info(`Registered workspace: ${workspaceName} at ${normalizedPath}`); + this.workspaces.set(normalizedPath, workspace); + log.info(`Registered workspace: ${workspaceName} at ${normalizedPath}`); + added++; + } - // Save workspaces to disk - await this.saveWorkspaces(); + if (added > 0) { + await this.saveWorkspaces(); + await this.refresh(); + } - // Refresh to load agents from the new workspace - await this.refresh(); + return { added, updated, skipped }; } // Get current workspaces diff --git a/packages/agents/src/scripts/scan-workspaces.ts b/packages/agents/src/scripts/scan-workspaces.ts new file 
mode 100644 index 00000000..072408a1 --- /dev/null +++ b/packages/agents/src/scripts/scan-workspaces.ts @@ -0,0 +1,313 @@ +#!/usr/bin/env bun +import { existsSync } from "node:fs"; +import { readdir, stat } from "node:fs/promises"; +import { homedir } from "node:os"; +import { join, resolve } from "node:path"; +import { Logger } from "@ccflare/logger"; +import { agentRegistry } from "../discovery"; + +interface ScanOptions { + roots: string[]; + maxDepth: number; +} + +interface QueueEntry { + dir: string; + depth: number; +} + +const log = new Logger("AgentWorkspaceScanner"); +const DEFAULT_MAX_DEPTH = Number(process.env.AGENT_SCAN_MAX_DEPTH ?? 8); +const isWindows = process.platform === "win32"; + +const SKIP_DIR_NAMES = new Set( + [ + "node_modules", + ".git", + ".hg", + ".svn", + ".cache", + ".ccflare", + ".config", + ".vscode", + ".idea", + ".Trash", + "__pycache__", + "venv", + ".venv", + "Library", + "System Volume Information", + "$Recycle.Bin", + "ProgramData", + "Program Files", + "Program Files (x86)", + ].map((name) => name.toLowerCase()), +); + +const SKIP_ABSOLUTE_PREFIXES = [ + "/proc", + "/sys", + "/dev", + "/run", + "/var/lib/docker", + "/var/lib/containerd", + "/var/lib/snapd", + "/var/log", +]; + +function splitRootsInput(value: string | undefined): string[] { + if (!value) return []; + return value + .split(/[,;\n\r]+/) + .map((root) => root.trim()) + .filter(Boolean); +} + +function normalizeInputPath(rawPath: string): string { + const trimmed = rawPath.trim(); + if (!trimmed) return ""; + + const windowsDrivePattern = /^([a-zA-Z]):(?:[\\/](.*))?$/; + const match = trimmed.match(windowsDrivePattern); + if (match && !isWindows) { + const drive = match[1].toLowerCase(); + const rest = match[2]?.replace(/\\/g, "/").replace(/^\//, "") ?? ""; + return rest ? 
`/mnt/${drive}/${rest}` : `/mnt/${drive}`; + } + + return trimmed; +} + +function collectWindowsDriveRoots(): string[] { + const roots = new Set(); + const driveLetters = "cdefghijklmnopqrstuvwxyz"; + for (const letter of driveLetters) { + const candidate = `${letter.toUpperCase()}:\\`; + if (existsSync(candidate)) { + roots.add(candidate); + } + } + if (process.env.HOMEDRIVE && process.env.HOMEPATH) { + const home = `${process.env.HOMEDRIVE}${process.env.HOMEPATH}`; + if (existsSync(home)) { + roots.add(home); + } + } + if (process.env.USERPROFILE && existsSync(process.env.USERPROFILE)) { + roots.add(process.env.USERPROFILE); + } + return Array.from(roots); +} + +function parseArgs(): ScanOptions { + const args = process.argv.slice(2); + const roots: string[] = []; + let maxDepth = DEFAULT_MAX_DEPTH; + + for (let i = 0; i < args.length; i++) { + const arg = args[i]; + if (arg === "--max-depth" && args[i + 1]) { + maxDepth = Number(args[i + 1]) || DEFAULT_MAX_DEPTH; + i++; + continue; + } + + if (arg.startsWith("--max-depth=")) { + const [, depthValue] = arg.split("="); + maxDepth = Number(depthValue) || DEFAULT_MAX_DEPTH; + continue; + } + + roots.push(normalizeInputPath(arg)); + } + + const envRoots = splitRootsInput(process.env.AGENT_SCAN_ROOTS).map((root) => + normalizeInputPath(root), + ); + + const extraRoots = splitRootsInput(process.env.AGENT_SCAN_EXTRA_ROOTS).map( + (root) => normalizeInputPath(root), + ); + + const baseRoots = + roots.length > 0 + ? roots + : envRoots.length > 0 + ? 
envRoots + : getDefaultRoots(); + + const combinedRoots = [...baseRoots, ...extraRoots].filter(Boolean); + const resolvedRoots = Array.from( + new Set( + combinedRoots + .map((root) => normalizeInputPath(root)) + .map((root) => resolve(root)), + ), + ); + + if (resolvedRoots.length === 0) { + resolvedRoots.push(homedir()); + } + + return { roots: resolvedRoots, maxDepth }; +} + +function getDefaultRoots(): string[] { + const defaults = new Set(); + const cwd = process.cwd(); + if (cwd) defaults.add(cwd); + defaults.add(homedir()); + + if (isWindows) { + for (const drive of collectWindowsDriveRoots()) { + defaults.add(drive); + } + } else { + const linuxCandidates = new Set([ + "/workspaces", + "/workspace", + "/workdir", + "/host", + "/host_mnt", + "/data", + "/opt", + ]); + + const driveLetters = "cdefghijklmnopqrstuvwxyz"; + for (const letter of driveLetters) { + linuxCandidates.add(`/mnt/${letter}`); + linuxCandidates.add(`/host_mnt/${letter}`); + } + + for (const candidate of linuxCandidates) { + if (existsSync(candidate)) { + defaults.add(candidate); + } + } + } + + if ( + process.env.AGENT_SCAN_INCLUDE_ROOT === "true" && + !isWindows && + existsSync("/") + ) { + defaults.add("/"); + } + + return Array.from(defaults); +} + +function shouldSkipAbsolute(path: string): boolean { + if (path === "/") return false; + return SKIP_ABSOLUTE_PREFIXES.some( + (prefix) => path === prefix || path.startsWith(`${prefix}/`), + ); +} + +function shouldSkipName(name: string): boolean { + return SKIP_DIR_NAMES.has(name.toLowerCase()); +} + +async function discoverWorkspaces( + roots: string[], + maxDepth: number, +): Promise { + const found = new Set(); + const visited = new Set(); + const queue: QueueEntry[] = []; + + for (const root of roots) { + if (!existsSync(root)) { + log.warn(`Skipping missing root ${root}`); + continue; + } + queue.push({ dir: resolve(root), depth: 0 }); + } + + while (queue.length > 0) { + const current = queue.pop(); + if (!current) break; + const dir 
= resolve(current.dir); + const depth = current.depth; + + if (visited.has(dir)) continue; + visited.add(dir); + + if (shouldSkipAbsolute(dir)) continue; + if (depth > maxDepth) continue; + + let entries: Awaited>; + try { + entries = await readdir(dir, { withFileTypes: true }); + } catch (error) { + log.debug(`Cannot read ${dir}: ${String(error)}`); + continue; + } + + for (const entry of entries) { + if (!entry.isDirectory() || entry.isSymbolicLink()) { + continue; + } + + const entryPath = join(dir, entry.name); + + if (entry.name === ".claude") { + const agentsPath = join(entryPath, "agents"); + try { + const stats = await stat(agentsPath); + if (stats.isDirectory()) { + const workspacePath = resolve(dir); + found.add(workspacePath); + log.info(`Found agents directory at ${agentsPath}`); + } + } catch (error) { + log.debug( + `Failed to inspect potential agents directory ${agentsPath}: ${String(error)}`, + ); + } + // Always skip descending into `.claude` directories to avoid extra work + continue; + } + + if (shouldSkipName(entry.name)) { + continue; + } + + queue.push({ dir: entryPath, depth: depth + 1 }); + } + } + + return Array.from(found); +} + +async function main() { + const { roots, maxDepth } = parseArgs(); + log.info( + `Scanning ${roots.length} root${roots.length === 1 ? "" : "s"} up to depth ${maxDepth}`, + ); + + const workspaces = await discoverWorkspaces(roots, maxDepth); + + if (workspaces.length === 0) { + log.info("No .claude/agents directories found."); + return; + } + + log.info( + `Discovered ${workspaces.length} workspace${workspaces.length === 1 ? "" : "s"}.`, + ); + const result = await agentRegistry.registerWorkspacesBulk(workspaces); + log.info( + `Registered ${result.added} new workspace${ + result.added === 1 ? 
"" : "s" + } (updated ${result.updated}, skipped ${result.skipped}).`, + ); + + const registered = agentRegistry.getWorkspaces(); + log.info(`Total registered workspaces: ${registered.length}`); +} + +main().catch((error) => { + log.error("Agent workspace scan failed", error); + process.exitCode = 1; +}); diff --git a/packages/core/src/models.ts b/packages/core/src/models.ts index 61d402d4..4f9886af 100644 --- a/packages/core/src/models.ts +++ b/packages/core/src/models.ts @@ -5,45 +5,31 @@ // Full model IDs as used by the Anthropic API export const CLAUDE_MODEL_IDS = { - // Claude 3.5 models - HAIKU_3_5: "claude-3-5-haiku-20241022", - SONNET_3_5: "claude-3-5-sonnet-20241022", - - // Claude 4 models - SONNET_4: "claude-sonnet-4-20250514", - OPUS_4: "claude-opus-4-20250514", - OPUS_4_1: "claude-opus-4-1-20250805", - - // Legacy Claude 3 models (for documentation/API examples) - OPUS_3: "claude-3-opus-20240229", - SONNET_3: "claude-3-sonnet-20240229", + HAIKU_4_5: "claude-4-5-haiku-20250107", + SONNET_4_5: "claude-sonnet-4-5-20250107", + OPUS_4_1: "claude-opus-4-1-20250805", + OPUS_PLAN_MODE: "claude-opus-plan-mode-20250805", } as const; // Model display names export const MODEL_DISPLAY_NAMES: Record = { - [CLAUDE_MODEL_IDS.HAIKU_3_5]: "Claude Haiku 3.5", - [CLAUDE_MODEL_IDS.SONNET_3_5]: "Claude Sonnet 3.5 v2", - [CLAUDE_MODEL_IDS.SONNET_4]: "Claude Sonnet 4", - [CLAUDE_MODEL_IDS.OPUS_4]: "Claude Opus 4", - [CLAUDE_MODEL_IDS.OPUS_4_1]: "Claude Opus 4.1", - [CLAUDE_MODEL_IDS.OPUS_3]: "Claude Opus 3", - [CLAUDE_MODEL_IDS.SONNET_3]: "Claude Sonnet 3", + [CLAUDE_MODEL_IDS.HAIKU_4_5]: "Claude Haiku 4.5", + [CLAUDE_MODEL_IDS.SONNET_4_5]: "Claude Sonnet 4.5", + [CLAUDE_MODEL_IDS.OPUS_4_1]: "Claude Opus 4.1", + [CLAUDE_MODEL_IDS.OPUS_PLAN_MODE]: "Claude Opus Plan Mode", }; // Short model names used in UI (for color mapping, etc.) 
export const MODEL_SHORT_NAMES: Record = { - [CLAUDE_MODEL_IDS.HAIKU_3_5]: "claude-3.5-haiku", - [CLAUDE_MODEL_IDS.SONNET_3_5]: "claude-3.5-sonnet", - [CLAUDE_MODEL_IDS.SONNET_4]: "claude-sonnet-4", - [CLAUDE_MODEL_IDS.OPUS_4]: "claude-opus-4", - [CLAUDE_MODEL_IDS.OPUS_4_1]: "claude-opus-4.1", - [CLAUDE_MODEL_IDS.OPUS_3]: "claude-3-opus", - [CLAUDE_MODEL_IDS.SONNET_3]: "claude-3-sonnet", + [CLAUDE_MODEL_IDS.HAIKU_4_5]: "claude-4.5-haiku", + [CLAUDE_MODEL_IDS.SONNET_4_5]: "claude-4.5-sonnet", + [CLAUDE_MODEL_IDS.OPUS_4_1]: "claude-opus-4.1", + [CLAUDE_MODEL_IDS.OPUS_PLAN_MODE]: "claude-opus-plan", }; // Default model for various contexts -export const DEFAULT_MODEL = CLAUDE_MODEL_IDS.SONNET_4; -export const DEFAULT_AGENT_MODEL = CLAUDE_MODEL_IDS.SONNET_4; +export const DEFAULT_MODEL = CLAUDE_MODEL_IDS.SONNET_4_5; +export const DEFAULT_AGENT_MODEL = CLAUDE_MODEL_IDS.SONNET_4_5; // Type for all valid model IDs export type ClaudeModelId = diff --git a/packages/core/src/pricing.ts b/packages/core/src/pricing.ts index cfe7f75b..570e1789 100644 --- a/packages/core/src/pricing.ts +++ b/packages/core/src/pricing.ts @@ -34,60 +34,50 @@ interface ApiResponse { // Bundled fallback pricing for Anthropic models (dollars per 1M tokens) const BUNDLED_PRICING: ApiResponse = { - anthropic: { - models: { - [CLAUDE_MODEL_IDS.HAIKU_3_5]: { - id: CLAUDE_MODEL_IDS.HAIKU_3_5, - name: MODEL_DISPLAY_NAMES[CLAUDE_MODEL_IDS.HAIKU_3_5], - cost: { - input: 0.8, - output: 4, - cache_read: 0.08, - cache_write: 1, - }, - }, - [CLAUDE_MODEL_IDS.SONNET_3_5]: { - id: CLAUDE_MODEL_IDS.SONNET_3_5, - name: MODEL_DISPLAY_NAMES[CLAUDE_MODEL_IDS.SONNET_3_5], - cost: { - input: 3, - output: 15, - cache_read: 0.3, - cache_write: 3.75, - }, - }, - [CLAUDE_MODEL_IDS.SONNET_4]: { - id: CLAUDE_MODEL_IDS.SONNET_4, - name: MODEL_DISPLAY_NAMES[CLAUDE_MODEL_IDS.SONNET_4], - cost: { - input: 3, - output: 15, - cache_read: 0.3, - cache_write: 3.75, - }, - }, - [CLAUDE_MODEL_IDS.OPUS_4]: { - id: 
CLAUDE_MODEL_IDS.OPUS_4, - name: MODEL_DISPLAY_NAMES[CLAUDE_MODEL_IDS.OPUS_4], - cost: { - input: 15, - output: 75, - cache_read: 1.5, - cache_write: 18.75, - }, - }, - [CLAUDE_MODEL_IDS.OPUS_4_1]: { - id: CLAUDE_MODEL_IDS.OPUS_4_1, - name: MODEL_DISPLAY_NAMES[CLAUDE_MODEL_IDS.OPUS_4_1], - cost: { - input: 15, - output: 75, - cache_read: 1.5, - cache_write: 18.75, - }, - }, - }, - }, + anthropic: { + models: { + [CLAUDE_MODEL_IDS.HAIKU_4_5]: { + id: CLAUDE_MODEL_IDS.HAIKU_4_5, + name: MODEL_DISPLAY_NAMES[CLAUDE_MODEL_IDS.HAIKU_4_5], + cost: { + input: 0.8, + output: 4, + cache_read: 0.08, + cache_write: 1, + }, + }, + [CLAUDE_MODEL_IDS.SONNET_4_5]: { + id: CLAUDE_MODEL_IDS.SONNET_4_5, + name: MODEL_DISPLAY_NAMES[CLAUDE_MODEL_IDS.SONNET_4_5], + cost: { + input: 3, + output: 15, + cache_read: 0.3, + cache_write: 3.75, + }, + }, + [CLAUDE_MODEL_IDS.OPUS_4_1]: { + id: CLAUDE_MODEL_IDS.OPUS_4_1, + name: MODEL_DISPLAY_NAMES[CLAUDE_MODEL_IDS.OPUS_4_1], + cost: { + input: 15, + output: 75, + cache_read: 1.5, + cache_write: 18.75, + }, + }, + [CLAUDE_MODEL_IDS.OPUS_PLAN_MODE]: { + id: CLAUDE_MODEL_IDS.OPUS_PLAN_MODE, + name: MODEL_DISPLAY_NAMES[CLAUDE_MODEL_IDS.OPUS_PLAN_MODE], + cost: { + input: 15, + output: 75, + cache_read: 1.5, + cache_write: 18.75, + }, + }, + }, + }, }; interface Logger { diff --git a/packages/dashboard-web/build.ts b/packages/dashboard-web/build.ts index 8d259f7c..e6a48dee 100644 --- a/packages/dashboard-web/build.ts +++ b/packages/dashboard-web/build.ts @@ -2,7 +2,29 @@ import { existsSync } from "node:fs"; import { rm, writeFile } from "node:fs/promises"; import path from "node:path"; -import plugin from "bun-plugin-tailwind"; +type TailwindPlugin = (typeof import("bun-plugin-tailwind")) extends { + default: infer PluginType; +} + ? 
PluginType + : never; + +const tailwindPlugin = await import("bun-plugin-tailwind").then< + TailwindPlugin | undefined +>( + (module) => module.default, + (error: unknown) => { + if (error instanceof Error) { + console.warn( + `⚠️ Failed to load bun-plugin-tailwind: ${error.message}. Continuing without Tailwind processing.`, + ); + } else { + console.warn( + "⚠️ Failed to load bun-plugin-tailwind due to an unknown error. Continuing without Tailwind processing.", + ); + } + return undefined; + }, +); console.log("\n🚀 Building dashboard...\n"); @@ -18,10 +40,16 @@ const start = performance.now(); const entrypoints = ["src/index.html"]; console.log(`📄 Building dashboard from ${entrypoints[0]}\n`); +const plugins: TailwindPlugin[] = []; + +if (tailwindPlugin) { + plugins.push(tailwindPlugin); +} + const result = await Bun.build({ - entrypoints, - outdir, - plugins: [plugin], + entrypoints, + outdir, + plugins, minify: true, target: "browser", sourcemap: "linked", diff --git a/packages/dashboard-web/package.json b/packages/dashboard-web/package.json index 52264e8f..95743413 100644 --- a/packages/dashboard-web/package.json +++ b/packages/dashboard-web/package.json @@ -9,9 +9,12 @@ "typecheck": "tsc --noEmit" }, "dependencies": { - "@ccflare/errors": "workspace:*", - "@ccflare/ui-common": "workspace:*", - "@ccflare/ui-constants": "workspace:*", + "@ccflare/core": "workspace:*", + "@ccflare/errors": "workspace:*", + "@ccflare/http-common": "workspace:*", + "@ccflare/ui-common": "workspace:*", + "@ccflare/ui-constants": "workspace:*", + "@ccflare/types": "workspace:*", "@radix-ui/react-dialog": "^1.1.14", "@radix-ui/react-dropdown-menu": "^2.1.15", "@radix-ui/react-label": "^2.1.0", diff --git a/packages/dashboard-web/src/api.ts b/packages/dashboard-web/src/api.ts index 429d7e77..fb53e21f 100644 --- a/packages/dashboard-web/src/api.ts +++ b/packages/dashboard-web/src/api.ts @@ -216,6 +216,19 @@ class API extends HttpClient { return data.strategy; } + async 
registerWorkspaces(paths: string[]): Promise<{ + success: boolean; + added: number; + updated: number; + skipped: number; + invalidPaths: string[]; + }> { + if (!paths.length) { + throw new Error("Path is required"); + } + return this.post("/api/workspaces", { paths }); + } + async listStrategies(): Promise { return this.get("/api/strategies"); } diff --git a/packages/dashboard-web/src/components/AgentsTab.tsx b/packages/dashboard-web/src/components/AgentsTab.tsx index b1b80a9e..a21895c6 100644 --- a/packages/dashboard-web/src/components/AgentsTab.tsx +++ b/packages/dashboard-web/src/components/AgentsTab.tsx @@ -11,11 +11,12 @@ import { RefreshCw, Settings, } from "lucide-react"; -import { useState } from "react"; +import { type FormEvent, useState } from "react"; import { useAgents, useBulkUpdateAgentPreferences, useDefaultAgentModel, + useRegisterWorkspace, useSetDefaultAgentModel, useUpdateAgentPreference, } from "../hooks/queries"; @@ -38,6 +39,7 @@ import { DialogTitle, DialogTrigger, } from "./ui/dialog"; +import { Input } from "./ui/input"; import { Select, SelectContent, @@ -56,12 +58,16 @@ export function AgentsTab() { useDefaultAgentModel(); const setDefaultModel = useSetDefaultAgentModel(); const bulkUpdatePreferences = useBulkUpdateAgentPreferences(); + const registerWorkspace = useRegisterWorkspace(); const [selectedWorkspace, setSelectedWorkspace] = useState( null, ); const [bulkUpdateDialogOpen, setBulkUpdateDialogOpen] = useState(false); const [bulkUpdateModel, setBulkUpdateModel] = useState(DEFAULT_AGENT_MODEL); + const [workspacePath, setWorkspacePath] = useState(""); + const [workspaceMessage, setWorkspaceMessage] = useState(null); + const [workspaceError, setWorkspaceError] = useState(null); const handleModelChange = (agentId: string, model: string) => { updatePreference.mutate({ agentId, model }); @@ -80,6 +86,39 @@ export function AgentsTab() { }); }; + const handleWorkspaceSubmit = (event: FormEvent) => { + event.preventDefault(); + 
setWorkspaceMessage(null); + setWorkspaceError(null); + const trimmed = workspacePath.trim(); + if (!trimmed) { + setWorkspaceError("Path is required"); + return; + } + registerWorkspace.mutate([trimmed], { + onSuccess: (data) => { + if (data.invalidPaths?.length) { + setWorkspaceError( + `Some paths were invalid: ${data.invalidPaths.join(", ")}`, + ); + } else { + setWorkspaceError(null); + } + setWorkspaceMessage( + `Registered ${data.added} workspace${data.added === 1 ? "" : "s"}`, + ); + setWorkspacePath(""); + }, + onError: (error) => { + if (error instanceof Error) { + setWorkspaceError(error.message); + } else { + setWorkspaceError("Failed to register workspace"); + } + }, + }); + }; + if (isLoading) { + return (
@@ -324,6 +363,61 @@ Your system prompt content here...`} + + + + + Register Workspace Paths + + + Point ccflare at new projects by adding absolute paths. Ensure the + directories are mounted into the container (use `bun run + agents:setup`). + + + +
+ setWorkspacePath(event.target.value)} + disabled={registerWorkspace.isPending} + /> +
+ + +
+ {workspaceMessage && ( +

{workspaceMessage}

+ )} + {workspaceError && ( +

{workspaceError}

+ )} +

+ Paths should contain a{" "} + .claude/agents folder. Once + added, the dashboard refreshes automatically. +

+
+
+
+ {/* Workspaces Section */} {workspacesWithCounts.length > 0 && ( diff --git a/packages/dashboard-web/src/components/charts/ModelPerformanceComparison.tsx b/packages/dashboard-web/src/components/charts/ModelPerformanceComparison.tsx index 03a49156..a63d1496 100644 --- a/packages/dashboard-web/src/components/charts/ModelPerformanceComparison.tsx +++ b/packages/dashboard-web/src/components/charts/ModelPerformanceComparison.tsx @@ -36,10 +36,10 @@ interface ModelPerformanceComparisonProps { // Model-based color palette const MODEL_COLORS: Record = { - "claude-3.5-sonnet": COLORS.purple, - "claude-3.5-haiku": COLORS.success, - "claude-3-opus": COLORS.blue, - "claude-opus-4": COLORS.pink, + "claude-4.5-sonnet": COLORS.purple, + "claude-4.5-haiku": COLORS.success, + "claude-opus-4.1": COLORS.blue, + "claude-opus-plan": COLORS.pink, }; function getModelColor(model: string): string { diff --git a/packages/dashboard-web/src/components/charts/ModelTokenSpeedChart.tsx b/packages/dashboard-web/src/components/charts/ModelTokenSpeedChart.tsx index 4a68622f..c8c76cb7 100644 --- a/packages/dashboard-web/src/components/charts/ModelTokenSpeedChart.tsx +++ b/packages/dashboard-web/src/components/charts/ModelTokenSpeedChart.tsx @@ -30,10 +30,10 @@ interface ModelTokenSpeedChartProps { // Model-based color palette const MODEL_COLORS: Record = { - "claude-3.5-sonnet": COLORS.purple, - "claude-3.5-haiku": COLORS.success, - "claude-3-opus": COLORS.blue, - "claude-opus-4": COLORS.pink, + "claude-4.5-sonnet": COLORS.purple, + "claude-4.5-haiku": COLORS.success, + "claude-opus-4.1": COLORS.blue, + "claude-opus-plan": COLORS.pink, // Add more models as needed }; diff --git a/packages/dashboard-web/src/components/charts/MultiModelChart.tsx b/packages/dashboard-web/src/components/charts/MultiModelChart.tsx index 9d384c7b..edc770cb 100644 --- a/packages/dashboard-web/src/components/charts/MultiModelChart.tsx +++ b/packages/dashboard-web/src/components/charts/MultiModelChart.tsx @@ -49,10 +49,10 
@@ interface MultiModelChartProps { // Model-based color palette const MODEL_COLORS: Record = { - "claude-3.5-sonnet": COLORS.purple, - "claude-3.5-haiku": COLORS.success, - "claude-3-opus": COLORS.blue, - "claude-opus-4": COLORS.pink, + "claude-4.5-sonnet": COLORS.purple, + "claude-4.5-haiku": COLORS.success, + "claude-opus-4.1": COLORS.blue, + "claude-opus-plan": COLORS.pink, }; function getModelColor(model: string, index: number): string { diff --git a/packages/dashboard-web/src/components/conversation/Message.tsx b/packages/dashboard-web/src/components/conversation/Message.tsx index 0e9e919f..5db16d93 100644 --- a/packages/dashboard-web/src/components/conversation/Message.tsx +++ b/packages/dashboard-web/src/components/conversation/Message.tsx @@ -43,7 +43,8 @@ function MessageComponent({ ); const thinkingText = typeof thinkingBlock?.thinking === "string" ? thinkingBlock.thinking : ""; - const hasThinking = thinkingText && cleanLineNumbers(thinkingText).trim().length > 0; + const hasThinking = + thinkingText && cleanLineNumbers(thinkingText).trim().length > 0; const cleanedContent = typeof content === "string" ? 
cleanLineNumbers(content).trim() : ""; const hasTools = tools?.length || 0; diff --git a/packages/dashboard-web/src/hooks/queries.ts b/packages/dashboard-web/src/hooks/queries.ts index 59a354c0..c55a8dd9 100644 --- a/packages/dashboard-web/src/hooks/queries.ts +++ b/packages/dashboard-web/src/hooks/queries.ts @@ -180,6 +180,16 @@ export const useUpdateAgent = () => { }); }; +export const useRegisterWorkspace = () => { + const queryClient = useQueryClient(); + return useMutation({ + mutationFn: (paths: string[]) => api.registerWorkspaces(paths), + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: queryKeys.agents() }); + }, + }); +}; + // Note: Clear logs functionality appears to be removed from the API // Retention settings diff --git a/packages/database/src/analyze-performance.ts b/packages/database/src/analyze-performance.ts index be02a928..89e5c2fe 100644 --- a/packages/database/src/analyze-performance.ts +++ b/packages/database/src/analyze-performance.ts @@ -65,24 +65,24 @@ function analyzeQueryPerformance(db: Database) { `, params: [], }, - { - name: "P95 response time calculation", - query: ` - WITH ordered_times AS ( - SELECT - response_time_ms, - ROW_NUMBER() OVER (ORDER BY response_time_ms) as row_num, - COUNT(*) OVER () as total_count - FROM requests - WHERE model = ? AND response_time_ms IS NOT NULL - ) - SELECT response_time_ms as p95_response_time - FROM ordered_times - WHERE row_num = CAST(CEIL(total_count * 0.95) AS INTEGER) - LIMIT 1 - `, - params: ["claude-3-5-sonnet-20241022"], - }, + { + name: "P95 response time calculation", + query: ` + WITH ordered_times AS ( + SELECT + response_time_ms, + ROW_NUMBER() OVER (ORDER BY response_time_ms) as row_num, + COUNT(*) OVER () as total_count + FROM requests + WHERE model = ? 
AND response_time_ms IS NOT NULL + ) + SELECT response_time_ms as p95_response_time + FROM ordered_times + WHERE row_num = CAST(CEIL(total_count * 0.95) AS INTEGER) + LIMIT 1 + `, + params: ["claude-sonnet-4-5-20250107"], + }, ]; // Run each test query with EXPLAIN QUERY PLAN diff --git a/packages/http-api/src/handlers/agents.ts b/packages/http-api/src/handlers/agents.ts index 3d450f43..60a22090 100644 --- a/packages/http-api/src/handlers/agents.ts +++ b/packages/http-api/src/handlers/agents.ts @@ -1,3 +1,5 @@ +import { existsSync } from "node:fs"; +import { resolve } from "node:path"; import { agentRegistry } from "@ccflare/agents"; import { validateString } from "@ccflare/core"; import type { DatabaseOperations } from "@ccflare/database"; @@ -112,6 +114,66 @@ export function createWorkspacesListHandler() { }; } +export function createWorkspaceRegisterHandler() { + return async (req: Request): Promise => { + try { + const body = await req.json().catch(() => ({})); + const pathsInput = Array.isArray(body?.paths) + ? body.paths + : body?.path + ? [body.path] + : []; + + const normalizedPaths = pathsInput + .map((p: unknown) => (typeof p === "string" ? 
p.trim() : "")) + .filter(Boolean) + .map((p: string) => resolve(p)); + + if (normalizedPaths.length === 0) { + throw BadRequest("At least one path is required"); + } + + const invalidPaths: string[] = []; + const validPaths = normalizedPaths.filter((path) => { + if (existsSync(path)) { + return true; + } + invalidPaths.push(path); + return false; + }); + + if (validPaths.length === 0) { + return jsonResponse( + { + success: false, + message: "All provided paths are invalid", + invalidPaths, + }, + 400, + ); + } + + const result = await agentRegistry.registerWorkspacesBulk(validPaths); + const workspaces = agentRegistry.getWorkspaces(); + + return jsonResponse({ + success: true, + added: result.added, + updated: result.updated, + skipped: result.skipped, + invalidPaths, + workspaces, + }); + } catch (error) { + log.error("Error registering workspaces:", error); + if (error instanceof HttpError) { + return jsonResponse({ error: error.message }, error.status); + } + return jsonResponse({ error: "Failed to register workspaces" }, 500); + } + }; +} + export function createBulkAgentPreferenceUpdateHandler( dbOps: DatabaseOperations, ) { diff --git a/packages/http-api/src/router.ts b/packages/http-api/src/router.ts index 82fa52d0..30302d89 100644 --- a/packages/http-api/src/router.ts +++ b/packages/http-api/src/router.ts @@ -12,6 +12,7 @@ import { createAgentPreferenceUpdateHandler, createAgentsListHandler, createBulkAgentPreferenceUpdateHandler, + createWorkspaceRegisterHandler, createWorkspacesListHandler, } from "./handlers/agents"; import { createAgentUpdateHandler } from "./handlers/agents-update"; @@ -74,6 +75,7 @@ export class APIRouter { const oauthCallbackHandler = createOAuthCallbackHandler(dbOps); const agentsHandler = createAgentsListHandler(dbOps); const workspacesHandler = createWorkspacesListHandler(); + const workspaceRegisterHandler = createWorkspaceRegisterHandler(); const requestsStreamHandler = createRequestsStreamHandler(); const cleanupHandler = 
createCleanupHandler(dbOps, config); const compactHandler = createCompactHandler(dbOps); @@ -148,6 +150,9 @@ export class APIRouter { return bulkHandler(req); }); this.handlers.set("GET:/api/workspaces", () => workspacesHandler()); + this.handlers.set("POST:/api/workspaces", (req) => + workspaceRegisterHandler(req), + ); } /** diff --git a/packages/types/src/agent.ts b/packages/types/src/agent.ts index 4cfa135b..2ae9f575 100644 --- a/packages/types/src/agent.ts +++ b/packages/types/src/agent.ts @@ -39,9 +39,10 @@ export interface Agent { export type AgentResponse = Agent[]; export const ALLOWED_MODELS = [ - CLAUDE_MODEL_IDS.OPUS_4, - CLAUDE_MODEL_IDS.OPUS_4_1, - CLAUDE_MODEL_IDS.SONNET_4, + CLAUDE_MODEL_IDS.SONNET_4_5, + CLAUDE_MODEL_IDS.OPUS_4_1, + CLAUDE_MODEL_IDS.HAIKU_4_5, + CLAUDE_MODEL_IDS.OPUS_PLAN_MODE, ] as const; export type AllowedModel = (typeof ALLOWED_MODELS)[number]; diff --git a/scripts/setup-agents.ts b/scripts/setup-agents.ts new file mode 100755 index 00000000..b67f66c3 --- /dev/null +++ b/scripts/setup-agents.ts @@ -0,0 +1,250 @@ +#!/usr/bin/env bun +import { existsSync } from "node:fs"; +import { spawnSync } from "node:child_process"; + +interface WorkspaceEntry { + path: string; + name: string; + lastSeen: number; +} + +interface WorkspacesFile { + version: number; + workspaces: WorkspaceEntry[]; +} + +interface MountSpec { + src: string; + dest: string; +} + +const RUN_CONTAINER = process.env.CCFLARE_CONTAINER || "ccflare-dev"; +const SCAN_CONTAINER = `${RUN_CONTAINER}-scan`; +const IMAGE = process.env.CCFLARE_IMAGE || "ccflare:latest"; +const PORT = process.env.PORT || "8080"; +const DATA_VOLUME = process.env.CCFLARE_DATA_VOLUME || "ccflare-data"; +const WORKSPACES_VOLUME = + process.env.CCFLARE_WORKSPACES_VOLUME || "ccflare-workspaces"; +const MAX_DEPTH = process.env.AGENT_SCAN_MAX_DEPTH || "8"; +const BASE_SCAN_MOUNTS = detectScanMounts(); +const SCAN_ROOTS = BASE_SCAN_MOUNTS.map((m) => m.dest); + +if (BASE_SCAN_MOUNTS.length === 0) { + 
console.error( + "No scan mounts available. Provide AGENT_SCAN_ROOTS or ensure / and /mnt/ drives exist.", + ); + process.exit(1); +} + +async function main() { + logSection("Ensuring volumes"); + runDocker(["volume", "create", DATA_VOLUME], { allowFailure: true }); + runDocker(["volume", "create", WORKSPACES_VOLUME], { allowFailure: true }); + + logSection("Stopping existing containers"); + stopContainer(RUN_CONTAINER); + stopContainer(SCAN_CONTAINER); + + logSection("Starting temporary scanner container"); + const scanRunArgs = [ + "run", + "-d", + "--name", + SCAN_CONTAINER, + "-e", + "PORT=8080", + "-v", + `${DATA_VOLUME}:/data`, + "-v", + `${WORKSPACES_VOLUME}:/root/.ccflare`, + ...flattenMounts(BASE_SCAN_MOUNTS), + IMAGE, + "sh", + "-c", + "sleep infinity", + ]; + runDocker(scanRunArgs); + + logSection("Running agent scan"); + const scanCmd = [ + "exec", + "-e", + "ccflare_DEBUG=1", + SCAN_CONTAINER, + "bun", + "run", + "agents:scan", + "--max-depth", + MAX_DEPTH, + ...SCAN_ROOTS, + ]; + const scanResult = runDocker(scanCmd, { capture: true }); + process.stdout.write(scanResult.stdout || ""); + process.stderr.write(scanResult.stderr || ""); + + logSection("Reading discovered workspaces"); + const workspacesRaw = runDocker( + ["exec", SCAN_CONTAINER, "cat", "/root/.ccflare/workspaces.json"], + { capture: true, allowFailure: true }, + ); + + const workspaceData = parseWorkspaces(workspacesRaw.stdout || ""); + if (!workspaceData.workspaces.length) { + console.warn("No workspaces discovered. Keeping wide mounts."); + } + + logSection("Stopping scanner container"); + stopContainer(SCAN_CONTAINER); + + logSection("Building mount plan"); + const specificMounts = buildWorkspaceMounts(workspaceData.workspaces); + + const finalMounts = specificMounts.length + ? specificMounts + : BASE_SCAN_MOUNTS; // fallback + + logSection( + `Starting ${RUN_CONTAINER} with ${finalMounts.length} workspace mount${ + finalMounts.length === 1 ? 
"" : "s" + }`, + ); + const runArgs = [ + "run", + "-d", + "--name", + RUN_CONTAINER, + "-p", + `${PORT}:${PORT}`, + "-e", + `PORT=${PORT}`, + "-v", + `${DATA_VOLUME}:/data`, + "-v", + `${WORKSPACES_VOLUME}:/root/.ccflare`, + ...flattenMounts(finalMounts), + IMAGE, + ]; + runDocker(runArgs); + + logSection("Done"); +} + +function detectScanMounts(): MountSpec[] { + const mounts: MountSpec[] = []; + if (existsSync("/")) { + mounts.push({ src: "/", dest: "/host" }); + } + + const extraRootsEnv = process.env.AGENT_SCAN_ROOTS; + if (extraRootsEnv) { + for (const raw of extraRootsEnv.split(/[,;\n\r]+/)) { + const trimmed = raw.trim(); + if (trimmed && existsSync(trimmed)) { + mounts.push({ src: trimmed, dest: trimmed }); + } + } + } + + const potential = ["/mnt/c", "/mnt/d", "/mnt/e", "/mnt/f", "/mnt/g"]; + for (const path of potential) { + if (existsSync(path)) { + mounts.push({ src: path, dest: path }); + } + } + return dedupeMounts(mounts); +} + +function dedupeMounts(mounts: MountSpec[]): MountSpec[] { + const seen = new Map(); + for (const mount of mounts) { + const key = `${mount.src}:${mount.dest}`; + if (!seen.has(key)) { + seen.set(key, mount); + } + } + return Array.from(seen.values()); +} + +function flattenMounts(mounts: MountSpec[]): string[] { + const args: string[] = []; + for (const mount of mounts) { + args.push("-v", `${mount.src}:${mount.dest}`); + } + return args; +} + +function parseWorkspaces(raw: string): WorkspacesFile { + if (!raw?.trim()) { + return { version: 1, workspaces: [] }; + } + try { + const data = JSON.parse(raw) as WorkspacesFile; + return data; + } catch (error) { + console.warn("Failed to parse workspaces file", error); + return { version: 1, workspaces: [] }; + } +} + +function buildWorkspaceMounts(workspaces: WorkspaceEntry[]): MountSpec[] { + const mounts = new Map(); + for (const workspace of workspaces) { + const mapping = mapWorkspacePath(workspace.path); + if (!mapping) continue; + if (!existsSync(mapping.src)) { + 
console.warn(`Skipping missing host path ${mapping.src}`); + continue; + } + const key = `${mapping.src}::${mapping.dest}`; + if (!mounts.has(key)) { + mounts.set(key, mapping); + } + } + return Array.from(mounts.values()); +} + +function mapWorkspacePath(containerPath: string): MountSpec | null { + if (containerPath === "/host") { + return { src: "/", dest: "/host" }; + } + if (containerPath.startsWith("/host/")) { + const src = containerPath.replace(/^\/host/, ""); + const hostPath = src || "/"; + return { src: hostPath, dest: containerPath }; + } + return { src: containerPath, dest: containerPath }; +} + +interface RunOptions { + allowFailure?: boolean; + capture?: boolean; + timeoutMs?: number; +} + +function runDocker(args: string[], options: RunOptions = {}) { + const result = spawnSync("docker", args, { + encoding: "utf-8", + timeout: options.timeoutMs, + stdio: options.capture ? ["ignore", "pipe", "pipe"] : "inherit", + }); + if (result.status !== 0 && !options.allowFailure) { + console.error(`docker ${args.join(" ")} failed`); + if (options.capture) { + console.error(result.stderr); + } + process.exit(result.status ?? 1); + } + return result; +} + +function stopContainer(name: string) { + if (!name) return; + runDocker(["stop", name], { allowFailure: true }); + runDocker(["rm", name], { allowFailure: true }); +} + +function logSection(message: string) { + console.log(`\n=== ${message} ===`); +} + +await main();