diff --git a/.changeset/v3.23.17.md b/.changeset/v3.23.17.md
deleted file mode 100644
index 2be7662eda2..00000000000
--- a/.changeset/v3.23.17.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-"roo-cline": patch
----
-
-- Fix: move context condensing prompt to Prompts section (thanks @SannidhyaSah!)
-- Add: jump icon for newly created files
-- Fix: add character limit to prevent terminal output context explosion
-- Fix: resolve global mode export not including rules files
-- Fix: enable export, share, and copy buttons during API operations (thanks @MuriloFP!)
-- Add: configurable timeout for evals (5-10 min)
-- Add: auto-omit MCP content when no servers are configured
-- Fix: sort symlinked rules files by symlink names, not target names
-- Docs: clarify when to use update_todo_list tool
-- Add: Mistral embedding provider (thanks @SannidhyaSah!)
-- Fix: add run parameter to vitest command in rules (thanks @KJ7LNW!)
-- Update: the max_tokens fallback logic in the sliding window
-- Fix: add bedrock to ANTHROPIC_STYLE_PROVIDERS and restore vertex Claude model checking (thanks @daniel-lxs!)
-- Add: llama-4-maverick model to Vertex AI provider (thanks @MuriloFP!)
-- Fix: properly distinguish between user cancellations and API failures
-- Add: todo list tool enable checkbox to provider advanced settings
-- Add: moonshot provider (thanks @CellenLee!)
-- Add: Qwen/Qwen3-235B-A22B-Instruct-2507 model to Chutes AI provider
-- Fix: add case sensitivity mention to suggested fixes in apply_diff error message
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7eca7061819..b7277298fcb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,27 @@
 # Roo Code Changelog
 
+## [3.23.17] - 2025-07-22
+
+- Add: todo list tool enable checkbox to provider advanced settings
+- Add: Moonshot provider (thanks @CellenLee!)
+- Add: Qwen/Qwen3-235B-A22B-Instruct-2507 model to Chutes AI provider
+- Fix: move context condensing prompt to Prompts section (thanks @SannidhyaSah!)
+- Add: jump icon for newly created files
+- Fix: add character limit to prevent terminal output context explosion
+- Fix: resolve global mode export not including rules files
+- Fix: enable export, share, and copy buttons during API operations (thanks @MuriloFP!)
+- Add: configurable timeout for evals (5-10 min)
+- Add: auto-omit MCP content when no servers are configured
+- Fix: sort symlinked rules files by symlink names, not target names
+- Docs: clarify when to use update_todo_list tool
+- Add: Mistral embedding provider (thanks @SannidhyaSah!)
+- Fix: add run parameter to vitest command in rules (thanks @KJ7LNW!)
+- Update: the max_tokens fallback logic in the sliding window
+- Fix: Bedrock and Vertex token counting improvements (thanks @daniel-lxs!)
+- Add: llama-4-maverick model to Vertex AI provider (thanks @MuriloFP!)
+- Fix: properly distinguish between user cancellations and API failures
+- Fix: add case sensitivity mention to suggested fixes in apply_diff error message
+
 ## [3.23.16] - 2025-07-19
 
 - Add global rate limiting for OpenAI-compatible embeddings (thanks @daniel-lxs!)
diff --git a/src/package.json b/src/package.json
index 5e3cd3bc530..cfdec57bde6 100644
--- a/src/package.json
+++ b/src/package.json
@@ -3,7 +3,7 @@
 	"displayName": "%extension.displayName%",
 	"description": "%extension.description%",
 	"publisher": "RooVeterinaryInc",
-	"version": "3.23.16",
+	"version": "3.23.17",
 	"icon": "assets/icons/icon.png",
 	"galleryBanner": {
 		"color": "#617A91",