diff --git a/README.md b/README.md
index 3293eb16887..576e50671dd 100644
--- a/README.md
+++ b/README.md
@@ -145,7 +145,7 @@ The current priorities are to improve core capabilities and user experience of t
4. **Context Delivery** - [Discussion](https://github.com/dwash96/cecli/issues/47)
* [ ] Use workflow for internal discovery to better target file snippets needed for specific tasks
- * [ ] Add support for partial files and code snippets in model completion messages
+ * [x] Add support for partial files and code snippets in model completion messages
* [x] Update message request structure for optimal caching
5. **TUI Experience** - [Discussion](https://github.com/dwash96/cecli/issues/48)
@@ -162,6 +162,38 @@ The current priorities are to improve core capabilities and user experience of t
* [x] Add a plugin-like system for allowing agent mode to use user-defined tools in simple python files
* [x] Add a dynamic tool discovery tool to allow the system to have only the tools it needs in context
+7. **Sub Agents**
+ * [ ] Add `/fork` and `/rejoin` commands to manually manage parts of the conversation history
+  * [ ] Add an instantiable view of the conversation system so sub agents get their own context and workspaces
+  * [ ] Give coder classes discrete identifiers and management utilities so each works against its own slice of the world
+ * [ ] Refactor global files like todo lists to live inside instance folders to avoid state conflicts
+ * [ ] Add a `spawn` tool that launches a sub agent as a background command that the parent model waits for to finish
+ * [ ] Add visibility into active sub agent calls in TUI
+
+8. **Hooks**
+  * [ ] Add a hooks base class for user-defined Python hooks, with an `execute` method plus type and priority settings (see the sketch after this roadmap)
+  * [ ] Add a hook manager that can accept user-defined files and command line commands
+  * [ ] Integrate the hook manager with coder classes, with hooks for `start`, `on_message`, `end_message`, `pre_tool`, and `post_tool`
+
+9. **Efficient File Editing**
+ * [ ] Explore use of hashline file representation for more targeted file editing
+ * [ ] Assuming viability, update SEARCH part of SEARCH/REPLACE with hashline identification
+ * [ ] Update agent mode edit tools to work with hashline identification
+ * [ ] Update internal file diff representation to support hashline propagation
+
+10. **Dynamic Context Management**
+ * [ ] Update compaction to use observational memory sub agent calls to generate decision records that are used as the compaction basis
+ * [ ] Persist decision records to disk for sessions with some settings for managing lifetimes of such persistence
+ * [ ] Integrate RLM to extract information from decision records on disk and other definable notes
+ * [ ] Add a "describe" tool that launches a sub agent workflow that populates an RLM call's context with:
+ * Current Conversation History
+ * Past Decision Records
+ * Repo Map Found Files
+
+11. **Quality of Life**
+  * [ ] Add hotkey support for running repeatable commands, like switching between preferred models
+  * [ ] Unified error message logging inside the `.cecli` directory
+
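+A minimal sketch of the hooks base class described in item 8 (the class name, attributes, and signature here are illustrative assumptions, not a final API):
+
+```python
+class BaseHook:
+    """Base class for user-defined hooks loaded by the hook manager."""
+
+    # Hook point and ordering; the planned points are start, on_message,
+    # end_message, pre_tool, and post_tool (see item 8 above).
+    type = "pre_tool"
+    priority = 100  # assumed convention: lower values run earlier
+
+    def execute(self, coder, payload):
+        """Inspect or modify the payload, then return it."""
+        return payload
+```
+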
### All Contributors (Both Cecli and Aider main)
diff --git a/cecli/__init__.py b/cecli/__init__.py
index acf60fa46d4..4abb6391fe4 100644
--- a/cecli/__init__.py
+++ b/cecli/__init__.py
@@ -1,6 +1,6 @@
from packaging import version
-__version__ = "0.96.10.dev"
+__version__ = "0.97.0.dev"
safe_version = __version__
try:
diff --git a/cecli/coders/agent_coder.py b/cecli/coders/agent_coder.py
index 64d5c13d5aa..031e3b9d13d 100644
--- a/cecli/coders/agent_coder.py
+++ b/cecli/coders/agent_coder.py
@@ -1,18 +1,16 @@
-import ast
import asyncio
import base64
import json
import locale
import os
import platform
-import re
import time
import traceback
from collections import Counter, defaultdict
from datetime import datetime
from pathlib import Path
-from cecli import urls, utils
+from cecli import utils
from cecli.change_tracker import ChangeTracker
from cecli.helpers import nested
from cecli.helpers.background_commands import BackgroundCommandManager
@@ -29,11 +27,10 @@
from cecli.helpers.skills import SkillsManager
from cecli.llm import litellm
from cecli.mcp import LocalServer, McpServerManager
-from cecli.repo import ANY_GIT_ERROR
from cecli.tools.utils.registry import ToolRegistry
+from cecli.utils import copy_tool_call, tool_call_to_dict
from .base_coder import Coder
-from .editblock_coder import do_replace, find_original_update_blocks, find_similar_lines
class AgentCoder(Coder):
@@ -116,6 +113,7 @@ def _get_agent_config(self):
config["skip_cli_confirmations"] = nested.getter(
config, "skip_cli_confirmations", nested.getter(config, "yolo", [])
)
+ config["command_timeout"] = nested.getter(config, "command_timeout", 30)
config["tools_paths"] = nested.getter(config, "tools_paths", [])
config["tools_includelist"] = nested.getter(
@@ -686,12 +684,13 @@ async def process_tool_calls(self, tool_call_response):
self.last_round_tools = []
if self.partial_response_tool_calls:
for tool_call in self.partial_response_tool_calls:
- tool_name = tool_call.get("function", {}).get("name")
+ tool_name = getattr(tool_call.function, "name", None)
+ tool_call_copy = tool_call_to_dict(copy_tool_call(tool_call))
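+                # Drop the unique id so repeated identical calls produce matching vectors below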
+ if "id" in tool_call_copy:
+ del tool_call_copy["id"]
+
if tool_name:
self.last_round_tools.append(tool_name)
- tool_call_copy = tool_call.copy()
- if "id" in tool_call_copy:
- del tool_call_copy["id"]
tool_call_str = str(tool_call_copy)
tool_vector = create_bigram_vector((tool_call_str,))
tool_vector_norm = normalize_vector(tool_vector)
@@ -703,111 +702,107 @@ async def process_tool_calls(self, tool_call_response):
self.tool_usage_history.pop(0)
if len(self.tool_call_vectors) > self.max_tool_vector_history:
self.tool_call_vectors.pop(0)
+
+ # Ensure we call base implementation to trigger execution of all tools (native + extracted)
return await super().process_tool_calls(tool_call_response)
async def reply_completed(self):
"""Process the completed response from the LLM.
- This is a key method that:
- 1. Processes any tool commands in the response (only after a '---' line)
- 2. Processes any SEARCH/REPLACE blocks in the response (only before the '---' line if one exists)
- 3. If tool commands were found, sets up for another automatic round
+ This handles:
+ 1. SEARCH/REPLACE blocks (Edit format)
+ 2. Tool execution follow-up (Reflections)
- This enables the "auto-exploration" workflow where the LLM can
- iteratively discover and analyze relevant files before providing
- a final answer to the user's question.
+        Tool extraction and execution are now handled in BaseCoder.consolidate_chunks
+        and BaseCoder.process_tool_calls respectively.
"""
content = self.partial_response_content
- if not content or not content.strip():
- if len(self.tool_usage_history) > self.tool_usage_retries:
- self.tool_usage_history = []
- return True
- original_content = content
- (
- processed_content,
- result_messages,
- tool_calls_found,
- content_before_last_separator,
- tool_names_this_turn,
- ) = await self._process_tool_commands(content)
+ tool_calls_found = bool(self.partial_response_tool_calls)
+
+ # 1. Handle Tool Execution Follow-up (Reflection)
if self.agent_finished:
self.tool_usage_history = []
self.reflected_message = None
if self.files_edited_by_tools:
_ = await self.auto_commit(self.files_edited_by_tools)
return False
- self.partial_response_content = processed_content.strip()
- has_search = "<<<<<<< SEARCH" in self.partial_response_content
- has_divider = "=======" in self.partial_response_content
- has_replace = ">>>>>>> REPLACE" in self.partial_response_content
- edit_match = has_search and has_divider and has_replace
- separator_marker = "\n---\n"
- if separator_marker in original_content and edit_match:
- has_search_before = "<<<<<<< SEARCH" in content_before_last_separator
- has_divider_before = "=======" in content_before_last_separator
- has_replace_before = ">>>>>>> REPLACE" in content_before_last_separator
- edit_match = has_search_before and has_divider_before and has_replace_before
- if edit_match:
- self.io.tool_output("Detected edit blocks, applying changes within Agent...")
- edited_files = await self._apply_edits_from_response()
- if self.reflected_message:
- return False
- if edited_files and self.num_reflections < self.max_reflections:
- cur_messages = ConversationManager.get_messages_dict(MessageTag.CUR)
- if cur_messages and len(cur_messages) >= 1:
- for msg in reversed(cur_messages):
- if msg["role"] == "user":
- original_question = msg["content"]
- break
- else:
- original_question = (
- "Please continue your exploration and provide a final answer."
- )
- next_prompt = f"""
-I have applied the edits you suggested.
-The following files were modified: {', '.join(edited_files)}. Let me continue working on your request.
-Your original question was: {original_question}"""
- self.reflected_message = next_prompt
- self.io.tool_output("Continuing after applying edits...")
- return False
+
+ # 2. Check for unfinished and recently finished background commands
+ background_commands = BackgroundCommandManager.list_background_commands()
+
+ # Get command timeout from agent_config
+ command_timeout = int(self.agent_config.get("command_timeout", 30))
+
+ # Check for unfinished commands
+ unfinished_commands = [
+ cmd_key
+ for cmd_key, cmd_info in background_commands.items()
+ if cmd_info.get("running", False)
+ ]
+
+        # Check for commands that finished within the last command_timeout * 4 seconds
+ current_time = time.time()
+ recently_finished_commands = [
+ cmd_key
+ for cmd_key, cmd_info in background_commands.items()
+ if not cmd_info.get("running", False)
+ and cmd_info.get("end_time")
+ and (current_time - cmd_info["end_time"]) < command_timeout * 4
+ ]
+
+ if unfinished_commands and not self.agent_finished:
+ waiting_msg = (
+ f"⏱️ Waiting for {len(unfinished_commands)} background command(s) to complete..."
+ )
+            self.reflected_message = (
+                waiting_msg
+                + "\nPlease reply with 'waiting...' if you still need this command's output and it"
+                " has not yet finished, or stop the command if its output is no longer necessary."
+ )
+ self.io.tool_output(waiting_msg)
+ await asyncio.sleep(command_timeout / 2)
+ return True
+
+ # Check for recently finished commands that need reflection
+ if recently_finished_commands and not self.agent_finished:
+ return True # Retrigger reflection to process recently finished command outputs
+
+ # 3. If no content and no tools, we might be done or just empty response
+ if (not content or not content.strip()) and not tool_calls_found:
+ if len(self.tool_usage_history) > self.tool_usage_retries:
+ self.tool_usage_history = []
+ return True
+
if tool_calls_found and self.num_reflections < self.max_reflections:
self.tool_call_count = 0
self.files_added_in_exploration = set()
cur_messages = ConversationManager.get_messages_dict(MessageTag.CUR)
- if cur_messages and len(cur_messages) >= 1:
+ original_question = "Please continue your exploration and provide a final answer."
+ if cur_messages:
for msg in reversed(cur_messages):
if msg["role"] == "user":
original_question = msg["content"]
break
- else:
- original_question = (
- "Please continue your exploration and provide a final answer."
- )
- next_prompt_parts = []
- next_prompt_parts.append(
- "I have processed the results of the previous tool calls. Let me analyze them"
- " and continue working towards your request."
- )
- if result_messages:
- next_prompt_parts.append("\nResults from previous tool calls:")
- next_prompt_parts.extend(result_messages)
- next_prompt_parts.append("""
-Based on these results and the updated file context, I will proceed.""")
- else:
- next_prompt_parts.append("""
-No specific results were returned from the previous tool calls, but the file context may have been updated.
-I will proceed based on the current context.""")
- next_prompt_parts.append(f"\nYour original question was: {original_question}")
- self.reflected_message = "\n".join(next_prompt_parts)
- self.io.tool_output("Continuing exploration...")
- return False
- elif result_messages:
- results_block = "\n\n" + "\n".join(result_messages)
- self.partial_response_content += results_block
+
+ # Construct reflection prompt
+ next_prompt_parts = []
+ next_prompt_parts.append(
+ "I have processed the results of the previous tool calls. Let me analyze them"
+ " and continue working towards your request."
+ )
+ next_prompt_parts.append("""
+I will proceed based on the tool results and updated context.""")
+ next_prompt_parts.append(f"\nYour original question was: {original_question}")
+ self.reflected_message = "\n".join(next_prompt_parts)
+ self.io.tool_output("Continuing exploration...")
+ return False
+
if self.files_edited_by_tools:
saved_message = await self.auto_commit(self.files_edited_by_tools)
if not saved_message and hasattr(self.gpt_prompts, "files_content_gpt_edits_no_repo"):
saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo
+
self.tool_call_count = 0
self.files_added_in_exploration = set()
self.files_edited_by_tools = set()
@@ -845,293 +840,6 @@ async def _execute_tool_with_registry(self, norm_tool_name, params):
return f"Error: Could not find server instance for {server_name}"
return f"Error: Unknown tool name '{norm_tool_name}'"
- def _convert_concatenated_json_to_tool_calls(self, content):
- """
- Check if content contains concatenated JSON objects and convert them to tool call format.
-
- Args:
- content (str): Content to check for concatenated JSON
-
- Returns:
- str: Content with concatenated JSON converted to tool call format, or original content if no JSON found
- """
- try:
- json_chunks = utils.split_concatenated_json(content)
- if len(json_chunks) >= 1:
- tool_calls = []
- for chunk in json_chunks:
- try:
- json_obj = json.loads(chunk)
- if (
- isinstance(json_obj, dict)
- and "name" in json_obj
- and "arguments" in json_obj
- ):
- tool_name = json_obj["name"]
- arguments = json_obj["arguments"]
- kw_args = []
- for key, value in arguments.items():
- if isinstance(value, str):
- escaped_value = value.replace('"', '\\"')
- kw_args.append(f'{key}="{escaped_value}"')
- elif isinstance(value, bool):
- kw_args.append(f"{key}={str(value).lower()}")
- elif value is None:
- kw_args.append(f"{key}=None")
- else:
- kw_args.append(f"{key}={repr(value)}")
- kw_args_str = ", ".join(kw_args)
- tool_call = f"[tool_call({tool_name}, {kw_args_str})]"
- tool_calls.append(tool_call)
- else:
- tool_calls.append(chunk)
- except json.JSONDecodeError:
- tool_calls.append(chunk)
- if any(call.startswith("[tool_") for call in tool_calls):
- return "".join(tool_calls)
- except Exception as e:
- self.io.tool_warning(f"Error converting concatenated JSON to tool calls: {str(e)}")
- return content
-
- async def _process_tool_commands(self, content):
- """
- Process tool commands in the `[tool_call(name, param=value)]` format within the content.
-
- Rules:
- 1. Tool calls must appear after the LAST '---' line separator in the content
- 2. Any tool calls before this last separator are treated as text (not executed)
- 3. SEARCH/REPLACE blocks can only appear before this last separator
-
- Returns processed content, result messages, and a flag indicating if any tool calls were found.
- Also returns the content before the last separator for SEARCH/REPLACE block validation.
- """
- result_messages = []
- modified_content = content
- tool_calls_found = False
- call_count = 0
- max_calls = self.max_tool_calls
- tool_names = []
- content = self._convert_concatenated_json_to_tool_calls(content)
- separator_marker = "---"
- content_parts = content.split(separator_marker)
- if len(content_parts) == 1:
- tool_call_pattern = "\\[tool_call\\([^\\]]+\\)\\]"
- if re.search(tool_call_pattern, content):
- content_before_separator = ""
- content_after_separator = content
- else:
- return content, result_messages, False, content, tool_names
- content_before_separator = separator_marker.join(content_parts[:-1])
- content_after_separator = content_parts[-1]
- processed_content = content_before_separator + separator_marker
- last_index = 0
- tool_call_pattern = re.compile("\\[tool_.*?\\(", re.DOTALL)
- end_marker = "]"
- while True:
- match = tool_call_pattern.search(content_after_separator, last_index)
- if not match:
- processed_content += content_after_separator[last_index:]
- break
- start_pos = match.start()
- start_marker = match.group(0)
- backslashes = 0
- p = start_pos - 1
- while p >= 0 and content_after_separator[p] == "\\":
- backslashes += 1
- p -= 1
- if backslashes % 2 == 1:
- processed_content += content_after_separator[
- last_index : start_pos + len(start_marker)
- ]
- last_index = start_pos + len(start_marker)
- continue
- processed_content += content_after_separator[last_index:start_pos]
- scan_start_pos = start_pos + len(start_marker)
- paren_level = 1
- in_single_quotes = False
- in_double_quotes = False
- escaped = False
- end_paren_pos = -1
- for i in range(scan_start_pos, len(content_after_separator)):
- char = content_after_separator[i]
- if escaped:
- escaped = False
- elif char == "\\":
- escaped = True
- elif char == "'" and not in_double_quotes:
- in_single_quotes = not in_single_quotes
- elif char == '"' and not in_single_quotes:
- in_double_quotes = not in_double_quotes
- elif char == "(" and not in_single_quotes and not in_double_quotes:
- paren_level += 1
- elif char == ")" and not in_single_quotes and not in_double_quotes:
- paren_level -= 1
- if paren_level == 0:
- end_paren_pos = i
- break
- expected_end_marker_start = end_paren_pos + 1
- actual_end_marker_start = -1
- end_marker_found = False
- if end_paren_pos != -1:
- for j in range(expected_end_marker_start, len(content_after_separator)):
- if not content_after_separator[j].isspace():
- actual_end_marker_start = j
- if content_after_separator[actual_end_marker_start] == end_marker:
- end_marker_found = True
- break
- if not end_marker_found:
- tool_name = "unknown"
- try:
- partial_content = content_after_separator[scan_start_pos : scan_start_pos + 100]
- comma_pos = partial_content.find(",")
- if comma_pos > 0:
- tool_name = partial_content[:comma_pos].strip()
- else:
- space_pos = partial_content.find(" ")
- paren_pos = partial_content.find("(")
- if space_pos > 0 and (paren_pos < 0 or space_pos < paren_pos):
- tool_name = partial_content[:space_pos].strip()
- elif paren_pos > 0:
- tool_name = partial_content[:paren_pos].strip()
- except Exception:
- pass
- self.io.tool_warning(
- f"Malformed tool call for '{tool_name}'. Missing closing parenthesis or"
- " bracket. Skipping."
- )
- processed_content += start_marker
- last_index = scan_start_pos
- continue
- full_match_str = content_after_separator[start_pos : actual_end_marker_start + 1]
- inner_content = content_after_separator[scan_start_pos:end_paren_pos].strip()
- last_index = actual_end_marker_start + 1
- call_count += 1
- if call_count > max_calls:
- self.io.tool_warning(
- f"Exceeded maximum tool calls ({max_calls}). Skipping remaining calls."
- )
- continue
- tool_calls_found = True
- tool_name = None
- params = {}
- result_message = None
- tool_calls_found = True
- try:
- if inner_content:
- parts = inner_content.split(",", 1)
- potential_tool_name = parts[0].strip()
- is_string = (
- potential_tool_name.startswith("'")
- and potential_tool_name.endswith("'")
- or potential_tool_name.startswith('"')
- and potential_tool_name.endswith('"')
- )
- if not potential_tool_name.isidentifier() and not is_string:
- quoted_tool_name = json.dumps(potential_tool_name)
- if len(parts) > 1:
- inner_content = quoted_tool_name + ", " + parts[1]
- else:
- inner_content = quoted_tool_name
- parse_str = f"f({inner_content})"
- parsed_ast = ast.parse(parse_str)
- if (
- not isinstance(parsed_ast, ast.Module)
- or not parsed_ast.body
- or not isinstance(parsed_ast.body[0], ast.Expr)
- ):
- raise ValueError("Unexpected AST structure")
- call_node = parsed_ast.body[0].value
- if not isinstance(call_node, ast.Call):
- raise ValueError("Expected a Call node")
- if not call_node.args:
- raise ValueError("Tool name not found or invalid")
- tool_name_node = call_node.args[0]
- if isinstance(tool_name_node, ast.Name):
- tool_name = tool_name_node.id
- elif isinstance(tool_name_node, ast.Constant) and isinstance(
- tool_name_node.value, str
- ):
- tool_name = tool_name_node.value
- else:
- raise ValueError("Tool name must be an identifier or a string literal")
- tool_names.append(tool_name)
- for keyword in call_node.keywords:
- key = keyword.arg
- value_node = keyword.value
- if isinstance(value_node, ast.Constant):
- value = value_node.value
- if isinstance(value, str) and "\n" in value:
- lineno = value_node.lineno if hasattr(value_node, "lineno") else 0
- end_lineno = (
- value_node.end_lineno
- if hasattr(value_node, "end_lineno")
- else lineno
- )
- if end_lineno > lineno:
- if value.startswith("\n"):
- value = value[1:]
- if value.endswith("\n"):
- value = value[:-1]
- elif isinstance(value_node, ast.Name):
- id_val = value_node.id.lower()
- if id_val == "true":
- value = True
- elif id_val == "false":
- value = False
- elif id_val == "none":
- value = None
- else:
- value = value_node.id
- else:
- try:
- value = ast.unparse(value_node)
- except AttributeError:
- raise ValueError(
- f"Unsupported argument type for key '{key}': {type(value_node)}"
- )
- except Exception as unparse_e:
- raise ValueError(
- f"Could not unparse value for key '{key}': {unparse_e}"
- )
- suppressed_arg_values = ["..."]
- if isinstance(value, str) and value in suppressed_arg_values:
- self.io.tool_warning(
- f"Skipping suppressed argument value '{value}' for key '{key}' in tool"
- f" '{tool_name}'"
- )
- continue
- params[key] = value
- except (SyntaxError, ValueError) as e:
- result_message = f"Error parsing tool call '{inner_content}': {e}"
- self.io.tool_error(f"Failed to parse tool call: {full_match_str}\nError: {e}")
- result_messages.append(f"[Result (Parse Error): {result_message}]")
- continue
- except Exception as e:
- result_message = f"Unexpected error parsing tool call '{inner_content}': {e}"
- self.io.tool_error(f"""Unexpected error during parsing: {full_match_str}
-Error: {e}
-{traceback.format_exc()}""")
- result_messages.append(f"[Result (Parse Error): {result_message}]")
- continue
- try:
- norm_tool_name = tool_name.lower()
- result_message = await self._execute_tool_with_registry(norm_tool_name, params)
- except Exception as e:
- result_message = f"Error executing {tool_name}: {str(e)}"
- self.io.tool_error(f"""Error during {tool_name} execution: {e}
-{traceback.format_exc()}""")
- if result_message:
- result_messages.append(f"[Result ({tool_name}): {result_message}]")
- self.tool_call_count += call_count
- modified_content = processed_content
- return (
- modified_content,
- result_messages,
- tool_calls_found,
- content_before_separator,
- tool_names,
- )
-
def _get_repetitive_tools(self):
"""
Identifies repetitive tool usage patterns from rounds of tool calls.
@@ -1283,146 +991,6 @@ def _generate_write_context(self):
return "\n".join(context_parts)
return ""
- async def _apply_edits_from_response(self):
- """
- Parses and applies SEARCH/REPLACE edits found in self.partial_response_content.
- Returns a set of relative file paths that were successfully edited.
- """
- edited_files = set()
- try:
- edits = list(
- find_original_update_blocks(
- self.partial_response_content, self.fence, self.get_inchat_relative_files()
- )
- )
- self.shell_commands += [edit[1] for edit in edits if edit[0] is None]
- edits = [edit for edit in edits if edit[0] is not None]
- prepared_edits = []
- seen_paths = dict()
- self.need_commit_before_edits = set()
- for edit in edits:
- path = edit[0]
- if path in seen_paths:
- allowed = seen_paths[path]
- else:
- allowed = await self.allowed_to_edit(path)
- seen_paths[path] = allowed
- if allowed:
- prepared_edits.append(edit)
- await self.dirty_commit()
- self.need_commit_before_edits = set()
- failed = []
- passed = []
- for edit in prepared_edits:
- path, original, updated = edit
- full_path = self.abs_root_path(path)
- new_content = None
- if Path(full_path).exists():
- content = self.io.read_text(full_path)
- new_content = do_replace(full_path, content, original, updated, self.fence)
- if not new_content and original.strip():
- for other_full_path in self.abs_fnames:
- if other_full_path == full_path:
- continue
- other_content = self.io.read_text(other_full_path)
- other_new_content = do_replace(
- other_full_path, other_content, original, updated, self.fence
- )
- if other_new_content:
- path = self.get_rel_fname(other_full_path)
- full_path = other_full_path
- new_content = other_new_content
- self.io.tool_warning(f"Applied edit intended for {edit[0]} to {path}")
- break
- if new_content:
- if not self.dry_run:
- self.io.write_text(full_path, new_content)
- self.io.tool_output(f"Applied edit to {path}")
- else:
- self.io.tool_output(f"Did not apply edit to {path} (--dry-run)")
- passed.append((path, original, updated))
- else:
- failed.append(edit)
- if failed:
- blocks = "block" if len(failed) == 1 else "blocks"
- error_message = f"# {len(failed)} SEARCH/REPLACE {blocks} failed to match!\n"
- for edit in failed:
- path, original, updated = edit
- full_path = self.abs_root_path(path)
- content = self.io.read_text(full_path)
- error_message += f"""
-## SearchReplaceNoExactMatch: This SEARCH block failed to exactly match lines in {path}
-<<<<<<< SEARCH
-{original}=======
-{updated}>>>>>>> REPLACE
-
-"""
- did_you_mean = find_similar_lines(original, content)
- if did_you_mean:
- error_message += f"""Did you mean to match some of these actual lines from {path}?
-
-{self.fence[0]}
-{did_you_mean}
-{self.fence[1]}
-
-"""
- if updated in content and updated:
- error_message += f"""Are you sure you need this SEARCH/REPLACE block?
-The REPLACE lines are already in {path}!
-
-"""
- error_message += (
- "The SEARCH section must exactly match an existing block of lines including all"
- " white space, comments, indentation, docstrings, etc"
- )
- if passed:
- pblocks = "block" if len(passed) == 1 else "blocks"
- error_message += f"""
-# The other {len(passed)} SEARCH/REPLACE {pblocks} were applied successfully.
-Don't re-send them.
-Just reply with fixed versions of the {blocks} above that failed to match.
-"""
- self.io.tool_error(error_message)
- self.reflected_message = error_message
- edited_files = set(edit[0] for edit in passed)
- if edited_files:
- self.coder_edited_files.update(edited_files)
- self.auto_commit(edited_files)
- if self.auto_lint:
- lint_errors = self.lint_edited(edited_files)
- self.auto_commit(edited_files, context="Ran the linter")
- if lint_errors and not self.reflected_message:
- ok = await self.io.confirm_ask("Attempt to fix lint errors?")
- if ok:
- self.reflected_message = lint_errors
- shared_output = await self.run_shell_commands()
- if shared_output:
- self.io.tool_output("Shell command output:\n" + shared_output)
- if self.auto_test and not self.reflected_message:
- test_errors = await self.commands.execute("test", self.test_cmd)
- if test_errors:
- ok = await self.io.confirm_ask("Attempt to fix test errors?")
- if ok:
- self.reflected_message = test_errors
- self.show_undo_hint()
- except ValueError as err:
- self.num_malformed_responses += 1
- error_message = err.args[0]
- self.io.tool_error("The LLM did not conform to the edit format.")
- self.io.tool_output(urls.edit_errors)
- self.io.tool_output()
- self.io.tool_output(str(error_message))
- self.reflected_message = str(error_message)
- except ANY_GIT_ERROR as err:
- self.io.tool_error(f"Git error during edit application: {str(err)}")
- self.reflected_message = f"Git error during edit application: {str(err)}"
- except Exception as err:
- self.io.tool_error("Exception while applying edits:")
- self.io.tool_error(str(err), strip=False)
- self.io.tool_error(traceback.format_exc())
- self.reflected_message = f"Exception while applying edits: {str(err)}"
- return edited_files
-
def _add_file_to_context(self, file_path, explicit=False):
"""
Helper method to add a file to context as read-only.
diff --git a/cecli/coders/base_coder.py b/cecli/coders/base_coder.py
index a6ab2649d18..734fb2f2c65 100755
--- a/cecli/coders/base_coder.py
+++ b/cecli/coders/base_coder.py
@@ -31,7 +31,7 @@
import httpx
from litellm import experimental_mcp_client
-from litellm.types.utils import ModelResponse
+from litellm.types.utils import ChatCompletionMessageToolCall, Function, ModelResponse
from prompt_toolkit.patch_stdout import patch_stdout
from rich.console import Console
@@ -64,7 +64,7 @@
from cecli.sessions import SessionManager
from cecli.tools.utils.output import print_tool_response
from cecli.tools.utils.registry import ToolRegistry
-from cecli.utils import format_tokens, is_image_file
+from cecli.utils import copy_tool_call, format_tokens, is_image_file
from ..dump import dump # noqa: F401
from ..prompts.utils.registry import PromptObject, PromptRegistry
@@ -2357,23 +2357,19 @@ async def send_message(self, inp):
return
async def process_tool_calls(self, tool_call_response):
- if tool_call_response is None:
- return False
-
- # Handle different response structures
- try:
- # Try to get tool calls from the standard OpenAI response format
- if hasattr(tool_call_response, "choices") and tool_call_response.choices:
- message = tool_call_response.choices[0].message
- if hasattr(message, "tool_calls") and message.tool_calls:
- original_tool_calls = message.tool_calls
- else:
- return False
- else:
- # Handle other response formats
- return False
- except (AttributeError, IndexError):
- return False
+ # Use partial_response_tool_calls if available (populated by consolidate_chunks)
+ # otherwise try to extract from tool_call_response
+ original_tool_calls = []
+ if self.partial_response_tool_calls:
+ original_tool_calls = self.partial_response_tool_calls
+ elif tool_call_response is not None:
+ try:
+ if hasattr(tool_call_response, "choices") and tool_call_response.choices:
+ message = tool_call_response.choices[0].message
+ if hasattr(message, "tool_calls") and message.tool_calls:
+ original_tool_calls = message.tool_calls
+ except (AttributeError, IndexError):
+ pass
if not original_tool_calls:
return False
@@ -2404,10 +2400,13 @@ async def process_tool_calls(self, tool_call_response):
continue
# Create a new tool call for each JSON chunk, with a unique ID.
- new_function = tool_call.function.model_copy(update={"arguments": chunk})
- new_tool_call = tool_call.model_copy(
- update={"id": f"{tool_call.id}-{i}", "function": new_function}
- )
+ new_tool_call = copy_tool_call(tool_call)
+ if hasattr(new_tool_call, "model_copy"):
+ new_tool_call.function.arguments = chunk
+ new_tool_call.id = f"{tool_call.id}-{i}"
+ else:
+ new_tool_call.function.arguments = chunk
+ new_tool_call.id = f"{getattr(tool_call, 'id', 'call')}-{i}"
expanded_tool_calls.append(new_tool_call)
# Collect all tool calls grouped by server
@@ -2551,7 +2550,7 @@ async def _exec_server_tools(server, tool_calls_list):
all_results_content = []
for args in parsed_args_list:
- new_tool_call = tool_call.model_copy(deep=True)
+ new_tool_call = copy_tool_call(tool_call)
new_tool_call.function.arguments = json.dumps(args)
call_result = await experimental_mcp_client.call_openai_tool(
@@ -2806,6 +2805,7 @@ def add_assistant_reply_to_cur_messages(self):
ConversationManager.add_message(
message_dict=msg,
tag=MessageTag.CUR,
+ hash_key=("assistant_message", str(msg), str(time.monotonic_ns())),
)
def get_file_mentions(self, content, ignore_current=False):
@@ -3202,22 +3202,9 @@ def consolidate_chunks(self):
# Add provider-specific fields directly to the tool call object
tool_call.provider_specific_fields = provider_specific_fields_by_index[i]
- # Create dictionary version with provider-specific fields
- tool_call_dict = tool_call.model_dump()
-
- # Add provider-specific fields to the dictionary too (in case model_dump() doesn't include them)
- if tool_id in provider_specific_fields_by_id:
- tool_call_dict["provider_specific_fields"] = provider_specific_fields_by_id[
- tool_id
- ]
- elif i in provider_specific_fields_by_index:
- tool_call_dict["provider_specific_fields"] = (
- provider_specific_fields_by_index[i]
- )
-
# Only append to partial_response_tool_calls if it's empty
if len(self.partial_response_tool_calls) == 0:
- self.partial_response_tool_calls.append(tool_call_dict)
+ self.partial_response_tool_calls.append(tool_call)
self.partial_response_function_call = (
response.choices[0].message.tool_calls[0].function
@@ -3253,6 +3240,70 @@ def consolidate_chunks(self):
except AttributeError as e:
content_err = e
+ # If no native tool calls, check if the content contains JSON tool calls
+ # This handles models that write JSON in text instead of using native calling
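+        # e.g. a plain-text reply like {"name": "...", "arguments": {...}} instead of a native call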
+ if not self.partial_response_tool_calls and self.partial_response_content:
+ try:
+ # Simple extraction of JSON-like structures that look like tool calls
+ # Only look for tool calls if it looks like JSON
+ if "{" in self.partial_response_content or "[" in self.partial_response_content:
+ json_chunks = utils.split_concatenated_json(self.partial_response_content)
+ extracted_calls = []
+ chunk_index = 0
+
+ for chunk in json_chunks:
+ chunk_index += 1
+ try:
+ json_obj = json.loads(chunk)
+ if (
+ isinstance(json_obj, dict)
+ and "name" in json_obj
+ and "arguments" in json_obj
+ ):
+ # Create a Pydantic model for the tool call
+ function_obj = Function(
+ name=json_obj["name"],
+ arguments=(
+ json.dumps(json_obj["arguments"])
+ if isinstance(json_obj["arguments"], (dict, list))
+ else str(json_obj["arguments"])
+ ),
+ )
+ tool_call_obj = ChatCompletionMessageToolCall(
+ type="function",
+ function=function_obj,
+ id=f"call_{len(extracted_calls)}_{int(time.time())}_{chunk_index}",
+ )
+ extracted_calls.append(tool_call_obj)
+ elif isinstance(json_obj, list):
+ for item in json_obj:
+ if (
+ isinstance(item, dict)
+ and "name" in item
+ and "arguments" in item
+ ):
+ function_obj = Function(
+ name=item["name"],
+ arguments=(
+ json.dumps(item["arguments"])
+ if isinstance(item["arguments"], (dict, list))
+ else str(item["arguments"])
+ ),
+ )
+ tool_call_obj = ChatCompletionMessageToolCall(
+ type="function",
+ function=function_obj,
+ id=f"call_{len(extracted_calls)}_{int(time.time())}_{chunk_index}",
+ )
+ extracted_calls.append(tool_call_obj)
+ except json.JSONDecodeError:
+ continue
+
+ if extracted_calls:
+ self.partial_response_tool_calls = extracted_calls
+ except Exception:
+ pass
+
return response, func_err, content_err
def stream_wrapper(self, content, final):
@@ -3298,13 +3349,19 @@ def preprocess_response(self):
tool_list = []
tool_id_set = set()
- for tool_call_dict in self.partial_response_tool_calls:
+ for tool_call in self.partial_response_tool_calls:
+ # Handle both dictionary and object tool calls
+ if isinstance(tool_call, dict):
+ tool_id = tool_call.get("id")
+ else:
+ tool_id = getattr(tool_call, "id", None)
+
# LLM APIs sometimes return duplicates and that's annoying part 2
- if tool_call_dict.get("id") in tool_id_set:
+ if tool_id in tool_id_set:
continue
- tool_id_set.add(tool_call_dict.get("id"))
- tool_list.append(tool_call_dict)
+ tool_id_set.add(tool_id)
+ tool_list.append(tool_call)
self.partial_response_tool_calls = tool_list
diff --git a/cecli/commands/terminal_setup.py b/cecli/commands/terminal_setup.py
index 5dca6666c13..3fbe5a32e36 100644
--- a/cecli/commands/terminal_setup.py
+++ b/cecli/commands/terminal_setup.py
@@ -158,12 +158,28 @@ def _backup_file(cls, file_path, io):
@classmethod
def _update_alacritty(cls, path, io, dry_run=False):
"""Updates Alacritty TOML configuration with shift+enter binding."""
- if os.environ.get("TERM") != "alacritty":
- return False
+ import tomlkit
+
+ # Create directory if it doesn't exist
+ path.parent.mkdir(parents=True, exist_ok=True)
if not path.exists():
- io.tool_output(f"Skipping Alacritty: File not found at {path}")
- return False
+ if dry_run:
+ io.tool_output(f"DRY-RUN: Would create Alacritty config at {path}")
+ io.tool_output(
+ 'DRY-RUN: Would add binding: {"key": "Return", "mods": "Shift", "chars": "\\n"}'
+ )
+ return True
+ else:
+ io.tool_output(f"Creating Alacritty config at {path}")
+ # Create minimal Alacritty config with shift+enter binding
+ data = {
+ "keyboard": {"bindings": [{"key": "Return", "mods": "Shift", "chars": "\n"}]}
+ }
+ with open(path, "w", encoding="utf-8") as f:
+ tomlkit.dump(data, f)
+ io.tool_output("Created Alacritty config with shift+enter binding.")
+ return True
# Define the binding to add
new_binding = {"key": "Return", "mods": "Shift", "chars": "\n"}
@@ -173,7 +189,7 @@ def _update_alacritty(cls, path, io, dry_run=False):
io.tool_output(f"DRY-RUN: Would add binding: {new_binding}")
try:
with open(path, "r", encoding="utf-8") as f:
- data = toml.load(f)
+ data = tomlkit.load(f)
# Check if binding already exists
keyboard_section = data.get("keyboard", {})
@@ -195,7 +211,7 @@ def _update_alacritty(cls, path, io, dry_run=False):
else:
io.tool_output("DRY-RUN: Would update Alacritty config.")
return True
- except toml.TomlDecodeError:
+            except tomlkit.exceptions.ParseError:
io.tool_output("DRY-RUN: Error: Could not parse Alacritty TOML file.")
return False
except Exception as e:
@@ -206,7 +222,7 @@ def _update_alacritty(cls, path, io, dry_run=False):
try:
with open(path, "r", encoding="utf-8") as f:
- data = toml.load(f)
+ data = tomlkit.load(f)
# Ensure keyboard section exists
if "keyboard" not in data:
@@ -237,12 +253,12 @@ def _update_alacritty(cls, path, io, dry_run=False):
# Write back to file
with open(path, "w", encoding="utf-8") as f:
- toml.dump(data, f)
+ tomlkit.dump(data, f)
io.tool_output("Updated Alacritty config.")
return True
- except toml.TomlDecodeError:
+        except tomlkit.exceptions.ParseError:
io.tool_output("Error: Could not parse Alacritty TOML file. Is it valid TOML?")
return False
except Exception as e:
@@ -252,9 +268,21 @@ def _update_alacritty(cls, path, io, dry_run=False):
@classmethod
def _update_kitty(cls, path, io, dry_run=False):
"""Appends the Kitty mapping if not present."""
+ # Create directory if it doesn't exist
+ path.parent.mkdir(parents=True, exist_ok=True)
+
if not path.exists():
- io.tool_output(f"Skipping Kitty: File not found at {path}")
- return False
+ if dry_run:
+ io.tool_output(f"DRY-RUN: Would create Kitty config at {path}")
+ io.tool_output(f"DRY-RUN: Would add binding:\n{cls.KITTY_BINDING.strip()}")
+ return True
+ else:
+ io.tool_output(f"Creating Kitty config at {path}")
+ # Create Kitty config with shift+enter binding
+ with open(path, "w", encoding="utf-8") as f:
+ f.write(cls.KITTY_BINDING)
+ io.tool_output("Created Kitty config with shift+enter binding.")
+ return True
if dry_run:
io.tool_output(f"DRY-RUN: Would check Kitty config at {path}")
@@ -290,9 +318,6 @@ def _update_kitty(cls, path, io, dry_run=False):
@classmethod
def _update_konsole(cls, path, io, dry_run=False):
"""Updates Konsole keytab configuration with shift+enter binding."""
- if not os.environ.get("KONSOLE_VERSION"):
- return False
-
default_keytab_path = Path(__file__).parent / "terminal_data" / "linux.keytab"
if not path.exists():
@@ -354,10 +379,30 @@ def _update_konsole(cls, path, io, dry_run=False):
@classmethod
def _update_windows_terminal(cls, path, io, dry_run=False):
"""Parses JSON, adds action to 'actions' list and keybinding to 'keybindings' list."""
- if not path or not path.exists():
- io.tool_output("Skipping Windows Terminal: File not found.")
+ if not path:
+ io.tool_output("Skipping Windows Terminal: No path available.")
return False
+ # Create directory if it doesn't exist
+ path.parent.mkdir(parents=True, exist_ok=True)
+
+ if not path.exists():
+ if dry_run:
+ io.tool_output(f"DRY-RUN: Would create Windows Terminal config at {path}")
+ io.tool_output(f"DRY-RUN: Would add action: {json.dumps(cls.WT_ACTION, indent=2)}")
+ io.tool_output(
+ f"DRY-RUN: Would add keybinding: {json.dumps(cls.WT_KEYBINDING, indent=2)}"
+ )
+ return True
+ else:
+ io.tool_output(f"Creating Windows Terminal config at {path}")
+ # Create minimal Windows Terminal config with shift+enter binding
+ data = {"actions": [cls.WT_ACTION], "keybindings": [cls.WT_KEYBINDING]}
+ with open(path, "w", encoding="utf-8") as f:
+ json.dump(data, f, indent=4)
+ io.tool_output("Created Windows Terminal config with shift+enter binding.")
+ return True
+
if dry_run:
io.tool_output(f"DRY-RUN: Would check Windows Terminal config at {path}")
io.tool_output(f"DRY-RUN: Would add action: {json.dumps(cls.WT_ACTION, indent=2)}")
@@ -744,42 +789,118 @@ async def execute(cls, io, coder, args, **kwargs):
paths = cls._get_config_paths()
# Check for dry-run mode
- dry_run = args == "dry_run"
+ dry_run = args == "dry_run" or (hasattr(args, "dry_run") and args.dry_run)
if dry_run:
io.tool_output("DRY-RUN MODE: Showing what would be changed without modifying files\n")
+ # Explicit yes required should always be true for terminal setup
+ explicit_yes_required = True
+
updated = False
if "alacritty" in paths:
- if cls._update_alacritty(paths["alacritty"], io, dry_run=dry_run):
- updated = True
+ path = paths["alacritty"]
+ if path.exists():
+ question = f"Update Alacritty config at {path} to add shift+enter binding?"
+ else:
+ question = f"Create new Alacritty config at {path} with shift+enter binding?"
+
+ if dry_run or await coder.io.confirm_ask(
+ question, default="y", explicit_yes_required=explicit_yes_required
+ ):
+ if cls._update_alacritty(path, io, dry_run=dry_run):
+ updated = True
if "kitty" in paths:
- if cls._update_kitty(paths["kitty"], io, dry_run=dry_run):
- updated = True
+ path = paths["kitty"]
+ if path.exists():
+ question = f"Update Kitty config at {path} to add shift+enter binding?"
+ else:
+ question = f"Create new Kitty config at {path} with shift+enter binding?"
+
+ if dry_run or await coder.io.confirm_ask(
+ question, default="y", explicit_yes_required=explicit_yes_required
+ ):
+ if cls._update_kitty(path, io, dry_run=dry_run):
+ updated = True
if "konsole" in paths:
- if cls._update_konsole(paths["konsole"], io, dry_run=dry_run):
- updated = True
+ path = paths["konsole"]
+ if path.exists():
+ question = f"Update Konsole keytab at {path} to add shift+enter binding?"
+ else:
+ question = f"Create new Konsole keytab at {path} with shift+enter binding?"
+
+ if dry_run or await coder.io.confirm_ask(
+ question, default="y", explicit_yes_required=explicit_yes_required
+ ):
+ if cls._update_konsole(path, io, dry_run=dry_run):
+ updated = True
if "windows_terminal" in paths:
- if cls._update_windows_terminal(paths["windows_terminal"], io, dry_run=dry_run):
- updated = True
+ path = paths["windows_terminal"]
+ if path.exists():
+ question = f"Update Windows Terminal config at {path} to add shift+enter binding?"
+ else:
+ question = f"Create new Windows Terminal config at {path} with shift+enter binding?"
+
+ if dry_run or await coder.io.confirm_ask(
+ question, default="y", explicit_yes_required=explicit_yes_required
+ ):
+ if cls._update_windows_terminal(path, io, dry_run=dry_run):
+ updated = True
if "vscode" in paths:
- if cls._update_vscode(paths["vscode"], io, dry_run=dry_run):
- updated = True
- # Also update VS Code settings.json for proper shift+enter support
- if cls._update_vscode_settings(paths["vscode"], io, dry_run=dry_run):
- updated = True
+ path = paths["vscode"]
+ if path.exists():
+ question = f"Update VS Code keybindings at {path} to add shift+enter binding?"
+ else:
+ question = f"Create new VS Code keybindings at {path} with shift+enter binding?"
+
+ if dry_run or await coder.io.confirm_ask(
+ question, default="y", explicit_yes_required=explicit_yes_required
+ ):
+ if cls._update_vscode(path, io, dry_run=dry_run):
+ updated = True
+ # Also update VS Code settings.json for proper shift+enter support
+ settings_question = (
+ f"Update VS Code settings at {path.parent}/settings.json for proper shift+enter"
+ " support?"
+ )
+ if dry_run or await coder.io.confirm_ask(
+ settings_question, default="y", explicit_yes_required=explicit_yes_required
+ ):
+ if cls._update_vscode_settings(path, io, dry_run=dry_run):
+ updated = True
if "vscode_windows" in paths:
+ path = paths["vscode_windows"]
io.tool_output("Found Windows host VS Code configuration (running in WSL)")
- if cls._update_vscode(paths["vscode_windows"], io, dry_run=dry_run):
- updated = True
- # Also update Windows host VS Code settings.json
- if cls._update_vscode_settings(paths["vscode_windows"], io, dry_run=dry_run):
- updated = True
+ if path.exists():
+ question = (
+ f"Update Windows host VS Code keybindings at {path} to add shift+enter binding?"
+ )
+ else:
+ question = (
+ f"Create new Windows host VS Code keybindings at {path} with shift+enter"
+ " binding?"
+ )
+
+ if dry_run or await coder.io.confirm_ask(
+ question, default="y", explicit_yes_required=explicit_yes_required
+ ):
+ if cls._update_vscode(path, io, dry_run=dry_run):
+ updated = True
+ # Also update Windows host VS Code settings.json
+ settings_question = (
+ f"Update Windows host VS Code settings at {path.parent}/settings.json for"
+ " proper shift+enter support?"
+ )
+ if dry_run or await coder.io.confirm_ask(
+ settings_question, default="y", explicit_yes_required=explicit_yes_required
+ ):
+ if cls._update_vscode_settings(path, io, dry_run=dry_run):
+ updated = True
if dry_run:
if updated:
diff --git a/cecli/exceptions.py b/cecli/exceptions.py
index 0bedc5327e4..df067f684e7 100644
--- a/cecli/exceptions.py
+++ b/cecli/exceptions.py
@@ -36,6 +36,7 @@ class ExInfo:
ExInfo("JSONSchemaValidationError", True, None),
ExInfo("NotFoundError", False, None),
ExInfo("OpenAIError", True, None),
+ ExInfo("PermissionDeniedError", False, None),
ExInfo(
"RateLimitError",
True,
diff --git a/cecli/helpers/background_commands.py b/cecli/helpers/background_commands.py
index 72cef13d434..39f3d8f991b 100644
--- a/cecli/helpers/background_commands.py
+++ b/cecli/helpers/background_commands.py
@@ -94,7 +94,9 @@ class BackgroundProcess:
Represents a background process with output capture.
"""
- def __init__(self, command: str, process: subprocess.Popen, buffer: CircularBuffer):
+ def __init__(
+ self, command: str, process: subprocess.Popen, buffer: CircularBuffer, persist: bool = False
+ ):
"""
Initialize background process wrapper.
@@ -102,12 +104,18 @@ def __init__(self, command: str, process: subprocess.Popen, buffer: CircularBuff
command: Original command string
process: Subprocess.Popen object
buffer: CircularBuffer for output storage
+ persist: If True, output buffer won't be cleared when read
"""
+ import time
+
self.command = command
self.process = process
self.buffer = buffer
self.reader_thread = None
self.last_read_position = 0
+ self.start_time = time.time()
+ self.end_time = None
+ self.persist = persist
self._start_output_reader()
def _start_output_reader(self) -> None:
@@ -173,6 +181,8 @@ def stop(self, timeout: float = 5.0) -> Tuple[bool, str, Optional[int]]:
Returns:
Tuple of (success, output, exit_code)
"""
+ import time
+
try:
# Try SIGTERM first
self.process.terminate()
@@ -181,6 +191,7 @@ def stop(self, timeout: float = 5.0) -> Tuple[bool, str, Optional[int]]:
# Get final output
output = self.get_output(clear=True)
exit_code = self.process.returncode
+ self.end_time = time.time()
return True, output, exit_code
@@ -191,6 +202,7 @@ def stop(self, timeout: float = 5.0) -> Tuple[bool, str, Optional[int]]:
output = self.get_output(clear=True)
exit_code = self.process.returncode
+ self.end_time = time.time()
return True, output, exit_code
@@ -247,6 +259,9 @@ def start_background_command(
verbose: bool = False,
cwd: Optional[str] = None,
max_buffer_size: int = 4096,
+ existing_process: Optional[subprocess.Popen] = None,
+ existing_buffer: Optional[CircularBuffer] = None,
+ persist: bool = False,
) -> str:
"""
Start a command in background.
@@ -256,29 +271,35 @@ def start_background_command(
verbose: Whether to print verbose output
cwd: Working directory for command
max_buffer_size: Maximum buffer size for output
+ existing_process: Optional existing subprocess.Popen to register
+ existing_buffer: Optional existing CircularBuffer to use
+ persist: If True, output buffer won't be cleared when read
Returns:
Command key for future reference
"""
try:
- # Create output buffer
- buffer = CircularBuffer(max_size=max_buffer_size)
-
- # Start process with pipes for output capture
- process = subprocess.Popen(
- command,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- stdin=subprocess.DEVNULL, # No stdin for background commands
- cwd=cwd,
- text=True, # Use text mode for easier handling
- bufsize=1, # Line buffered
- universal_newlines=True,
- )
+ # Use existing buffer or create new one
+ buffer = existing_buffer or CircularBuffer(max_size=max_buffer_size)
+
+ # Use existing process or start new one
+ if existing_process:
+ process = existing_process
+ else:
+ process = subprocess.Popen(
+ command,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.DEVNULL, # No stdin for background commands
+ cwd=cwd,
+ text=True, # Use text mode for easier handling
+ bufsize=1, # Line buffered
+ universal_newlines=True,
+ )
# Create background process wrapper
- bg_process = BackgroundProcess(command, process, buffer)
+ bg_process = BackgroundProcess(command, process, buffer, persist=persist)
# Generate unique key and store
command_key = cls._generate_command_key(command)
@@ -352,7 +373,7 @@ def get_all_command_outputs(cls, clear: bool = False) -> Dict[str, str]:
Get output from all background commands (running or recently finished).
Args:
- clear: If True, clear buffers after reading
+ clear: If True, clear buffers after reading (unless persist=True)
Returns:
Dictionary mapping command keys to their output
@@ -360,8 +381,10 @@ def get_all_command_outputs(cls, clear: bool = False) -> Dict[str, str]:
with cls._lock:
outputs = {}
for command_key, bg_process in cls._background_commands.items():
+ # Don't clear if persist flag is set
+ should_clear = clear and not getattr(bg_process, "persist", False)
if clear:
- output = bg_process.get_output(clear=True)
+ output = bg_process.get_output(clear=should_clear)
else:
output = bg_process.get_new_output()
if output.strip():
@@ -417,8 +440,10 @@ def list_background_commands(cls) -> Dict[str, Dict[str, any]]:
List all background commands with their status.
Returns:
- Dictionary with command information
+ Dictionary with command information including timestamps
"""
+ import time
+
with cls._lock:
result = {}
for command_key, bg_process in cls._background_commands.items():
@@ -426,5 +451,16 @@ def list_background_commands(cls) -> Dict[str, Dict[str, any]]:
"command": bg_process.command,
"running": bg_process.is_alive(),
"buffer_size": bg_process.buffer.size(),
+ "start_time": bg_process.start_time,
+ "end_time": bg_process.end_time,
+ "duration": (
+ time.time() - bg_process.start_time
+ if bg_process.is_alive()
+ else (
+ bg_process.end_time - bg_process.start_time
+ if bg_process.end_time
+ else None
+ )
+ ),
}
return result
diff --git a/cecli/io.py b/cecli/io.py
index 9aef7d983a2..96c756bf411 100644
--- a/cecli/io.py
+++ b/cecli/io.py
@@ -1046,8 +1046,9 @@ async def stop_input_task(self):
input_task = self.input_task
self.input_task = None
try:
- input_task.cancel()
- await input_task
+ if input_task:
+ input_task.cancel()
+ await input_task
except (
asyncio.CancelledError,
Exception,
@@ -1064,8 +1065,9 @@ async def stop_output_task(self):
output_task = self.output_task
self.output_task = None
try:
- output_task.cancel()
- await output_task
+ if output_task:
+ output_task.cancel()
+ await output_task
except (
asyncio.CancelledError,
EOFError,
diff --git a/cecli/main.py b/cecli/main.py
index af7b96c97b0..33310f01d09 100644
--- a/cecli/main.py
+++ b/cecli/main.py
@@ -1314,6 +1314,78 @@ def load_slow_imports(swallow=True):
raise e
+async def task(message, setting=None, env=None, force_git_root=None, return_coder=True):
+ """
+ Programmatically run cecli with a message and optional settings.
+
+ Args:
+ message: The message/command to send to cecli (e.g., "Add a function to process data")
+ setting: Optional YAML string of settings to override configuration
+ env: Optional dict of environment variables to set
+ force_git_root: Optional path to force as git root
+ return_coder: Whether to return the coder object (default: True)
+
+ Returns:
+ The coder object if return_coder=True, otherwise exit code
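+
+    Example (illustrative sketch; assumes an aider-style YAML setting key):
+        coder = await task(
+            "Add type hints to cecli/utils.py",
+            setting="auto-commits: false",
+            env={"OPENAI_API_KEY": "..."},
+        )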
+ """
+ import json
+ import os
+ import tempfile
+
+ import yaml
+
+ # Set environment variables if provided
+ if env:
+ for key, value in env.items():
+ os.environ[key] = str(value)
+
+ # Build argv with message as --message flag
+ argv = ["--message", message]
+
+ # Handle settings via temporary config file
+ if setting:
+ # Parse YAML to validate
+ settings_dict = yaml.safe_load(setting)
+
+        # Force non-interactive defaults: plain output, no TUI, and automatic confirmations
+ settings_dict["pretty"] = False
+ settings_dict["tui"] = False
+ settings_dict["yes-always"] = True
+
+ # Add agent-config with skip_cli_confirmations: true as JSON string
+ # Merge with existing agent-config if present
+ agent_config = {"skip_cli_confirmations": True}
+ if "agent-config" in settings_dict:
+ try:
+ existing_config = json.loads(settings_dict["agent-config"])
+ agent_config.update(existing_config)
+ except (json.JSONDecodeError, TypeError):
+ # If existing agent-config is not valid JSON, overwrite it
+ pass
+ settings_dict["agent-config"] = json.dumps(agent_config)
+
+ # Create temporary config file
+ with tempfile.NamedTemporaryFile(mode="w", suffix=".yml", delete=False) as f:
+ yaml.dump(settings_dict, f)
+ config_file = f.name
+
+ # Add config file argument
+ argv = ["--config", config_file] + argv
+ else:
+ config_file = None
+
+ try:
+ # Run main_async with constructed arguments
+ result = await main_async(
+ argv=argv, force_git_root=force_git_root, return_coder=return_coder
+ )
+ return result
+ finally:
+ # Clean up temporary config file
+ if config_file and os.path.exists(config_file):
+ os.unlink(config_file)
+
+
async def graceful_exit(coder=None, exit_code=0):
sys.settrace(None)
if coder:
diff --git a/cecli/prompts/agent.yml b/cecli/prompts/agent.yml
index 79c475e293a..b4deab401b1 100644
--- a/cecli/prompts/agent.yml
+++ b/cecli/prompts/agent.yml
@@ -19,11 +19,11 @@ files_no_full_files_with_repo_map_reply: |
main_system: |
## Core Directives
- - **Role**: Act as an expert software engineer.
+ - **Role**: Act as an expert software engineer.
- **Act Proactively**: Autonomously use file discovery and context management tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ContextManager`) to gather information and fulfill the user's request. Chain tool calls across multiple turns to continue exploration.
- **Be Decisive**: Trust that your initial findings are valid. Refrain from asking the same question or searching for the same term in multiple similar ways.
- **Be Concise**: Keep all responses brief and direct (1-3 sentences). Avoid preamble, postamble, and unnecessary explanations. Do not repeat yourself.
- - **Be Careful**: Break updates down into smaller, more manageable chunks. Focus on one thing at a time.
+  - **Be Efficient**: Some tools allow you to perform multiple actions at a time; use them to work quickly and effectively. Respect their usage limits.
## Core Workflow
diff --git a/cecli/tools/command.py b/cecli/tools/command.py
index 4122223d60c..7b8826ce031 100644
--- a/cecli/tools/command.py
+++ b/cecli/tools/command.py
@@ -37,6 +37,7 @@ class Tool(BaseTool):
async def execute(cls, coder, command_string, background=False, stop_background=None, **kwargs):
"""
Execute a shell command, optionally in background.
+        Commands run with a timeout based on agent_config['command_timeout'] (default: 30 seconds).
"""
# Handle stopping background commands
if stop_background:
@@ -52,8 +53,15 @@ async def execute(cls, coder, command_string, background=False, stop_background=
if not confirmed:
return "Command execution skipped by user."
+ # Determine timeout from agent_config (default: 30 seconds)
+ timeout = 0
+ if hasattr(coder, "agent_config"):
+ timeout = coder.agent_config.get("command_timeout", 30)
+
if background:
return await cls._execute_background(coder, command_string)
+ elif timeout > 0:
+ return await cls._execute_with_timeout(coder, command_string, timeout)
else:
return await cls._execute_foreground(coder, command_string)
@@ -96,6 +104,108 @@ async def _execute_background(cls, coder, command_string):
"Output will be injected into chat stream."
)
+ @classmethod
+ async def _execute_with_timeout(cls, coder, command_string, timeout):
+ """
+ Execute command with timeout. If timeout elapses, move to background.
+
+ IMPORTANT: To avoid pipe conflicts, this method never reads the pipes directly.
+ Instead, BackgroundCommandManager handles all pipe reading from the start.
+ """
+ import asyncio
+ import subprocess
+ import time
+
+ from cecli.helpers.background_commands import CircularBuffer
+
+ coder.io.tool_output(f"⚙️ Executing shell command with {timeout}s timeout: {command_string}")
+
+ # Create output buffer
+ buffer = CircularBuffer(max_size=4096)
+
+ # Start process with pipes for output capture
+ process = subprocess.Popen(
+ command_string,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.DEVNULL,
+ cwd=coder.root,
+ text=True,
+ bufsize=1,
+ universal_newlines=True,
+ )
+
+ # Immediately register with background manager to handle pipe reading
+ command_key = BackgroundCommandManager.start_background_command(
+ command_string,
+ verbose=coder.verbose,
+ cwd=coder.root,
+ max_buffer_size=4096,
+ existing_process=process,
+ existing_buffer=buffer,
+ persist=True,
+ )
+
+ # Now monitor the process with timeout
+ start_time = time.time()
+
+ while True:
+ # Check if process has completed
+ exit_code = process.poll()
+ if exit_code is not None:
+ # Process completed
+ output = buffer.get_all(clear=True)
+
+ # Format output
+ output_content = output or ""
+ output_limit = coder.large_file_token_threshold
+ if len(output_content) > output_limit:
+ output_content = (
+ output_content[:output_limit]
+ + f"\n... (output truncated at {output_limit} characters, based on"
+ " large_file_token_threshold)"
+ )
+
+ # Remove from background tracking since it's done
+ BackgroundCommandManager.stop_background_command(command_key)
+
+ # Output to TUI console if TUI exists (same logic as _execute_foreground)
+ if coder.tui and coder.tui():
+ coder.io.tool_output(output_content)
+
+ if exit_code == 0:
+ return (
+ f"Shell command completed within {timeout}s timeout (exit code 0)."
+ f" Output:\n{output_content}"
+ )
+ else:
+ return (
+ f"Shell command completed within {timeout}s timeout with exit code"
+ f" {exit_code}. Output:\n{output_content}"
+ )
+
+ # Check if timeout has expired
+ elapsed = time.time() - start_time
+ if elapsed >= timeout:
+ # Timeout elapsed, process continues in background
+ coder.io.tool_output(
+ f"⏱️ Command exceeded {timeout}s timeout, continuing in background..."
+ )
+
+ # Get any output captured so far
+ current_output = buffer.get_all(clear=False)
+
+ return (
+ f"Command exceeded {timeout}s timeout and is continuing in background.\n"
+ f"Command key: {command_key}\n"
+ f"Output captured so far:\n{current_output}\n"
+ )
+
+ # Wait a bit before checking again
+ await asyncio.sleep(1)
+
@classmethod
async def _execute_foreground(cls, coder, command_string):
"""
diff --git a/cecli/tui/app.py b/cecli/tui/app.py
index 264949b8c89..5b012759488 100644
--- a/cecli/tui/app.py
+++ b/cecli/tui/app.py
@@ -203,6 +203,10 @@ def _get_config(self):
"stop": "escape",
"cycle_forward": "tab",
"cycle_backward": "shift+tab",
+ "input_start": "ctrl+home",
+ "input_end": "ctrl+end",
+ "output_up": "shift+pageup",
+ "output_down": "shift+pagedown",
"editor": "ctrl+o",
"history": "ctrl+r",
"focus": "ctrl+f",
@@ -572,6 +576,10 @@ def show_error(self, message):
status_bar = self.query_one("#status-bar", StatusBar)
status_bar.show_notification(f"Error: {message}", severity="error", timeout=10)
+ def on_resize(self) -> None:
+ file_list = self.query_one("#file-list", FileList)
+ file_list.update_files(file_list.chat_files)
+
def on_input_area_text_changed(self, message: InputArea.TextChanged):
"""Handle text changes in input area."""
self._update_key_hints_for_commands(message.text, is_completion=False)
@@ -649,6 +657,16 @@ def action_clear_output(self):
self.worker.coder.show_announcements()
+ def action_output_up(self):
+ """Scroll the output area up one page."""
+ output_container = self.query_one("#output", OutputContainer)
+ output_container.action_page_up()
+
+ def action_output_down(self):
+ """Scroll the output area down one page."""
+ output_container = self.query_one("#output", OutputContainer)
+ output_container.action_page_down()
+
def action_interrupt(self):
"""Interrupt the current task."""
if self.worker:
diff --git a/cecli/tui/widgets/file_list.py b/cecli/tui/widgets/file_list.py
index 05eb2f7e96e..a36fad11dc9 100644
--- a/cecli/tui/widgets/file_list.py
+++ b/cecli/tui/widgets/file_list.py
@@ -6,8 +6,12 @@
class FileList(Static):
"""Widget to display the list of files in chat."""
+ chat_files = None
+
def update_files(self, chat_files):
"""Update the file list display."""
+ self.chat_files = chat_files
+
if not chat_files:
self.update("")
return
@@ -29,26 +33,19 @@ def update_files(self, chat_files):
else:
self.remove_class("empty")
- # For very large numbers of files, use a summary display
- if total_files > 20:
- read_only_count = len(rel_read_only_fnames or [])
- stub_file_count = len(rel_read_only_stubs_fnames or [])
- editable_count = len([f for f in rel_fnames if f not in (rel_read_only_fnames or [])])
-
- summary = f"{editable_count} editable file(s)"
- if read_only_count > 0:
- summary += f", {read_only_count} read-only file(s)"
- if stub_file_count > 0:
- summary += f", {stub_file_count} stub file(s)"
- summary += " (use /ls to list all files)"
- self.update(summary)
- return
+ # Get available width
+ try:
+ available_width = self.app.size.width
+ except Exception:
+ available_width = 80
- renderables = []
+ # Calculate text width for each section
+ show_readonly_summary = False
+ show_editable_summary = False
# Handle read-only files
+ ro_paths = []
if rel_read_only_fnames or rel_read_only_stubs_fnames:
- ro_paths = []
# Regular read-only files
for rel_path in sorted(rel_read_only_fnames or []):
ro_paths.append(rel_path)
@@ -57,8 +54,17 @@ def update_files(self, chat_files):
ro_paths.append(f"{rel_path} (stub)")
if ro_paths:
- files_with_label = ["Readonly:"] + ro_paths
- renderables.append(Columns(files_with_label))
+ # Calculate total characters needed for readonly section
+ # "Readonly:" label + sum of all path lengths
+ total_chars = len("Readonly:") + sum(len(path) for path in ro_paths)
+ # Account for padding (12 chars total across 2 rows) and spaces between files (n-1)
+ # Total available characters across 2 rows = available_width * 2
+ # If total_chars > available_width * 2 - 12 - (n-1), show summary
+ total_available = available_width * 1.5 - 12 - (len(ro_paths) - 1)
+
+ # If total_available is negative or zero, definitely show summary
+ if total_available <= 0 or total_chars > total_available:
+ show_readonly_summary = True
# Handle editable files
editable_files = [
@@ -66,7 +72,47 @@ def update_files(self, chat_files):
for f in sorted(rel_fnames)
if f not in rel_read_only_fnames and f not in rel_read_only_stubs_fnames
]
+
if editable_files:
+ # Calculate total characters needed for editable section
+ show_editable_label = bool(rel_read_only_fnames or rel_read_only_stubs_fnames)
+ total_chars = sum(len(path) for path in editable_files)
+ if show_editable_label:
+ total_chars += len("Editable:")
+
+ # Account for padding (12 chars) and the single space between adjacent files (n-1)
+ # Budget roughly a row and a half of characters: available_width * 1.5
+
+ total_available = available_width * 1.5 - 12 - (len(editable_files) - 1)
+
+ # If total_available is negative or zero, definitely show summary
+ if total_available <= 0 or total_chars > total_available:
+ show_editable_summary = True
+
+ # If either section needs summary, show overall summary
+ if show_readonly_summary or show_editable_summary or total_files > 20:
+ read_only_count = len(rel_read_only_fnames or [])
+ stub_file_count = len(rel_read_only_stubs_fnames or [])
+ editable_count = len([f for f in rel_fnames if f not in (rel_read_only_fnames or [])])
+
+ summary = f"{editable_count} editable file(s)"
+ if read_only_count > 0:
+ summary += f", {read_only_count} read-only file(s)"
+ if stub_file_count > 0:
+ summary += f", {stub_file_count} stub file(s)"
+ summary += " (use /ls to list all files)"
+ self.update(summary)
+ return
+
+ renderables = []
+
+ # Handle read-only files
+ if ro_paths and not show_readonly_summary:
+ files_with_label = ["Readonly:"] + ro_paths
+ renderables.append(Columns(files_with_label))
+
+ # Handle editable files
+ if editable_files and not show_editable_summary:
files_with_label = editable_files
if rel_read_only_fnames or rel_read_only_stubs_fnames:
files_with_label = ["Editable:"] + editable_files
diff --git a/cecli/tui/widgets/input_area.py b/cecli/tui/widgets/input_area.py
index 0ae743b2811..30fdfc5cd8b 100644
--- a/cecli/tui/widgets/input_area.py
+++ b/cecli/tui/widgets/input_area.py
@@ -264,6 +264,14 @@ def on_key(self, event) -> None:
return
+ if self.app.is_key_for("output_up", event.key):
+ self.app.action_output_up()
+ return
+
+ if self.app.is_key_for("output_down", event.key):
+ self.app.action_output_down()
+ return
+
if self.app.is_key_for("cycle_forward", event.key):
event.stop()
event.prevent_default()
@@ -301,6 +309,19 @@ def on_key(self, event) -> None:
event.stop()
event.prevent_default()
self._history_next()
+ elif self.app.is_key_for("input_start", event.key):
+ # Move cursor to start of first line
+ event.stop()
+ event.prevent_default()
+ self.cursor_location = (0, 0)
+ elif self.app.is_key_for("input_end", event.key):
+ # Move cursor to end of last line
+ event.stop()
+ event.prevent_default()
+ lines = self.text.split("\n")
+ row = max(0, len(lines) - 1)
+ col = len(lines[row])
+ self.cursor_location = (row, col)
def on_text_area_changed(self, event) -> None:
"""Update completions as user types."""
diff --git a/cecli/utils.py b/cecli/utils.py
index 478764cf94a..aac9b20b597 100644
--- a/cecli/utils.py
+++ b/cecli/utils.py
@@ -442,68 +442,118 @@ def printable_shell_command(cmd_list):
def split_concatenated_json(s: str) -> list[str]:
"""
- Splits a string containing one or more concatenated JSON objects.
+ Splits a string containing one or more concatenated JSON objects
+ and returns them as a list of raw strings.
"""
- try:
- json.loads(s)
- return [s]
- except json.JSONDecodeError:
- pass
-
res = []
- i = 0
+ decoder = json.JSONDecoder()
+ idx = 0
s_len = len(s)
- while i < s_len:
- # skip leading whitespace
- while i < s_len and s[i].isspace():
- i += 1
- if i >= s_len:
- break
- start_char = s[i]
- if start_char == "{":
- end_char = "}"
- elif start_char == "[":
- end_char = "]"
- else:
- # Doesn't start with a JSON object/array, so we can't parse it as a stream.
- # Return the rest of the string as a single chunk.
- res.append(s[i:])
+ while idx < s_len:
+ # 1. Use a regex-free str.find() to jump to the next potential JSON start,
+ # replacing the previous manual 'while s[i].isspace()' skip loop
+ brace_idx = s.find("{", idx)
+ bracket_idx = s.find("[", idx)
+
+ # Determine the earliest starting point
+ if brace_idx == -1 and bracket_idx == -1:
+ # No more JSON documents found, but check for trailing text
+ remainder = s[idx:].strip()
+ if remainder:
+ res.append(s[idx:])
break
- start_index = i
- stack_depth = 0
- in_string = False
- escape = False
+ # Set idx to the first '{' or '[' found
+ start_index = (
+ min(brace_idx, bracket_idx)
+ if (brace_idx != -1 and bracket_idx != -1)
+ else max(brace_idx, bracket_idx)
+ )
- for j in range(start_index, s_len):
- char = s[j]
+ try:
+ # 2. Let the C-optimized parser find the end of the object
+ _, end_idx = decoder.raw_decode(s, start_index)
- if escape:
- escape = False
- continue
+ # 3. Slice the original string and add to results
+ res.append(s[start_index:end_idx])
- if char == "\\":
- escape = True
- continue
+ # Move our pointer to the end of the last document
+ idx = end_idx
- if char == '"':
- in_string = not in_string
+ except json.JSONDecodeError:
+ # If it looks like JSON but fails (e.g. malformed),
+ # we skip this character and try to find the next valid start
+ idx = start_index + 1
- if in_string:
- continue
+ return res
- if char == start_char:
- stack_depth += 1
- elif char == end_char:
- stack_depth -= 1
- if stack_depth == 0:
- res.append(s[start_index : j + 1])
- i = j + 1
- break
- else:
- # Unclosed object, add the remainder as the last chunk
- res.append(s[start_index:])
+
+def parse_concatenated_json(s: str) -> list:
+ objs = []
+ decoder = json.JSONDecoder()
+ idx = 0
+ s_len = len(s)
+
+ while idx < s_len:
+ # Jump to the next potential start of a JSON object or array
+ # This skips whitespace, commas, or other noise between documents
+ brace_idx = s.find("{", idx)
+ bracket_idx = s.find("[", idx)
+
+ # Determine which one comes first
+ if brace_idx == -1 and bracket_idx == -1:
break
+ elif brace_idx == -1:
+ idx = bracket_idx
+ elif bracket_idx == -1:
+ idx = brace_idx
+ else:
+ idx = min(brace_idx, bracket_idx)
- return res
+ try:
+ # raw_decode attempts to parse starting exactly at idx
+ obj, end_idx = decoder.raw_decode(s, idx)
+ objs.append(obj)
+ idx = end_idx
+ except json.JSONDecodeError:
+ # If it's a false start (like a { inside a non-JSON string),
+ # skip it and keep looking
+ idx += 1
+
+ return objs
+
+
+def copy_tool_call(tool_call):
+ """
+ Copies a tool call whether it's a Pydantic model, SimpleNamespace, or dict.
+ """
+ from types import SimpleNamespace
+
+ if hasattr(tool_call, "model_copy"):
+ return tool_call.model_copy(deep=True)
+ if isinstance(tool_call, SimpleNamespace):
+ import copy
+
+ return copy.deepcopy(tool_call)
+ if isinstance(tool_call, dict):
+ import copy
+
+ return copy.deepcopy(tool_call)
+ return tool_call
+
+
+def tool_call_to_dict(tool_call):
+ """
+ Converts any tool-call representation to a dict.
+ """
+ if hasattr(tool_call, "model_dump"):
+ return tool_call.model_dump()
+ if hasattr(tool_call, "__dict__"):
+ res = dict(tool_call.__dict__)
+ if "function" in res and hasattr(res["function"], "__dict__"):
+ res["function"] = dict(res["function"].__dict__)
+ return res
+ if isinstance(tool_call, dict):
+ return tool_call
+ return {}
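
Both JSON helpers above lean on `json.JSONDecoder.raw_decode`, which parses a single document starting at a given index and also returns the offset just past it, so concatenated JSON can be walked without the old hand-rolled brace/quote state machine. A small usage sketch of the building block, with the results the two helpers should produce for the same input:

```python
import json

decoder = json.JSONDecoder()
payload = '{"a": 1}[2, 3] noise {"b": {"nested": "}"}}'

# raw_decode returns the parsed object plus the offset just past it
obj, end = decoder.raw_decode(payload, 0)
print(obj, end)  # {'a': 1} 8

# split_concatenated_json(payload) -> ['{"a": 1}', '[2, 3]', '{"b": {"nested": "}"}}']
# parse_concatenated_json(payload) -> [{'a': 1}, [2, 3], {'b': {'nested': '}'}}]
```
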
diff --git a/cecli/website/docs/config/agent-mode.md b/cecli/website/docs/config/agent-mode.md
index 00f7f652100..3e714afc14d 100644
--- a/cecli/website/docs/config/agent-mode.md
+++ b/cecli/website/docs/config/agent-mode.md
@@ -169,6 +169,7 @@ agent-config:
# Performance and behavior settings
large_file_token_threshold: 12500 # Token threshold for large file warnings
skip_cli_confirmations: false # YOLO mode - be brave and let the LLM cook
+ command_timeout: 30 # Seconds to wait for a shell command to finish before it is moved to the background
# Skills configuration (see Skills documentation for details)
skills_paths: ["~/my-skills", "./project-skills"] # Directories to search for skills
diff --git a/cecli/website/docs/config/tui.md b/cecli/website/docs/config/tui.md
index ec4838ecba4..b9143584c88 100644
--- a/cecli/website/docs/config/tui.md
+++ b/cecli/website/docs/config/tui.md
@@ -59,6 +59,10 @@ tui-config:
history: "ctrl+r"
cycle_forward: "tab"
cycle_backward: "shift+tab"
+ input_start: "ctrl+home"
+ input_end: "ctrl+end"
+ output_up: "shift+pageup"
+ output_down: "shift+pagedown"
focus: "ctrl+f"
cancel: "ctrl+c"
clear: "ctrl+l"
@@ -80,6 +84,10 @@ The TUI provides customizable key bindings for all major actions. The default ke
| Search History | `ctrl+r` | Search through history for previous commands (requires fzf to be installed) |
| Cycle Forward | `tab` | Cycle forward through completion suggestions |
| Cycle Backward | `shift+tab` | Cycle backward through completion suggestions |
+| Input Start | `ctrl+home` | Move cursor to start of first line |
+| Input End | `ctrl+end` | Move cursor to end of last line |
+| Output Up | `shift+pageup` | Scroll the output area up one page |
+| Output Down | `shift+pagedown` | Scroll the output area down one page |
| Focus | `ctrl+f` | Focus the input area |
| Clear | `ctrl+l` | Clear the output area |
| Quit | `ctrl+q` | Exit the TUI |
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 75004668169..c21553bb880 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -46,12 +46,12 @@ FROM base AS aider-ce
ENV CECLI_DOCKER_IMAGE=dustinwashington/aider-ce
# Copy requirements files
-COPY requirements.txt /tmp/aider/
-COPY requirements/ /tmp/aider/requirements/
+COPY requirements.txt /tmp/cecli/
+COPY requirements/ /tmp/cecli/requirements/
# Install dependencies as root
-RUN uv pip install --no-cache-dir -r /tmp/aider/requirements.txt && \
- rm -rf /tmp/aider
+RUN uv pip install --no-cache-dir -r /tmp/cecli/requirements.txt && \
+ rm -rf /tmp/cecli
# Install playwright browsers
RUN uv pip install --no-cache-dir playwright && \
diff --git a/requirements.txt b/requirements.txt
index 7c49985e697..f1c3371c9ff 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -113,10 +113,6 @@ grep-ast==0.9.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
-grpcio==1.67.1
- # via
- # -c requirements/common-constraints.txt
- # litellm
h11==0.16.0
# via
# -c requirements/common-constraints.txt
@@ -186,7 +182,7 @@ linkify-it-py==2.0.3
# via
# -c requirements/common-constraints.txt
# markdown-it-py
-litellm==1.80.11
+litellm==1.81.11
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
@@ -421,6 +417,10 @@ tokenizers==0.22.1
# via
# -c requirements/common-constraints.txt
# litellm
+tomlkit==0.14.0
+ # via
+ # -c requirements/common-constraints.txt
+ # -r requirements/requirements.in
tqdm==4.67.1
# via
# -c requirements/common-constraints.txt
@@ -453,16 +453,12 @@ truststore==0.10.4
typing-extensions==4.15.0
# via
# -c requirements/common-constraints.txt
- # aiosignal
- # anyio
# beautifulsoup4
# huggingface-hub
# mcp
# openai
# pydantic
# pydantic-core
- # referencing
- # starlette
# textual
# typing-inspection
typing-inspection==0.4.2
diff --git a/requirements/common-constraints.txt b/requirements/common-constraints.txt
index 902d4066915..fe131426c2b 100644
--- a/requirements/common-constraints.txt
+++ b/requirements/common-constraints.txt
@@ -34,8 +34,6 @@ beautifulsoup4==4.14.2
# via -r requirements/requirements.in
build==1.3.0
# via pip-tools
-cachetools==6.2.2
- # via google-auth
certifi==2025.11.12
# via
# httpcore
@@ -115,27 +113,6 @@ gitdb==4.0.12
# via gitpython
gitpython==3.1.45
# via -r requirements/requirements.in
-google-api-core[grpc]==2.28.1
- # via
- # google-cloud-bigquery
- # google-cloud-core
-google-auth==2.43.0
- # via
- # google-api-core
- # google-cloud-bigquery
- # google-cloud-core
-google-cloud-bigquery==3.38.0
- # via -r requirements/requirements-dev.in
-google-cloud-core==2.5.0
- # via google-cloud-bigquery
-google-crc32c==1.7.1
- # via google-resumable-media
-google-resumable-media==2.8.0
- # via google-cloud-bigquery
-googleapis-common-protos==1.72.0
- # via
- # google-api-core
- # grpcio-status
greenlet==3.2.4
# via
# playwright
@@ -144,13 +121,6 @@ grep-ast==0.9.0
# via -r requirements/requirements.in
griffe==1.15.0
# via banks
-grpcio==1.67.1
- # via
- # google-api-core
- # grpcio-status
- # litellm
-grpcio-status==1.67.1
- # via google-api-core
h11==0.16.0
# via
# httpcore
@@ -215,7 +185,7 @@ kiwisolver==1.4.9
# via matplotlib
linkify-it-py==2.0.3
# via markdown-it-py
-litellm==1.80.11
+litellm==1.81.11
# via -r requirements/requirements.in
llama-index-core==0.14.8
# via llama-index-embeddings-huggingface
@@ -325,7 +295,6 @@ packaging==25.0
# via
# -r requirements/requirements.in
# build
- # google-cloud-bigquery
# huggingface-hub
# marshmallow
# matplotlib
@@ -367,24 +336,10 @@ propcache==0.4.1
# via
# aiohttp
# yarl
-proto-plus==1.26.1
- # via google-api-core
-protobuf==5.29.5
- # via
- # google-api-core
- # googleapis-common-protos
- # grpcio-status
- # proto-plus
psutil==7.1.3
# via -r requirements/requirements.in
ptyprocess==0.7.0
# via pexpect
-pyasn1==0.6.1
- # via
- # pyasn1-modules
- # rsa
-pyasn1-modules==0.4.2
- # via google-auth
pycodestyle==2.14.0
# via flake8
pycparser==2.23
@@ -440,7 +395,6 @@ pytest-mock==3.15.1
# via -r requirements/requirements-dev.in
python-dateutil==2.9.0.post0
# via
- # google-cloud-bigquery
# matplotlib
# pandas
python-dotenv==1.2.1
@@ -469,8 +423,6 @@ regex==2025.11.3
# transformers
requests==2.32.5
# via
- # google-api-core
- # google-cloud-bigquery
# huggingface-hub
# llama-index-core
# tiktoken
@@ -484,8 +436,6 @@ rpds-py==0.29.0
# via
# jsonschema
# referencing
-rsa==4.9.1
- # via google-auth
rustworkx==0.17.1
# via -r requirements/requirements.in
safetensors==0.7.0
@@ -548,6 +498,8 @@ tokenizers==0.22.1
# via
# litellm
# transformers
+tomlkit==0.14.0
+ # via -r requirements/requirements.in
torch==2.9.1
# via sentence-transformers
tqdm==4.67.1
@@ -580,9 +532,7 @@ typer==0.20.0
# via -r requirements/requirements-dev.in
typing-extensions==4.15.0
# via
- # aiosignal
# aiosqlite
- # anyio
# beautifulsoup4
# huggingface-hub
# llama-index-core
@@ -592,11 +542,8 @@ typing-extensions==4.15.0
# pydantic
# pydantic-core
# pyee
- # pytest-asyncio
- # referencing
# sentence-transformers
# sqlalchemy
- # starlette
# textual
# torch
# typer
diff --git a/requirements/requirements-dev.in b/requirements/requirements-dev.in
index e52d0cdc30a..e0c1ab29136 100644
--- a/requirements/requirements-dev.in
+++ b/requirements/requirements-dev.in
@@ -13,4 +13,3 @@ cogapp
semver
codespell
uv
-google-cloud-bigquery
diff --git a/requirements/requirements-dev.txt b/requirements/requirements-dev.txt
index d1cebb9dff6..33ca4ea3643 100644
--- a/requirements/requirements-dev.txt
+++ b/requirements/requirements-dev.txt
@@ -4,22 +4,10 @@ build==1.3.0
# via
# -c requirements/common-constraints.txt
# pip-tools
-cachetools==6.2.2
- # via
- # -c requirements/common-constraints.txt
- # google-auth
-certifi==2025.11.12
- # via
- # -c requirements/common-constraints.txt
- # requests
cfgv==3.5.0
# via
# -c requirements/common-constraints.txt
# pre-commit
-charset-normalizer==3.4.4
- # via
- # -c requirements/common-constraints.txt
- # requests
click==8.3.1
# via
# -c requirements/common-constraints.txt
@@ -53,55 +41,10 @@ fonttools==4.60.1
# via
# -c requirements/common-constraints.txt
# matplotlib
-google-api-core[grpc]==2.28.1
- # via
- # -c requirements/common-constraints.txt
- # google-cloud-bigquery
- # google-cloud-core
-google-auth==2.43.0
- # via
- # -c requirements/common-constraints.txt
- # google-api-core
- # google-cloud-bigquery
- # google-cloud-core
-google-cloud-bigquery==3.38.0
- # via
- # -c requirements/common-constraints.txt
- # -r requirements/requirements-dev.in
-google-cloud-core==2.5.0
- # via
- # -c requirements/common-constraints.txt
- # google-cloud-bigquery
-google-crc32c==1.7.1
- # via
- # -c requirements/common-constraints.txt
- # google-resumable-media
-google-resumable-media==2.8.0
- # via
- # -c requirements/common-constraints.txt
- # google-cloud-bigquery
-googleapis-common-protos==1.72.0
- # via
- # -c requirements/common-constraints.txt
- # google-api-core
- # grpcio-status
-grpcio==1.67.1
- # via
- # -c requirements/common-constraints.txt
- # google-api-core
- # grpcio-status
-grpcio-status==1.67.1
- # via
- # -c requirements/common-constraints.txt
- # google-api-core
identify==2.6.15
# via
# -c requirements/common-constraints.txt
# pre-commit
-idna==3.11
- # via
- # -c requirements/common-constraints.txt
- # requests
imgcat==0.6.0
# via
# -c requirements/common-constraints.txt
@@ -144,7 +87,6 @@ packaging==25.0
# via
# -c requirements/common-constraints.txt
# build
- # google-cloud-bigquery
# matplotlib
# pytest
pandas==2.3.3
@@ -175,26 +117,6 @@ pre-commit==4.5.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
-proto-plus==1.26.1
- # via
- # -c requirements/common-constraints.txt
- # google-api-core
-protobuf==5.29.5
- # via
- # -c requirements/common-constraints.txt
- # google-api-core
- # googleapis-common-protos
- # grpcio-status
- # proto-plus
-pyasn1==0.6.1
- # via
- # -c requirements/common-constraints.txt
- # pyasn1-modules
- # rsa
-pyasn1-modules==0.4.2
- # via
- # -c requirements/common-constraints.txt
- # google-auth
pygments==2.19.2
# via
# -c requirements/common-constraints.txt
@@ -231,7 +153,6 @@ pytest-mock==3.15.1
python-dateutil==2.9.0.post0
# via
# -c requirements/common-constraints.txt
- # google-cloud-bigquery
# matplotlib
# pandas
pytz==2025.2
@@ -242,19 +163,10 @@ pyyaml==6.0.3
# via
# -c requirements/common-constraints.txt
# pre-commit
-requests==2.32.5
- # via
- # -c requirements/common-constraints.txt
- # google-api-core
- # google-cloud-bigquery
rich==14.2.0
# via
# -c requirements/common-constraints.txt
# typer
-rsa==4.9.1
- # via
- # -c requirements/common-constraints.txt
- # google-auth
semver==3.0.4
# via
# -c requirements/common-constraints.txt
@@ -278,16 +190,11 @@ typer==0.20.0
typing-extensions==4.15.0
# via
# -c requirements/common-constraints.txt
- # pytest-asyncio
# typer
tzdata==2025.2
# via
# -c requirements/common-constraints.txt
# pandas
-urllib3==2.5.0
- # via
- # -c requirements/common-constraints.txt
- # requests
uv==0.9.11
# via
# -c requirements/common-constraints.txt
diff --git a/requirements/requirements-help.txt b/requirements/requirements-help.txt
index 1b1b4ce392d..21afcbcd2b5 100644
--- a/requirements/requirements-help.txt
+++ b/requirements/requirements-help.txt
@@ -382,9 +382,7 @@ triton==3.5.1
typing-extensions==4.15.0
# via
# -c requirements/common-constraints.txt
- # aiosignal
# aiosqlite
- # anyio
# huggingface-hub
# llama-index-core
# llama-index-workflows
diff --git a/requirements/requirements.in b/requirements/requirements.in
index c94e5d2a524..ef6b1783c88 100644
--- a/requirements/requirements.in
+++ b/requirements/requirements.in
@@ -29,6 +29,7 @@ shtab>=1.7.2
oslex>=0.1.3
mcp>=1.24.0
textual>=6.0.0
+tomlkit>=0.14.0
truststore
xxhash>=3.6.0