4 changes: 2 additions & 2 deletions .github/workflows/ci.yml
@@ -3,8 +3,6 @@ name: Push CI
 on:
   push:
     branches: [main]
-  pull_request:
-    branches: [main]
 
 jobs:
   repo-guards:
@@ -37,6 +35,8 @@ jobs:
       - uses: actions/setup-python@v5
         with:
          python-version: '3.11'
+          cache: pip
+          cache-dependency-path: requirements.txt
       - name: Install dependencies
         run: pip install -r requirements.txt
       - name: Lint with ruff
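
Note: cache: pip and cache-dependency-path are built-in inputs of actions/setup-python@v5. They cache pip's download cache, keyed on the hash of requirements.txt, so dependencies still install on every run but wheels are not re-downloaded. Dropping the pull_request trigger leaves this workflow running only on pushes to main, matching its "Push CI" name.
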
8 changes: 3 additions & 5 deletions ollama_detection.py
@@ -8,17 +8,15 @@
 This is useful when Ollama is auto-started by the system on different ports.
 """
 
-import httpx
 import os
 import socket
-from typing import Optional
+
+import httpx
 
 # Common Ollama ports (default is 11434, but can run on others)
 COMMON_OLLAMA_PORTS = [11434, 36199, 11435, 8000, 8080, 5000]
 
 
-def detect_ollama_url() -> Optional[str]:
+def detect_ollama_url() -> str | None:
     """
     Automatically detect running Ollama instance.
 
@@ -81,7 +79,7 @@ def _test_ollama_connection(url: str, timeout: float = 1.0) -> bool:
         return False
 
 
-def get_ollama_url_sync() -> Optional[str]:
+def get_ollama_url_sync() -> str | None:
     """
     Synchronous Ollama detection.
     Use this in non-async contexts.
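
Note: str | None is the PEP 604 union spelling, valid at runtime since Python 3.10, which is what lets the typing.Optional import go away; the httpx import also moves below the stdlib imports per isort grouping. A minimal sketch of the equivalence (the function names here are illustrative only):

    from typing import Optional

    def old_style() -> Optional[str]:  # pre-3.10 spelling
        return None

    def new_style() -> str | None:  # PEP 604, Python 3.10+
        return None

    # The two annotations describe the same type:
    assert Optional[str] == (str | None)
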
17 changes: 9 additions & 8 deletions parallel_orchestrator.py
@@ -23,9 +23,10 @@
 import subprocess
 import sys
 import threading
-from datetime import datetime, timezone
+from collections.abc import Callable
+from datetime import UTC, datetime
 from pathlib import Path
-from typing import Callable, Literal
+from typing import Literal
 
 from api.database import Feature, create_database
 from api.dependency_resolver import are_dependencies_satisfied, compute_scheduling_scores
@@ -53,12 +54,12 @@ def start_session(self):
         with self._lock:
             self._session_started = True
             with open(self.log_file, "w") as f:
-                f.write(f"=== Orchestrator Debug Log Started: {datetime.now().isoformat()} ===\n")
+                f.write(f"=== Orchestrator Debug Log Started: {datetime.now(UTC).isoformat()} ===\n")
                 f.write(f"=== PID: {os.getpid()} ===\n\n")
 
     def log(self, category: str, message: str, **kwargs):
         """Write a timestamped log entry."""
-        timestamp = datetime.now().strftime("%H:%M:%S.%f")[:-3]
+        timestamp = datetime.now(UTC).strftime("%H:%M:%S.%f")[:-3]
         with self._lock:
             with open(self.log_file, "a") as f:
                 f.write(f"[{timestamp}] [{category}] {message}\n")
@@ -183,7 +184,7 @@ def __init__(
         self._failure_counts: dict[int, int] = {}
 
         # Session tracking for logging/debugging
-        self.session_start_time: datetime = None
+        self.session_start_time: datetime = datetime.now(UTC)
 
         # Event signaled when any agent completes, allowing the main loop to wake
         # immediately instead of waiting for the full POLL_INTERVAL timeout.
@@ -688,7 +689,7 @@ async def stream_output():
 
             await asyncio.wait_for(stream_output(), timeout=INITIALIZER_TIMEOUT)
 
-        except asyncio.TimeoutError:
+        except TimeoutError:
             print(f"ERROR: Initializer timed out after {INITIALIZER_TIMEOUT // 60} minutes", flush=True)
             debug_log.log("INIT", "TIMEOUT - Initializer exceeded time limit",
                           timeout_minutes=INITIALIZER_TIMEOUT // 60)
@@ -777,7 +778,7 @@ async def _wait_for_agent_completion(self, timeout: float = POLL_INTERVAL):
             # Event was set - an agent completed. Clear it for the next wait cycle.
             self._agent_completed_event.clear()
             debug_log.log("EVENT", "Woke up immediately - agent completed")
-        except asyncio.TimeoutError:
+        except TimeoutError:
             # Timeout reached without agent completion - this is normal, just check anyway
             pass
 
@@ -923,7 +924,7 @@ async def run_loop(self):
         self._event_loop = asyncio.get_running_loop()
 
         # Track session start for regression testing (UTC for consistency with last_tested_at)
-        self.session_start_time = datetime.now(timezone.utc)
+        self.session_start_time = datetime.now(UTC)
 
         # Start debug logging session FIRST (clears previous logs)
         # Must happen before any debug_log.log() calls
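
Note: both cleanups here are Python 3.11 idioms: datetime.UTC is an alias of datetime.timezone.utc, and asyncio.TimeoutError has been an alias of the builtin TimeoutError since 3.11, so the bare except TimeoutError still catches asyncio.wait_for timeouts. One behavioral change worth flagging: the debug-log timestamps now render in UTC rather than local wall-clock time. A small sketch of both equivalences:

    import asyncio
    from datetime import UTC, timezone

    # UTC is just a shorter alias for timezone.utc (Python 3.11+)
    assert UTC is timezone.utc

    # Since 3.11, asyncio.TimeoutError IS the builtin TimeoutError
    assert asyncio.TimeoutError is TimeoutError

    async def main() -> None:
        try:
            await asyncio.wait_for(asyncio.sleep(10), timeout=0.01)
        except TimeoutError:  # equivalent to asyncio.TimeoutError here
            print("timed out, as expected")

    asyncio.run(main())
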
99 changes: 51 additions & 48 deletions progress.py
@@ -7,12 +7,14 @@
 """
 
 import json
+import logging
 import os
 import sqlite3
 import urllib.request
 from datetime import UTC, datetime
 from pathlib import Path
 
+logger = logging.getLogger(__name__)
 WEBHOOK_URL = os.environ.get("PROGRESS_N8N_WEBHOOK_URL")
 PROGRESS_CACHE_FILE = ".progress_cache"
 
@@ -44,14 +46,14 @@ def has_features(project_dir: Path) -> bool:
         return False
 
     try:
-        conn = sqlite3.connect(db_file)
-        cursor = conn.cursor()
-        cursor.execute("SELECT COUNT(*) FROM features")
-        count = cursor.fetchone()[0]
-        conn.close()
-        return count > 0
-    except Exception:
+        with sqlite3.connect(db_file) as conn:
+            cursor = conn.cursor()
+            cursor.execute("SELECT COUNT(*) FROM features")
+            count = cursor.fetchone()[0]
+            return count > 0
+    except Exception as e:
         # Database exists but can't be read or has no features table
+        logger.debug("Failed to check features in %s: %s", db_file, e)
         return False
 
 
@@ -70,37 +72,37 @@ def count_passing_tests(project_dir: Path) -> tuple[int, int, int]:
         return 0, 0, 0
 
     try:
-        conn = sqlite3.connect(db_file)
-        cursor = conn.cursor()
-        # Single aggregate query instead of 3 separate COUNT queries
-        # Handle case where in_progress column doesn't exist yet (legacy DBs)
-        try:
-            cursor.execute("""
-                SELECT
-                    COUNT(*) as total,
-                    SUM(CASE WHEN passes = 1 THEN 1 ELSE 0 END) as passing,
-                    SUM(CASE WHEN in_progress = 1 THEN 1 ELSE 0 END) as in_progress
-                FROM features
-            """)
-            row = cursor.fetchone()
-            total = row[0] or 0
-            passing = row[1] or 0
-            in_progress = row[2] or 0
-        except sqlite3.OperationalError:
-            # Fallback for databases without in_progress column
-            cursor.execute("""
-                SELECT
-                    COUNT(*) as total,
-                    SUM(CASE WHEN passes = 1 THEN 1 ELSE 0 END) as passing
-                FROM features
-            """)
-            row = cursor.fetchone()
-            total = row[0] or 0
-            passing = row[1] or 0
-            in_progress = 0
-        conn.close()
-        return passing, in_progress, total
-    except Exception:
+        with sqlite3.connect(db_file) as conn:
+            cursor = conn.cursor()
+            # Single aggregate query instead of 3 separate COUNT queries
+            # Handle case where in_progress column doesn't exist yet (legacy DBs)
+            try:
+                cursor.execute("""
+                    SELECT
+                        COUNT(*) as total,
+                        SUM(CASE WHEN passes = 1 THEN 1 ELSE 0 END) as passing,
+                        SUM(CASE WHEN in_progress = 1 THEN 1 ELSE 0 END) as in_progress
+                    FROM features
+                """)
+                row = cursor.fetchone()
+                total = row[0] or 0
+                passing = row[1] or 0
+                in_progress = row[2] or 0
+            except sqlite3.OperationalError:
+                # Fallback for databases without in_progress column
+                cursor.execute("""
+                    SELECT
+                        COUNT(*) as total,
+                        SUM(CASE WHEN passes = 1 THEN 1 ELSE 0 END) as passing
+                    FROM features
+                """)
+                row = cursor.fetchone()
+                total = row[0] or 0
+                passing = row[1] or 0
+                in_progress = 0
+            return passing, in_progress, total
+    except Exception as e:
+        logger.error("Failed to count tests in %s: %s", db_file, e)
         return 0, 0, 0
 
 
@@ -119,13 +121,13 @@ def get_all_passing_features(project_dir: Path) -> list[dict]:
         return []
 
     try:
-        conn = sqlite3.connect(db_file)
-        cursor = conn.cursor()
-        cursor.execute("SELECT id, category, name FROM features WHERE passes = 1 ORDER BY priority ASC")
-        features = [{"id": row[0], "category": row[1], "name": row[2]} for row in cursor.fetchall()]
-        conn.close()
-        return features
-    except Exception:
+        with sqlite3.connect(db_file) as conn:
+            cursor = conn.cursor()
+            cursor.execute("SELECT id, category, name FROM features WHERE passes = 1 ORDER BY priority ASC")
+            features = [{"id": row[0], "category": row[1], "name": row[2]} for row in cursor.fetchall()]
+            return features
+    except Exception as e:
+        logger.error("Failed to get passing features from %s: %s", db_file, e)
         return []
 
 
@@ -144,7 +146,8 @@ def send_progress_webhook(passing: int, total: int, project_dir: Path) -> None:
             cache_data = json.loads(cache_file.read_text())
             previous = cache_data.get("count", 0)
             previous_passing_ids = set(cache_data.get("passing_ids", []))
-        except Exception:
+        except Exception as e:
+            logger.debug("Failed to read progress cache: %s", e)
             previous = 0
 
     # Only notify if progress increased
@@ -191,8 +194,8 @@ def send_progress_webhook(passing: int, total: int, project_dir: Path) -> None:
             headers={"Content-Type": "application/json"},
         )
         urllib.request.urlopen(req, timeout=5)
-    except Exception:
-        pass
+    except Exception as e:
+        logger.warning("Failed to send progress webhook: %s", e)
 
     # Update cache with count and passing IDs
     cache_file.write_text(json.dumps({"count": passing, "passing_ids": current_passing_ids}))
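
Note: one subtlety in the with sqlite3.connect(...) rewrite: a sqlite3.Connection used as a context manager commits or rolls back the pending transaction on exit, but it does not close the connection. For these short-lived read-only helpers that is harmless in practice (the connection is reclaimed when it goes out of scope), but it is not an exact substitute for the conn.close() calls it replaces. A minimal sketch of the semantics:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE features (passes INTEGER)")

    # The context manager scopes a transaction, not the connection:
    with conn:
        conn.execute("INSERT INTO features VALUES (1)")  # committed on exit

    # The connection is still open and usable after the block.
    print(conn.execute("SELECT COUNT(*) FROM features").fetchone()[0])  # -> 1

    conn.close()  # closing remains the caller's responsibility
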
28 changes: 17 additions & 11 deletions prompts.py
@@ -9,9 +9,12 @@
 2. Base template: .claude/templates/{name}.template.md
 """
 
+import logging
 import shutil
 from pathlib import Path
 
+logger = logging.getLogger(__name__)
+
 # Base templates location (generic templates)
 TEMPLATES_DIR = Path(__file__).parent / ".claude" / "templates"
 
@@ -46,16 +49,16 @@ def load_prompt(name: str, project_dir: Path | None = None) -> str:
     if project_path.exists():
         try:
             return project_path.read_text(encoding="utf-8")
-        except (OSError, PermissionError):
-            pass
+        except (OSError, PermissionError) as e:
+            logger.warning("Failed to read project-specific prompt %s: %s", project_path, e)
 
     # 2. Try base template
     template_path = TEMPLATES_DIR / f"{name}.template.md"
     if template_path.exists():
         try:
             return template_path.read_text(encoding="utf-8")
-        except (OSError, PermissionError):
-            pass
+        except (OSError, PermissionError) as e:
+            logger.error("Failed to read base template %s: %s", template_path, e)
 
     raise FileNotFoundError(
         f"Prompt '{name}' not found in:\n"
@@ -212,8 +215,8 @@ def scaffold_project_prompts(project_dir: Path) -> Path:
         try:
             shutil.copy(template_path, dest_path)
             copied_files.append(dest_name)
-        except (OSError, PermissionError):
-            pass
+        except (OSError, PermissionError) as e:
+            logger.warning("Failed to copy template %s to %s: %s", template_name, dest_path, e)
 
     # Copy allowed_commands.yaml template to .autocoder/
     examples_dir = Path(__file__).parent / "examples"
@@ -223,8 +226,8 @@ def scaffold_project_prompts(project_dir: Path) -> Path:
     try:
         shutil.copy(allowed_commands_template, allowed_commands_dest)
         copied_files.append(".autocoder/allowed_commands.yaml")
-    except (OSError, PermissionError):
-        pass
+    except (OSError, PermissionError) as e:
+        logger.warning("Failed to copy allowed_commands.yaml template: %s", e)
 
     if copied_files:
         pass
@@ -257,15 +260,17 @@ def has_project_prompts(project_dir: Path) -> bool:
         try:
             content = legacy_spec.read_text(encoding="utf-8")
             return "<project_specification>" in content
-        except (OSError, PermissionError):
+        except (OSError, PermissionError) as e:
+            logger.debug("Failed to read legacy spec %s: %s", legacy_spec, e)
             return False
         return False
 
     # Check for valid spec content
     try:
         content = app_spec.read_text(encoding="utf-8")
         return "<project_specification>" in content
-    except (OSError, PermissionError):
+    except (OSError, PermissionError) as e:
+        logger.debug("Failed to read app spec %s: %s", app_spec, e)
         return False
 
 
Expand Down Expand Up @@ -294,5 +299,6 @@ def copy_spec_to_project(project_dir: Path) -> None:
try:
shutil.copy(project_spec, spec_dest)
return
except (OSError, PermissionError):
except (OSError, PermissionError) as e:
logger.warning("Failed to copy spec to project root: %s", e)
return
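
Note: the logger = logging.getLogger(__name__) pattern used throughout this PR namespaces records by module and keeps formatting lazy: the %s placeholders are interpolated only if some handler actually emits the record. A minimal sketch of the pattern (risky_read is a hypothetical helper, not part of this codebase):

    import logging

    logger = logging.getLogger(__name__)

    def risky_read(path: str) -> str | None:
        try:
            with open(path, encoding="utf-8") as f:
                return f.read()
        except (OSError, PermissionError) as e:
            # Lazy %-formatting: the message is built only when emitted
            logger.warning("Failed to read %s: %s", path, e)
            return None

    if __name__ == "__main__":
        logging.basicConfig(level=logging.WARNING)
        risky_read("/nonexistent/file")
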
7 changes: 7 additions & 0 deletions pyproject.toml
@@ -13,6 +13,13 @@ ignore = [
     "BLE001", # Do not catch blind exception (common in async code)
 ]
 
+[tool.ruff.lint.per-file-ignores]
+# CLI tools and test files can use print() for user output
+"start*.py" = ["T201"]
+"test_*.py" = ["T201"]
+"client.py" = ["T201"] # User-facing configuration messages
+"parallel_orchestrator.py" = ["T201"] # User-facing orchestrator output
+
 [tool.ruff.lint.isort]
 known-first-party = ["server", "api", "mcp_server"]
 section-order = ["future", "standard-library", "third-party", "first-party", "local-folder"]
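
Note: T201 is ruff's flake8-print rule ("print found"), so these entries exempt the CLI entry points and tests where print is the intended user-facing output. Keys under per-file-ignores are glob patterns matched against file paths, which is why start*.py covers multiple launcher scripts with one entry.
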
8 changes: 4 additions & 4 deletions registry.py
@@ -12,7 +12,7 @@
 import threading
 import time
 from contextlib import contextmanager
-from datetime import datetime
+from datetime import UTC, datetime
 from pathlib import Path
 from typing import Any
 
@@ -258,7 +258,7 @@ def register_project(name: str, path: Path) -> None:
         project = Project(
             name=name,
             path=path.as_posix(),
-            created_at=datetime.now()
+            created_at=datetime.now(UTC)
         )
         session.add(project)
 
@@ -552,12 +552,12 @@ def set_setting(key: str, value: str) -> None:
     setting = session.query(Settings).filter(Settings.key == key).first()
     if setting:
         setting.value = value
-        setting.updated_at = datetime.now()
+        setting.updated_at = datetime.now(UTC)
     else:
         setting = Settings(
             key=key,
             value=value,
-            updated_at=datetime.now()
+            updated_at=datetime.now(UTC)
        )
         session.add(setting)
 
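
Note: this is the same datetime.now(UTC) change as in parallel_orchestrator.py, with one extra wrinkle since these values are persisted via SQLAlchemy: aware datetimes do not compare with naive ones, so any created_at or updated_at values already stored as naive local time need care when compared against new UTC values. A tiny illustration:

    from datetime import UTC, datetime

    naive = datetime.now()     # no tzinfo, local wall clock
    aware = datetime.now(UTC)  # tzinfo=UTC

    try:
        naive < aware
    except TypeError as e:
        print("can't compare naive and aware:", e)
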