fix: add MCP server config to ExpandChatSession for feature creation #88
Changes from all commits: 358e7f2, c38b6d7, e04ffce, 5afd57f
**`api/database.py`**
```diff
@@ -58,6 +58,16 @@ class Feature(Base):
     # NULL/empty = no dependencies (backwards compatible)
     dependencies = Column(JSON, nullable=True, default=None)

+    # DEPRECATED: Kept for backward compatibility with existing databases.
+    # These columns are no longer used but prevent NOT NULL violations on insert.
+    # Removed in commit 486979c but old databases still have them.
+    testing_in_progress = Column(Boolean, nullable=False, default=False)
+    last_tested_at = Column(DateTime, nullable=True, default=None)
+
+    # Track how many times this feature has been regression tested.
+    # Used for least-tested-first selection to ensure even test distribution.
+    regression_count = Column(Integer, nullable=False, default=0, index=True)
+
     def to_dict(self) -> dict:
         """Convert feature to dictionary for JSON serialization."""
         return {

@@ -72,6 +82,8 @@ def to_dict(self) -> dict:
             "in_progress": self.in_progress if self.in_progress is not None else False,
             # Dependencies: NULL/empty treated as empty list for backwards compat
             "dependencies": self.dependencies if self.dependencies else [],
+            # Regression test tracking
+            "regression_count": self.regression_count or 0,
         }

     def get_dependencies_safe(self) -> list[int]:

@@ -247,6 +259,22 @@ def _migrate_add_testing_columns(engine) -> None:
         pass


+def _migrate_add_regression_count_column(engine) -> None:
+    """Add regression_count column to existing databases that don't have it.
+
+    This column tracks how many times each feature has been regression tested,
+    enabling least-tested-first selection for even test distribution.
+    """
+    with engine.connect() as conn:
+        # Check if column exists
+        result = conn.execute(text("PRAGMA table_info(features)"))
+        columns = [row[1] for row in result.fetchall()]
+
+        if "regression_count" not in columns:
+            conn.execute(text("ALTER TABLE features ADD COLUMN regression_count INTEGER DEFAULT 0"))
+            conn.commit()
```
**Review comment on lines +262 to +275**

Align the migration with the model's NOT NULL constraint. Line 274 adds the column without `NOT NULL`, so the migrated schema is looser than the model's `nullable=False` declaration.

🔧 Suggested migration tweak

```diff
-            conn.execute(text("ALTER TABLE features ADD COLUMN regression_count INTEGER DEFAULT 0"))
+            conn.execute(text("ALTER TABLE features ADD COLUMN regression_count INTEGER NOT NULL DEFAULT 0"))
```
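An aside for context (not part of the review): SQLite accepts `ADD COLUMN ... NOT NULL` only when a non-null `DEFAULT` is supplied, which the suggested statement does. A quick standalone check with the stdlib driver:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE features (id INTEGER PRIMARY KEY)")
# Succeeds because the NOT NULL column carries a non-null DEFAULT;
# dropping the DEFAULT makes SQLite raise an OperationalError.
conn.execute(
    "ALTER TABLE features ADD COLUMN regression_count INTEGER NOT NULL DEFAULT 0"
)
print(conn.execute("PRAGMA table_info(features)").fetchall())
```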
```diff
+
+
 def _is_network_path(path: Path) -> bool:
     """Detect if path is on a network filesystem.

@@ -372,6 +400,7 @@ def create_database(project_dir: Path) -> tuple:
     _migrate_fix_null_boolean_fields(engine)
     _migrate_add_dependencies_column(engine)
     _migrate_add_testing_columns(engine)
+    _migrate_add_regression_count_column(engine)

     # Migrate to add schedules tables
     _migrate_add_schedules_tables(engine)
```
**`server/services/expand_chat_session.py`**
```diff
@@ -10,6 +10,7 @@
 import json
 import logging
 import os
+import re
 import shutil
 import sys
 import threading

@@ -68,8 +69,9 @@ class ExpandChatSession:

     Unlike SpecChatSession which writes spec files, this session:
     1. Reads existing app_spec.txt for context
     2. Chats with the user to define new features
-    3. Claude creates features via the feature_create_bulk MCP tool
+    2. Parses feature definitions from Claude's output
+    3. Creates features via REST API
     4. Tracks which features were created during the session
     """

     def __init__(self, project_name: str, project_dir: Path):

@@ -291,8 +293,7 @@ async def _query_claude(
         """
         Internal method to query Claude and stream responses.

-        Feature creation is handled by Claude calling the feature_create_bulk
-        MCP tool directly -- no text parsing needed.
+        Handles text responses and detects feature creation blocks.
         """
         if not self.client:
             return

@@ -316,6 +317,9 @@ async def _query_claude(
         else:
             await self.client.query(message)

+        # Accumulate full response to detect feature blocks
+        full_response = ""
+
         # Stream the response
         async for msg in self.client.receive_response():
             msg_type = type(msg).__name__

@@ -327,6 +331,7 @@ async def _query_claude(
                 if block_type == "TextBlock" and hasattr(block, "text"):
                     text = block.text
                     if text:
+                        full_response += text
                         yield {"type": "text", "content": text}

         self.messages.append({
```
```diff
@@ -335,6 +340,123 @@ async def _query_claude(
             "timestamp": datetime.now().isoformat()
         })

+        # Check for feature creation blocks in full response (handle multiple blocks)
+        features_matches = re.findall(
+            r'<features_to_create>\s*(\[[\s\S]*?\])\s*</features_to_create>',
+            full_response
+        )
+
+        if features_matches:
+            # Collect all features from all blocks, deduplicating by name
+            all_features: list[dict] = []
+            seen_names: set[str] = set()
+
+            for features_json in features_matches:
+                try:
+                    features_data = json.loads(features_json)
+
+                    if features_data and isinstance(features_data, list):
+                        for feature in features_data:
+                            name = feature.get("name", "")
+                            if name and name not in seen_names:
+                                seen_names.add(name)
+                                all_features.append(feature)
+                except json.JSONDecodeError as e:
+                    logger.error(f"Failed to parse features JSON block: {e}")
+                    # Continue processing other blocks
+
+            if all_features:
+                try:
+                    # Create all deduplicated features
+                    created = await self._create_features_bulk(all_features)
+
+                    if created:
+                        self.features_created += len(created)
+                        self.created_feature_ids.extend([f["id"] for f in created])
+
+                        yield {
+                            "type": "features_created",
+                            "count": len(created),
+                            "features": created
+                        }
+
+                        logger.info(f"Created {len(created)} features for {self.project_name}")
+                except Exception:
+                    logger.exception("Failed to create features")
+                    yield {
+                        "type": "error",
+                        "content": "Failed to create features"
+                    }
+
+    async def _create_features_bulk(self, features: list[dict]) -> list[dict]:
+        """
+        Create features directly in the database.
+
+        Args:
+            features: List of feature dictionaries with category, name, description, steps
+
+        Returns:
+            List of created feature dictionaries with IDs
+
+        Note:
+            Uses flush() to get IDs immediately without re-querying by priority range,
+            which could pick up rows from concurrent writers.
+        """
+        # Import database classes
+        import sys
+        root = Path(__file__).parent.parent.parent
+        if str(root) not in sys.path:
+            sys.path.insert(0, str(root))
+
+        from api.database import Feature, create_database
+
+        # Get database session
+        _, SessionLocal = create_database(self.project_dir)
+        session = SessionLocal()
+
+        try:
+            # Determine starting priority
+            max_priority_feature = session.query(Feature).order_by(Feature.priority.desc()).first()
+            current_priority = (max_priority_feature.priority + 1) if max_priority_feature else 1
+
+            created_rows: list = []
+
+            for f in features:
+                db_feature = Feature(
+                    priority=current_priority,
+                    category=f.get("category", "functional"),
+                    name=f.get("name", "Unnamed feature"),
+                    description=f.get("description", ""),
+                    steps=f.get("steps", []),
+                    passes=False,
+                    in_progress=False,
+                )
+                session.add(db_feature)
+                created_rows.append(db_feature)
+                current_priority += 1
+
+            # Flush to get IDs without relying on priority range query
+            session.flush()
+
+            # Build result from the flushed objects (IDs are now populated)
+            created_features = [
+                {
+                    "id": db_feature.id,
+                    "name": db_feature.name,
+                    "category": db_feature.category,
+                }
+                for db_feature in created_rows
+            ]
+
+            session.commit()
+            return created_features
+
+        except Exception:
+            session.rollback()
+            raise
+        finally:
+            session.close()
```
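The docstring's flush-for-IDs note is the central design choice here: `session.flush()` emits the pending INSERTs inside the open transaction, so autoincrement primary keys are populated on the ORM objects before `commit()`, with no need to re-query by priority range (which could race with concurrent writers). A minimal standalone sketch with a hypothetical model:

```python
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Item(Base):  # hypothetical stand-in for Feature
    __tablename__ = "items"
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

row = Item(name="demo")
session.add(row)
print(row.id)    # None -- no INSERT has been issued yet
session.flush()  # emit the INSERT inside the open transaction
print(row.id)    # 1 -- primary key populated before commit
session.commit()
```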
**Review comment on lines +391 to +458**
Move synchronous database operations out of the async context to prevent event loop blocking. This async method performs blocking I/O (query, flush, commit) with SQLAlchemy's synchronous ORM; under concurrent load, these operations will stall the event loop. Use `asyncio.to_thread` to offload the work to a worker thread.

🛠️ Example: offload sync DB work to a thread

```diff
-    async def _create_features_bulk(self, features: list[dict]) -> list[dict]:
+    async def _create_features_bulk(self, features: list[dict]) -> list[dict]:
+        return await asyncio.to_thread(self._create_features_bulk_sync, features)
+
+    def _create_features_bulk_sync(self, features: list[dict]) -> list[dict]:
         """
         Create features directly in the database.
         """
         # Import database classes
         import sys
         root = Path(__file__).parent.parent.parent
```
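For readers unfamiliar with the suggested call: `asyncio.to_thread` (Python 3.9+) runs a plain function in the default thread-pool executor and awaits its result, so the event loop stays responsive while the thread blocks. A self-contained toy version, with a sleep standing in for the blocking DB work:

```python
import asyncio
import time

def blocking_db_write(n: int) -> int:
    time.sleep(0.1)  # stand-in for session.query / flush / commit
    return n

async def main() -> None:
    # Both writes run in worker threads; the loop stays free to keep
    # streaming Claude's response in the meantime.
    results = await asyncio.gather(
        asyncio.to_thread(blocking_db_write, 1),
        asyncio.to_thread(blocking_db_write, 2),
    )
    print(results)  # [1, 2]

asyncio.run(main())
```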
```diff
     def get_features_created(self) -> int:
         """Get the total number of features created in this session."""
         return self.features_created
```
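To make the parsing contract concrete, here is a standalone sketch of the `<features_to_create>` protocol the new code expects. The regex and the field names (`category`, `name`, `description`, `steps`) come from the diff; the sample response text itself is hypothetical:

```python
import json
import re

sample_response = """Sounds good, creating those now.
<features_to_create>
[
  {
    "category": "functional",
    "name": "Export to CSV",
    "description": "Allow exporting the feature list as a CSV file.",
    "steps": ["Open the list", "Click Export", "Verify the download"]
  }
]
</features_to_create>"""

# Same pattern as _query_claude: non-greedy, so multiple blocks in one
# response each capture their own JSON array.
matches = re.findall(
    r'<features_to_create>\s*(\[[\s\S]*?\])\s*</features_to_create>',
    sample_response,
)

for block in matches:
    for feature in json.loads(block):
        print(feature["name"])  # -> Export to CSV
```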
**Review comment**

Migration gap: databases created after column removal won't have these columns.

The columns are added to the model, but `_migrate_add_testing_columns()` (lines 241-253) is a no-op. This creates a problem for databases that were created after the columns were removed in commit 486979c but before this PR: they won't have these columns, and INSERT operations will fail. Consider updating `_migrate_add_testing_columns` to actually add the columns if missing (the reviewer attached a collapsed proposed fix).
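The collapsed proposed fix is not included above; a sketch of what it would plausibly look like, modeled on `_migrate_add_regression_count_column` from this PR (column names and types taken from the model; treat this as an illustration, not the reviewer's exact code):

```python
from sqlalchemy import text

def _migrate_add_testing_columns(engine) -> None:
    """Add the deprecated testing columns to databases created without them."""
    with engine.connect() as conn:
        result = conn.execute(text("PRAGMA table_info(features)"))
        columns = [row[1] for row in result.fetchall()]

        # Types mirror the model: Boolean NOT NULL default False,
        # nullable DateTime with no default.
        if "testing_in_progress" not in columns:
            conn.execute(text(
                "ALTER TABLE features ADD COLUMN testing_in_progress BOOLEAN NOT NULL DEFAULT 0"
            ))
        if "last_tested_at" not in columns:
            conn.execute(text("ALTER TABLE features ADD COLUMN last_tested_at DATETIME"))
        conn.commit()
```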