diff --git a/.github/workflows/deploy_preview.yml b/.github/workflows/__deploy_preview.yml__ similarity index 100% rename from .github/workflows/deploy_preview.yml rename to .github/workflows/__deploy_preview.yml__ diff --git a/.github/workflows/deploy_prod.yml b/.github/workflows/__deploy_prod.yml__ similarity index 100% rename from .github/workflows/deploy_prod.yml rename to .github/workflows/__deploy_prod.yml__ diff --git a/.github/workflows/_deploy_prod.yml_ b/.github/workflows/_deploy_prod.yml_ deleted file mode 100644 index f9a2a9c..0000000 --- a/.github/workflows/_deploy_prod.yml_ +++ /dev/null @@ -1,27 +0,0 @@ -name: Release to CloudFlare Prod - -on: - push: - branches: - - prod - -permissions: - contents: write - issues: write - pull-requests: write - -jobs: - - deploy_prod: - runs-on: ubuntu-latest - permissions: - contents: read - deployments: write - name: Deploy to DeployStack.io - environment: - name: 'Production' - url: https://deploystack.io/docs - steps: - - name: Checkout - uses: actions/checkout@v4 - - run: echo "Executing webhook to deploy" \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7d110cd..65cefce 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -34,10 +34,7 @@ jobs: run: npm run lint:md - name: Run Lint Links - run: npm run lint:links - - - name: Run Lint Links - run: npm run build + run: npm run lint:links release: name: Run Release diff --git a/README.md b/README.md index 268b310..8a13f21 100644 --- a/README.md +++ b/README.md @@ -1,128 +1,264 @@ # DeployStack Documentation -This repository contains the official documentation site for [DeployStack](https://deploystack.io/docs/), The Complete MCP Management Platform, built with [fumadocs](https://fumadocs.vercel.app/). Visit [deploystack.io](https://deploystack.io) to learn more about our platform. +This repository contains the official documentation for [DeployStack](https://docs.deploystack.io/) - The First MCP-as-a-Service Platform. Built with [Mintlify](https://mintlify.com), our documentation provides a modern, searchable experience for developers building with DeployStack's satellite infrastructure. + +Visit [docs.deploystack.io](https://docs.deploystack.io) to explore the live documentation. ## Technology Stack -- **Framework**: Next.js 15 with App Router -- **Documentation**: Fumadocs for modern docs experience +- **Documentation Platform**: Mintlify - **Content**: MDX (Markdown + React components) -- **Styling**: Tailwind CSS -- **Language**: TypeScript +- **Deployment**: Automatic deployment via Mintlify platform ## Project Structure ```text . -├── docs/ # Documentation content (MDX files) -│ ├── development/ # Development documentation -│ │ ├── backend/ # Backend development guides -│ │ ├── frontend/ # Frontend development guides -│ │ └── gateway/ # Gateway architecture & implementation -│ ├── self-hosted/ # Self-hosting guides -│ ├── deploystack/ # Core DeployStack documentation -│ ├── assets/ # Images and static assets -│ └── ... 
# MCP guides and configuration docs -├── app/ # Next.js app directory (fumadocs framework) -├── lib/ # Documentation utilities & components -└── source.config.ts # Fumadocs configuration +├── general/ # Getting started and core concepts +│ ├── architecture.mdx # System architecture overview +│ ├── teams.mdx # Team management +│ ├── roles.mdx # Role-based access control +│ └── mcp-*.mdx # MCP server guides +├── self-hosted/ # Self-hosting guides +│ ├── quick-start.mdx # Quick start guide +│ ├── setup.mdx # Installation instructions +│ └── docker-compose.mdx # Docker deployment +├── development/ # Development documentation +│ ├── frontend/ # Frontend development guides +│ │ ├── index.mdx # Frontend overview +│ │ ├── ui/ # UI system documentation +│ │ └── ... +│ ├── backend/ # Backend development guides +│ │ ├── index.mdx # Backend overview +│ │ ├── api/ # API documentation +│ │ ├── database/ # Database guides +│ │ └── ... +│ └── satellite/ # Satellite development guides +│ ├── index.mdx # Satellite overview +│ ├── architecture.mdx +│ └── ... +├── assets/ # Images and static assets +│ └── images/ +│ ├── logo/ # Logo files +│ └── ... +├── docs.json # Mintlify configuration +├── index.mdx # Documentation home page +└── README.md # This file ``` -**Note**: The `app/` directory contains the fumadocs framework setup and should not be modified for content changes. All documentation content goes in the `docs/` directory. +## Local Development -## Development Setup +Mintlify provides a local development CLI for previewing documentation changes: ```bash -# Install dependencies -npm ci +# Install Mintlify CLI globally (one-time setup) +npm install -g mintlify + +# Start local development server +mintlify dev + +# The documentation will be available at http://localhost:3000 +``` + +The local server provides: +- Hot reloading for instant content updates +- Navigation preview +- Component rendering +- Dark/light mode testing + +## Writing Documentation + +### Content Format + +All documentation is written in **MDX** (Markdown with JSX components): -# Start documentation development server (http://localhost:3000) -npm run dev +```mdx +--- +title: Your Page Title +description: A brief description of the page content +--- -# Build documentation site for production -npm run build +# Your Page Title -# Start production server -npm run start +Write your content here using standard Markdown syntax. -# Validate documentation quality -npm run lint:md # Markdown linting -npm run lint:links # Link validation + +Use Mintlify components for callouts and special content blocks. + + +## Section Header + +More content here... ``` -## Contributing Guidelines +### Mintlify Components -### Writing Documentation +Mintlify provides built-in components for enhanced documentation: -1. **Content Format**: Write all documentation in MDX format (`.mdx` files) -2. **Location**: Store all content in the `docs/` directory -3. **Navigation**: Use `meta.json` files in each directory to control navigation structure -4. **Assets**: Place images in `docs/assets/images/` with appropriate subdirectories -5. **Links**: Use absolute paths for all references: - - Documentation: `/docs/development/gateway/` - - Images: `/docs/assets/images/example.png` -6. 
**Brand Colors**: Use the primary color (`text-primary`, `bg-primary`) for consistency - avoid introducing other accent colors +**Callouts:** +```mdx +General information +Important information +Helpful tips +Important warnings +Success status +Critical warnings +``` -### Navigation Structure +**Code Groups:** +```mdx + +```bash macOS/Linux +npm install +``` -Fumadocs automatically generates navigation from your file structure and `meta.json` files: +```powershell Windows +npm install +``` + +``` -- Each directory can have a `meta.json` file to configure its appearance in navigation -- File-based routing: `docs/deploystack/index.mdx` becomes `/docs/deploystack` -- Nested directories create hierarchical navigation +**Cards:** +```mdx + + + Begin your DeployStack journey + + + Explore the API documentation + + +``` -### Adding New Content +**Steps:** +```mdx + + + Run `npm install` to install required packages + + + Set up your environment variables + + + Run `npm run dev` to start the development server + + +``` -1. Create new `.mdx` files in the appropriate `docs/` subdirectory -2. Add or update `meta.json` files to control navigation -3. Follow established naming conventions -4. Ensure all links use absolute paths -5. Test locally with `npm run dev` +### Navigation Configuration -### Asset Management +Navigation is controlled via `docs.json`: -For diagrams and images: +- **Tabs**: Top-level navigation sections (General, Self Hosted, Frontend Development, etc.) +- **Groups**: Subsections within each tab +- **Pages**: Individual documentation pages -1. Use [drow.io](https://app.diagrams.net/) for creating diagrams -2. Export as PNG or WebP format -3. Optimize images for web (compress file sizes) -4. Place files in appropriate subdirectories under `docs/assets/images/` +To add a new page: +1. Create the `.mdx` file in the appropriate directory +2. Add the page path to `docs.json` under the relevant group +3. Test locally with `mintlify dev` -## Deployment Process +### Content Guidelines -Our deployment uses a two-branch workflow: +**File Naming:** +- Use kebab-case: `my-new-page.mdx` +- Index files represent the directory: `index.mdx` -- **`main`**: Development branch for content updates and testing -- **`prod`**: Production branch that deploys to [deploystack.io/docs](https://deploystack.io/docs) +**Links:** +- Use absolute paths from documentation root: `/development/backend/api/index` +- Mintlify automatically handles `.mdx` extensions + +**Images:** +- Store in `assets/images/` with logical subdirectories +- Reference with absolute paths: `/assets/images/logo/dark.webp` +- Optimize images before committing (compress file sizes) + +**Frontmatter:** +```yaml +--- +title: Page Title (required) +description: Page description for SEO (required) +--- +``` + +## Asset Management + +### Images and Diagrams + +1. **Diagrams**: Create with [draw.io](https://app.diagrams.net/) +2. **Export Format**: PNG or WebP +3. **Optimization**: Compress images before committing +4. **Location**: Store in `assets/images/` with appropriate subdirectories + +## Contributing Guidelines ### Workflow -1. Create feature branches from `main` -2. Submit pull requests to `main` -3. After approval and merge to `main`, changes are automatically validated -4. Merge to `prod` to deploy to production +1. **Fork or Branch**: Create a feature branch from `main` +2. **Write Content**: Add or update documentation in MDX format +3. **Test Locally**: Run `mintlify dev` to preview changes +4. 
**Submit PR**: Create a pull request to `main` branch +5. **Review**: Wait for review and address feedback +6. **Merge**: Changes are automatically deployed after merge -### Continuous Integration +### Pull Request Guidelines -The CI pipeline includes: +- Write clear commit messages +- Test all links and navigation +- Verify code examples are correct +- Check for spelling and grammar +- Ensure images load correctly +- Preview on mobile and desktop layouts -- Markdown linting and validation -- Link checking to prevent broken links -- Automatic fumadocs build verification -- Production deployment triggers +### Documentation Standards -## Local Development +**Writing Style:** +- Write in clear, concise language +- Use active voice +- Address the reader directly ("you") +- Avoid jargon without explanation +- Include code examples where helpful + +**Code Examples:** +- Include complete, working examples +- Add comments for clarity +- Show expected output when relevant +- Test all code before committing + +**Structure:** +- Start with overview/introduction +- Progress from basic to advanced +- Use descriptive section headers +- Include related documentation links at the end + +## Deployment + +### Automatic Deployment + +Mintlify automatically deploys documentation when changes are merged to `main`: + +- **Trigger**: Push or merge to `main` branch +- **Build**: Mintlify builds the documentation +- **Deploy**: Changes go live at docs.deploystack.io +- **CDN**: Content served via Mintlify's global CDN + +### Branch Strategy + +- **`main`**: Production branch that deploys to docs.deploystack.io +- **Feature Branches**: Create from `main` for new content or updates +- **Pull Requests**: All changes must go through PR review -When running `npm run dev`, the documentation site will be available at `http://localhost:3000`. The fumadocs framework provides: +## Need Help? -- Hot reloading for content changes -- Automatic navigation generation -- Built-in search functionality -- Responsive design -- Dark/light mode support +- 📚 **Documentation**: [docs.deploystack.io](https://docs.deploystack.io) +- 💬 **Discord**: [Join our community](https://discord.gg/42Ce3S7b3b) +- 🐛 **Issues**: [GitHub Issues](https://github.com/deploystackio/documentation/issues) +- 🌐 **Website**: [deploystack.io](https://deploystack.io) +- 🚀 **Dashboard**: [cloud.deploystack.io](https://cloud.deploystack.io) -## 💬 Need Help? +## Links -- 📚 Check our [Documentation](https://deploystack.io/docs) -- 🎯 Report issues on [GitHub](https://github.com/deploystackio/documentation/issues) -- 📧 Join our Discord at [https://discord.gg/UjFWwByB](https://discord.gg/UjFWwByB) +- [Mintlify Documentation](https://mintlify.com/docs) +- [MDX Documentation](https://mdxjs.com/) +- [DeployStack Main Repository](https://github.com/deploystackio/deploystack) +- [DeployStack Changelog](https://deploystack.io/changelog) diff --git a/_DEPRECATED/gateway/api.mdx b/_DEPRECATED/gateway/api.mdx deleted file mode 100644 index 719b609..0000000 --- a/_DEPRECATED/gateway/api.mdx +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: Gateway API Communication -description: Backend communication patterns and URL management for CLI commands -sidebar: API -icon: Globe ---- - -# Gateway API Communication - -The DeployStack Gateway CLI manages backend communication automatically through stored configuration and credential management. This guide covers how CLI commands interact with the backend and manage different environments. 
- -## Backend URL Management - -### Automatic URL Storage - -When users authenticate with the gateway, the backend URL is automatically stored alongside their credentials. This eliminates the need to specify the backend URL for every command after initial login. - -**Storage Location:** -- **Primary**: macOS Keychain, Windows Credential Manager, or Linux Secret Service -- **Fallback**: Encrypted file at `~/.deploystack/credentials.enc` - -The backend URL is stored as part of the `StoredCredentials` object and persists across CLI sessions. - -### URL Resolution Priority - -CLI commands resolve the backend URL using this priority order: - -1. **Command-line override** - `--url` flag when provided -2. **Stored URL** - URL saved during authentication -3. **Default fallback** - `https://cloud.deploystack.io` - -This approach supports both development workflows with local backends and production usage seamlessly. - -### Environment Detection - -The gateway automatically adapts behavior based on the backend URL: - -**Production Mode** (`https://cloud.deploystack.io`): -- Strict HTTPS enforcement -- Full SSL certificate validation -- Standard error messages - -**Development Mode** (localhost or custom URLs): -- HTTP connections allowed for localhost -- Development-specific error messages -- Additional debugging context - -## Command Implementation Patterns - -### Authentication Check - -All API-dependent commands should verify authentication before making requests. The credential storage handles token validation and expiration checking automatically. - -### Backend URL Usage - -Commands should retrieve stored credentials and use the embedded backend URL rather than requiring URL parameters. The URL resolution pattern ensures consistency across all commands. - -### Error Handling - -Different backend environments may return different error formats. Commands should handle both production and development error responses gracefully. - -## API Client Configuration - -### Credential Integration - -The API client accepts stored credentials and automatically extracts the appropriate backend URL. No additional URL configuration is required when credentials contain the backend information. - -### Request Headers - -All authenticated requests include: -- Bearer token authentication -- User-Agent identification -- Content-Type specification - -### Timeout Handling - -Network operations include appropriate timeouts with different values for various operation types: -- OAuth callback operations -- API requests -- Token refresh operations - -## Development Workflow - -### Local Backend Testing - -Developers working with local backends can authenticate once and have all commands automatically use the development server: - -The authentication flow stores the development URL, and subsequent commands use it automatically without additional configuration. - -### URL Override Capability - -Commands maintain `--url` override options for testing different backends or switching environments temporarily without re-authentication. - -### Environment Switching - -To switch between environments, users can either: -- Re-authenticate with a different backend URL -- Use command-line URL overrides for temporary testing - -## Security Considerations - -### URL Validation - -Backend URLs are validated during authentication to ensure they meet security requirements for the target environment. 
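A minimal sketch of how the URL resolution priority and environment-aware validation described above could fit together. The `StoredCredentials` name comes from this page, but the field names, function names, and exact checks below are illustrative assumptions, not the Gateway's actual implementation:

```typescript
// Field and function names are illustrative, not the Gateway's real API.
interface StoredCredentials {
  accessToken: string;
  backendUrl?: string; // saved automatically during login
}

const DEFAULT_BACKEND_URL = 'https://cloud.deploystack.io';

// Resolution priority: --url flag, then stored URL, then the default fallback.
function resolveBackendUrl(flagUrl: string | undefined, creds: StoredCredentials | null): string {
  return flagUrl ?? creds?.backendUrl ?? DEFAULT_BACKEND_URL;
}

// Production backends must use HTTPS; plain HTTP is only tolerated for localhost.
function validateBackendUrl(rawUrl: string): URL {
  const url = new URL(rawUrl);
  const isLocalhost = url.hostname === 'localhost' || url.hostname === '127.0.0.1';
  if (url.protocol !== 'https:' && !isLocalhost) {
    throw new Error(`Backend URL must use HTTPS: ${rawUrl}`);
  }
  return url;
}
```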
- -### Credential Isolation - -Each backend URL maintains separate credential storage, preventing credential leakage between development and production environments. - -### HTTPS Enforcement - -Production environments enforce HTTPS communication, while development environments allow HTTP for localhost testing. - -## Error Response Handling - -### Network Errors - -Commands should provide helpful error messages that include the backend URL being used, especially for development environments where connectivity issues are common. - -### Authentication Errors - -Token expiration and invalid token errors should guide users to re-authenticate, preserving their backend URL preference. - -### Backend-Specific Errors - -Different backend versions or configurations may return varying error formats. Commands should handle these gracefully and provide consistent user experience. - -## Integration Guidelines - -### New Command Development - -When developing new CLI commands that interact with the backend: - -1. Use the credential storage system for authentication -2. Extract backend URL from stored credentials -3. Implement URL override options for flexibility -4. Handle environment-specific error cases -5. Provide clear error messages with backend context - -### API Client Usage - -The DeployStack API client handles most backend communication complexity automatically. Commands should focus on business logic rather than HTTP details. - -### Testing Considerations - -Test commands against both production and development backends to ensure consistent behavior across environments. The URL storage system supports this testing workflow naturally. diff --git a/_DEPRECATED/gateway/caching-system.mdx b/_DEPRECATED/gateway/caching-system.mdx deleted file mode 100644 index bb5da8e..0000000 --- a/_DEPRECATED/gateway/caching-system.mdx +++ /dev/null @@ -1,219 +0,0 @@ ---- -title: Gateway Caching System -description: Team-aware tool caching architecture that enables fast gateway startup and automatic tool discovery across MCP servers -sidebar: Caching System -icon: Database ---- - -import { Card, Cards } from 'fumadocs-ui/components/card'; -import { Zap, Users, RefreshCw, Shield, Clock, HardDrive } from 'lucide-react'; - -# Gateway Caching System - -The DeployStack Gateway implements a sophisticated team-aware caching system that dramatically improves performance by pre-discovering and caching tools from MCP servers. This enables instant gateway startup and seamless tool availability for development teams. - -## Architecture Overview - -The caching system operates on a **cache-as-manifest philosophy** where tools are proactively discovered and stored locally, serving as both a performance optimization and a configuration manifest that defines what should be running versus what is actually running in the persistent background process model. 
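To make the cache-as-manifest idea concrete, here is a hedged sketch of the reconciliation it implies: the cached manifest defines what should be running, and the Gateway can compare that against what is actually running. All names below are illustrative and not taken from the Gateway source:

```typescript
// Illustrative names only; the actual cache schema is described later on this page.
interface CachedManifest {
  servers: { name: string }[];
}

// Servers the manifest says should be running but are not found among
// the currently running background processes.
function findMissingServers(manifest: CachedManifest, running: Set<string>): string[] {
  return manifest.servers
    .map((server) => server.name)
    .filter((name) => !running.has(name));
}
```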
- -## Core Concepts - - - } - title="Fast Gateway Startup" - > - Cached tools enable instant gateway startup without waiting for MCP server discovery - - - } - title="Team-Aware Isolation" - > - Each team's tools are cached separately with complete isolation and security boundaries - - - } - title="Automatic Discovery" - > - Tools are automatically discovered and cached when switching teams or refreshing configurations - - - } - title="Secure Storage" - > - Cache files are stored securely with team-specific access controls and encryption - - - } - title="Intelligent Invalidation" - > - Cache is automatically invalidated based on configuration changes and time-based policies - - - } - title="Fallback Mechanisms" - > - Graceful fallback to cached data when live discovery fails or servers are unavailable - - - -## Cache Architecture - -### Storage Structure -The caching system uses a hierarchical file-based storage approach: - -- **Base Directory**: `~/.deploystack/cache/` -- **Team Isolation**: `teams/{teamId}/` -- **Cache Files**: `tools-cache.json` per team - -This structure ensures complete isolation between teams while providing fast local access to cached tool information. - -### Cache Content -Each team's cache contains: - -- **Tool Definitions**: Complete tool schemas with input parameters and descriptions -- **Server Metadata**: Information about which MCP server provides each tool -- **Namespaced Names**: Tools are namespaced as `serverName-toolName` for conflict resolution -- **Discovery Timestamps**: When each tool was last discovered and validated -- **Configuration Hashes**: Checksums to detect when server configurations change - -## Tool Discovery Workflow - -### Automatic Discovery Triggers -Tool discovery is automatically triggered during: - -- **Team Switching**: When developers switch to a different team context -- **Configuration Refresh**: When MCP server configurations are updated from the cloud -- **Manual Refresh**: When developers explicitly request tool discovery -- **Cache Invalidation**: When cached data becomes stale or invalid - -### Discovery Process -The discovery workflow follows these steps: - -1. **Server Enumeration**: Identify all MCP servers configured for the team -2. **Process Communication**: Connect to already-running MCP server processes as described in [Gateway Process Management](/development/gateway/process-management) -3. **Tool Interrogation**: Query each running server for its available tools using MCP protocol -4. **Schema Extraction**: Extract complete tool schemas including parameters and descriptions -5. **Namespacing**: Apply server-specific namespacing to prevent tool name conflicts -6. **Cache Storage**: Store discovered tools in the team-specific cache file - -**Note**: In the persistent background process model, tool discovery communicates with already-running MCP servers rather than spawning processes specifically for discovery. 
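As a rough illustration of what a per-team cache file might hold, the sketch below follows the cache content and storage structure described above. The field names are assumptions, not the exact on-disk schema; the namespaced tool name mirrors the `serverName-toolName` convention:

```typescript
// Hypothetical shape of ~/.deploystack/cache/teams/{teamId}/tools-cache.json;
// field names are assumptions based on the description above.
interface CachedTool {
  name: string;                          // namespaced as `serverName-toolName`
  description: string;
  inputSchema: Record<string, unknown>;  // complete tool schema from discovery
  serverName: string;                    // which MCP server provides the tool
  discoveredAt: string;                  // ISO timestamp of last discovery
}

interface TeamToolsCache {
  teamId: string;
  configHash: string;                    // checksum of server configurations
  tools: CachedTool[];
}

const exampleCache: TeamToolsCache = {
  teamId: 'team-123',
  configHash: 'placeholder-hash',
  tools: [
    {
      name: 'brightdata-search_engine',
      description: 'Search the web',
      inputSchema: { type: 'object', properties: { query: { type: 'string' } } },
      serverName: 'brightdata',
      discoveredAt: '2025-01-01T00:00:00Z',
    },
  ],
};
```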
- -### Centralized Management -All tool discovery operations are managed through a centralized `ToolDiscoveryManager` that: - -- **Eliminates Code Duplication**: Single source of truth for all discovery logic -- **Provides Consistent Behavior**: Uniform discovery behavior across all Gateway components -- **Handles Error Recovery**: Robust error handling with fallback mechanisms -- **Manages Progress Feedback**: Consistent user feedback during discovery operations - -## Cache Invalidation Strategy - -### Time-Based Invalidation -Cache entries are automatically invalidated based on: - -- **Maximum Age**: Default 24-hour time-to-live for cached tool information -- **Configuration Changes**: Immediate invalidation when server configurations change -- **Team Context Changes**: Cache clearing when switching between teams - -### Configuration-Based Invalidation -The system detects configuration changes through: - -- **Server Configuration Hashing**: Checksums of server spawn commands and environment variables -- **Team Membership Changes**: Detection of team member additions or removals -- **Permission Updates**: Changes to team-based access policies - -### Manual Invalidation -Developers and administrators can manually invalidate cache through: - -- **CLI Commands**: Explicit cache clearing and refresh commands -- **Team Switching**: Automatic cache refresh when switching team contexts -- **Configuration Updates**: Cache refresh when updating MCP server configurations - -## Performance Optimization - -### Cache-First Strategy -The Gateway prioritizes cached data for optimal performance: - -- **Instant Tool Exposure**: Cached tools are immediately available to MCP clients -- **Background Refresh**: Cache updates happen asynchronously without blocking operations -- **Predictive Loading**: Frequently-used tools are kept warm in cache -- **Lazy Discovery**: New servers are discovered on-demand when first accessed - -### Fallback Mechanisms -When live discovery fails, the system provides graceful degradation: - -- **Cached Tool Fallback**: Use previously cached tools when servers are unavailable -- **Partial Discovery**: Continue with available tools even if some servers fail -- **Error State Caching**: Cache error states to avoid repeated failed discovery attempts -- **Recovery Strategies**: Automatic retry with exponential backoff for failed discoveries - -## Team Isolation and Security - -### Access Control -Each team's cache is completely isolated through: - -- **Directory Separation**: Team-specific cache directories prevent cross-team access -- **File Permissions**: Operating system-level permissions restrict cache file access -- **Encryption**: Sensitive cache data is encrypted using team-specific keys -- **Audit Logging**: All cache operations are logged for security and compliance - -### Data Privacy -The caching system ensures data privacy by: - -- **Local Storage Only**: Cache files are stored locally and never transmitted -- **Credential Exclusion**: No sensitive credentials are stored in cache files -- **Metadata Only**: Only tool schemas and metadata are cached, not actual data -- **Automatic Cleanup**: Cache files are automatically cleaned up when teams are removed - -## Integration Points - -The caching system integrates seamlessly with other Gateway components: - -- **[MCP Configuration Management](/development/gateway/mcp)**: Uses team configurations to determine which servers to discover -- **[Gateway Process Management](/development/gateway/process-management)**: Coordinates with 
process spawning for tool discovery -- **[Gateway Project Structure](/development/gateway/structure)**: Implements the centralized architecture through the utils layer -- **HTTP Proxy Server**: Provides cached tool information for immediate client responses - -## Cache Management Operations - -### Developer Commands -The Gateway provides several commands for cache management: - -- **Status Checking**: View current cache status and tool counts -- **Manual Refresh**: Force refresh of cached tools from all servers -- **Cache Clearing**: Remove cached data for troubleshooting -- **Discovery Testing**: Validate tool discovery for specific servers - -### Administrative Operations -Enterprise administrators can manage caching through: - -- **Team-Wide Refresh**: Refresh cache for all team members -- **Policy Enforcement**: Apply caching policies across teams -- **Usage Analytics**: Monitor cache hit rates and discovery patterns -- **Troubleshooting**: Diagnose cache-related issues and performance problems - -## Monitoring and Observability - -### Cache Metrics -The system tracks comprehensive caching metrics: - -- **Cache Hit Rates**: Percentage of requests served from cache vs. live discovery -- **Discovery Success Rates**: Success/failure rates for tool discovery operations -- **Cache Size**: Storage usage and tool counts per team -- **Refresh Frequency**: How often cache is refreshed and invalidated - -### Performance Indicators -Key performance indicators include: - -- **Gateway Startup Time**: Time from start to tool availability -- **Tool Discovery Duration**: Time required to discover tools from each server -- **Cache Effectiveness**: Reduction in discovery time due to caching -- **Error Recovery Time**: Time to recover from failed discovery operations - -This caching system ensures that the DeployStack Gateway provides instant tool availability while maintaining the security, isolation, and performance requirements of enterprise development teams. diff --git a/_DEPRECATED/gateway/device-management.mdx b/_DEPRECATED/gateway/device-management.mdx deleted file mode 100644 index 6e75ec9..0000000 --- a/_DEPRECATED/gateway/device-management.mdx +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: Device Management Architecture -description: Technical implementation of device detection, caching, and management in the DeployStack Gateway CLI -sidebar: Device Management ---- - -# Gateway Device Management Architecture - -The DeployStack Gateway implements a sophisticated device management system that balances security, performance, and user experience. This document explains the technical architecture, design decisions, and implementation details from a developer perspective. 
- -## Architecture Overview - -The Gateway's device management system consists of three core components: - -**Device Detection System** -- Hardware fingerprinting for unique device identification -- System information collection for compatibility and analytics -- Lightweight signature generation for cache validation - -**Device Information Cache** -- High-performance caching to eliminate redundant device detection -- Secure storage using OS keychain with encrypted fallback -- Integrity validation and automatic cache invalidation - -**OAuth2 Integration** -- Device registration during authentication flow -- Device information included in token exchange -- No separate device management endpoints required - -## The Performance Problem We Solved - -### Original Challenge - -Before implementing device caching, every Gateway command suffered from a significant performance bottleneck: - -- **Device fingerprinting took 3+ seconds** on every command execution -- Commands like `deploystack refresh` and `deploystack mcp` felt sluggish -- Users experienced poor CLI responsiveness -- System resources were wasted on redundant hardware detection - -### Root Cause Analysis - -Device fingerprinting is inherently expensive because it requires: -- Network interface enumeration to collect MAC addresses -- System information queries across multiple OS APIs -- Cryptographic hashing of collected hardware data -- File system operations to gather system details - -This expensive operation was happening on **every single command** because device information is required for: -- Backend API authentication and device tracking -- Security validation and audit logging -- Configuration management and team analytics - -## Device Caching Architecture - -### Design Principles - -**Performance First** -- Cache-first architecture with graceful fallback -- 30x performance improvement (3s → 0.1s) -- Persistent cache across logout/login sessions - -**Security Without Compromise** -- Hardware signature validation for cache integrity -- Automatic invalidation on hardware changes -- Encrypted storage with integrity checksums - -**Developer Experience** -- Completely transparent to end users -- No manual cache management required -- Automatic background operation - -### Cache Storage Strategy - -We implemented a dual-storage approach for maximum reliability: - -**Primary: OS Keychain Storage** -- macOS: Keychain Services -- Windows: Credential Manager -- Linux: Secret Service API -- Benefits: Native OS security, encrypted at rest, user-scoped access - -**Fallback: Encrypted File Storage** -- AES-256-GCM encryption with derived keys -- Stored in `~/.deploystack/device-cache.enc` -- File permissions restricted to user only (0o600) -- Used when keychain access fails or is unavailable - -### Cache Validation System - -**Hardware Signature Validation** -- Lightweight hardware signature (not full fingerprint) -- Detects major hardware changes without expensive operations -- Automatically invalidates cache when hardware changes detected - -**Integrity Protection** -- SHA256 checksums with random salts prevent tampering -- Cache version tracking for schema evolution -- Automatic cleanup of corrupted or invalid cache entries - -**Time-Based Expiration** -- 30-day cache lifetime for security -- Automatic renewal during normal usage -- Configurable expiration for different deployment scenarios - -## Device Detection Implementation - -### Hardware Fingerprinting Process - -**Network Interface Collection** -- Enumerate all network interfaces -- 
Extract MAC addresses from physical interfaces -- Filter out virtual and temporary interfaces -- Handle cross-platform interface naming differences - -**System Information Gathering** -- Operating system type and version -- System architecture (x64, arm64, etc.) -- Hostname and system identifiers -- Node.js runtime version for compatibility - -**Fingerprint Generation** -- Combine hardware identifiers in deterministic order -- Apply cryptographic hashing (SHA256) -- Generate stable, unique device identifier -- Ensure consistency across reboots and minor system changes - -### Lightweight Hardware Signatures - -For cache validation, we use a much faster "hardware signature" instead of full fingerprinting: - -**Why Separate Signatures?** -- Full fingerprinting: 3+ seconds, comprehensive hardware analysis -- Hardware signature: \<100ms, basic system identifiers -- Signature detects major changes (new hardware, different machine) -- Signature allows minor changes (software updates, network changes) - -**Signature Components** -- Primary MAC address of main network interface -- System hostname and basic OS identifiers -- Minimal set of stable hardware characteristics -- Fast to compute, sufficient for cache validation - -## Security Architecture - -### Threat Model Considerations - -**Cache Tampering Protection** -- SHA256 checksums with random salts -- Integrity validation on every cache access -- Automatic invalidation of corrupted cache -- Secure key derivation for encryption - -**Hardware Change Detection** -- Automatic cache invalidation when hardware changes -- Prevents cache reuse on different machines -- Detects both major and minor hardware modifications -- Balances security with usability - -**Storage Security** -- OS keychain provides encrypted storage -- Fallback encryption uses industry-standard AES-256-GCM -- File permissions restrict access to user only -- No plaintext device information stored - -### Privacy Considerations - -**Minimal Data Collection** -- Only collect device information necessary for functionality -- No tracking or analytics data in device cache -- User control over device naming and identification -- Clear data retention and cleanup policies - -**Data Isolation** -- Device cache is user-scoped and isolated -- No cross-user cache sharing or access -- Secure cleanup when users are removed -- Audit trail separate from cached data diff --git a/_DEPRECATED/gateway/enterprise-management.mdx b/_DEPRECATED/gateway/enterprise-management.mdx deleted file mode 100644 index e77463c..0000000 --- a/_DEPRECATED/gateway/enterprise-management.mdx +++ /dev/null @@ -1,303 +0,0 @@ ---- -title: Enterprise MCP Management -description: How the Gateway transforms MCP servers into enterprise governance tools with toggleable controls -sidebar: Enterprise Management -icon: Building2 ---- - -import { Card, Cards } from 'fumadocs-ui/components/card'; -import { Building2, ToggleLeft, Eye, Shield } from 'lucide-react'; - -# Enterprise MCP Management - -The DeployStack Gateway transforms individual MCP servers into enterprise governance tools, presenting each server as a toggleable tool with comprehensive management capabilities for organizational control. - -## Business Context - -### The Enterprise Challenge -Traditional MCP implementations expose individual tools from multiple servers, creating a complex landscape that's difficult to govern at scale. 
Enterprise organizations need: - -- **Visibility**: Clear overview of which MCP servers are available and active -- **Control**: Ability to enable/disable entire MCP servers based on policy -- **Governance**: Centralized management with audit trails -- **Compliance**: Team-based access controls and usage monitoring - -### DeployStack Solution -The Gateway addresses these challenges by presenting **MCP servers as tools** rather than exposing individual server tools, enabling enterprise governance while maintaining developer productivity. - -## Architecture Overview - - - } - title="Server-as-Tool Model" - > - Each MCP server appears as a single toggleable tool with rich metadata - - - } - title="Management Actions" - > - Enable, disable, and status operations for operational control - - - } - title="Enterprise Visibility" - > - Rich descriptions and metadata from secure catalog integration - - - } - title="Policy Enforcement" - > - Team-based access controls with centralized governance - - - -## Tool Transformation - -### From Individual Tools to Server Management -**Traditional MCP Approach:** -```json -{ - "tools": [ - {"name": "brightdata__search", "description": "Search the web"}, - {"name": "brightdata__scrape", "description": "Scrape webpage content"}, - {"name": "calculator__add", "description": "Add two numbers"}, - {"name": "calculator__multiply", "description": "Multiply numbers"} - ] -} -``` - -**DeployStack Enterprise Approach:** -```json -{ - "tools": [ - { - "name": "brightdata-mcp", - "description": "brightdata-mcp MCP server - Web scraping and data collection", - "inputSchema": { - "type": "object", - "properties": { - "action": { - "type": "string", - "enum": ["enable", "disable", "status"] - } - } - } - }, - { - "name": "calculator-server", - "description": "calculator-server MCP server - Mathematical operations and calculations", - "inputSchema": { - "type": "object", - "properties": { - "action": { - "type": "string", - "enum": ["enable", "disable", "status"] - } - } - } - } - ] -} -``` - -## Management Actions - -### Enable Action -**Purpose**: Activate an MCP server for use -**Usage**: `{"action": "enable"}` - -**Process:** -1. Validates server configuration from team catalog -2. Spawns MCP server process with injected credentials -3. Establishes stdio communication channel -4. Returns operational status and process information - -**Response Example:** -```json -{ - "server": "brightdata-mcp", - "action": "enabled", - "status": "running", - "message": "brightdata-mcp MCP server has been enabled and is running" -} -``` - -### Disable Action -**Purpose**: Deactivate a running MCP server -**Usage**: `{"action": "disable"}` - -**Process:** -1. Locates running MCP server process -2. Gracefully terminates process with 5-second timeout -3. Cleans up resources and communication channels -4. 
Confirms successful shutdown - -**Response Example:** -```json -{ - "server": "brightdata-mcp", - "action": "disabled", - "status": "stopped", - "message": "brightdata-mcp MCP server has been disabled" -} -``` - -### Status Action (Default) -**Purpose**: Retrieve comprehensive server information -**Usage**: `{"action": "status"}` or no action parameter - -**Information Provided:** -- Current operational status (running/stopped) -- Server description from enterprise catalog -- Runtime environment details -- Performance metrics (uptime, message count, error count) -- Process health information - -**Response Example:** -```json -{ - "server": "brightdata-mcp", - "action": "status_check", - "status": "running", - "description": "Web scraping and data collection platform", - "runtime": "nodejs", - "message": "brightdata-mcp MCP server is running", - "uptime": 1847293, - "messageCount": 42, - "errorCount": 0 -} -``` - -## Enterprise Benefits - -### Centralized Governance -- **Policy Enforcement**: Administrators control which MCP servers are available per team -- **Access Control**: Team-based permissions determine server availability -- **Audit Trail**: All enable/disable actions logged for compliance -- **Resource Management**: Centralized control over computational resources - -### Developer Experience -- **Simplified Interface**: Developers see clean server names instead of complex tool hierarchies -- **Rich Metadata**: Comprehensive descriptions help developers understand capabilities -- **Operational Control**: Developers can manage server lifecycle as needed -- **Status Transparency**: Clear visibility into server health and performance - -### Operational Excellence -- **Resource Optimization**: Servers only run when needed, reducing resource consumption -- **Error Isolation**: Server-level management isolates issues to specific services -- **Performance Monitoring**: Built-in metrics for operational visibility -- **Graceful Degradation**: Individual server failures don't impact other services - -## Metadata Integration - -### Catalog-Driven Descriptions -Server descriptions are pulled from the enterprise catalog stored securely: - -```typescript -// From team configuration -const installation = teamConfig.installations.find( - inst => inst.installation_name === serverName -); - -const description = installation?.server?.description || ''; - -// Resulting tool description -const toolDescription = `${serverName} MCP server${description ? ` - ${description}` : ''}`; -``` - -### Rich Server Information -Each server tool includes: -- **Installation Name**: Clean, human-readable identifier -- **Description**: Business context from enterprise catalog -- **Runtime**: Technical environment (nodejs, python, go, etc.) 
-- **Team Context**: Access permissions and policies -- **Operational Metrics**: Performance and health data - -## Security and Compliance - -### Credential Management -- **Secure Injection**: Credentials injected at process spawn time -- **No Exposure**: Developers never see or handle credentials directly -- **Centralized Control**: All credentials managed through enterprise catalog -- **Audit Trail**: Credential usage tracked for compliance - -### Access Control -- **Team-Based**: Server availability determined by team membership -- **Policy-Driven**: Enterprise policies control server access -- **Role-Based**: Different permissions for different team roles -- **Centralized Management**: All access control managed through cloud control plane - -### Monitoring and Compliance -- **Usage Tracking**: All server interactions logged and monitored -- **Performance Metrics**: Operational data for capacity planning -- **Error Reporting**: Centralized error tracking and alerting -- **Compliance Reporting**: Audit trails for regulatory requirements - -## Implementation Workflow - -### Tool Discovery Flow -1. **Client Request**: Development tool calls `tools/list` -2. **Server Enumeration**: Gateway iterates through team's MCP server configurations -3. **Metadata Enrichment**: Descriptions pulled from secure catalog -4. **Tool Generation**: Each server becomes a management tool -5. **Response**: Clean list of server management tools returned - -### Tool Execution Flow -1. **Action Request**: Client calls server tool with management action -2. **Server Identification**: Gateway maps tool name to server configuration -3. **Action Processing**: Enable/disable/status action executed -4. **Process Management**: Server processes spawned/terminated as needed -5. **Response**: Operational status and metadata returned - -## Developer Workflow - -### Typical Usage Pattern -1. **Discovery**: Developer calls `tools/list` to see available MCP servers -2. **Status Check**: Calls server tool with `status` action to understand current state -3. **Activation**: Uses `enable` action to start needed MCP servers -4. **Work**: Utilizes MCP server capabilities through other tools/interfaces -5. **Cleanup**: Uses `disable` action to stop servers when done - -### VS Code Integration -In VS Code, developers see: -``` -🔧 Available Tools: -├── brightdata-mcp - brightdata-mcp MCP server - Web scraping and data collection -├── calculator-server - calculator-server MCP server - Mathematical operations -└── github-integration - github-integration MCP server - GitHub API access -``` - -Each tool can be toggled on/off with simple actions, providing enterprise governance with developer-friendly controls. - -## Developer Tool Discovery - -### CLI-Based Exploration -Before enabling MCP servers through the enterprise management interface, developers can explore available tools using the CLI tool discovery feature: - -**Command**: `deploystack mcp --tools ` - -**Purpose**: Allows developers to understand what capabilities each MCP server provides before activation, enabling informed decisions about which servers to enable for their workflow. 
- -**Benefits**: -- **Preview Capabilities**: See all available tools and their descriptions without starting the server -- **Parameter Understanding**: Review required and optional parameters for each tool -- **Informed Decisions**: Choose the right MCP servers based on actual tool availability -- **Development Planning**: Plan workflows around available tool capabilities - -### Integration with Enterprise Management -The CLI tool discovery complements the enterprise management approach: - -1. **Discovery Phase**: Developer uses `deploystack mcp --tools` to explore server capabilities -2. **Planning Phase**: Developer identifies which servers provide needed functionality -3. **Activation Phase**: Developer enables specific servers through enterprise management tools -4. **Utilization Phase**: Developer uses the activated servers' capabilities in their workflow - -This workflow ensures developers make informed decisions about server activation while maintaining enterprise governance and control. - -The enterprise management layer transforms complex MCP server ecosystems into manageable, governable, and developer-friendly tools that meet both organizational requirements and developer productivity needs. diff --git a/_DEPRECATED/gateway/index.mdx b/_DEPRECATED/gateway/index.mdx deleted file mode 100644 index 047ea12..0000000 --- a/_DEPRECATED/gateway/index.mdx +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: Gateway Development -description: Developer documentation for the DeployStack Gateway - the local secure proxy that manages MCP servers and credentials for enterprise teams. -sidebar: Gateway -icon: Terminal ---- - -import { Card, Cards } from 'fumadocs-ui/components/card'; -import { Terminal, Code2, Settings, Shield, Zap, Users, Rocket } from 'lucide-react'; - -# DeployStack Gateway Development - -The DeployStack Gateway is the local secure proxy that connects developers to their team's MCP servers through a centralized control plane. It acts as a smart process manager and credential vault, running MCP server processes as persistent background services while enforcing access policies from the cloud. 
- -## Architecture Overview - -The Gateway implements a sophisticated Control Plane / Data Plane architecture with comprehensive transport support: - -- **Control Plane**: Authenticates with `cloud.deploystack.io` to download team configurations and access policies -- **Data Plane**: Manages local MCP server processes with stdio, SSE, and Streamable HTTP transport protocols -- **Security Layer**: Injects credentials securely into process environments without exposing them to developers -- **Session Management**: Handles secure SSE connections with cryptographic session IDs for VS Code compatibility -- **Transport Layer**: Supports both legacy SSE transport and modern Streamable HTTP transport for maximum client compatibility - -## Core Features - - - } - title="Triple Transport Support" - > - Supports stdio transport for CLI tools, SSE transport for VS Code compatibility, and Streamable HTTP for modern MCP clients - - - } - title="Secure Credential Injection" - > - Injects API tokens and credentials directly into process environments without developer exposure - - - } - title="Individual Tool Exposure" - > - Exposes individual MCP tools with namespacing (e.g., brightdata-search_engine) for direct use in development environments - - - } - title="Session Management" - > - Cryptographically secure session handling with automatic cleanup for persistent connections - - - } - title="Unified Proxy" - > - Single HTTP endpoint supporting multiple client types with intelligent request routing - - - } - title="Team-Based Access" - > - Enforces team-based access control policies downloaded from the cloud control plane - - - } - title="Tool Caching System" - > - Team-aware caching enables instant gateway startup and automatic tool discovery on team switching - - - -## Development Setup - -### Prerequisites - -- Node.js (v18 or higher) -- npm (v8 or higher) -- TypeScript development environment -- A DeployStack account at [cloud.deploystack.io](https://cloud.deploystack.io) - -### Local Development - -```bash -# Navigate to the gateway service -cd services/gateway - -# Install dependencies -npm install - -# Start development server -npm run dev - -# Build for production -npm run build - -# Start production build -npm start -``` - -## Key Components - -### Authentication Module -Handles secure authentication with the DeployStack cloud control plane and manages access tokens. - -### Configuration Sync -Downloads and synchronizes team MCP server configurations, including process spawn commands and environment variables. 
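A hedged sketch of what such a sync call could look like, using the three-tier endpoint documented on the MCP Configuration Management page. The function name and error handling are illustrative, not the Gateway's actual code:

```typescript
// Illustrative sketch; mirrors the documented three-tier gateway endpoint.
async function fetchTeamConfigurations(
  backendUrl: string,
  accessToken: string,
  hardwareId: string
): Promise<unknown> {
  const url = `${backendUrl}/api/gateway/me/mcp-configurations?hardware_id=${encodeURIComponent(hardwareId)}`;
  const response = await fetch(url, {
    headers: { Authorization: `Bearer ${accessToken}` },
  });
  if (!response.ok) {
    throw new Error(`Configuration sync failed with status ${response.status}`);
  }
  return response.json(); // merged, ready-to-use server configurations
}
```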
- -### Process Manager -Manages the lifecycle of MCP server processes, including: -- On-demand process spawning -- Stdio communication handling -- Process cleanup and resource management -- Environment variable injection - -### HTTP Proxy Server -Exposes multiple endpoints for different client types: -- **GET /sse**: SSE connection establishment for VS Code and legacy clients -- **POST /message**: Session-based JSON-RPC for SSE clients -- **POST /mcp**: Streamable HTTP endpoint for modern MCP clients -- **GET /health**: Health check endpoint for monitoring - -### Session Manager -Handles secure SSE connections with: -- Cryptographically secure session ID generation -- Session lifecycle management and cleanup -- Connection state tracking and validation -- Automatic timeout and resource management - -### Enterprise Management Layer -Transforms MCP servers into enterprise governance tools: -- Each MCP server appears as a toggleable tool -- Enable/disable/status actions for operational control -- Rich metadata from secure catalog integration -- Team-based access policy enforcement - -### Security Layer -Ensures credentials are handled securely: -- Encrypted storage of downloaded configurations -- Secure environment variable injection -- No credential exposure to developer environment -- Session-based authentication for persistent connections - -## Configuration Format - -The Gateway works with MCP server configurations in this format: - -```json -{ - "name": "brightdata", - "command": "npx", - "args": ["@brightdata/mcp"], - "env": { - "API_TOKEN": "secure-token-from-vault" - } -} -``` - -## Development Workflow - -1. **Authentication**: Gateway authenticates with cloud control plane -2. **Config Download**: Downloads team's MCP server configurations -3. **Persistent Process Startup**: Starts all configured MCP servers as background processes when gateway launches -4. **HTTP Server**: Starts local HTTP server with multiple endpoints immediately available: - - SSE endpoint: `localhost:9095/sse` (for VS Code and legacy clients) - - Messages endpoint: `localhost:9095/message` (for session-based JSON-RPC) - - MCP endpoint: `localhost:9095/mcp` (for modern Streamable HTTP clients) - - Health endpoint: `localhost:9095/health` (for monitoring) -5. **Request Handling**: Receives MCP requests from development tools and intelligently routes to appropriate transport -6. **Process Management**: Maintains persistent background processes as described in [Gateway Process Management](/development/gateway/process-management). -7. **Credential Injection**: Securely injects environment variables into running processes at startup -8. **Tool Routing**: Routes namespaced tool calls to persistent MCP servers via stdio transport -9. **Transport Selection**: Automatically detects client capabilities and uses appropriate transport (SSE or Streamable HTTP) - -For detailed information about the caching system, see [Gateway Caching System](/development/gateway/caching-system). 
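To illustrate the process startup and credential injection steps above, here is a minimal sketch that turns a configuration in the format shown earlier into a background process. The type and function names are illustrative; the real process manager adds lifecycle tracking and cleanup logic:

```typescript
import { spawn, type ChildProcess } from 'node:child_process';

// Matches the configuration format shown above; names are illustrative.
interface MCPServerConfig {
  name: string;
  command: string;            // e.g. "npx"
  args: string[];             // e.g. ["@brightdata/mcp"]
  env: Record<string, string>;
}

// Credentials are injected into the child process environment only,
// never exported to the developer's shell.
function startServer(config: MCPServerConfig): ChildProcess {
  return spawn(config.command, config.args, {
    env: { ...process.env, ...config.env },
    stdio: ['pipe', 'pipe', 'pipe'], // stdio transport for MCP messages
  });
}
```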
- -## Language Support - -The Gateway is language-agnostic and supports MCP servers written in: - -- **Node.js**: `npx`, `node` commands -- **Python**: `python`, `pip`, `pipenv` commands -- **Go**: Compiled binary execution -- **Rust**: Compiled binary execution -- **Any Language**: Via appropriate runtime commands - -## Security Considerations - -### Credential Management -- Credentials are never written to disk in plain text -- Environment variables are injected directly into spawned processes -- No credential exposure to the developer's shell environment - -### Process Isolation -- Each MCP server runs in its own isolated process -- Process cleanup ensures no resource leaks -- Automatic process termination after idle periods - -### Network Security -- Local HTTP server only binds to localhost -- No external network exposure by default -- Secure communication with cloud control plane - -## Contributing - -The Gateway is actively under development. Key areas for contribution: - -- **Process Management**: Improving spawn/cleanup logic -- **Security**: Enhancing credential handling -- **Performance**: Optimizing stdio communication -- **Platform Support**: Adding Windows/Linux compatibility -- **Error Handling**: Robust error recovery diff --git a/_DEPRECATED/gateway/mcp.mdx b/_DEPRECATED/gateway/mcp.mdx deleted file mode 100644 index 73391e0..0000000 --- a/_DEPRECATED/gateway/mcp.mdx +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: Gateway MCP Configuration Management -description: How the DeployStack Gateway CLI downloads, processes, and securely stores MCP server configurations for teams -sidebar: MCP Configuration -icon: Bot ---- - -# Gateway MCP Configuration Management - -The DeployStack Gateway CLI automatically manages MCP (Model Context Protocol) server configurations for teams, downloading installation data from the backend API and storing it securely for local process management. - -## Overview - -The Gateway implements a sophisticated MCP configuration system that: - -- **Downloads** team MCP installations from the backend API -- **Processes** raw API data into Gateway-ready server configurations -- **Stores** both raw and processed data securely using OS-level storage -- **Manages** team context switching with automatic config updates - -## API Integration - -### Legacy Team-Based Endpoint -The Gateway can fetch MCP installations from the legacy team-based endpoint: -``` -GET /api/teams/{teamId}/mcp/installations -``` - -### Modern Three-Tier Gateway Endpoint -For optimal performance and device-specific configurations, the Gateway uses the modern three-tier endpoint: -``` -GET /api/gateway/me/mcp-configurations?hardware_id={hardwareId} -``` - -This endpoint automatically merges Template + Team + User configurations and returns ready-to-use server configurations with device-specific user arguments and environment variables. For detailed information about this endpoint, see the [Backend API Documentation](/development/backend/api). - -### Response Structure -The API returns team MCP installations with this interface: -```typescript -interface MCPInstallationsResponse { - success: boolean; - data: MCPInstallation[]; -} -``` - -## Data Storage Architecture - -### Dual Storage Approach -The Gateway stores **both** raw API data and processed configurations: - -1. **Raw Installations** - Complete API response for audit and debugging -2. 
**Processed Server Configs** - Gateway-ready configurations for process spawning - -### Storage Interface -```typescript -interface TeamMCPConfig { - team_id: string; - team_name: string; - installations: MCPInstallation[]; // Raw API data - servers: MCPServerConfig[]; // Processed configs - last_updated: string; -} -``` - -### Secure Storage -- **Primary**: OS Keychain (macOS Keychain, Windows Credential Manager, Linux Secret Service) -- **Fallback**: AES-256-CBC encrypted files -- **Key Format**: `${userEmail}-${teamId}` for team isolation - -## Configuration Processing - -The Gateway transforms raw API installations into executable server configurations: - -### Runtime Detection -- **Node.js**: `npx @package-name` -- **Python**: `python -m package_name` -- **Go**: Direct binary execution -- **Custom**: Uses `installation_methods` from API - -### Environment Variable Merging -1. Server default environment variables -2. User-customized overrides from `user_environment_variables` -3. Secure injection at process spawn time - -## Team Context Integration - -### Automatic Management -- **Login**: Downloads default team's MCP configuration -- **Team Switch**: Clears old config, downloads new team's config -- **Logout**: Clears all stored MCP configurations - -### Configuration Lifecycle -1. API authentication and team selection -2. MCP installations download via API -3. Data validation and filtering -4. Configuration processing and transformation -5. Secure storage with team isolation -6. Runtime access for process management - -## Developer Commands - -### Configuration Management -- `deploystack mcp --status` - Show current configuration status -- `deploystack mcp --refresh` - Force refresh from API -- `deploystack mcp --clear` - Clear stored configuration -- `deploystack mcp --test` - Run processing validation tests - -### Debug Information -The `deploystack mcp` command shows raw stored data including: -- Complete team information -- Processed server configurations -- Raw API installation data -- Environment variables (with sensitive data masking) - -## Security Considerations - -### Data Isolation -- Each team's configuration stored with unique keys -- No cross-team data access possible -- Automatic cleanup on team changes - -### Credential Protection -- Environment variables injected at runtime only -- No plain text storage of sensitive data -- OS-level keychain integration for maximum security - -## Tool Discovery and Caching - -Beyond configuration management, the Gateway implements an advanced tool discovery system that automatically identifies and caches individual tools from each MCP server. This system operates seamlessly with the configuration management to provide: - -### Automatic Discovery -- **Team Switching**: Tools are automatically discovered from all servers when switching teams -- **Configuration Updates**: Tool cache is refreshed when server configurations change -- **Manual Refresh**: Developers can explicitly refresh tools using CLI commands - -### Team-Aware Caching -- **Isolated Storage**: Each team's discovered tools are cached separately -- **Fast Startup**: Gateway starts instantly using cached tool information -- **Fallback Support**: Cached tools remain available even when servers are temporarily unavailable - -For comprehensive details about the tool discovery and caching system, see [Gateway Caching System](/development/gateway/caching-system). 
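The environment variable merging and team-isolated storage keys described above can be sketched roughly as follows; this is a simplified illustration with assumed function names, not the Gateway's actual code:

```typescript
// User overrides from `user_environment_variables` win over server defaults;
// the merged result is injected only at process spawn time, never written
// to disk in plain text.
function mergeEnvironment(
  serverDefaults: Record<string, string>,
  userOverrides: Record<string, string>
): Record<string, string> {
  return { ...serverDefaults, ...userOverrides };
}

// Team-isolated storage key, following the documented `${userEmail}-${teamId}` format.
function buildStorageKey(userEmail: string, teamId: string): string {
  return `${userEmail}-${teamId}`;
}
```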
- -## Developer Commands - -### Configuration Management -- `deploystack mcp --status` - Show current configuration status -- `deploystack mcp --refresh` - Force refresh from API -- `deploystack mcp --clear` - Clear stored configuration - -### Tool Discovery -- `deploystack mcp --tools ` - Discover and display tools from a specific MCP server (requires running gateway) -- `deploystack teams --switch ` - Switch teams with automatic tool discovery - -**Note**: The `--tools` command only works when the gateway is running (`deploystack start`), as it communicates with already-running MCP server processes rather than spawning them on-demand. - -## Integration Points - -The stored MCP configurations are consumed by: - -- **Process Manager** - Spawns MCP server processes using stored configs as described in [Process Management](/development/gateway/process-management) -- **HTTP Proxy** - Routes requests to appropriate MCP servers using cached tool information -- **Environment Injection** - Securely provides credentials to spawned processes -- **Tool Discovery System** - Uses configurations to discover and cache available tools as detailed in [Gateway Caching System](/development/gateway/caching-system) - -This system ensures that the Gateway has immediate access to team-specific MCP server configurations while maintaining security and team isolation throughout the development workflow. diff --git a/_DEPRECATED/gateway/meta.json b/_DEPRECATED/gateway/meta.json deleted file mode 100644 index f72ea6f..0000000 --- a/_DEPRECATED/gateway/meta.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "title": "Gateway Development", - "description": "Documentation for DeployStack Gateway Development", - "icon": "Plug", - "root": false, - "pages": [ - "..." - ] -} diff --git a/_DEPRECATED/gateway/oauth.mdx b/_DEPRECATED/gateway/oauth.mdx deleted file mode 100644 index c610dcc..0000000 --- a/_DEPRECATED/gateway/oauth.mdx +++ /dev/null @@ -1,367 +0,0 @@ ---- -title: Gateway OAuth Implementation -description: OAuth2 client implementation for CLI authentication with DeployStack backend -sidebar: OAuth -icon: Shield ---- - -# Gateway OAuth Implementation - -The DeployStack Gateway implements an OAuth2 client for secure CLI authentication with the DeployStack backend. This enables users to authenticate via their browser and use the CLI with proper access tokens. - -## Architecture Overview - -The gateway acts as an OAuth2 client implementing the **Authorization Code flow with PKCE** (Proof Key for Code Exchange) for enhanced security. The implementation consists of: - -- **OAuth2 Client** - Handles the complete authorization flow -- **Callback Server** - Temporary HTTP server for receiving authorization codes -- **API Client** - Makes authenticated requests to backend APIs -- **Credential Storage** - Secure token storage and retrieval - -## OAuth2 Flow Process - -### 1. 
Authorization Request - -When a user runs the login command, the CLI: - -- Generates a cryptographically secure PKCE code verifier (128 random bytes) -- Creates a SHA256 code challenge from the verifier -- Generates a random state parameter for CSRF protection -- Builds the authorization URL with all required OAuth2 parameters -- Opens the user's default browser to the authorization endpoint -- Starts a temporary callback server on localhost port 8976 - -The authorization URL includes: -- `response_type=code` for authorization code flow -- `client_id=deploystack-gateway-cli` for client identification -- `redirect_uri=http://localhost:8976/oauth/callback` for callback handling -- Requested scopes (see [OAuth Scope Management](#oauth-scope-management) below) -- PKCE parameters: `code_challenge` and `code_challenge_method=S256` -- Random `state` parameter for security - -### 2. User Authorization - -The browser opens to the backend's consent page where the user: - -- Reviews the requested permissions and scopes -- Sees security warnings about CLI access -- Can approve or deny the authorization request -- Is redirected back to the CLI's callback server upon decision - -### 3. Callback Handling - -The temporary callback server: - -- Listens only on localhost for security -- Validates the callback path (`/oauth/callback`) -- Extracts the authorization code and state parameters -- Validates the state parameter matches the original request -- Displays a success or error page to the user -- Automatically shuts down after receiving the callback - -### 4. Token Exchange with Device Registration - -After receiving the authorization code, the CLI: - -- Detects device information (hostname, OS, hardware fingerprint) -- Exchanges the code for access and refresh tokens -- Includes the PKCE code verifier for verification -- **Automatically registers the device** during token exchange -- Validates the token response from the backend -- Fetches user information using the new access token -- Stores credentials securely for future use - -#### Automatic Device Registration - -During the token exchange process, the gateway automatically registers the current device with the backend for security and management purposes: - -**Device Information Collected:** -- `device_name`: User-friendly name (defaults to hostname) -- `hostname`: System hostname -- `hardware_id`: Unique hardware fingerprint based on MAC addresses and system info -- `os_type`: Operating system (macOS, Windows, Linux) -- `os_version`: OS version string -- `arch`: System architecture (x64, arm64, etc.) -- `node_version`: Node.js version for compatibility tracking -- `user_agent`: CLI version and platform information - -**Security Benefits:** -- Device registration happens only during authenticated login sessions -- **No separate device registration endpoints exist** - this prevents unauthorized device registration and enhances security -- Hardware fingerprinting provides unique device identification -- Enables device management and access control in the backend -- Eliminates the need for manual device registration API calls - -**Process Flow:** -1. Gateway detects current device information using system APIs -2. Device info is included in the OAuth2 token request -3. Backend validates the token request and registers the device -4. Device information is returned in the token response -5. 
Gateway logs successful device registration to the user (e.g., "📱 Device registered: MacBook-Pro.local") - -**Error Handling:** -If device registration fails during token exchange: -- The OAuth2 login process continues successfully -- User authentication is not affected -- Device context may be limited for some features -- Error is logged but doesn't break the login flow - -## PKCE Security Implementation - -The gateway implements PKCE (Proof Key for Code Exchange) following RFC 7636: - -- **Code Verifier**: 128 random bytes encoded as base64url -- **Code Challenge**: SHA256 hash of the verifier, base64url encoded -- **Challenge Method**: Always uses `S256` (SHA256) -- **State Validation**: Cryptographically secure random state parameter - -PKCE provides security benefits: -- Prevents authorization code interception attacks -- No client secret required (suitable for public clients) -- Protects against malicious applications - -## Client Configuration - -The gateway is pre-registered with the backend as: - -- **Client ID**: `deploystack-gateway-cli` -- **Client Type**: Public client (no secret required) -- **Redirect URIs**: `http://localhost:8976/oauth/callback`, `http://127.0.0.1:8976/oauth/callback` -- **Allowed Scopes**: See source code at `services/gateway/src/utils/auth-config.ts` -- **PKCE**: Required with SHA256 method -- **Token Lifetime**: 1 week access tokens, 30 day refresh tokens - -## Command Integration - -### Login Command - -The login command orchestrates the complete OAuth2 flow: - -- Checks if the user is already authenticated -- Displays "already logged in" message if credentials are valid -- Initiates the OAuth2 flow if authentication is needed -- Handles browser opening and callback server management -- Stores credentials securely upon successful authentication -- Provides clear success confirmation with user email - -### Authenticated Commands - -Commands like `whoami`, `teams`, and `start` use stored credentials: - -- Check authentication status before proceeding -- Display helpful error messages if not authenticated -- Use Bearer token authentication for API requests -- Automatically refresh expired tokens when possible -- Handle token expiration gracefully - -## Error Handling - -The OAuth implementation includes comprehensive error handling: - -### Error Types - -- **TIMEOUT**: OAuth callback not received within time limit -- **ACCESS_DENIED**: User denied the authorization request -- **BROWSER_ERROR**: Failed to open browser automatically -- **NETWORK_ERROR**: Network connectivity issues -- **STORAGE_ERROR**: Failed to store credentials securely -- **TOKEN_EXPIRED**: Access token has expired -- **INVALID_TOKEN**: Token format or signature invalid -- **INVALID_GRANT**: Authorization code or refresh token invalid - -### User Guidance - -Each error type provides specific user guidance: -- Timeout errors suggest retrying the command -- Access denied errors explain the approval requirement -- Browser errors offer manual URL opening -- Network errors suggest connectivity checks -- Storage errors indicate keychain permission issues - -## Browser Integration - -The CLI provides seamless browser integration: - -- **Automatic Opening**: Uses the system's default browser -- **Cross-Platform**: Works on Windows, macOS, and Linux -- **Fallback Handling**: Displays manual URL if auto-open fails -- **User Feedback**: Clear messages about browser actions -- **Security Warnings**: Alerts for development server usage - -## Token Management - -### Token Refresh - -The gateway 
automatically handles token refresh: - -- Monitors token expiration with 5-minute buffer -- Attempts refresh before tokens expire -- Uses refresh tokens for seamless re-authentication -- Falls back to full re-authentication if refresh fails -- Updates stored credentials with new tokens - -### Token Validation - -Before each API request, the gateway: - -- Checks token expiration locally -- Validates token format and structure -- Handles 401 responses with automatic refresh -- Provides clear error messages for invalid tokens - -## Development vs Production - -The OAuth client adapts to different environments: - -### Development Mode -- Uses HTTP for localhost callback server -- Accepts self-signed certificates for development -- Displays security warnings for non-production servers -- Provides detailed error information for debugging - -### Production Mode -- Enforces HTTPS for all communications -- Validates SSL certificates strictly -- Uses secure callback URLs -- Limits error information exposure - -## Integration with Backend - -The gateway OAuth client integrates with the [backend OAuth2 server](/development/backend/oauth2-server): - -- **Client Registration**: Pre-registered with known client ID -- **PKCE Support**: Uses SHA256 method as required by backend -- **Scope Validation**: Requests only backend-supported scopes -- **Token Format**: Handles backend's custom JWT-like token format -- **Error Responses**: Processes standard OAuth2 error responses -- **Endpoint Discovery**: Uses standard OAuth2 endpoint paths -- **Device Registration**: Automatic device registration during token exchange - -### Device Management Integration - -The gateway's device registration integrates seamlessly with the backend's device management system: - -**Backend Integration Points:** -- **OAuth2 Token Endpoint**: Extended to accept optional `device_info` in token requests -- **Device Service**: Uses existing `DeviceService.registerOrUpdateDevice()` method -- **Database Storage**: Device information stored in the `devices` table -- **User Association**: Devices automatically linked to the authenticated user - -**Token Request Enhancement:** -The gateway includes device information in the OAuth2 token request: -```json -{ - "grant_type": "authorization_code", - "code": "authorization_code_here", - "redirect_uri": "http://localhost:8976/oauth/callback", - "client_id": "deploystack-gateway-cli", - "code_verifier": "pkce_verifier_here", - "device_info": { - "device_name": "MacBook-Pro.local", - "hostname": "MacBook-Pro.local", - "hardware_id": "a1b2c3d4e5f6789012345678901234ab", - "os_type": "macOS", - "os_version": "14.2.1", - "arch": "arm64", - "node_version": "v20.10.0", - "user_agent": "DeployStack-CLI/1.0.0 (darwin; arm64)" - } -} -``` - -**Token Response Enhancement:** -When device registration succeeds, the backend includes device information in the token response: -```json -{ - "access_token": "...", - "token_type": "Bearer", - "expires_in": 3600, - "refresh_token": "...", - "scope": "mcp:read account:read...", - "device": { - "id": "550e8400-e29b-41d4-a716-446655440000", - "device_name": "MacBook-Pro.local", - "is_active": true, - "is_trusted": true, - "created_at": "2025-08-23T10:20:30Z" - } -} -``` - -**Security Design:** -- Device registration only occurs during authenticated OAuth2 flows -- **No separate device creation endpoints exist** - this architectural decision prevents unauthorized device registration and eliminates potential security vulnerabilities -- Hardware fingerprinting ensures 
unique device identification across multiple login sessions -- Device information is validated using JSON schema before processing -- Gateway automatically handles device lookup using hardware fingerprints without requiring manual registration - -For comprehensive information about device management and hardware fingerprinting, see the [Device Management Documentation](/device-management). - -## Security Considerations - -The OAuth implementation follows security best practices: - -- **PKCE Required**: All authorization requests use PKCE -- **State Validation**: Prevents CSRF attacks -- **Localhost Binding**: Callback server only accepts local connections -- **Timeout Protection**: All operations have reasonable timeouts -- **Secure Storage**: Credentials stored using OS keychain -- **No Secrets**: Public client design eliminates secret management - -For detailed security implementation including credential storage, token expiration, and local file security, see the [Gateway Security Guide](/development/gateway/security). - -## OAuth Scope Management - -The gateway requests specific OAuth scopes during authentication to access backend APIs. Scope configuration must stay synchronized between the gateway and backend. - -### Current Scopes - -For the current list of supported scopes, check the source code at: -- **Gateway scopes**: `services/gateway/src/utils/auth-config.ts` in the `scopes` array -- **Backend validation**: `services/backend/src/services/oauth/authorizationService.ts` in the `validateScope()` method - -### Adding New Scopes - -When the backend adds support for a new OAuth scope, you must update the gateway configuration: - -1. **Add the scope** to the `scopes` array in `services/gateway/src/utils/auth-config.ts` -2. **Add a description** to the `SCOPE_DESCRIPTIONS` object in the same file -3. **Test the login flow** to ensure the new scope is requested and granted - -Example: -```typescript -// In services/gateway/src/utils/auth-config.ts -scopes: [ - 'mcp:read', - 'mcp:categories:read', - 'your-new-scope', // Add new scope here - // ... other scopes -], - -// And add description -export const SCOPE_DESCRIPTIONS: Record = { - 'mcp:read': 'Access your MCP server installations and configurations', - 'your-new-scope': 'Description of what this scope allows', // Add description - // ... other descriptions -}; -``` - -### Scope Synchronization - -**Critical**: The gateway and backend must have matching scope configurations: -- If backend supports a scope but gateway doesn't request it, users won't get that permission -- If gateway requests a scope but backend doesn't support it, authentication will fail - -Always coordinate scope changes between both services. - -## Testing OAuth Flow - -During development, the OAuth flow can be tested: - -1. Start the backend in development mode -2. Build the gateway CLI -3. Run the login command with development URL -4. Complete the browser authorization flow -5. Verify authentication with the whoami command - -The OAuth implementation provides a secure, user-friendly authentication experience that follows industry standards while integrating seamlessly with the DeployStack backend. 
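-
-The PKCE values referenced throughout this document can be generated with Node's built-in crypto module. Below is a minimal sketch, not the Gateway's actual implementation (see the gateway source for that); the function names are illustrative, and the verifier length follows the RFC 7636 maximum of 128 base64url characters (96 random bytes).
-
-```typescript
-import { randomBytes, createHash } from 'node:crypto';
-
-// Code verifier: 96 random bytes -> a 128-character base64url string (the RFC 7636 maximum length).
-export function generateCodeVerifier(): string {
-  return randomBytes(96).toString('base64url');
-}
-
-// Code challenge: SHA256 hash of the verifier, base64url encoded (challenge method "S256").
-export function generateCodeChallenge(verifier: string): string {
-  return createHash('sha256').update(verifier).digest('base64url');
-}
-
-// State parameter: random value compared on callback for CSRF protection.
-export function generateState(): string {
-  return randomBytes(32).toString('base64url');
-}
-```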
diff --git a/_DEPRECATED/gateway/process-management.mdx b/_DEPRECATED/gateway/process-management.mdx deleted file mode 100644 index 37991f9..0000000 --- a/_DEPRECATED/gateway/process-management.mdx +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: Gateway Process Management -description: How the DeployStack Gateway manages MCP server processes with persistent background processes, secure credential injection, and enterprise governance -sidebar: Process Management -icon: Cpu ---- - -import { Card, Cards } from 'fumadocs-ui/components/card'; -import { Zap, Shield, Monitor, RefreshCw, AlertTriangle, Users } from 'lucide-react'; - -# Gateway Process Management - -The DeployStack Gateway implements sophisticated process management to handle MCP server lifecycles with enterprise-grade security, performance, and governance. Each MCP server runs as a persistent background process with secure credential injection and continuous availability. - -## Architecture Overview - -The Gateway's process management system operates on a **persistent background process** model, similar to Claude Desktop, where all configured MCP server processes are started when the gateway launches and run continuously until shutdown. This approach provides instant tool availability and eliminates the latency associated with process spawning during development workflows. - -## Core Concepts - - - } - title="Persistent Background Processes" - > - All configured MCP servers start with the gateway and run continuously, providing instant tool availability - - - } - title="Secure Credential Injection" - > - API tokens and credentials are injected directly into process environments without developer exposure - - - } - title="Runtime State Management" - > - Comprehensive tracking of running processes with health monitoring and team isolation - - - } - title="Graceful Lifecycle Management" - > - Proper MCP shutdown sequence following protocol specifications for clean termination - - - } - title="State Comparison & Recovery" - > - Compares expected vs actual running processes with automatic recovery mechanisms - - - } - title="Team Context Switching" - > - Seamless switching between teams with complete process lifecycle management - - - -## Selective Restart Capability - -The Gateway supports **selective restart** functionality, allowing individual MCP servers to be managed without requiring a full gateway restart. This feature dramatically improves configuration update performance and eliminates downtime for unchanged servers. 
- -### Key Features - -- **Individual Server Control**: Add, remove, or restart specific MCP servers via HTTP API -- **Change Detection**: Automatically detects added, removed, and modified server configurations -- **Fallback Safety**: Falls back to full restart if selective operations fail -- **Zero Downtime**: Unchanged servers continue running during configuration updates - -### API Endpoints - -The Gateway exposes HTTP endpoints for selective server management: - -- `POST /api/mcp/servers` - Add new MCP servers to running gateway -- `DELETE /api/mcp/servers/:serverName` - Remove specific servers -- `POST /api/mcp/servers/:serverName/restart` - Restart individual servers - -### Implementation Services - -- **Selective Restart Service**: Handles HTTP communication with running gateway processes -- **Configuration Change Service**: Detects configuration differences and orchestrates selective operations -- **Process Manager Integration**: Provides individual server lifecycle control capabilities - -## Process Lifecycle - -### Gateway Startup Phase -When the Gateway starts (`deploystack start`), all configured MCP servers for the selected team are launched simultaneously: - -- **Team Configuration Loading**: Downloads and validates team MCP server configurations -- **Bulk Process Spawning**: Starts all configured MCP servers as background processes -- **Runtime Detection**: Automatic detection of Node.js, Python, Go, or custom runtime requirements -- **Environment Preparation**: Secure injection of team-specific credentials and configuration -- **MCP Protocol Handshake**: Establishes JSON-RPC communication with 30-second timeout for package downloads -- **Runtime State Registration**: Adds all successfully started processes to the runtime state manager - -### Continuous Operation Phase -During normal operation, all MCP servers run continuously in the background: - -- **Persistent Availability**: All tools are immediately available without process spawning delays -- **Request Routing**: Direct routing of tool calls to already-running MCP server processes -- **Health Monitoring**: Continuous monitoring of process status, uptime, and responsiveness -- **State Comparison**: Regular comparison of expected vs actual running processes -- **Error Logging**: Proper distinction between informational stderr output and actual errors - -### Team Context Switching -When switching teams, the Gateway performs complete process lifecycle management: - -- **Graceful Shutdown**: Stops all MCP servers for the current team following MCP protocol -- **Configuration Refresh**: Downloads new team's MCP server configurations -- **Process Restart**: Starts all MCP servers for the new team -- **State Synchronization**: Updates runtime state to reflect the new team context - -### Gateway Shutdown Phase -When the Gateway stops (`deploystack stop` or Ctrl+C), processes are terminated gracefully: - -- **MCP Protocol Compliance**: Follows proper MCP shutdown sequence (close stdin → wait → SIGTERM → wait → SIGKILL) -- **Parallel Shutdown**: All processes are stopped concurrently for faster shutdown -- **Resource Cleanup**: Ensures all file descriptors and system resources are properly released -- **State Cleanup**: Clears runtime state and removes process tracking information - -## Security Model - -### Credential Isolation -The Gateway implements a **zero-exposure credential model** where: - -- Credentials are never written to disk in plain text -- Environment variables are injected directly into spawned processes -- No 
credential access from the developer's shell environment -- Automatic credential rotation when team configurations change - -### Process Isolation -Each MCP server runs in complete isolation with: - -- **Separate Process Space**: No shared memory or resources between MCP servers -- **Independent Environments**: Each process has its own environment variable set -- **Resource Boundaries**: CPU and memory limits to prevent resource exhaustion -- **Network Isolation**: Controlled network access based on server requirements - -## Enterprise Governance - -### Tool-Level Management -The Gateway transforms traditional MCP servers into enterprise-manageable tools by presenting each server as: - -- **Enable/Disable Controls**: Administrators can control which MCP servers are available -- **Status Monitoring**: Real-time visibility into process health and performance -- **Usage Analytics**: Tracking of tool usage patterns and resource consumption -- **Access Policies**: Team-based access control enforcement - -### Operational Controls -Enterprise administrators gain operational control through: - -- **Centralized Configuration**: All MCP server configurations managed through the cloud control plane -- **Policy Enforcement**: Automatic enforcement of team-based access policies -- **Audit Logging**: Comprehensive logging of all process management activities -- **Resource Management**: Monitoring and control of system resource usage - -## Performance Optimization - -### Resource Efficiency -The Gateway optimizes resource usage through the persistent background process model: - -- **Continuous Operation**: All processes run continuously, eliminating spawn/cleanup overhead -- **Shared Process Pool**: Multiple tool requests reuse the same persistent MCP server processes -- **Memory Stability**: Consistent memory usage patterns with no spawn/cleanup cycles -- **CPU Optimization**: Direct request routing to running processes minimizes CPU overhead - -### Response Time Optimization -Instant response times are achieved through: - -- **Zero Latency**: Tools are immediately available from already-running processes -- **Parallel Processing**: Concurrent handling of multiple tool requests across persistent processes -- **Persistent Connections**: Maintained stdio connections eliminate connection establishment overhead -- **Cache-as-Manifest**: Cached tool information serves as configuration manifest for instant startup - -## Error Handling and Recovery - -### Failure Detection -The Gateway monitors for various failure scenarios: - -- **Process Crashes**: Automatic detection of terminated or crashed processes -- **Communication Failures**: Identification of broken stdio communication channels -- **Timeout Conditions**: Detection of unresponsive processes -- **Resource Exhaustion**: Monitoring for memory or CPU limit violations - -### Recovery Strategies -When failures are detected, the Gateway implements: - -- **Automatic Restart**: Immediate restart of crashed processes with exponential backoff -- **Fallback Mechanisms**: Graceful degradation when processes are unavailable -- **Error Reporting**: Detailed error reporting to developers and administrators -- **Circuit Breaker**: Temporary disabling of problematic processes to prevent cascading failures - -## Integration Points - -The process management system integrates with other Gateway components: - -- **[MCP Configuration Management](/development/gateway/mcp)**: Uses team configurations to determine spawn parameters -- **[Caching 
System](/development/gateway/caching-system)**: Coordinates with tool discovery and caching mechanisms -- **[Project Structure](/development/gateway/structure)**: Implements the architecture defined in the core modules -- **HTTP Proxy Server**: Provides process information for request routing decisions - -## Monitoring and Observability - -### Process Metrics -The Gateway tracks comprehensive metrics including: - -- **Process Count**: Number of active MCP server processes -- **Resource Usage**: CPU, memory, and file descriptor consumption -- **Request Throughput**: Number of requests processed per process -- **Error Rates**: Frequency and types of process errors -- **Response Times**: Latency metrics for tool requests - -### Health Indicators -Key health indicators monitored include: - -- **Process Responsiveness**: Time to respond to health check requests -- **Memory Growth**: Detection of memory leaks or excessive memory usage -- **Error Patterns**: Identification of recurring error conditions -- **Resource Limits**: Proximity to configured resource boundaries - -This process management system ensures that the DeployStack Gateway can reliably handle enterprise workloads while maintaining the security, performance, and governance requirements of modern development teams. diff --git a/_DEPRECATED/gateway/security.mdx b/_DEPRECATED/gateway/security.mdx deleted file mode 100644 index 2148544..0000000 --- a/_DEPRECATED/gateway/security.mdx +++ /dev/null @@ -1,374 +0,0 @@ ---- -title: Gateway Security -description: Security implementation and best practices for the DeployStack Gateway CLI -sidebar: Security -icon: Lock ---- - -# Gateway Security - -The DeployStack Gateway implements multiple layers of security to protect user credentials, ensure secure communication, and maintain system integrity. This document covers the security architecture and implementation details. - -## Credential Storage Security - -### OS Keychain Integration - -The gateway uses the **Zowe Secrets SDK** for cross-platform secure credential storage, providing native integration with each operating system's secure storage mechanism: - -**Platform-specific storage:** -- **macOS**: Keychain Access using the Security.framework -- **Windows**: Credential Manager using CredWrite/CredRead APIs -- **Linux**: Secret Service API using libsecret - -The keychain integration stores credentials with the service name `deploystack-gateway` and uses the user's email address as the account identifier. 
This approach leverages the operating system's built-in security features including: - -- Hardware-backed encryption where available -- User authentication requirements for access -- Automatic credential isolation between users -- Integration with system security policies - -### Encrypted File Fallback - -When OS keychain access is unavailable or fails, credentials are stored in encrypted files as a secure fallback: - -**Encryption Details:** -- **Algorithm**: AES-256-CBC encryption -- **Key Derivation**: Fixed key with padding (development approach) -- **Initialization Vector**: Random 16-byte IV generated per encryption -- **Storage Format**: `IV:encrypted_data` in hexadecimal encoding - -**File Security:** -- **Location**: `~/.deploystack/credentials.enc` -- **Permissions**: `0o600` (owner read/write only) -- **Directory Permissions**: `0o700` (owner access only) - -### Account Management - -The gateway maintains a secure account tracking system: - -**Account List:** -- **Location**: `~/.deploystack/accounts.json` -- **Content**: Array of user email addresses (no sensitive data) -- **Purpose**: Enables credential discovery from keychain storage -- **Format**: JSON array with most recent accounts first - -**Security Considerations:** -- Contains only email addresses, no tokens or passwords -- Used for keychain credential lookup -- Automatically maintained during login/logout operations -- Cleaned up when credentials are cleared - -## Token Security - -### Access Token Format - -Access tokens use a custom JWT-like format designed for the DeployStack backend: - -**Token Structure:** -``` -. -``` - -**Components:** -- **Random Token**: 512-bit cryptographically secure random value -- **Payload**: Base64-encoded JSON containing user info, scopes, and expiration -- **Database Storage**: Argon2 hash of the complete token for verification - -**Security Features:** -- No client-side signature verification required -- Embedded user information reduces database lookups -- Cryptographically secure random component -- Server-side hash verification prevents tampering - -### Token Expiration - -**Access Tokens**: 1 week (604,800 seconds) -- Provides reasonable balance between security and usability -- Reduces frequent re-authentication during development -- Long enough for typical CLI usage patterns -- Short enough to limit exposure if compromised - -**Refresh Tokens**: 30 days -- Enables seamless token renewal -- Longer lifetime for better user experience -- Stored securely alongside access tokens -- Automatically used for token refresh - -### Token Validation - -The gateway implements comprehensive token validation: - -**Local Validation:** -- Checks token expiration with 5-minute buffer -- Validates token format and structure -- Prevents unnecessary API calls with expired tokens - -**Server Validation:** -- Backend verifies token hash using Argon2 -- Checks database expiration timestamps -- Validates user permissions and scopes - -## Network Security - -### HTTPS Enforcement - -The gateway enforces secure communication: - -**Production Requirements:** -- All API communications must use HTTPS -- SSL certificate validation is strictly enforced -- Self-signed certificates are rejected -- Insecure HTTP connections are blocked - -**Development Flexibility:** -- Localhost connections allow HTTP for development -- Self-signed certificates accepted for local testing -- Security warnings displayed for non-production servers -- Clear distinction between development and production modes - -### Request 
Security - -All API requests include comprehensive security headers: - -**Standard Headers:** -- **Authorization**: Bearer token authentication -- **Content-Type**: Proper content type specification -- **User-Agent**: Identifies the CLI client and version - -**Security Measures:** -- Bearer token authentication for all authenticated requests -- Proper content type validation -- Request timeout protection -- Automatic retry logic with exponential backoff - -### Callback Server Security - -The temporary OAuth callback server implements multiple security layers: - -**Network Security:** -- **Binding**: Only accepts connections from localhost/127.0.0.1 -- **Port**: Fixed port 8976 for consistency -- **Protocol**: HTTP (acceptable for localhost) - -**Request Validation:** -- **Path Validation**: Only `/oauth/callback` path is handled -- **Parameter Validation**: Required OAuth parameters are verified -- **State Validation**: CSRF protection through state parameter - -**Lifecycle Management:** -- **Auto-cleanup**: Server automatically shuts down after callback -- **Timeout Protection**: Configurable timeout (default: 5 minutes) -- **Resource Cleanup**: Proper cleanup of server resources - -## OAuth2 Security (PKCE) - -The gateway implements PKCE (Proof Key for Code Exchange) following RFC 7636: - -### Code Verifier Generation - -**Specifications:** -- **Length**: 128 characters (96 random bytes base64url encoded) -- **Entropy**: Cryptographically secure random generation -- **Format**: Base64url encoding without padding -- **Uniqueness**: New verifier generated for each authentication - -### Code Challenge Generation - -**Process:** -- **Input**: Code verifier string -- **Hashing**: SHA256 hash of the verifier -- **Encoding**: Base64url encoding of the hash -- **Method**: Always uses `S256` (SHA256) - -### State Parameter Security - -**Generation:** -- **Length**: 32 random bytes base64url encoded -- **Purpose**: CSRF protection -- **Validation**: Strict comparison with received state -- **Storage**: Temporarily stored during OAuth flow - -**PKCE Security Benefits:** -- Prevents authorization code interception attacks -- Eliminates need for client secrets in public clients -- Provides cryptographic proof of authorization request origin -- Protects against malicious applications - -## Error Handling Security - -### Secure Error Messages - -The gateway implements secure error handling principles: - -**User-Facing Messages:** -- Generic error descriptions to avoid information disclosure -- Helpful guidance without revealing system internals -- No exposure of tokens, credentials, or sensitive data -- Clear action items for users to resolve issues - -**Error Categories:** -- **Authentication Errors**: Login and token-related issues -- **Network Errors**: Connectivity and communication problems -- **Storage Errors**: Credential storage and retrieval issues -- **Authorization Errors**: Permission and scope-related problems - -### Timeout Protection - -All network operations include timeout protection: - -**Timeout Types:** -- **OAuth Callback**: 5-minute default timeout for user authorization -- **API Requests**: Reasonable timeouts for backend communication -- **Token Refresh**: Quick timeout for refresh operations -- **Browser Opening**: Timeout for automatic browser launch - -**Security Benefits:** -- Prevents indefinite resource consumption -- Limits exposure time for temporary servers -- Provides clear failure modes -- Enables graceful error recovery - -## File System Security - -### Directory 
Permissions - -The gateway creates secure directories for credential storage: - -**Directory Structure:** -- **Base Directory**: `~/.deploystack/` -- **Permissions**: `0o700` (owner read/write/execute only) -- **Creation**: Automatic creation with secure permissions -- **Platform Compatibility**: Works across Windows, macOS, and Linux - -### File Permissions - -**Credential Files:** -- **Encrypted Credentials**: `0o600` (owner read/write only) -- **Account List**: `0o644` (owner write, others read - no sensitive data) -- **Temporary Files**: Secure permissions and automatic cleanup - -### Secure File Cleanup - -Credential removal includes comprehensive cleanup: - -**Cleanup Process:** -- **Keychain Removal**: Credentials removed from OS keychain -- **File Deletion**: Encrypted files securely deleted -- **Account List**: Account entries removed from tracking -- **Directory Cleanup**: Empty directories removed when appropriate - -**Security Considerations:** -- Multiple cleanup attempts for reliability -- Graceful handling of partial failures -- No sensitive data left in temporary files -- Proper error handling during cleanup - -## Development vs Production Security - -### Environment Detection - -The gateway automatically detects and adapts to different environments: - -**Development Mode Indicators:** -- URLs containing `localhost` -- Non-HTTPS protocols for local servers -- Development-specific configuration options - -**Production Mode Requirements:** -- HTTPS enforcement for all communications -- Strict SSL certificate validation -- Limited error information exposure -- Enhanced security warnings - -### Security Warnings - -The CLI provides appropriate security warnings: - -**Development Warnings:** -- Alerts when connecting to non-production servers -- Warnings about HTTP usage in development -- Reminders about development-only features - -**Production Safeguards:** -- Blocks insecure connections -- Validates server certificates -- Limits debug information exposure - -## Security Best Practices - -### 1. Credential Protection -- Never log credentials or tokens in plain text -- Use OS keychain as primary storage mechanism -- Encrypt fallback storage with strong encryption -- Restrict file permissions to owner-only access -- Implement secure credential cleanup - -### 2. Network Security -- Enforce HTTPS in production environments -- Validate SSL certificates strictly -- Use secure headers in all requests -- Implement proper request timeouts -- Handle network errors gracefully - -### 3. OAuth2 Security -- Always use PKCE for authorization code flow -- Validate state parameters to prevent CSRF attacks -- Use cryptographically secure random values -- Implement proper token refresh logic -- Handle authorization errors appropriately - -### 4. Error Handling -- Avoid exposing sensitive data in error messages -- Log detailed errors for debugging (server-side only) -- Provide helpful user guidance without revealing internals -- Implement proper timeout handling -- Use structured error codes for programmatic handling - -### 5. Process Security -- Exit cleanly after operations complete -- Clean up temporary resources properly -- Handle interruption signals gracefully -- Validate all user inputs -- Implement proper resource management - -For OAuth2 flow details and implementation specifics, see the [Gateway OAuth Guide](/development/gateway/oauth). 
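-
-Tying several of the points above together, here is a minimal sketch of the encrypted-file fallback, assuming Node's crypto and fs modules: AES-256-CBC with a random 16-byte IV, `IV:encrypted_data` hex storage, and owner-only permissions under `~/.deploystack/`. The hard-coded key below is a placeholder (the Gateway's real key derivation differs), and the function name is illustrative.
-
-```typescript
-import { createCipheriv, randomBytes } from 'node:crypto';
-import { mkdirSync, writeFileSync } from 'node:fs';
-import { homedir } from 'node:os';
-import { join } from 'node:path';
-
-// Placeholder key for illustration only; AES-256 requires exactly 32 bytes.
-const KEY = Buffer.alloc(32, 'illustrative-key');
-
-export function writeEncryptedCredentials(credentialsJson: string): string {
-  const dir = join(homedir(), '.deploystack');
-  mkdirSync(dir, { recursive: true, mode: 0o700 }); // owner-only directory
-
-  const iv = randomBytes(16); // fresh 16-byte IV for every encryption
-  const cipher = createCipheriv('aes-256-cbc', KEY, iv);
-  const encrypted = Buffer.concat([cipher.update(credentialsJson, 'utf8'), cipher.final()]);
-
-  const file = join(dir, 'credentials.enc');
-  // Stored as IV:encrypted_data in hexadecimal, readable and writable by the owner only.
-  writeFileSync(file, `${iv.toString('hex')}:${encrypted.toString('hex')}`, { mode: 0o600 });
-  return file;
-}
-```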
- -## Security Auditing - -### Credential Audit - -**File System Checks:** -- Verify credential directory permissions (`~/.deploystack/`) -- Check encrypted file permissions (`credentials.enc`) -- Validate account list format (`accounts.json`) - -**Keychain Verification:** -- Check for stored credentials in OS keychain -- Verify service name and account identifiers -- Validate keychain access permissions - -### Network Security Audit - -**Connection Monitoring:** -- Monitor HTTPS usage in production -- Verify SSL certificate validation -- Check for secure header usage - -**Certificate Validation:** -- Verify SSL certificate chains -- Check certificate expiration dates -- Validate certificate authority trust - -### Security Monitoring - -**Authentication Events:** -- Monitor login success and failure rates -- Track token refresh patterns -- Identify unusual authentication behavior - -**Error Analysis:** -- Review authentication error patterns -- Monitor network connectivity issues -- Analyze credential storage problems - -The gateway's security implementation follows industry best practices and provides multiple layers of protection for user credentials and system integrity. diff --git a/_DEPRECATED/gateway/session-management.mdx b/_DEPRECATED/gateway/session-management.mdx deleted file mode 100644 index 36d2e95..0000000 --- a/_DEPRECATED/gateway/session-management.mdx +++ /dev/null @@ -1,320 +0,0 @@ ---- -title: Session Management -description: Cryptographically secure session lifecycle management for SSE and Streamable HTTP connections -sidebar: Session Management -icon: Key ---- - -import { Card, Cards } from 'fumadocs-ui/components/card'; -import { Key, Clock, Shield, Trash2 } from 'lucide-react'; - -# Session Management - -The DeployStack Gateway implements a robust session management system that provides cryptographically secure session handling for both persistent SSE connections and optional Streamable HTTP sessions while ensuring automatic cleanup and resource management. 
- -## Architecture Overview - -The session management system consists of multiple components working together to provide secure connections across different transport protocols: - -- **SessionManager**: Handles session lifecycle, validation, and SSE stream management -- **SSEHandler**: Manages Server-Sent Events connections and message routing -- **StreamableHTTPHandler**: Manages Streamable HTTP connections with optional session support -- **Transport Layer**: Intelligent routing between SSE and Streamable HTTP based on client capabilities - -## Core Components - - - } - title="Cryptographic Security" - > - 256-bit entropy session IDs with base64url encoding for maximum security - - - } - title="Lifecycle Management" - > - Automatic session creation, validation, activity tracking, and timeout handling - - - } - title="Connection Validation" - > - Session-bound SSE streams with comprehensive validation and error handling - - - } - title="Automatic Cleanup" - > - Resource cleanup on disconnect, timeout, or error conditions - - - -## Session ID Generation - -### Cryptographic Properties -- **Algorithm**: Node.js `crypto.randomBytes(32)` -- **Entropy**: 256 bits (32 bytes) of cryptographically secure randomness -- **Encoding**: Base64url for URL safety and compatibility -- **Format**: `L8B-xaw3HBZEftyo-JCrHoGWb_iikRZiwGfp9B71-GA` - -### Security Features -- **Unpredictability**: Cryptographically secure random number generation -- **Collision Resistance**: 2^256 possible values make collisions virtually impossible -- **URL Safety**: Base64url encoding ensures compatibility in query parameters -- **No Sequential Patterns**: Each session ID is completely independent - -### Validation Process -```typescript -private validateSessionId(sessionId: string): boolean { - if (!sessionId || typeof sessionId !== 'string') return false; - if (sessionId.length < 32) return false; - if (!/^[A-Za-z0-9_-]+$/.test(sessionId)) return false; - return true; -} -``` - -## Session Lifecycle - -### 1. Creation Phase -**Triggers**: -- SSE connection establishment via `GET /sse` -- Optional session creation for Streamable HTTP via `POST /mcp` with session headers - -**Process:** -1. Generate cryptographically secure session ID -2. Create session object with metadata -3. Associate with SSE stream (for SSE transport) or track session state (for Streamable HTTP) -4. Schedule automatic cleanup timer -5. Send endpoint event to client (SSE) or return session headers (Streamable HTTP) - -**Session Object:** -```typescript -interface SessionInfo { - id: string; - createdAt: number; - lastActivity: number; - sseStream: ServerResponse; - clientInfo?: { name: string; version: string }; - mcpInitialized: boolean; - requestCount: number; - errorCount: number; -} -``` - -### 2. Active Phase -**Duration**: Until timeout or disconnect - -**Activities:** -- **Activity Tracking**: Updated on every JSON-RPC request -- **Request Counting**: Incremented for each message processed -- **Error Tracking**: Incremented on processing failures -- **Client Info Storage**: MCP client metadata stored during initialization - -### 3. Cleanup Phase -**Triggers:** -- Client disconnect (`close` event) -- Connection error (`error` event) -- Stream finish (`finish` event) -- 30-minute inactivity timeout - -**Process:** -1. Close SSE stream if still open -2. Remove session from active sessions map -3. Log cleanup completion -4. 
Free associated resources - -## Connection Management - -### SSE Stream Handling -The session manager maintains direct references to SSE streams for efficient message delivery: - -```typescript -sendToSession(sessionId: string, event: { id?: string; event?: string; data: string }): boolean { - const session = this.sessions.get(sessionId); - if (!session || session.sseStream.destroyed) { - return false; - } - - try { - let sseData = ''; - if (event.id) sseData += `id: ${event.id}\n`; - if (event.event) sseData += `event: ${event.event}\n`; - sseData += `data: ${event.data}\n\n`; - - session.sseStream.write(sseData); - return true; - } catch (error) { - this.cleanupSession(sessionId); - return false; - } -} -``` - -### Connection State Tracking -- **Stream Health**: Monitors SSE stream status and handles disconnects -- **Activity Monitoring**: Tracks last activity timestamp for timeout detection -- **Error Handling**: Graceful handling of connection failures and cleanup -- **Resource Management**: Prevents memory leaks through automatic cleanup - -## Security Considerations - -### Session Security -- **Unpredictable IDs**: Impossible to guess or enumerate session IDs -- **Time-Limited**: Automatic expiration prevents indefinite access -- **Connection-Bound**: Sessions tied to specific SSE connections -- **Validation**: Comprehensive validation on every request - -### Timeout Management -- **Inactivity Timeout**: 30 minutes of inactivity triggers cleanup -- **Automatic Scheduling**: Cleanup scheduled at session creation -- **Activity Extension**: Timeout reset on each valid request -- **Resource Protection**: Prevents accumulation of stale sessions - -### Error Handling -- **Graceful Degradation**: Connection errors don't crash the system -- **Automatic Recovery**: Failed connections cleaned up automatically -- **Error Isolation**: Session errors don't affect other sessions -- **Logging**: Comprehensive error logging for debugging - -## Performance Optimization - -### Memory Management -- **Efficient Storage**: Sessions stored in Map for O(1) lookup -- **Automatic Cleanup**: Prevents memory leaks through timeout handling -- **Resource Tracking**: Monitors session count and resource usage -- **Garbage Collection**: Proper cleanup enables efficient garbage collection - -### Connection Efficiency -- **Persistent Connections**: SSE streams maintained for duration of session -- **Minimal Overhead**: Lightweight session objects with essential data only -- **Fast Lookup**: Session validation and retrieval optimized for speed -- **Batch Operations**: Efficient handling of multiple concurrent sessions - -## Monitoring and Debugging - -### Session Statistics -The session manager provides comprehensive statistics for monitoring: - -```typescript -getStatus() { - return { - activeCount: this.sessions.size, - sessions: Array.from(this.sessions.values()).map(session => ({ - id: session.id, - createdAt: session.createdAt, - lastActivity: session.lastActivity, - uptime: Date.now() - session.createdAt, - requestCount: session.requestCount, - errorCount: session.errorCount, - clientInfo: session.clientInfo, - mcpInitialized: session.mcpInitialized - })) - }; -} -``` - -### Logging and Observability -- **Session Creation**: Logged with session ID for tracking -- **Activity Updates**: Request and error counts tracked -- **Cleanup Events**: Cleanup reasons and timing logged -- **Error Conditions**: Detailed error logging for troubleshooting - -## Transport-Specific Session Handling - -### SSE Transport Sessions 
-SSE transport requires persistent sessions for connection management: - -- **Mandatory Sessions**: All SSE connections must have associated sessions -- **Stream Binding**: Sessions are bound to specific SSE streams -- **Real-time Communication**: Messages sent via SSE events in real-time -- **Connection Lifecycle**: Session lifecycle tied to SSE connection state - -### Streamable HTTP Transport Sessions -Streamable HTTP transport supports optional sessions for enhanced functionality: - -- **Optional Sessions**: Sessions can be used but are not required -- **Stateless Operation**: Supports both stateless and session-based operation -- **Header-Based**: Session IDs passed via `Mcp-Session-Id` header -- **Flexible Lifecycle**: Sessions can span multiple HTTP requests - -## Integration Points - -### SSE Handler Integration -The session manager works closely with the SSE handler: - -```typescript -// Session creation during SSE establishment -const sessionId = this.sessionManager.createSession(reply.raw); - -// Message routing through sessions -this.sseHandler.sendMessage(sessionId, response); - -// Error handling via sessions -this.sseHandler.sendError(sessionId, errorResponse); -``` - -### Streamable HTTP Handler Integration -The session manager provides optional session support for Streamable HTTP: - -```typescript -// Optional session validation for Streamable HTTP -const sessionId = request.headers['mcp-session-id']; -if (sessionId) { - const session = this.sessionManager.getSession(sessionId); - if (session) { - this.sessionManager.updateActivity(sessionId); - } -} - -// Stateless operation when no session provided -if (!sessionId) { - // Handle request without session context -} -``` - -### HTTP Proxy Integration -Session validation across both transports in the HTTP proxy: - -```typescript -// Transport-aware session handling -if (isSSETransport) { - // SSE requires session validation - const session = this.sessionManager.getSession(sessionId); - if (!session) { - throw new Error('Invalid session for SSE transport'); - } - this.sessionManager.updateActivity(sessionId); -} else if (isStreamableHTTP && sessionId) { - // Streamable HTTP optional session support - const session = this.sessionManager.getSession(sessionId); - if (session) { - this.sessionManager.updateActivity(sessionId); - } -} -``` - -## Best Practices - -### Session Lifecycle -- **Immediate Creation**: Sessions created immediately on SSE connection -- **Activity Tracking**: Update activity on every valid request -- **Graceful Cleanup**: Always clean up resources on session end -- **Error Handling**: Handle all error conditions gracefully - -### Security Practices -- **Validate Always**: Validate session ID on every request -- **Time Limits**: Enforce reasonable session timeouts -- **Resource Limits**: Monitor and limit concurrent sessions if needed -- **Audit Trail**: Log session activities for security monitoring - -### Performance Practices -- **Efficient Lookup**: Use Map for O(1) session lookup -- **Minimal Data**: Store only essential session data -- **Cleanup Scheduling**: Schedule cleanup to prevent resource leaks -- **Error Recovery**: Implement robust error recovery mechanisms - -The session management system provides a secure, efficient, and robust foundation for persistent SSE connections while maintaining enterprise-grade security and operational requirements. 
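-
-To make the session-ID and timeout behavior above concrete, a minimal sketch using Node's crypto module is shown below. The helper names and timer bookkeeping are illustrative assumptions, not the SessionManager's actual code; only the 32-byte/base64url ID format and the 30-minute inactivity timeout come from this document.
-
-```typescript
-import { randomBytes } from 'node:crypto';
-
-const SESSION_TIMEOUT_MS = 30 * 60 * 1000; // 30 minutes of inactivity
-
-// 32 random bytes (256 bits of entropy), base64url-encoded for URL safety.
-export function generateSessionId(): string {
-  return randomBytes(32).toString('base64url');
-}
-
-// Illustrative timeout bookkeeping: cleanup is scheduled at creation and reset on every valid request.
-const cleanupTimers = new Map<string, NodeJS.Timeout>();
-
-export function touchSession(sessionId: string, cleanup: (id: string) => void): void {
-  const existing = cleanupTimers.get(sessionId);
-  if (existing) clearTimeout(existing);
-  cleanupTimers.set(sessionId, setTimeout(() => {
-    cleanupTimers.delete(sessionId);
-    cleanup(sessionId);
-  }, SESSION_TIMEOUT_MS));
-}
-```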
diff --git a/_DEPRECATED/gateway/sse-transport.mdx b/_DEPRECATED/gateway/sse-transport.mdx deleted file mode 100644 index 64f7ec6..0000000 --- a/_DEPRECATED/gateway/sse-transport.mdx +++ /dev/null @@ -1,219 +0,0 @@ ---- -title: SSE Transport Implementation -description: Server-Sent Events transport layer for VS Code compatibility and dual-endpoint architecture -sidebar: SSE Transport -icon: Radio ---- - -import { Card, Cards } from 'fumadocs-ui/components/card'; -import { Radio, MessageSquare, Shield, Zap } from 'lucide-react'; - -# SSE Transport Implementation - -The DeployStack Gateway implements Server-Sent Events (SSE) transport to provide VS Code compatibility through a clean dual-endpoint architecture. - -## Architecture Overview - -The Gateway uses a **dual-endpoint architecture** for SSE-based communication: - -- **GET /sse**: Establishes SSE connection and returns session endpoint -- **POST /message**: Handles JSON-RPC requests with session context - -## Core Components - - - } - title="SSE Handler" - > - Manages Server-Sent Events connections, event formatting, and message routing - - - } - title="Session Manager" - > - Handles cryptographically secure session lifecycle with automatic cleanup - - - } - title="Dual Endpoints" - > - Supports both SSE and traditional HTTP clients with intelligent routing - - - } - title="Real-time Communication" - > - Persistent connections enable real-time bidirectional communication - - - -## Connection Flow - -### 1. SSE Connection Establishment -```http -GET /sse HTTP/1.1 -Accept: text/event-stream -``` - -**Response:** -``` -HTTP/1.1 200 OK -Content-Type: text/event-stream -Cache-Control: no-cache -Connection: keep-alive - -event: endpoint -data: /message?session=L8B-xaw3HBZEftyo-JCrHoGWb_iikRZiwGfp9B71-GA -``` - -### 2. Session-Based JSON-RPC -```http -POST /message?session=L8B-xaw3HBZEftyo-JCrHoGWb_iikRZiwGfp9B71-GA -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "id": 1, - "method": "initialize", - "params": { - "clientInfo": {"name": "vscode", "version": "1.0.0"}, - "protocolVersion": "2025-03-26" - } -} -``` - -**HTTP Response:** -```json -{"status": "accepted", "messageId": 1} -``` - -**SSE Response:** -``` -id: msg-1753710728979-95czkmmq8 -event: message -data: {"jsonrpc":"2.0","id":1,"result":{"serverInfo":{"name":"deploystack-gateway","version":"1.0.0"},"protocolVersion":"2025-03-26","capabilities":{"tools":{"listChanged":false}}}} -``` - -## Session Management - -### Session ID Generation -- **Algorithm**: Cryptographically secure random bytes (32 bytes = 256 bits) -- **Encoding**: Base64url for URL safety -- **Format**: `L8B-xaw3HBZEftyo-JCrHoGWb_iikRZiwGfp9B71-GA` -- **Validation**: Length and character set validation - -### Session Lifecycle -1. **Creation**: Generated on SSE connection establishment -2. **Validation**: Verified on each JSON-RPC request -3. **Activity Tracking**: Updated on every message -4. **Timeout**: 30-minute inactivity timeout -5. 
**Cleanup**: Automatic resource cleanup on disconnect - -### Security Features -- **Cryptographic Security**: 256-bit entropy prevents session prediction -- **Automatic Expiration**: Sessions expire after 30 minutes of inactivity -- **Connection Validation**: Session tied to specific SSE stream -- **Resource Cleanup**: Automatic cleanup prevents memory leaks - -## Message Routing - -### Supported Methods -The SSE transport handles all standard MCP protocol methods: - -- **initialize**: Gateway initialization with capabilities -- **notifications/initialized**: Client initialization confirmation -- **tools/list**: Returns available MCP servers as toggleable tools -- **tools/call**: Executes MCP server management actions -- **resources/list**: Returns empty resources (handled locally) -- **resources/templates/list**: Returns empty templates (handled locally) -- **prompts/list**: Returns empty prompts (handled locally) - -### Error Handling -Errors are sent via SSE with proper JSON-RPC error format: - -``` -id: err-1753710744580-061x9gi8x -event: error -data: {"jsonrpc":"2.0","error":{"code":-32603,"message":"Internal server error","data":"Server not available"},"id":2} -``` - -## VS Code Integration - -### Expected Client Behavior -1. **Connection**: Client connects to `http://localhost:9095/sse` via SSE -2. **Endpoint Discovery**: Receives session endpoint via `endpoint` event -3. **Initialization**: Sends `initialize` request to session endpoint -4. **Tool Discovery**: Calls `tools/list` to discover available MCP servers -5. **Tool Management**: Uses `tools/call` to enable/disable/status MCP servers - -### Configuration -VS Code MCP client configuration: -```json -{ - "mcpServers": { - "deploystack": { - "url": "http://localhost:9095/sse" - } - } -} -``` - -## Performance Considerations - -### Connection Management -- **Keep-Alive**: Persistent SSE connections reduce connection overhead -- **Heartbeat**: Optional heartbeat messages maintain connection health -- **Timeout Handling**: Automatic cleanup prevents resource exhaustion - -### Memory Management -- **Session Cleanup**: Automatic cleanup on disconnect or timeout -- **Stream Management**: Proper SSE stream lifecycle management -- **Error Recovery**: Graceful handling of connection failures - -### Client Detection -The Gateway detects SSE clients based on: -- **Accept Header**: `text/event-stream` indicates SSE client -- **User-Agent**: VS Code, Cursor, or other MCP clients -- **Request Method**: GET for SSE establishment, POST for session-based messaging - -## Implementation Details - -### SSE Event Format -All SSE events follow this structure: -``` -id: -event: -data: - -``` - -### Event Types -- **endpoint**: Session endpoint URL -- **message**: JSON-RPC response -- **error**: JSON-RPC error response -- **notification**: Server notifications - -### Connection Cleanup -Cleanup triggers include: -- Client disconnect (`close` event) -- Connection error (`error` event) -- Stream finish (`finish` event) -- Session timeout (30 minutes) - -## Security Considerations - -### Session Security -- **Unpredictable IDs**: Cryptographically secure generation -- **Time-Limited**: Automatic expiration prevents indefinite access -- **Connection-Bound**: Sessions tied to specific SSE connections - -### Network Security -- **Localhost Only**: Server binds only to localhost interface -- **No External Access**: No exposure to external networks -- **CORS Configuration**: Restricted to authorized origins - -The SSE transport implementation provides a 
robust, secure, and performant foundation for VS Code integration with clean dual-endpoint architecture. diff --git a/_DEPRECATED/gateway/structure.mdx b/_DEPRECATED/gateway/structure.mdx deleted file mode 100644 index cae4dd9..0000000 --- a/_DEPRECATED/gateway/structure.mdx +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: Gateway Project Structure -description: Directory structure and architecture of the DeployStack Gateway CLI -sidebar: Project Structure -icon: FolderTree ---- - -# Gateway Project Structure - -The DeployStack Gateway is structured as a TypeScript CLI application using Commander.js with a modular architecture designed for maintainability and extensibility. - -## Directory Overview - -```bash -services/gateway/ -├── src/ # Source code -│ ├── index.ts # CLI entry point and command registration -│ ├── commands/ # Command implementations -│ │ ├── login.ts # Authentication with cloud.deploystack.io -│ │ ├── start.ts # Start the gateway server -│ │ ├── refresh.ts # Root-level refresh command -│ │ └── ... # Other CLI commands -│ ├── core/ # Core business logic -│ │ ├── auth/ # Authentication handling -│ │ ├── server/ # HTTP proxy server with SSE support -│ │ ├── process/ # MCP process management -│ │ ├── mcp/ # MCP configuration management -│ │ └── config/ # Configuration utilities -│ ├── services/ # Shared business services -│ │ ├── refresh-service.ts # Shared MCP configuration refresh logic -│ │ ├── server-start-service.ts # Centralized server startup logic -│ │ └── ... # Other shared services -│ ├── utils/ # Shared utilities -│ │ ├── logger.ts # Centralized logging -│ │ └── ... # Other utilities -│ └── types/ # TypeScript type definitions -├── bin/gateway.js # Executable entry point -├── dist/ # Compiled JavaScript (gitignored) -├── tests/ # Test suite -├── package.json # Dependencies and scripts -├── tsconfig.json # TypeScript configuration -└── README.md # Gateway-specific documentation -``` - -## Key Design Decisions - -### Modular Architecture -The codebase is organized into distinct modules: -- **Commands**: User-facing CLI commands -- **Core**: Business logic separated by domain -- **Services**: Shared business services for cross-command functionality -- **Utils**: Reusable utilities and helpers - -### Process Management -The `process/` module handles the complexity of: -- Managing persistent background MCP server processes -- Runtime state tracking and team isolation -- Managing stdio communication with running processes -- Injecting environment variables securely at startup -- Graceful process lifecycle management following MCP protocol - -### Security First -- Credentials are never stored in plain text -- All sensitive data is encrypted at rest -- Environment injection happens at runtime only - -### Developer Experience -- Intuitive command structure (`deploystack login`, `deploystack start`, `deploystack mcp`) -- Rich CLI feedback with colors and progress indicators -- Clear error messages with actionable solutions -- MCP server management and tool discovery capabilities - -## Module Responsibilities - -### Commands Layer -Each command file exports a function that registers itself with Commander.js: -```typescript -export function registerLoginCommand(program: Command) { - program - .command('login') - .description('Authenticate with DeployStack cloud') - .action(async () => { - // Implementation - }); -} -``` - -### Core Modules - -**auth/**: Handles OAuth flow and token management -- Secure storage of access tokens -- Automatic token refresh -- Session management - 
-**server/**: HTTP proxy server with dual transport support -- **proxy.ts**: Dual-endpoint routing (GET /sse for SSE connections, POST /message for session-based JSON-RPC) -- **session-manager.ts**: Cryptographically secure session lifecycle management -- **sse-handler.ts**: Server-Sent Events implementation for VS Code compatibility - -**process/**: MCP server process lifecycle -- Persistent background process management -- Runtime state tracking with team isolation -- Stdio transport implementation for continuous communication -- Graceful lifecycle management following MCP protocol -- Enterprise management layer (MCP servers as toggleable tools) - -**mcp/**: Configuration management and processing -- Team configuration synchronization with cloud control plane -- Raw API data storage and processed config generation -- Secure credential injection and environment variable management -- MCP server tool discovery and capability exploration -- Team-aware tool caching system as detailed in [Caching System](/development/gateway/caching-system) -- Installation method processing for correct server spawning - -**services/**: Shared business services for cross-command functionality -- **refresh-service.ts**: Centralized MCP configuration refresh logic used by both `deploystack refresh` and `deploystack mcp --refresh` commands -- Eliminates code duplication while maintaining identical behavior across commands -- Provides consistent error handling and user feedback - -**utils/**: Shared utilities and centralized services -- **tool-discovery-manager.ts**: Centralized tool discovery eliminating code duplication across commands -- Logging, configuration, and encryption utilities -- Progress indicators and error handling - -**config/**: Configuration utilities and defaults -- Default gateway settings and validation -- Configuration file management -- Environment-specific overrides - -### Build Output -The TypeScript code is compiled to CommonJS for maximum compatibility: -- Source maps for debugging -- Minified for production -- External dependencies preserved diff --git a/_DEPRECATED/gateway/teams.mdx b/_DEPRECATED/gateway/teams.mdx deleted file mode 100644 index b31e18a..0000000 --- a/_DEPRECATED/gateway/teams.mdx +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: Team Context in Gateway CLI -description: Understanding team-scoped operations and MCP server installations in the DeployStack Gateway CLI -sidebar: Team Context -icon: Users ---- - -# Team Context in Gateway CLI - -The DeployStack Gateway CLI is fundamentally **team-centric**. All MCP server installations and operations are scoped to the currently selected team, reflecting the architectural design where teams serve as isolated workspaces for deployment resources. - -## Team Selection Architecture - -### Secure Storage Location - -Team selection is stored securely alongside authentication credentials using: -- **Primary**: OS keychain (macOS Keychain, Windows Credential Manager, Linux Secret Service) -- **Fallback**: Encrypted file at `~/.deploystack/credentials.enc` - -The selected team information is part of the `StoredCredentials` interface: - -```typescript -interface StoredCredentials { - // ... other credential fields - selectedTeam?: { - id: string; // Team ID for API operations - name: string; // Team name for display - }; -} -``` - -### Automatic Default Selection - -When users authenticate via `deploystack login`, the CLI automatically: - -1. Fetches user's teams from `/api/teams/me` -2. Identifies the default team (`is_default: true`) -3. 
Sets it as the selected team in secure storage -4. Confirms selection to the user - -### Team Switching - -Users can change their active team context using: - -```bash -deploystack teams --switch -``` - -This updates the stored team selection, affecting all subsequent CLI operations. - -## MCP Server Installation Scope - -### Database Architecture - -MCP server installations are stored in the `mcpServerInstallations` table with team-based foreign keys: - -```sql -mcpServerInstallations: - - team_id (FK to teams.id) -- Scopes installation to specific team - - server_id (FK to mcpServers.id) -- References the MCP server definition - - user_environment_variables -- Team-specific encrypted credentials -``` - -### Team-Scoped Operations - -All MCP-related CLI operations operate within the selected team context: - -- **Credential Injection**: Environment variables are team-specific -- **Server Availability**: Only team's installed servers are accessible -- **Configuration Sync**: Gateway downloads only selected team's configurations -- **Process Management**: Spawned MCP processes use team-scoped credentials - -> **MCP Configuration Management**: For detailed information about how the Gateway downloads, processes, and stores MCP server configurations from the backend API, see the [Gateway MCP Configuration documentation](/development/gateway/mcp). - -### Cross-Team Isolation - -The architecture ensures complete isolation between teams: - -- Team A cannot access Team B's MCP server installations -- Credentials are encrypted per team context -- No cross-team data leakage in local processes - -## CLI Implementation Details - -### Storage Methods - -The `CredentialStorage` class provides team selection methods: - -- `updateSelectedTeam(teamId, teamName)` - Updates selected team -- `getSelectedTeam()` - Retrieves current selection -- Team data is persisted with other authentication credentials - -### Team-Aware Commands - -Key commands that depend on team context: - -- `deploystack start` - Starts gateway for selected team's MCP servers -- `deploystack teams` - Shows selection status and switching options -- Future MCP management commands will operate on selected team - -### API Integration - -Team context affects backend communication: - -- All MCP-related API calls include team context -- Configuration sync requests are team-scoped -- Credential retrieval is filtered by team membership - -## Developer Guidelines - -### Working with Team Context - -When developing CLI features that interact with MCP servers: - -1. **Always check team selection** before MCP operations -2. **Use team ID for API calls** (not just team name) -3. **Scope local storage** by team when caching configurations -4. **Validate team access** before exposing functionality - -### Future Considerations - -The team context system is designed to support: - -- Multi-team development workflows -- Team-specific MCP server catalogs -- Role-based access to different tool sets -- Enterprise governance and audit trails - -For complete team management information, see the [Teams documentation](/teams). 
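As a concrete illustration of the developer guidelines above, a team-scoped command can resolve the selected team before doing any MCP work. This is a minimal sketch only: `getSelectedTeam()` follows the `CredentialStorage` method named earlier, while the `TeamSelectionStorage` interface, the `mcp` command wiring, and the error message are hypothetical.

```typescript
import { Command } from 'commander';

// Minimal view of the team-selection methods described above; the real
// CredentialStorage class also manages tokens and the encrypted credential file.
interface TeamSelectionStorage {
  getSelectedTeam(): Promise<{ id: string; name: string } | undefined>;
}

// Guard shared by team-scoped commands: resolve the selected team before
// touching any MCP configuration, and fail with an actionable message otherwise.
async function requireSelectedTeam(storage: TeamSelectionStorage) {
  const team = await storage.getSelectedTeam();
  if (!team) {
    throw new Error('No team selected. Run "deploystack teams --switch" first.');
  }
  return team;
}

// Hypothetical team-scoped command, following the Commander registration
// pattern used throughout the gateway.
export function registerMcpListCommand(program: Command, storage: TeamSelectionStorage) {
  program
    .command('mcp')
    .description('List MCP servers installed for the selected team')
    .action(async () => {
      const team = await requireSelectedTeam(storage);
      // Subsequent API calls are scoped by team.id, never by team name.
      console.log(`MCP servers for team ${team.name} (${team.id})`);
    });
}
```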
- -## Error Handling - -CLI commands should gracefully handle team context issues: - -- **No team selected**: Prompt user to select a team -- **Invalid team**: Guide user to available teams -- **Team access revoked**: Require re-authentication -- **Team deleted**: Clear selection and prompt for new team - -This team-centric design ensures that the Gateway CLI operates as a secure, isolated workspace aligned with organizational boundaries while maintaining a smooth developer experience. diff --git a/_DEPRECATED/gateway/tech-stack.mdx b/_DEPRECATED/gateway/tech-stack.mdx deleted file mode 100644 index 1564320..0000000 --- a/_DEPRECATED/gateway/tech-stack.mdx +++ /dev/null @@ -1,264 +0,0 @@ ---- -title: Gateway Tech Stack -description: CLI framework and npm packages used in the DeployStack Gateway -sidebar: Tech Stack -icon: Package ---- - -# Gateway Tech Stack - -The DeployStack Gateway is built with a carefully selected set of Node.js packages that prioritize developer experience, security, and performance. - -## Core Framework - -### Commander.js -Our CLI framework of choice for building the gateway's command-line interface. - -**Why Commander?** -- Battle-tested by major CLIs (Vue CLI, Create React App) -- Excellent TypeScript support -- Simple yet powerful API -- Extensive documentation and community - -### Fastify -High-performance HTTP server framework for the proxy server implementation. - -**Why Fastify?** -- Excellent TypeScript support with built-in type definitions -- High performance with low overhead -- Rich plugin ecosystem for middleware -- Built-in JSON schema validation -- Comprehensive logging and error handling - -## UI and Feedback - -### Chalk -Terminal string styling for colorful and readable output. - -**Features:** -- Semantic color methods for different message types -- Support for 256 colors and Truecolor -- Auto-detects color support -- Respects NO_COLOR environment variable - -### Ora -Elegant terminal spinners for long-running operations. - -**Use Cases:** -- Authentication flows -- Configuration syncing -- Process spawning feedback -- Network operations - -### CLI-Progress -Customizable progress bars for detailed operation feedback. - -**Features:** -- Single and multi-bar support -- Customizable formats and styles -- Ideal for file operations and bulk processing - -## Interactive Components - -### Inquirer.js -Interactive command line prompts for user input. - -**Prompt Types:** -- Text input for credentials -- Password input with masking -- Selection lists for configuration options -- Confirmations for destructive operations - -## Development Tools - -### TypeScript -Full TypeScript support for type safety and better developer experience. - -**Benefits:** -- Type safety catches errors at compile time -- Better IDE support with autocomplete -- Self-documenting code through types -- Easier refactoring - -### tsx -Run TypeScript files directly without compilation during development. - -### Build Tool - tsup -Fast TypeScript bundler powered by esbuild. - -**Why tsup?** -- Lightning fast builds using esbuild -- Zero config with sensible defaults -- Built-in TypeScript support -- Generates CommonJS and ESM outputs - -**Configuration Example:** -```typescript -export default defineConfig({ - entry: ['src/index.ts'], - format: ['cjs'], - target: 'node16', - clean: true, - sourcemap: true, -}); -``` - -## Utility Libraries - -### File System Operations - -**fs-extra** -Enhanced file system module with promise support and extra methods. 
-- Includes all standard fs methods -- Adds useful methods like `copy`, `remove`, `ensureDir` -- Promise-based API for cleaner async code -- Essential for team-aware tool caching system - -**glob** -File pattern matching using shell-style wildcards. -- Find files matching patterns like `*.ts` or `src/**/*.js` -- Essential for batch operations - -### Process Management - -**execa** -Better child process execution for spawning MCP servers. -- Improved error handling -- Promise-based interface -- Better Windows support -- Automatic escaping of arguments - -**ps-tree** -Process tree management for proper cleanup. -- Find all child processes of a parent -- Ensure clean shutdown of spawned MCP servers - -### Configuration - -**cosmiconfig** -Flexible configuration file loader. -- Searches for config in multiple formats (.json, .yml, .js) -- Supports `.deploystackrc`, `deploystack.config.js`, package.json -- Follows common patterns used by ESLint, Prettier, etc. - -**dotenv** -Environment variable loading from .env files. -- Load configuration from `.env` files -- Support for different environments (.env.local, .env.production) - -### Security - -**keytar** -Native OS keychain integration for secure credential storage. -- macOS: Keychain Access -- Windows: Credential Manager -- Linux: Secret Service API -- No plain text passwords on disk - -**crypto-js** -Additional encryption for sensitive data. -- AES encryption for config files -- Secure hashing for verification - -**crypto (Node.js built-in)** -Native cryptographic functionality for session management. -- Cryptographically secure random bytes generation -- Session ID generation with 256-bit entropy -- Base64url encoding for URL-safe session identifiers - -### Networking - -**axios** -Feature-rich HTTP client for cloud API communication. -- Interceptors for auth token injection -- Automatic retry logic -- Request/response transformation - -**http-proxy** -HTTP proxy for routing MCP requests to appropriate servers. -- Route requests based on MCP server name -- Inject authentication headers -- Handle stdio-to-HTTP translation - -## Testing Stack - -**vitest** -Fast unit testing framework with native TypeScript support. -- Compatible with Jest API -- Built-in TypeScript support -- Extremely fast execution - -**supertest** -HTTP assertion library for testing the proxy server. -- Test HTTP endpoints -- Assert response status, headers, and body -- Works seamlessly with vitest - -**msw (Mock Service Worker)** -API mocking for integration tests. -- Mock cloud API responses -- Test error scenarios -- Intercept HTTP requests - -## Why This Stack? - -### 1. **Developer Experience** -- Commander provides intuitive command structure -- Chalk + Ora + CLI-Progress create rich, informative output -- TypeScript ensures type safety and better IDE support - -### 2. **Security First** -- Keytar integrates with OS keychains (macOS Keychain, Windows Credential Manager, Linux Secret Service) -- Crypto-js for additional encryption layers -- No plain text credential storage - -### 3. **Performance** -- tsup/esbuild for fast builds -- Minimal dependencies for quick startup -- Lazy loading of heavy operations - -### 4. **Cross-Platform** -- All packages support Windows, macOS, and Linux -- Platform-specific features handled gracefully - -### 5. 
**Enterprise Ready** -- Comprehensive error handling -- Detailed logging capabilities -- Extensible architecture - -## Installation - -All dependencies are managed through npm: - -```bash -cd services/gateway -npm install -``` - -## Development Workflow - -```bash -# Development with hot reload -npm run dev - -# Run TypeScript directly -npm run start:dev - -# Build for production -npm run build - -# Run tests -npm test -``` - -## Package Selection Criteria - -When adding new packages, we consider: - -1. **Security**: Regular updates, no known vulnerabilities -2. **Maintenance**: Active development, responsive maintainers -3. **Size**: Minimal impact on CLI startup time -4. **Compatibility**: Works across all target platforms -5. **TypeScript**: First-class TypeScript support preferred - -This tech stack provides a solid foundation for building a secure, performant, and user-friendly CLI that meets enterprise requirements while maintaining excellent developer experience. diff --git a/_DEPRECATED/gateway/testing.mdx b/_DEPRECATED/gateway/testing.mdx deleted file mode 100644 index ef12d8b..0000000 --- a/_DEPRECATED/gateway/testing.mdx +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Gateway Testing -description: Testing commands and workflows for the DeployStack Gateway -sidebar: Testing -icon: TestTube ---- - -# Gateway Testing - -The DeployStack Gateway includes testing infrastructure for ensuring reliability and quality of the CLI application. - -## Test Commands - -### Unit Tests -```bash -npm run test:unit -``` -Currently displays a placeholder message as tests are not yet implemented. - -### Linting -```bash -npm run lint -``` -Runs ESLint with automatic fixing of common issues. Essential for maintaining code quality. - -### Build Verification -```bash -npm run build -``` -Compiles TypeScript to JavaScript and verifies the build process. - -## Development Workflow - -### Local Development -```bash -npm run dev -``` -Starts the gateway in development mode with hot reload using `ts-node-dev`. - -### Manual Testing -```bash -npm run link -``` -Links the local gateway for testing CLI commands globally. - -After linking, test commands directly: -```bash -deploystack version -deploystack status -deploystack --help -``` - -## Release Testing - -### Pre-release Checks -```bash -npm run release -``` -Runs linting checks before creating a release through `release-it`. 
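Unit tests are currently a placeholder (see Test Commands above), and vitest is the planned framework (see the Testing Stack in the tech stack and Future Testing Implementation below). A rough sketch of what a command-level unit test might look like once implemented; `registerVersionCommand` and its output are invented for illustration:

```typescript
import { describe, it, expect, vi } from 'vitest';
import { Command } from 'commander';

// Hypothetical command registration, mirroring the register*Command pattern
// from the project structure documentation.
function registerVersionCommand(program: Command): void {
  program
    .command('version')
    .description('Print the gateway version')
    .action(() => {
      console.log('0.0.0-test');
    });
}

describe('version command', () => {
  it('parses and runs the version action', async () => {
    const program = new Command();
    const log = vi.spyOn(console, 'log').mockImplementation(() => {});

    registerVersionCommand(program);
    await program.parseAsync(['node', 'deploystack', 'version']);

    expect(log).toHaveBeenCalledWith('0.0.0-test');
    log.mockRestore();
  });
});
```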
- -### CI/CD Testing -The GitHub Actions workflow automatically runs: -- Build verification -- Linting checks -- Unit tests (when implemented) - -## Testing Strategy - -### CLI-Specific Testing -- **Command validation**: Ensure all commands parse correctly -- **Output formatting**: Verify chalk styling and user messages -- **Error handling**: Test failure scenarios and exit codes -- **Cross-platform**: Validate behavior on Windows, macOS, and Linux - -### Integration Points -- **Authentication flows**: Test login/logout workflows -- **Configuration management**: Verify config file operations -- **Process management**: Test MCP server spawning and cleanup -- **Proxy functionality**: Validate HTTP proxy routing - -## Future Testing Implementation - -The gateway will include comprehensive testing using: -- **vitest** for unit testing -- **supertest** for HTTP endpoint testing -- **msw** for API mocking -- Cross-platform testing in CI/CD - -## Development Tips - -### Quick Validation -```bash -# Check command structure -deploystack --help - -# Verify version info -deploystack version - -# Test error handling -deploystack invalid-command -``` - -### Build and Test Cycle -```bash -npm run lint # Fix code style issues -npm run build # Verify compilation -npm run link # Test locally -``` - -This testing approach ensures the gateway maintains high quality while remaining focused on the essential CLI functionality. \ No newline at end of file diff --git a/app/[[...slug]]/page.tsx b/app/[[...slug]]/page.tsx deleted file mode 100644 index 8984065..0000000 --- a/app/[[...slug]]/page.tsx +++ /dev/null @@ -1,129 +0,0 @@ -import type { Metadata } from 'next'; -import { DocsLayout } from 'fumadocs-ui/layouts/docs'; -import { DocsPage, DocsBody } from 'fumadocs-ui/page'; -import { notFound } from 'next/navigation'; -import { source } from '@/lib/source'; -import { generatePageMetadata, getCanonicalUrl } from '@/lib/seo-utils'; -import { getFinalPageTitle } from '@/lib/h1-extractor'; -import { readFile } from 'fs/promises'; -import { getMDXComponents } from '@/mdx-components'; -import { docsOptions } from '../layout.config'; -import { generateTechArticleSchema, generateBreadcrumbSchema, combineSchemas } from '@/lib/structured-data'; - -export default async function Page({ - params, -}: { - params: Promise<{ slug?: string[] }>; -}) { - const { slug } = await params; - const page = source.getPage(slug); - - if (!page) { - notFound(); - } - - const MDX = page.data.body; - - // Generate structured data for all pages with content - let structuredData = ''; - if (slug && slug.length > 0) { - const slugString = slug.join('/'); - const url = `https://deploystack.io/docs/${slugString}`; - - // Get the final title (same logic as in generateMetadata) - let finalTitle = page.data.title; - try { - const filePath = page.file.path; - const absolutePath = `./docs/${filePath}`; - const rawContent = await readFile(absolutePath, 'utf-8'); - finalTitle = getFinalPageTitle(rawContent, page.data.title); - } catch (error) { - finalTitle = page.data.title; - } - - const articleSchema = generateTechArticleSchema({ - title: finalTitle, - description: page.data.description, - slug, - url, - }); - - const breadcrumbSchema = generateBreadcrumbSchema(slug); - structuredData = combineSchemas(articleSchema, breadcrumbSchema); - } - - // Always use the unified source pageTree that includes all sections - // Instead of switching between different trees, show all sections together - const pageTree = source.pageTree; - - // Always use 
DocsLayout with sidebar for all pages including root - return ( - <> - {structuredData && ( -