diff --git a/AGENTS.md b/AGENTS.md index a98df5bf..83d3e306 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,107 +1,815 @@ -# Xyzen AGENTS.md - -This file provides guidance for AI Agents and LLMs working on the Xyzen codebase. Follow these instructions to ensure consistency, quality, and maintainability. - -## Project Overview - -Xyzen is an AI Laboratory Server - a full-stack application with a Python FastAPI backend and React/TypeScript frontend. The project uses containerized development with Docker and modern development practices. - -## Code Style & Philosophy - -**Goal:** Write professional, scalable, and clean code. - -### Frontend (`/web`) - -- **Architecture:** Follow the **Layered Architecture**: - 1. **Component** (`components/`): UI rendering only. No business logic or direct API calls. - 2. **Hook** (`hooks/`): Encapsulates capabilities, subscribes to Store. - 3. **Core** (`core/`) ⭐: **The Heart.** Contains all business logic, flow orchestration, and side effects. - 4. **Service** (`service/`): Pure HTTP/WebSocket requests. - 5. **Store** (`store/`): Client-side state (UI/Session) using **Zustand**. - 6. **Query** (`hooks/queries/`): Server state caching using **TanStack Query**. -- **State Management Rules:** - - **Server State:** Use TanStack Query. Do NOT duplicate server data into Zustand. - - **Client State:** Use Zustand for UI state (modals, theme, active panel). - - **Data Flow:** Component → Query Hook → Service. (Bypass Store for data fetching). -- **UI Library:** Use **shadcn/ui** components located in `src/components/ui`. Use Tailwind CSS for styling. -- **Type Definitions:** - - Backend types: `service//types.ts` - - Slice types: `store/slices//types.ts` - - Global types: `types/` - -### Backend (`/service`) - -- **Framework:** FastAPI with Uvicorn. -- **Language:** Python 3.12. Use modern union syntax (`str | None` instead of `Optional[str]`). -- **Database:** **SQLModel** with PostgreSQL. 
- - **Constraint:** Use a **No-Foreign-Key** model approach. - - **Pattern:** Use the **Repository Pattern** (`repos/`) for all database operations. -- **AI/Agents:** - - **Framework:** **LangGraph** for multi-agent workflows and state management. - - **Integration:** LangChain-compatible provider system. - - **MCP:** FastMCP integration for Model Context Protocol. -- **Testing:** - - **Runner:** `uv run pytest`. - - **Structure:** - - `tests/unit/`: Logic tests without external dependencies. - - `tests/integration/`: API and database integration tests. - - **Style:** - - Use `async_client` fixture for API tests. - - Use `db_session` fixture for database operations. - - Ensure tests clean up created resources or rely on database transaction rollbacks. - -## Project Structure - -### Backend (`/service`) - -``` -service/ -├── app/ -│ ├── main.py # Entry point -│ ├── agents/ # Builtin Agents -│ ├── api/v1/ # API Endpoints -│ ├── core/ # Business Logic, Chat & LangChain logic -│ ├── models/ # SQLModel definitions (No FKs) -│ ├── repos/ # Database Repositories -│ └── schemas/ # Pydantic Schemas -├── tests/ # Tests -├── migrations/ # Alembic Migrations -├── alembic.ini -└── pyproject.toml -``` - -### Frontend (`/web`) +# Xyzen Developer Guide + +This comprehensive guide provides architectural guidance for AI Agents and LLMs working on the Xyzen codebase. Follow these instructions to ensure consistency, quality, and maintainability across all system components. + +## 1. Project Overview & Philosophy + +### 1.1 Project Mission +Xyzen is an **AI Laboratory Server** - a sophisticated full-stack application designed for advanced AI research and development. 
It provides: +- Multi-agent LLM orchestration with LangGraph +- Real-time collaborative chat interfaces +- Advanced document processing capabilities +- Extensible plugin architecture via Model Context Protocol (MCP) +- Enterprise-grade file processing and storage systems + +### 1.2 Architecture Philosophy +**Goal:** Build a professional, scalable, and maintainable AI platform. + +**Core Principles:** +- **Layered Architecture**: Clear separation of concerns across all layers +- **Agent-First Design**: AI agents as first-class citizens in the system +- **Real-Time by Default**: WebSocket-driven communication for responsive UX +- **No-Foreign-Key Database**: Flexible data relationships without rigid constraints +- **Plugin Extensibility**: MCP-based tool and capability extension +- **Type Safety**: Comprehensive TypeScript and Python typing + +## 2. System Architecture + +### 2.1 Backend Architecture + +``` +service/app/ +├── agents/ # LangGraph-based AI agents +│ ├── base_graph_agent.py # Abstract base for graph agents +│ ├── base_agent.py # Flexible agent execution patterns +│ └── [specific_agents]/ # Domain-specific agent implementations +├── api/ # FastAPI REST and WebSocket endpoints +│ ├── v1/ # Versioned REST APIs +│ └── ws/ # WebSocket endpoints (real-time communication) +├── core/ # Business logic and orchestration +│ ├── chat/ # Chat system and message handling +│ ├── llm/ # LLM provider integration +│ ├── providers/ # Multi-provider system management +│ └── storage/ # File storage and processing +├── infra/ # Infrastructure services +│ ├── database/ # Database connections and management +│ └── storage/ # Object storage (S3/MinIO) integration +├── middleware/ # Cross-cutting concerns +│ └── auth.py # Authentication and authorization +├── models/ # SQLModel definitions (no foreign keys) +├── repos/ # Repository pattern for data access +├── schemas/ # Pydantic request/response schemas +└── mcp/ # Model Context Protocol integration +``` + +**Key 
Technologies:** +- **FastAPI**: Modern async web framework +- **SQLModel**: Type-safe database ORM +- **LangGraph**: Multi-agent workflow orchestration +- **Redis**: Real-time messaging and caching +- **PostgreSQL**: Primary data store +- **Casdoor**: Authentication and user management + +### 2.2 Frontend Architecture ``` web/src/ -├── app/ # Pages/Routes -├── components/ # React Components -│ ├── features/ # Feature-specific components -│ ├── knowledge/ # Knowledge Base components -│ ├── layouts/ # Layout wrappers -│ ├── preview/ # File Preview System -│ └── ui/ # shadcn UI components -├── core/ # Core Business Logic -├── hooks/ # Custom Hooks -├── service/ # API Services -└── store/ # Zustand Store -``` - -## Dev Environment Tips - -- **Docker First:** The project is designed to run in Docker. - - Start Dev: `./launch/dev.sh` -- **Frontend Commands:** - - `yarn lint`: Check for linting errors. **Run this after every major change.** - - `yarn type-check`: Verify TypeScript types. - - `yarn shadcn add `: Add new UI components. -- **Backend Commands:** - - `uv run pytest`: Run backend tests. - - `uv run pyright .`: Run static analysis. - -## Important Things to Remember - -1. **Linting is Critical:** The frontend relies heavily on TypeScript and ESLint. Always run `yarn lint` and `yarn type-check` in the `web/` directory to catch errors early. -2. **No Direct S3 Access in Browser:** When previewing or downloading files in the frontend, do NOT use direct S3/MinIO URLs (e.g., `http://host.docker.internal...`). Use the backend proxy endpoints (`/xyzen/api/v1/files/.../download`) to ensure accessibility and correct authentication. -3. **Strict Layering:** Do not put business logic in Components. Do not put HTTP requests in Components. Respect the frontend layering. -4. **Database:** Remember the "no foreign key" rule in SQLModel definitions. Handle relationships logically in the service/repo layer. 
+├── app/ # Next.js-style routing and pages +├── components/ # React components (UI rendering only) +│ ├── features/ # Feature-specific UI components +│ ├── chat/ # Chat interface components +│ ├── layouts/ # Application layout wrappers +│ ├── preview/ # File preview system +│ └── ui/ # shadcn/ui design system components +├── core/ # ⭐ HEART: Business logic and orchestration +│ ├── chat/ # Chat system business logic +│ ├── document/ # Document processing logic +│ └── auth/ # Authentication logic +├── hooks/ # Custom React hooks +│ ├── queries/ # TanStack Query hooks (server state) +│ └── [feature]/ # Feature-specific hooks +├── service/ # Pure HTTP/WebSocket communication +│ ├── api/ # REST API services +│ └── websocket/ # WebSocket services +├── store/ # Zustand state management +│ └── slices/ # Feature-specific state slices +└── types/ # Global TypeScript definitions +``` + +**Frontend Layered Architecture:** +1. **Component Layer**: Pure UI rendering, no business logic +2. **Hook Layer**: Capability encapsulation, state subscription +3. **Core Layer** ⭐: Business logic, flow orchestration, side effects +4. **Service Layer**: HTTP/WebSocket requests only +5. **Store Layer**: Client-side UI state (Zustand) +6. **Query Layer**: Server state caching (TanStack Query) + +## 3. Agent System + +### 3.1 Agent Architecture + +Xyzen uses a sophisticated agent system built on **LangGraph** for multi-agent workflows and state management. 
+ +#### Base Agent Patterns + +**BaseBuiltinGraphAgent** - Abstract base for LangGraph agents: +```python +# service/app/agents/base_graph_agent.py +class BaseBuiltinGraphAgent(ABC): + name: str + description: str + version: str + capabilities: list[str] + tags: list[str] + + @abstractmethod + def build_graph(self) -> CompiledStateGraph: + """Build and return the LangGraph StateGraph""" + + @abstractmethod + def get_state_schema(self) -> dict[str, Any]: + """Return the state schema for this agent""" +``` + +**BaseAgent** - Flexible execution patterns: +```python +# service/app/agents/base_agent.py +class BaseAgent(ABC): + # Supports multiple execution modes: + # - Simple: Single LLM call + # - ReAct: Iterative LLM call with validation + # - Parallel: Concurrent processing + # - Graph: LangGraph-based tool calling + + async def execute(self, state: MainState, use_agent: bool = False) -> MainState: + # Dynamic execution strategy selection +``` + +#### Agent Registration and Discovery + +Agents are automatically discovered using `__init_subclass__`: +```python +class MyCustomAgent(BaseBuiltinGraphAgent): + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + # Automatic registration with agent registry +``` + +### 3.2 Agent Development Patterns + +#### Creating New Agents + +1. **Inherit from BaseBuiltinGraphAgent**: +```python +class DocumentProcessingAgent(BaseBuiltinGraphAgent): + def __init__(self): + super().__init__( + name="Document Processing Agent", + description="Advanced document processing capabilities", + version="1.0.0", + capabilities=["pdf-conversion", "analysis"], + tags=["document", "processing"] + ) +``` + +2. **Define State Schema**: +```python +def get_state_schema(self) -> dict[str, Any]: + return { + "input_file": "File path or content to process", + "processing_type": "Type of processing to perform", + "progress": "Processing progress (0-100)", + "results": "Processing results and outputs" + } +``` + +3. 
**Build Graph Workflow**: +```python +def build_graph(self) -> CompiledStateGraph: + workflow = StateGraph(MyState) + + # Add nodes + workflow.add_node("analyze_input", self._analyze_input) + workflow.add_node("process_data", self._process_data) + workflow.add_node("format_output", self._format_output) + + # Define flow + workflow.add_edge(START, "analyze_input") + workflow.add_edge("analyze_input", "process_data") + workflow.add_edge("process_data", "format_output") + workflow.add_edge("format_output", END) + + return workflow.compile() +``` + +#### Execution Modes + +**Simple Mode**: Direct LLM call with prompt template +**ReAct Mode**: Iterative reasoning with validation loops +**Parallel Mode**: Concurrent processing of multiple inputs +**Graph Mode**: Full LangGraph workflow execution + +#### Agent-as-Tool Pattern +Agents can be used as tools within other agents: +```python +# Register agent as tool for use in other workflows +@tool +def document_processing_agent(input_data: str) -> str: + """Process documents using the Document Processing Agent""" + return agent.execute(input_data) +``` + +## 4. 
Real-Time Communication + +### 4.1 WebSocket Architecture + +Xyzen uses WebSocket-first architecture for real-time communication: + +```python +# service/app/api/ws/v1/chat.py +class WebSocketManager: + def __init__(self): + self.connections: Dict[str, WebSocket] = {} + self.redis_client = get_redis_client() + + async def connect(self, websocket: WebSocket, session_id: str): + # Connection management with Redis pub/sub + + async def send_to_session(self, session_id: str, message: dict): + # Real-time message delivery +``` + +**Key Features:** +- Connection lifecycle management +- Redis pub/sub for scalability +- Authentication integration +- Message routing and delivery +- Error handling and reconnection + +### 4.2 WebSocket Message Types + +**Standard Message Structure:** +```typescript +interface WebSocketMessage { + type: string; + data: any; + session_id?: string; + timestamp?: string; + message_id?: string; +} +``` + +**Common Message Types:** +- `chat_message`: User chat messages +- `agent_response`: Agent responses +- `tool_call`: Tool execution messages +- `progress_update`: Real-time progress updates +- `file_upload`: File upload notifications +- `document_processing_*`: Document processing events + +## 5. Chat & Messaging System + +### 5.1 Message Architecture + +The chat system supports rich message types with comprehensive metadata: + +```python +# Backend message structure +class Message(SQLModel, table=True): + id: str + session_id: str + user_id: str + content: str + message_type: MessageType + attachments: Optional[List[dict]] = None + metadata: Optional[dict] = None + created_at: datetime +``` + +```typescript +// Frontend message structure +interface ChatMessage { + id: string; + content: string; + role: 'user' | 'assistant' | 'system'; + type: MessageType; + attachments?: Attachment[]; + metadata?: Record<string, unknown>; + timestamp: Date; + status?: 'sending' | 'sent' | 'error'; +} +``` + +### 5.2 Message Processing Pipeline + +1. 
**Input Validation**: Content, attachments, permissions +2. **Agent Routing**: Select appropriate agent for response +3. **Processing**: Execute agent workflow with real-time updates +4. **Response Generation**: Format and deliver agent response +5. **Storage**: Persist conversation history +6. **Notifications**: Real-time delivery to connected clients + +### 5.3 Attachment Handling + +Supports multiple attachment types: +- **Documents**: PDF, DOCX, PPTX with processing capabilities +- **Images**: With thumbnail generation and preview +- **Audio**: With transcription support +- **Code Files**: With syntax highlighting +- **Generated Files**: Agent-created content + +## 6. File Processing & Storage + +### 6.1 File Upload System + +**Upload Flow:** +```python +# service/app/api/v1/files.py +@router.post("/upload") +async def upload_file( + file: UploadFile, + scope: str = FileScope.PRIVATE, + category: str | None = None, + user_id: str = Depends(get_current_user), + storage: StorageServiceProto = Depends(get_storage_service) +): + # 1. Validation (size, type, quota) + # 2. Hash calculation + # 3. Storage upload + # 4. Database record creation + # 5. 
Metadata extraction +``` + +**File Categories:** +- `images`: Image files with thumbnail generation +- `documents`: PDF, Office documents with conversion +- `audio`: Audio files with transcription +- `others`: General file category + +### 6.2 Storage Architecture + +**Storage Service Pattern:** +```python +class StorageServiceProto(Protocol): + async def upload(self, file_data: bytes, key: str) -> str + async def download(self, key: str) -> bytes + async def delete(self, key: str) -> bool + async def get_download_url(self, key: str) -> str +``` + +**Storage Implementations:** +- **LocalStorage**: Development environment +- **S3Storage**: Production object storage +- **MinIOStorage**: Self-hosted S3-compatible storage + +### 6.3 File Processing Capabilities + +**Document Conversion:** +- PDF generation from DOCX/XLSX/PPTX +- OCR text extraction +- Metadata extraction +- Preview generation + +**Image Processing:** +- Thumbnail generation +- Format conversion +- Metadata extraction +- Preview optimization + +**Audio Processing:** +- Transcription services +- Format conversion +- Metadata extraction + +## 7. Database Patterns + +### 7.1 No-Foreign-Key Architecture + +**Philosophy**: Avoid rigid database constraints to maintain flexibility and scalability. 
+ +**Relationship Handling:** +```python +# Instead of foreign keys, use logical relationships +class User(SQLModel, table=True): + id: str = Field(primary_key=True) + name: str + +class Session(SQLModel, table=True): + id: str = Field(primary_key=True) + user_id: str # Logical reference, not foreign key + title: str + +# Handle relationships in service layer +class SessionRepository: + async def get_sessions_for_user(self, user_id: str) -> List[Session]: + # Query sessions by user_id + # Validate user existence in business logic +``` + +### 7.2 Repository Pattern + +**Repository Structure:** +```python +class BaseRepository(Generic[T]): + def __init__(self, session: AsyncSession, model: Type[T]): + self.session = session + self.model = model + + async def create(self, item: T) -> T: + self.session.add(item) + await self.session.commit() + await self.session.refresh(item) + return item + + async def get_by_id(self, id: str) -> T | None: + return await self.session.get(self.model, id) + + async def update(self, item: T) -> T: + self.session.add(item) + await self.session.commit() + await self.session.refresh(item) + return item + + async def delete(self, id: str) -> bool: + item = await self.get_by_id(id) + if item: + await self.session.delete(item) + await self.session.commit() + return True + return False +``` + +**Specific Repositories:** +```python +class UserRepository(BaseRepository[User]): + async def get_by_email(self, email: str) -> User | None: + # Custom query methods + + async def get_active_users(self) -> List[User]: + # Business-specific queries +``` + +### 7.3 Transaction Management + +**Service Layer Transactions:** +```python +class ChatService: + async def create_message_with_attachments( + self, + message_data: MessageCreate, + attachments: List[FileCreate] + ) -> Message: + async with self.db.begin(): + # All operations in single transaction + message = await self.message_repo.create(message_data) + + for attachment in attachments: + 
attachment.message_id = message.id + await self.file_repo.create(attachment) + + return message +``` + +## 8. Authentication & Security + +### 8.1 Authentication System + +**Casdoor Integration:** +```python +# service/app/middleware/auth.py +async def get_current_user( + authorization: str | None = Header(None) +) -> str: + if not authorization: + raise HTTPException(401, "Authorization header missing") + + # Validate token with Casdoor + user_info = await casdoor_client.parse_jwt_token(token) + return user_info.user_id +``` + +**Token Management:** +- JWT-based authentication +- Token refresh mechanisms +- Session management +- Multi-provider support + +### 8.2 Authorization Patterns + +**Resource Access Control:** +```python +class FileService: + async def get_file(self, file_id: str, user_id: str) -> File: + file = await self.file_repo.get_by_id(file_id) + + # Authorization check + if not self.can_access_file(file, user_id): + raise HTTPException(403, "Access denied") + + return file + + def can_access_file(self, file: File, user_id: str) -> bool: + # Public files accessible to all + if file.scope == FileScope.PUBLIC: + return True + + # Private files only for owner + return file.user_id == user_id +``` + +**Permission Levels:** +- **Public**: Accessible to all users +- **Private**: User-specific access +- **Shared**: Group or team access +- **Generated**: System-generated content + +## 9. 
MCP & Provider Integration + +### 9.1 Model Context Protocol (MCP) + +**MCP Server Management:** +```python +# service/app/mcp/__init__.py +class MCPManager: + def __init__(self): + self.servers: Dict[str, MCPServer] = {} + self.tools: Dict[str, Tool] = {} + + async def register_server(self, server_config: MCPServerConfig): + # Dynamic server registration + server = await self.connect_to_server(server_config) + self.servers[server_config.name] = server + + # Register available tools + tools = await server.list_tools() + for tool in tools: + self.tools[f"{server_config.name}:{tool.name}"] = tool + + async def call_tool(self, tool_name: str, arguments: dict) -> dict: + # Route tool calls to appropriate MCP server +``` + +**MCP Integration Features:** +- Automatic server discovery +- Dynamic tool registration +- Flexible routing and calling +- Authentication handling +- Error management and retries + +### 9.2 Provider System + +**LLM Provider Configuration:** +```python +# service/app/core/providers/ +class ProviderManager: + def __init__(self): + self.providers: Dict[str, LLMProvider] = {} + + async def get_provider(self, provider_name: str) -> LLMProvider: + # Provider selection and configuration + + async def call_llm( + self, + provider: str, + messages: List[dict], + **kwargs + ) -> LLMResponse: + # Unified LLM calling interface +``` + +**Supported Providers:** +- OpenAI (GPT-3.5, GPT-4, GPT-4o) +- Anthropic (Claude family) +- Google (Gemini) +- Local models (Ollama, vLLM) +- Custom providers via MCP + +## 10. 
Testing Strategies + +### 10.1 Testing Architecture + +**Test Structure:** +``` +tests/ +├── unit/ # Pure logic tests +│ ├── test_agents/ # Agent logic testing +│ ├── test_services/ # Business logic testing +│ └── test_utils/ # Utility function testing +├── integration/ # System integration tests +│ ├── test_api/ # API endpoint testing +│ ├── test_db/ # Database integration testing +│ └── test_websocket/ # WebSocket testing +└── fixtures/ # Test data and mocks +``` + +**Testing Patterns:** + +**Agent Testing:** +```python +@pytest.mark.asyncio +async def test_document_processing_agent(): + agent = DocumentProcessingAgent() + + # Test state schema + schema = agent.get_state_schema() + assert "input_file" in schema + + # Test graph building + graph = agent.build_graph() + assert graph is not None + + # Test execution + initial_state = MyState(input_file="test.pdf") + result = await graph.ainvoke(initial_state) + assert result.status == "completed" +``` + +**WebSocket Testing:** +```python +@pytest.mark.asyncio +async def test_websocket_message_flow(websocket_client): + # Connect to WebSocket + async with websocket_client.websocket_connect("/ws/chat") as websocket: + # Send message + await websocket.send_json({"type": "chat_message", "content": "Hello"}) + + # Receive response + response = await websocket.receive_json() + assert response["type"] == "agent_response" +``` + +### 10.2 Testing Best Practices + +**Database Testing:** +```python +@pytest.fixture +async def db_session(): + # Create test database session + async with AsyncSession(test_engine) as session: + yield session + await session.rollback() # Rollback after test + +@pytest.mark.asyncio +async def test_user_repository(db_session): + repo = UserRepository(db_session) + + # Test create + user = await repo.create(User(name="Test User")) + assert user.id is not None + + # Test retrieve + found_user = await repo.get_by_id(user.id) + assert found_user.name == "Test User" +``` + +**API Testing:** +```python 
+@pytest.mark.asyncio +async def test_file_upload(async_client, auth_headers): + # Test file upload endpoint + files = {"file": ("test.txt", b"test content", "text/plain")} + response = await async_client.post( + "/api/v1/files/upload", + files=files, + headers=auth_headers + ) + + assert response.status_code == 201 + data = response.json() + assert "id" in data + assert data["filename"] == "test.txt" +``` + +## 11. Configuration & Environment + +### 11.1 Configuration Management + +**Environment Variables:** +```python +# service/app/core/config.py +class Settings(BaseSettings): + # Database + DATABASE_URL: str + + # Authentication + CASDOOR_ENDPOINT: str + CASDOOR_CLIENT_ID: str + + # Storage + STORAGE_TYPE: Literal["local", "s3", "minio"] = "local" + S3_BUCKET: str | None = None + + # LLM Providers + OPENAI_API_KEY: str | None = None + ANTHROPIC_API_KEY: str | None = None + + # MCP Servers + MCP_SERVERS: List[str] = [] + + class Config: + env_file = ".env" +``` + +**Configuration Hierarchy:** +1. Environment variables +2. `.env` file +3. Default values +4. Runtime overrides + +### 11.2 Development Environment + +**Docker Setup:** +```yaml +# docker-compose.yml +services: + backend: + build: ./service + environment: + - DATABASE_URL=postgresql://user:pass@db:5432/xyzen + - REDIS_URL=redis://redis:6379/0 + + frontend: + build: ./web + environment: + - NEXT_PUBLIC_API_URL=http://backend:8000 + + db: + image: postgres:15 + environment: + - POSTGRES_DB=xyzen + + redis: + image: redis:7-alpine +``` + +**Development Commands:** +```bash +# Start development environment +./launch/dev.sh -d + +# Backend commands +cd service +uv run pytest # Run tests +uv run pyright . # Type checking + +# Frontend commands +cd web +yarn dev # Start dev server +yarn lint # Linting +yarn type-check # Type checking +yarn test # Run tests +``` + +## 12. 
Development Workflows & Best Practices + +### 12.1 Code Quality Guidelines + +**Backend (Python):** +```python +# Use modern Python syntax +def process_data(items: list[dict[str, Any]]) -> list[ProcessedItem]: + # Use list[T] instead of List[T] + # Use dict[K, V] instead of Dict[K, V] + # Use str | None instead of Optional[str] + +# Async by default +async def fetch_data() -> DataResponse: + # Prefer async operations + async with httpx.AsyncClient() as client: + response = await client.get(url) + return response.json() + +# Comprehensive error handling +try: + result = await process_operation() +except ProcessingError as e: + logger.error(f"Processing failed: {e}") + raise HTTPException(500, f"Processing error: {str(e)}") +``` + +**Frontend (TypeScript):** +```typescript +// Strict typing +interface UserProfile { + id: string; + name: string; + email: string; + preferences?: UserPreferences; +} + +// Component structure +export const UserProfileCard: React.FC<{ + user: UserProfile; + onUpdate: (user: UserProfile) => void; +}> = ({ user, onUpdate }) => { + // UI rendering only, no business logic + return ( + <Card> + {user.name} + </Card> + ); +}; + +// Business logic in Core layer +// core/user/userManager.ts +export class UserManager { + async updateUserProfile( + userId: string, + updates: Partial<UserProfile> + ): Promise<UserProfile> { + // Business logic and orchestration + } +} +``` diff --git a/ROADMAP.md b/ROADMAP.md deleted file mode 100644 index 06b69cc3..00000000 --- a/ROADMAP.md +++ /dev/null @@ -1,41 +0,0 @@ -# Xyzen Roadmap - -This roadmap outlines the development stages of the Xyzen AI Laboratory Server. It serves as a high-level guide for tracking major feature implementation and system architecture evolution. - -## Phase 1: Core Consolidation (Current) -Focus: Cleaning up the legacy structure, unifying models, and establishing best practices. - -- [ ] **Unified Agent System**: Complete the migration to a single `Agent` model for both regular and graph-based agents. 
-- [ ] **Idiomatic FastAPI Refactor**: Implement Dependency Injection (DI) for resource fetching and authorization across all API handlers. -- [ ] **Frontend State Management**: Finalize the migration of all server-side state to TanStack Query and clean up Zustand slices. -- [ ] **Error Handling**: Implement a global exception handler and unified error code system across backend and frontend. - -## Phase 2: Agent Intelligence & Workflows -Focus: Expanding the capabilities of the agent engine. - -- [ ] **LangGraph Orchestration**: Full integration of LangGraph for complex, stateful multi-agent workflows. -- [ ] **Advanced MCP Integration**: Dynamic discovery and management of Model Context Protocol (MCP) servers. -- [ ] **Tool Confirmation UI**: A robust interface for users to inspect and approve agent tool calls before execution. -- [ ] **Streaming Optimization**: Enhancing WebSocket performance for real-time agent thought process visualization. - -## Phase 3: Knowledge Base & RAG -Focus: Providing agents with memory and specialized knowledge. - -- [ ] **Vector Database Support**: Integration with PostgreSQL (pgvector) or a dedicated vector DB for RAG capabilities. -- [ ] **File Processing Pipeline**: Automated ingestion and chunking of documents (PDF, Markdown, Code). -- [ ] **Knowledge Graphs**: Exploring graph-based retrieval to complement vector search. - -## Phase 4: Infrastructure & Scale -Focus: Making Xyzen production-ready. - -- [ ] **Multi-Provider Support**: Seamless switching between OpenAI, Anthropic, Gemini, and local models (Ollama). -- [ ] **User Usage Tracking**: Monitoring token consumption and execution costs. -- [ ] **Deployment Templates**: Easy-to-use Docker Compose and Kubernetes configurations for various environments. - ---- - -## Done ✅ -- [x] **Project Foundation**: Initial FastAPI + SQLModel backend setup. -- [x] **Frontend Shell**: React + Tailwind + shadcn/ui dashboard layout. 
-- [x] **Basic Agent Chat**: Functional WebSocket-based chat with regular agents. -- [x] **Dockerized Environment**: Fully containerized development setup with PostgreSQL and MinIO. diff --git a/TODO.md b/TODO.md deleted file mode 100644 index e56ae538..00000000 --- a/TODO.md +++ /dev/null @@ -1,30 +0,0 @@ -# Xyzen Task Tracker (TODO) - -This file tracks tactical, short-term tasks and immediate technical debt. For high-level milestones, see `ROADMAP.md`. - -## 🛠️ Immediate Priorities -- [ ] **Dependency Injection Refactor**: Move `auth_service` and `agent` fetching into FastAPI dependencies in `agents.py`. -- [ ] **Agent Repository Cleanup**: Remove legacy methods in `AgentRepository` that supported the old unified agent service (e.g., `get_agent_with_mcp_servers`). -- [ ] **Frontend Type Alignment**: Update `web/src/types/agents.ts` to match the simplified `AgentReadWithDetails` model from the backend. - -## 🚀 Backend Tasks -- [ ] **Pydantic V2 Migration**: Verify all SQLModels and Schemas are utilizing Pydantic V2 features optimally. -- [ ] **Logging Middleware**: Add request/response logging for better debugging in the Docker environment. -- [ ] **Auth Error Mapping**: Finish mapping `ErrCodeError` to appropriate FastAPI `HTTPException` responses in `middleware/auth`. - -## 🎨 Frontend Tasks -- [ ] **TanStack Query Refactor**: Move agent fetching from `agentSlice.ts` (Zustand) to a dedicated hook in `hooks/queries/useAgents.ts`. -- [ ] **AddAgentModal UI**: Allow users to select a specific LLM Provider during the creation of a regular agent. -- [ ] **Loading States**: Add skeleton loaders to the `AgentExplorer` sidebar. - -## 🧪 Testing & Quality -- [ ] **Backend Unit Tests**: Add test cases for the newly unified `get_agent` endpoint. -- [ ] **Frontend Linting**: Fix existing `yarn lint` warnings in `web/src/components/layouts/ChatToolbar.tsx`. -- [ ] **API Documentation**: Update docstrings in `handler/api/v1/` to ensure Swagger UI is accurate. 
- -## ✅ Completed Tasks -- [x] **Agent Unification**: Unified `get_agent` endpoint to return `AgentReadWithDetails` and removed `UnifiedAgentRead` dependencies. -- [x] **Default Agent Cloning**: Implemented logic in `SystemAgentManager` to clone system agents as user-owned default agents. -- [x] **Tag-based Identification**: Updated frontend (Chat, Agent List, Avatars) to identify default agents via tags (e.g., `default_chat`) rather than hardcoded UUIDs. -- [x] **Workshop Removal**: Completely removed the legacy "Workshop" feature from both backend and frontend to simplify the core agent experience. -- [x] **Policy Update**: Updated `AgentPolicy` to allow reading of system-scoped reference agents. diff --git a/service/app/configs/llm.py b/service/app/configs/llm.py index 1a9668ce..865e0e42 100644 --- a/service/app/configs/llm.py +++ b/service/app/configs/llm.py @@ -80,6 +80,8 @@ class LLMConfig(BaseModel): openai: LLMProviderConfig = Field(default_factory=LLMProviderConfig, description="OpenAI config") google: LLMProviderConfig = Field(default_factory=LLMProviderConfig, description="Google GenAI config") googlevertex: LLMProviderConfig = Field(default_factory=LLMProviderConfig, description="Google Vertex config") + gpugeek: LLMProviderConfig = Field(default_factory=LLMProviderConfig, description="GPUGeek config") + qwen: LLMProviderConfig = Field(default_factory=LLMProviderConfig, description="Qwen config") # Legacy single-provider fields provider: ProviderType | None = Field(default=None, description="(Legacy) Provider type") @@ -111,6 +113,10 @@ def normalize(s: str) -> str: return ProviderType.AZURE_OPENAI.value if s == "googlevertex": return ProviderType.GOOGLE_VERTEX.value + if s == "gpugeek": + return ProviderType.GPUGEEK.value + if s == "qwen": + return ProviderType.QWEN.value return s return [ProviderType(normalize(item)) for item in items] @@ -140,6 +146,10 @@ def get_provider_config(self, provider: ProviderType) -> LLMProviderConfig: return self.google 
case ProviderType.GOOGLE_VERTEX: return self.googlevertex + case ProviderType.GPUGEEK: + return self.gpugeek + case ProviderType.QWEN: + return self.qwen def iter_enabled(self) -> list[tuple[ProviderType, LLMProviderConfig]]: """Return enabled provider configs. diff --git a/service/app/core/chat/langchain.py b/service/app/core/chat/langchain.py index 7df80af1..3b2d93b1 100644 --- a/service/app/core/chat/langchain.py +++ b/service/app/core/chat/langchain.py @@ -26,6 +26,7 @@ GeneratedFileHandler, StreamContext, StreamingEventHandler, + ThinkingEventHandler, TokenStreamProcessor, ToolEventHandler, ) @@ -301,7 +302,7 @@ async def _handle_updates_mode(data: Any, ctx: StreamContext) -> AsyncGenerator[ async def _handle_messages_mode(data: Any, ctx: StreamContext) -> AsyncGenerator[StreamingEvent, None]: - """Handle 'messages' mode events (token streaming).""" + """Handle 'messages' mode events (token streaming and thinking content).""" if not isinstance(data, tuple): return @@ -331,7 +332,27 @@ async def _handle_messages_mode(data: Any, ctx: StreamContext) -> AsyncGenerator if node and node not in ("model", "agent"): return - # Extract and emit token + # Check for thinking content first (from reasoning models like Claude, DeepSeek R1, Gemini 3) + thinking_content = ThinkingEventHandler.extract_thinking_content(message_chunk) + + if thinking_content: + # Start thinking if not already + if not ctx.is_thinking: + logger.debug("Emitting thinking_start for stream_id=%s", ctx.stream_id) + ctx.is_thinking = True + yield ThinkingEventHandler.create_thinking_start(ctx.stream_id) + + ctx.thinking_buffer.append(thinking_content) + yield ThinkingEventHandler.create_thinking_chunk(ctx.stream_id, thinking_content) + return + + # If we were thinking but now have regular content, end thinking first + if ctx.is_thinking: + logger.debug("Emitting thinking_end for stream_id=%s", ctx.stream_id) + ctx.is_thinking = False + yield ThinkingEventHandler.create_thinking_end(ctx.stream_id) + + # 
Extract and emit token for regular streaming token_text = TokenStreamProcessor.extract_token_text(message_chunk) if not token_text: return @@ -347,6 +368,12 @@ async def _handle_messages_mode(data: Any, ctx: StreamContext) -> AsyncGenerator async def _finalize_streaming(ctx: StreamContext) -> AsyncGenerator[StreamingEvent, None]: """Finalize the streaming session.""" + # If still thinking when finalizing, emit thinking_end + if ctx.is_thinking: + logger.debug("Emitting thinking_end (in finalize) for stream_id=%s", ctx.stream_id) + ctx.is_thinking = False + yield ThinkingEventHandler.create_thinking_end(ctx.stream_id) + if ctx.is_streaming: logger.debug( "Emitting streaming_end for stream_id=%s (total tokens: %d)", diff --git a/service/app/core/chat/stream_handlers.py b/service/app/core/chat/stream_handlers.py index 304cf583..44e93762 100644 --- a/service/app/core/chat/stream_handlers.py +++ b/service/app/core/chat/stream_handlers.py @@ -25,6 +25,9 @@ StreamingEndData, StreamingEvent, StreamingStartData, + ThinkingChunkData, + ThinkingEndData, + ThinkingStartData, TokenUsageData, ToolCallRequestData, ToolCallResponseData, @@ -55,6 +58,9 @@ class StreamContext: total_input_tokens: int = 0 total_output_tokens: int = 0 total_tokens: int = 0 + # Thinking/reasoning content state + is_thinking: bool = False + thinking_buffer: list[str] = field(default_factory=list) class ToolEventHandler: @@ -79,7 +85,7 @@ def create_tool_request_event(tool_call: dict[str, Any]) -> StreamingEvent: "status": ToolCallStatus.EXECUTING, "timestamp": asyncio.get_event_loop().time(), } - return {"type": ChatEventType.TOOL_CALL_REQUEST, "data": data} # type: ignore[return-value] + return {"type": ChatEventType.TOOL_CALL_REQUEST, "data": data} @staticmethod def create_tool_response_event( @@ -101,7 +107,7 @@ def create_tool_response_event( "status": status, "result": result, } - return {"type": ChatEventType.TOOL_CALL_RESPONSE, "data": data} # type: ignore[return-value] + return {"type": 
ChatEventType.TOOL_CALL_RESPONSE, "data": data} class StreamingEventHandler: @@ -111,13 +117,13 @@ class StreamingEventHandler: def create_streaming_start(stream_id: str) -> StreamingEvent: """Create streaming start event.""" data: StreamingStartData = {"id": stream_id} - return {"type": ChatEventType.STREAMING_START, "data": data} # type: ignore[return-value] + return {"type": ChatEventType.STREAMING_START, "data": data} @staticmethod def create_streaming_chunk(stream_id: str, content: str) -> StreamingEvent: """Create streaming chunk event.""" data: StreamingChunkData = {"id": stream_id, "content": content} - return {"type": ChatEventType.STREAMING_CHUNK, "data": data} # type: ignore[return-value] + return {"type": ChatEventType.STREAMING_CHUNK, "data": data} @staticmethod def create_streaming_end(stream_id: str) -> StreamingEvent: @@ -126,7 +132,7 @@ def create_streaming_end(stream_id: str) -> StreamingEvent: "id": stream_id, "created_at": asyncio.get_event_loop().time(), } - return {"type": ChatEventType.STREAMING_END, "data": data} # type: ignore[return-value] + return {"type": ChatEventType.STREAMING_END, "data": data} @staticmethod def create_token_usage_event(input_tokens: int, output_tokens: int, total_tokens: int) -> StreamingEvent: @@ -136,17 +142,102 @@ def create_token_usage_event(input_tokens: int, output_tokens: int, total_tokens "output_tokens": output_tokens, "total_tokens": total_tokens, } - return {"type": ChatEventType.TOKEN_USAGE, "data": data} # type: ignore[return-value] + return {"type": ChatEventType.TOKEN_USAGE, "data": data} @staticmethod def create_processing_event(status: str = ProcessingStatus.PREPARING_REQUEST) -> StreamingEvent: """Create processing status event.""" - return {"type": ChatEventType.PROCESSING, "data": {"status": status}} # type: ignore[return-value] + return {"type": ChatEventType.PROCESSING, "data": {"status": status}} @staticmethod def create_error_event(error: str) -> StreamingEvent: """Create error event.""" - 
return {"type": ChatEventType.ERROR, "data": {"error": error}} # type: ignore[return-value] + return {"type": ChatEventType.ERROR, "data": {"error": error}} + + +class ThinkingEventHandler: + """Handle thinking/reasoning content streaming events.""" + + @staticmethod + def create_thinking_start(stream_id: str) -> StreamingEvent: + """Create thinking start event.""" + data: ThinkingStartData = {"id": stream_id} + return {"type": ChatEventType.THINKING_START, "data": data} + + @staticmethod + def create_thinking_chunk(stream_id: str, content: str) -> StreamingEvent: + """Create thinking chunk event.""" + data: ThinkingChunkData = {"id": stream_id, "content": content} + return {"type": ChatEventType.THINKING_CHUNK, "data": data} + + @staticmethod + def create_thinking_end(stream_id: str) -> StreamingEvent: + """Create thinking end event.""" + data: ThinkingEndData = {"id": stream_id} + return {"type": ChatEventType.THINKING_END, "data": data} + + @staticmethod + def extract_thinking_content(message_chunk: Any) -> str | None: + """ + Extract thinking/reasoning content from message chunk. 
+ + Checks various provider-specific locations: + - Anthropic Claude: content blocks with type="thinking" + - DeepSeek R1: additional_kwargs.reasoning_content + - Gemini 3: content blocks with type="thought" or response_metadata.reasoning + - Generic: response_metadata.reasoning_content or thinking + + Args: + message_chunk: Message chunk from LLM streaming + + Returns: + Extracted thinking content or None + """ + # Check for DeepSeek/OpenAI style reasoning_content in additional_kwargs + if hasattr(message_chunk, "additional_kwargs"): + additional_kwargs = message_chunk.additional_kwargs + if isinstance(additional_kwargs, dict): + reasoning = additional_kwargs.get("reasoning_content") + if reasoning: + logger.debug("Found thinking in additional_kwargs.reasoning_content") + return reasoning + + # Check for thinking/thought blocks in content (Anthropic, Gemini 3) + if hasattr(message_chunk, "content"): + content = message_chunk.content + if isinstance(content, list): + for block in content: + if isinstance(block, dict): + block_type = block.get("type", "") + # Anthropic Claude uses "thinking" type + if block_type == "thinking": + thinking_text = block.get("thinking", "") + if thinking_text: + logger.debug("Found thinking in content block type='thinking'") + return thinking_text + # Gemini 3 uses "thought" type + elif block_type == "thought": + thought_text = block.get("thought", "") or block.get("text", "") + if thought_text: + logger.debug("Found thinking in content block type='thought'") + return thought_text + + # Check response_metadata for thinking content + if hasattr(message_chunk, "response_metadata"): + metadata = message_chunk.response_metadata + if isinstance(metadata, dict): + # Gemini 3 uses "reasoning" key + thinking = ( + metadata.get("thinking") + or metadata.get("reasoning_content") + or metadata.get("reasoning") + or metadata.get("thoughts") + ) + if thinking: + logger.debug("Found thinking in response_metadata: %s", list(metadata.keys())) + return 
thinking + + return None class CitationExtractor: @@ -264,7 +355,7 @@ def _deduplicate_citations(citations: list[CitationData]) -> list[CitationData]: def create_citations_event(citations: list[CitationData]) -> StreamingEvent: """Create search citations event.""" data: SearchCitationsData = {"citations": citations} - return {"type": ChatEventType.SEARCH_CITATIONS, "data": data} # type: ignore[return-value] + return {"type": ChatEventType.SEARCH_CITATIONS, "data": data} class GeneratedFileHandler: @@ -397,7 +488,7 @@ async def process_generated_content( def create_generated_files_event(files: list[GeneratedFileInfo]) -> StreamingEvent: """Create generated files event.""" data: GeneratedFilesData = {"files": files} - return {"type": ChatEventType.GENERATED_FILES, "data": data} # type: ignore[return-value] + return {"type": ChatEventType.GENERATED_FILES, "data": data} class TokenStreamProcessor: diff --git a/service/app/core/chat/topic_generator.py b/service/app/core/chat/topic_generator.py index 55333f15..a66091e2 100644 --- a/service/app/core/chat/topic_generator.py +++ b/service/app/core/chat/topic_generator.py @@ -29,6 +29,10 @@ def _select_title_generation_model( return "gemini-2.5-flash" if provider_type == ProviderType.AZURE_OPENAI: return "gpt-5-mini" + if provider_type == ProviderType.GPUGEEK: + return "Vendor2/Gemini-2.5-Flash" + if provider_type == ProviderType.QWEN: + return "qwen3-0.6b" return session_model or default_model diff --git a/service/app/core/llm/service.py b/service/app/core/llm/service.py index 2948b36b..5be60a62 100644 --- a/service/app/core/llm/service.py +++ b/service/app/core/llm/service.py @@ -10,6 +10,88 @@ logger = logging.getLogger(__name__) +# Manually configured model list for GPUGeek provider +# Add models in the format "Vendor/Model-Name" (e.g., "Vendor2/Gemini-2.5-Flash") +GPUGEEK_MODELS: list[str] = [ + # Add your GPUGeek models here + # Example: "Vendor2/Gemini-2.5-Flash", + # "Vendor2/Gemini-2.5-Pro", + # 
"Vendor2/Gemini-2.5-Flash", + # "Vendor2/Gemini-2.5-Flash-Image", + # "Vendor2/Gemini-3-Pro", + # "Vendor2/Gemini-3-Flash", + # "Vendor2/Gemini-3-Pro-Image", + "Vendor2/Claude-3.7-Sonnet", + "Vendor2/Claude-4-Sonnet", + "Vendor2/Claude-4.5-Opus", + "Vendor2/Claude-4.5-Sonnet", + # "Vendor2/GPT-5.2", + # "Vendor2/GPT-5.1", + # "Vendor2/GPT-5", + # "OpenAI/Azure-GPT-5.1", + # "OpenAI/Azure-GPT-5.2", + # "OpenAI/Azure-GPT-5", + "DeepSeek/DeepSeek-V3-0324", + "DeepSeek/DeepSeek-V3.1-0821", + "DeepSeek/DeepSeek-R1-671B", +] + + +def _map_gpugeek_to_base_model(gpugeek_model: str) -> str | None: + """ + Map GPUGeek vendor-prefixed model names to their base model names for pricing lookup. + + Most model names can be used directly after normalization, except for DeepSeek models + which require special mapping based on version patterns. + + Args: + gpugeek_model: GPUGeek model name (e.g., "Vendor2/Gemini-2.5-Flash") + + Returns: + Base model name for LiteLLM lookup, or None if no mapping exists + + Examples: + "Vendor2/Gemini-2.5-Flash" -> "gemini-2.5-flash" + "Vendor2/Claude-4.5-Sonnet" -> "claude-4.5-sonnet" + "Vendor2/GPT-5.2" -> "gpt-5.2" + "DeepSeek/DeepSeek-V3-0324" -> "deepseek-chat" + "DeepSeek/DeepSeek-R1-671B" -> "deepseek-reasoner" + """ + # Extract the model part after the vendor prefix + if "/" not in gpugeek_model: + return None + + _, model_part = gpugeek_model.split("/", 1) + model_lower = model_part.lower() + + # Special handling for DeepSeek models + if "deepseek" in model_lower: + if "v" in model_lower and any(c.isdigit() for c in model_lower.split("v")[1][:3]): + return "deepseek-chat" + if "r" in model_lower and any(c.isdigit() for c in model_lower.split("r")[1][:3]): + return "deepseek-reasoner" + return "deepseek-chat" + + # Special handling for Azure models + # if "azure-" in model_lower: + # model_lower = model_lower.replace("azure-", "") + + # Special handling for Anthropic models + if "gemini-3-flash" in model_lower: + return 
"gemini-3-flash-preview" + if "gemini-3-pro" in model_lower: + return "gemini-3-pro-preview" + if "claude-3.7-sonnet" in model_lower: + return "anthropic.claude-3-7-sonnet-20250219-v1:0" + if "claude-4-sonnet" in model_lower: + return "anthropic.claude-sonnet-4-20250514-v1:0" + if "claude-4.5-sonnet" in model_lower: + return "anthropic.claude-sonnet-4-5-20250929-v1:0" + if "claude-4.5-opus" in model_lower: + return "anthropic.claude-opus-4-5-20251101-v1:0" + + return model_lower + class ModelFilter: """ @@ -235,6 +317,12 @@ def get_model_info(model_name: str) -> ModelInfo: Returns: Dictionary containing model metadata (max_tokens, input_cost_per_token, etc.) """ + if "qwen" in model_name: + converted_model_name = "dashscope/" + model_name + else: + converted_model_name = _map_gpugeek_to_base_model(model_name) + if converted_model_name: + model_name = converted_model_name try: info = litellm.get_model_info(model_name) return info @@ -300,6 +388,11 @@ def _get_provider_filter(provider_type: str) -> Callable[[str], bool]: ModelFilter.version_filter(min_version=2.5), ModelFilter.no_slash_filter(), ), + "qwen": ModelFilter.combined_filter( + ModelFilter.no_date_suffix_filter(), + ModelFilter.substring_filter("qwen"), + # ModelFilter.no_slash_filter(), + ), } # Return the filter or a default that accepts all @@ -311,7 +404,7 @@ def get_models_by_provider(provider_type: str) -> list[ModelInfo]: Get all models for a specific provider type with their metadata. 
Args: - provider_type: The provider type (e.g., 'openai', 'azure_openai', 'google') + provider_type: The provider type (e.g., 'openai', 'azure_openai', 'google', 'gpugeek') Returns: List of ModelInfo objects with model metadata @@ -319,11 +412,53 @@ def get_models_by_provider(provider_type: str) -> list[ModelInfo]: models: list[ModelInfo] = [] logger.debug(f"Provider type: {provider_type}") + # Handle GPUGeek provider with manual model list + if provider_type == "gpugeek": + for model_name in GPUGEEK_MODELS: + # Try to get pricing from base model + base_model = _map_gpugeek_to_base_model(model_name) + + # Default model info + model_data: dict[str, Any] = { + "key": model_name, + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": True, + "supports_parallel_function_calling": True, + "supports_vision": "image" in model_name.lower(), + "supported_openai_params": None, + } + + # Try to get real pricing from LiteLLM if we have a base model mapping + if base_model: + try: + base_info = litellm.model_cost.get(base_model) + if base_info: + # Update with real pricing data + model_data["input_cost_per_token"] = base_info.get("input_cost_per_token", 0.0) + model_data["output_cost_per_token"] = base_info.get("output_cost_per_token", 0.0) + model_data["max_tokens"] = base_info.get("max_tokens", 4096) + model_data["max_input_tokens"] = base_info.get("max_input_tokens", 128000) + model_data["max_output_tokens"] = base_info.get("max_output_tokens", 4096) + logger.debug(f"Mapped {model_name} -> {base_model} for pricing") + except Exception as e: + logger.warning(f"Failed to get pricing for {model_name} (base: {base_model}): {e}") + + models.append(cast(ModelInfo, model_data)) + logger.debug(f"Returning {len(models)} manually configured models for GPUGeek") + return models + provider_type_mapping = { "openai": 
"openai", "azure_openai": "azure", "google": "google", "google_vertex": "vertex_ai", + "qwen": "dashscope", } litellm_provider_type = provider_type_mapping.get(provider_type) @@ -344,6 +479,24 @@ def get_models_by_provider(provider_type: str) -> list[ModelInfo]: # Add supported_openai_params if missing (required by ModelInfo) if "supported_openai_params" not in model_data: model_data["supported_openai_params"] = None + + # Handle tiered pricing - extract the first tier as default pricing + if "tiered_pricing" in model_data and isinstance(model_data.get("tiered_pricing"), list): + tiered = model_data["tiered_pricing"] + if tiered and len(tiered) > 0: + first_tier = tiered[0] + # Add flat pricing fields from first tier if not present + if "input_cost_per_token" not in model_data: + model_data["input_cost_per_token"] = first_tier.get("input_cost_per_token", 0.0) + if "output_cost_per_token" not in model_data: + model_data["output_cost_per_token"] = first_tier.get("output_cost_per_token", 0.0) + + # Ensure required pricing fields exist (fallback to 0.0 if not present) + if "input_cost_per_token" not in model_data: + model_data["input_cost_per_token"] = 0.0 + if "output_cost_per_token" not in model_data: + model_data["output_cost_per_token"] = 0.0 + # Add supports_web_search for models that support built-in web search if provider_type in ["google", "google_vertex"] and "gemini" in model_name.lower(): # Gemini 2.0 and later support built-in web search @@ -364,7 +517,7 @@ def get_all_providers_with_models() -> dict[str, list[ModelInfo]]: Returns: Dictionary mapping provider type to list of ModelInfo """ - provider_types = ["openai", "azure_openai", "google", "google_vertex"] + provider_types = ["openai", "azure_openai", "google", "google_vertex", "gpugeek", "qwen"] result: dict[str, list[ModelInfo]] = {} for provider_type in provider_types: diff --git a/service/app/core/providers/factory.py b/service/app/core/providers/factory.py index 4802647d..e2a3451e 100644 --- 
a/service/app/core/providers/factory.py +++ b/service/app/core/providers/factory.py @@ -4,6 +4,7 @@ from langchain_core.language_models import BaseChatModel from langchain_google_genai import ChatGoogleGenerativeAI from langchain_openai import AzureChatOpenAI, ChatOpenAI +from langchain_qwq import ChatQwen from app.common.code import ErrCode from app.core.llm.service import LiteLLMService @@ -44,6 +45,12 @@ def create( case ProviderType.GOOGLE_VERTEX: logger.info(f"Creating Google Vertex model {model}") llm = self._create_google_vertex(model, credentials, runtime_kwargs) + case ProviderType.GPUGEEK: + logger.info(f"Creating GPUGeek model {model}") + llm = self._create_gpugeek(model, credentials, runtime_kwargs) + case ProviderType.QWEN: + logger.info(f"Creating Qwen model {model}") + llm = self._create_qwen(model, credentials, runtime_kwargs) return ModelInstance(llm=llm, config=config) @@ -104,7 +111,6 @@ def _create_google(self, model: str, credentials: LLMCredentials, runtime_kwargs # Extract google_search_enabled from runtime_kwargs google_search_enabled = runtime_kwargs.pop("google_search_enabled", False) - # Create the base model llm = ChatGoogleGenerativeAI( model=model, google_api_key=credentials["api_key"], @@ -161,3 +167,65 @@ def _create_google_vertex( llm = cast(BaseChatModel, llm.bind_tools([{"google_search": {}}])) return llm + + def _create_gpugeek(self, model: str, credentials: LLMCredentials, runtime_kwargs: dict[str, Any]) -> BaseChatModel: + """ + Create GPUGeek model instance using OpenAI-compatible API. + + GPUGeek provides an OpenAI-compatible endpoint that supports multiple model vendors. 
+ """ + # Get base_url from credentials, default to GPUGeek endpoint + web_search_enabled = runtime_kwargs.pop("google_search_enabled", False) + base_url = credentials.get("api_endpoint", "https://api.gpugeek.com/v1") + if "image" in model.lower(): + base_url = "https://api.gpugeek.com/v1/predictions" + + if "deepseek-r1" in model.lower(): + llm = ChatOpenAI( + model=model, + api_key=credentials["api_key"], + base_url=base_url, + extra_body={"thinking": {"type": "enabled"}}, + **runtime_kwargs, + ) + else: + llm = ChatOpenAI( + model=model, + api_key=credentials["api_key"], + base_url=base_url, + **runtime_kwargs, + ) + + if web_search_enabled: + logger.info(f"Enabling native web search for OpenAI model {model}") + llm = cast(BaseChatModel, llm.bind_tools([{"type": "web_search_preview"}])) + + return llm + + def _create_qwen(self, model: str, credentials: LLMCredentials, runtime_kwargs: dict[str, Any]) -> BaseChatModel: + """ + Create Qwen model instance. + + Qwen provides OpenAI-compatible API through DashScope. + For vision models, we use langchain-qwq's ChatQwen integration. 
+ """ + if "dashscope" in model: + model = model.replace("dashscope/", "") + # Extract generic search flag + web_search_enabled = runtime_kwargs.pop("google_search_enabled", False) + + # Get base_url from credentials, default to DashScope endpoint + base_url = credentials.get("api_endpoint", "https://dashscope.aliyuncs.com/compatible-mode/v1") + + llm = ChatQwen( + model=model, + api_key=credentials["api_key"], + base_url=base_url, + **runtime_kwargs, + ) + + if web_search_enabled: + logger.info(f"Enabling native web search for Qwen model {model}") + llm = cast(BaseChatModel, llm.bind_tools([{"type": "web_search_preview"}])) + + return llm diff --git a/service/app/core/providers/startup.py b/service/app/core/providers/startup.py index ab5bec9e..8accb10e 100644 --- a/service/app/core/providers/startup.py +++ b/service/app/core/providers/startup.py @@ -32,6 +32,8 @@ async def ensure_system_providers(self, llm_config: LLMConfig) -> list[Provider] "google_vertex": "GoogleVertex", "openai": "OpenAI", "google": "Google", + "gpugeek": "GPUGeek", + "qwen": "Qwen", }.get(provider_type.value, provider_type.value) existing = await self.repo.get_system_provider_by_type(provider_type) diff --git a/service/app/models/message.py b/service/app/models/message.py index e3a8f62e..0a6be71d 100644 --- a/service/app/models/message.py +++ b/service/app/models/message.py @@ -16,6 +16,8 @@ class MessageBase(SQLModel): role: str content: str topic_id: UUID = Field(index=True) + # Thinking/reasoning content from models like Claude, DeepSeek R1, Gemini 3 + thinking_content: str | None = None class Message(MessageBase, table=True): @@ -63,3 +65,4 @@ class MessageReadWithFilesAndCitations(MessageBase): class MessageUpdate(SQLModel): role: str | None = None content: str | None = None + thinking_content: str | None = None diff --git a/service/app/repos/message.py b/service/app/repos/message.py index 4a706e0a..f8350e74 100644 --- a/service/app/repos/message.py +++ b/service/app/repos/message.py @@ 
-395,6 +395,7 @@ async def get_messages_with_files_and_citations( created_at=message.created_at, attachments=file_reads_with_urls, citations=citations, + thinking_content=message.thinking_content, ) messages_with_files_and_citations.append(message_with_files_and_citations) diff --git a/service/app/schemas/chat_event_types.py b/service/app/schemas/chat_event_types.py index a0ceb2f0..2023d889 100644 --- a/service/app/schemas/chat_event_types.py +++ b/service/app/schemas/chat_event_types.py @@ -150,6 +150,25 @@ class InsufficientBalanceData(TypedDict): action_required: str +class ThinkingStartData(TypedDict): + """Data payload for THINKING_START event.""" + + id: str + + +class ThinkingChunkData(TypedDict): + """Data payload for THINKING_CHUNK event.""" + + id: str + content: str + + +class ThinkingEndData(TypedDict): + """Data payload for THINKING_END event.""" + + id: str + + # ============================================================================= # Full Event Structures (type + data) # ============================================================================= @@ -253,6 +272,27 @@ class InsufficientBalanceEvent(TypedDict): data: InsufficientBalanceData +class ThinkingStartEvent(TypedDict): + """Full event structure for thinking start.""" + + type: Literal[ChatEventType.THINKING_START] + data: ThinkingStartData + + +class ThinkingChunkEvent(TypedDict): + """Full event structure for thinking chunk.""" + + type: Literal[ChatEventType.THINKING_CHUNK] + data: ThinkingChunkData + + +class ThinkingEndEvent(TypedDict): + """Full event structure for thinking end.""" + + type: Literal[ChatEventType.THINKING_END] + data: ThinkingEndData + + # ============================================================================= # Union type for generic event handling # ============================================================================= @@ -273,6 +313,9 @@ class InsufficientBalanceEvent(TypedDict): | MessageSavedEvent | MessageEvent | InsufficientBalanceEvent + | 
ThinkingStartEvent + | ThinkingChunkEvent + | ThinkingEndEvent ) @@ -294,6 +337,9 @@ class InsufficientBalanceEvent(TypedDict): "MessageSavedData", "MessageData", "InsufficientBalanceData", + "ThinkingStartData", + "ThinkingChunkData", + "ThinkingEndData", # Event types "StreamingStartEvent", "StreamingChunkEvent", @@ -309,6 +355,9 @@ class InsufficientBalanceEvent(TypedDict): "MessageSavedEvent", "MessageEvent", "InsufficientBalanceEvent", + "ThinkingStartEvent", + "ThinkingChunkEvent", + "ThinkingEndEvent", # Union "StreamingEvent", ] diff --git a/service/app/schemas/chat_events.py b/service/app/schemas/chat_events.py index ad5f3adf..5207ee45 100644 --- a/service/app/schemas/chat_events.py +++ b/service/app/schemas/chat_events.py @@ -47,6 +47,11 @@ class ChatEventType(StrEnum): # Balance/billing events INSUFFICIENT_BALANCE = "insufficient_balance" + # Thinking/reasoning content (for models like Claude, DeepSeek R1, OpenAI o1) + THINKING_START = "thinking_start" + THINKING_CHUNK = "thinking_chunk" + THINKING_END = "thinking_end" + class ChatClientEventType(StrEnum): """Client -> Server event types (messages coming from the frontend).""" diff --git a/service/app/schemas/provider.py b/service/app/schemas/provider.py index d1f70511..e0aeac80 100644 --- a/service/app/schemas/provider.py +++ b/service/app/schemas/provider.py @@ -17,6 +17,8 @@ class ProviderType(StrEnum): AZURE_OPENAI = "azure_openai" GOOGLE = "google" GOOGLE_VERTEX = "google_vertex" + GPUGEEK = "gpugeek" + QWEN = "qwen" class LLMCredentials(TypedDict): diff --git a/service/app/tasks/chat.py b/service/app/tasks/chat.py index a5be161f..7daf5677 100644 --- a/service/app/tasks/chat.py +++ b/service/app/tasks/chat.py @@ -145,6 +145,7 @@ async def _process_chat_message_async( ai_message_id = None ai_message_obj: Message | None = None full_content = "" + full_thinking_content = "" # Track thinking content for persistence citations_data: List[CitationData] = [] generated_files_count = 0 @@ -286,6 +287,23 @@ 
async def _process_chat_message_async( elif stream_event["type"] == ChatEventType.ERROR: await publisher.publish(json.dumps(stream_event)) break + + # Handle thinking events + elif stream_event["type"] == ChatEventType.THINKING_START: + # Create message object if not exists + if not ai_message_obj: + ai_message_create = MessageCreate(role="assistant", content="", topic_id=topic_id) + ai_message_obj = await message_repo.create_message(ai_message_create) + await publisher.publish(json.dumps(stream_event)) + + elif stream_event["type"] == ChatEventType.THINKING_CHUNK: + chunk_content = stream_event["data"].get("content", "") + full_thinking_content += chunk_content + await publisher.publish(json.dumps(stream_event)) + + elif stream_event["type"] == ChatEventType.THINKING_END: + await publisher.publish(json.dumps(stream_event)) + else: await publisher.publish(json.dumps(stream_event)) @@ -296,6 +314,11 @@ async def _process_chat_message_async( ai_message_obj.content = full_content db.add(ai_message_obj) + # Update thinking content + if full_thinking_content: + ai_message_obj.thinking_content = full_thinking_content + db.add(ai_message_obj) + # Save citations if citations_data: try: diff --git a/service/migrations/versions/03630403f8c2_add_thinking_content_to_message.py b/service/migrations/versions/03630403f8c2_add_thinking_content_to_message.py new file mode 100644 index 00000000..6a9b0356 --- /dev/null +++ b/service/migrations/versions/03630403f8c2_add_thinking_content_to_message.py @@ -0,0 +1,34 @@ +"""add_thinking_content_to_message + +Revision ID: 03630403f8c2 +Revises: 70ee7fb4d40b +Create Date: 2026-01-04 15:46:02.555769 + +""" + +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +import sqlmodel + + +# revision identifiers, used by Alembic. 
+revision: str = "03630403f8c2" +down_revision: Union[str, Sequence[str], None] = "70ee7fb4d40b" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """Upgrade schema.""" + # ### commands auto generated by Alembic - please adjust! ### + op.add_column("message", sa.Column("thinking_content", sqlmodel.sql.sqltypes.AutoString(), nullable=True)) + # ### end Alembic commands ### + + +def downgrade() -> None: + """Downgrade schema.""" + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("message", "thinking_content") + # ### end Alembic commands ### diff --git a/service/migrations/versions/d25101ce4d9a_add_gpugeek_and_qwen.py b/service/migrations/versions/d25101ce4d9a_add_gpugeek_and_qwen.py new file mode 100644 index 00000000..06b21a18 --- /dev/null +++ b/service/migrations/versions/d25101ce4d9a_add_gpugeek_and_qwen.py @@ -0,0 +1,68 @@ +"""add_gpugeek_and_qwen + +Revision ID: d25101ce4d9a +Revises: 03630403f8c2 +Create Date: 2026-01-05 19:31:01.741842 + +""" + +from typing import Sequence, Union + +from alembic import op + +# revision identifiers, used by Alembic. 
+revision: str = "d25101ce4d9a" +down_revision: Union[str, Sequence[str], None] = "03630403f8c2" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """Upgrade schema.""" + # Add 'gpugeek' and 'qwen' to the providertype enum + # PostgreSQL requires ALTER TYPE to add new enum values + bind = op.get_bind() + if bind.dialect.name == "postgresql": + # Add gpugeek if it doesn't exist + op.execute( + """ + DO $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_enum + WHERE enumlabel = 'gpugeek' + AND enumtypid = (SELECT oid FROM pg_type WHERE typname = 'providertype') + ) THEN + ALTER TYPE providertype ADD VALUE 'gpugeek'; + END IF; + END$$; + """ + ) + + # Add qwen if it doesn't exist + op.execute( + """ + DO $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_enum + WHERE enumlabel = 'qwen' + AND enumtypid = (SELECT oid FROM pg_type WHERE typname = 'providertype') + ) THEN + ALTER TYPE providertype ADD VALUE 'qwen'; + END IF; + END$$; + """ + ) + + +def downgrade() -> None: + """Downgrade schema.""" + # Note: PostgreSQL does not support removing enum values directly + # This would require recreating the enum type, which is complex + # For safety, we'll leave the enum values in place + # If you need to remove them, you'll need to: + # 1. Create a new enum without these values + # 2. Migrate all data + # 3. 
Drop the old enum and rename the new one + pass diff --git a/service/pyproject.toml b/service/pyproject.toml index f0fdf653..9f51a16c 100644 --- a/service/pyproject.toml +++ b/service/pyproject.toml @@ -40,6 +40,7 @@ dependencies = [ "python-pptx>=0.6.23", "websockets>=13.0,<14.0", "reportlab>=4.4.7", + "langchain-qwq>=0.3.1", ] [dependency-groups] @@ -57,6 +58,7 @@ dev = [ "aiosqlite>=0.20.0", "pytest-xdist>=3.8.0", "polyfactory>=3.2.0", + "watchdog>=3.0.0", ] [tool.ruff] diff --git a/service/pyright_output.txt b/service/pyright_output.txt deleted file mode 100644 index 986cd0fb..00000000 --- a/service/pyright_output.txt +++ /dev/null @@ -1,440 +0,0 @@ -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/content_utils.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/content_utils.py:45:43 - warning: Argument type is partially unknown -   Argument corresponds to parameter "content" in function "_extract_from_dict_content" -   Argument type is "dict[Unknown, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/content_utils.py:164:47 - warning: Argument type is partially unknown -   Argument corresponds to parameter "content" in function "_extract_from_dict_content" -   Argument type is "dict[Unknown, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/content_utils.py:176:43 - warning: Argument type is partially unknown -   Argument corresponds to parameter "content" in function "_extract_from_dict_content" -   Argument type is "dict[Unknown, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/content_utils.py:207:22 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "dict[Unknown, Unknown]" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py - 
/Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:102:40 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:102:66 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:104:61 - warning: Argument type is unknown -   Argument corresponds to parameter "s" in function "b64encode" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:114:42 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:114:92 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:117:58 - warning: Argument type is unknown -   Argument corresponds to parameter "object" in function "append" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:392:53 - warning: Argument type is unknown -   Argument corresponds to parameter "args" in function "debug" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:396:66 - warning: Argument type is unknown -   Argument corresponds to parameter "args" in function "debug" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:396:81 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "len" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:404:32 - warning: Argument type 
is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:405:78 - warning: Argument type is unknown -   Argument corresponds to parameter "args" in function "debug" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:405:89 - warning: Argument type is unknown -   Argument corresponds to parameter "args" in function "debug" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:409:33 - warning: Argument type is unknown -   Argument corresponds to parameter "args" in function "debug" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:410:33 - warning: Argument type is unknown -   Argument corresponds to parameter "args" in function "debug" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:425:34 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:426:48 - warning: Argument type is unknown -   Argument corresponds to parameter "o" in function "getattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:431:29 - warning: Argument type is unknown -   Argument corresponds to parameter "args" in function "debug" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:434:85 - warning: Argument type is unknown -   Argument corresponds to parameter "tool_name" in function "format_tool_result" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:434:85 - error: "tool_call" is possibly unbound (reportPossiblyUnboundVariable) - 
/Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:446:33 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:448:42 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:525:55 - warning: Argument type is unknown -   Argument corresponds to parameter "object" in function "__new__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:540:36 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:561:65 - warning: Argument type is unknown -   Argument corresponds to parameter "iterable" in function "__new__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:618:63 - warning: Argument type is unknown -   Argument corresponds to parameter "element" in function "add" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:619:73 - warning: Argument type is unknown -   Argument corresponds to parameter "object" in function "append" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:623:65 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:640:65 - warning: Argument type is partially unknown -   Argument corresponds to parameter "args" in function "debug" -   Argument type is "tuple[Unknown, ...]" 
(reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:644:28 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:653:29 - warning: Argument type is unknown -   Argument corresponds to parameter "args" in function "debug" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:654:29 - warning: Argument type is unknown -   Argument corresponds to parameter "args" in function "debug" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:655:29 - warning: Argument type is unknown -   Argument corresponds to parameter "args" in function "debug" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:664:25 - warning: Argument type is partially unknown -   Argument corresponds to parameter "args" in function "debug" -   Argument type is "Unknown | None" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:665:25 - warning: Argument type is partially unknown -   Argument corresponds to parameter "args" in function "debug" -   Argument type is "Unknown | None" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:666:25 - warning: Argument type is partially unknown -   Argument corresponds to parameter "args" in function "debug" -   Argument type is "Unknown | None" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:681:30 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:682:42 - warning: Argument type is unknown -   Argument corresponds to 
parameter "o" in function "getattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:683:30 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:684:42 - warning: Argument type is unknown -   Argument corresponds to parameter "o" in function "getattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:688:42 - warning: Argument type is unknown -   Argument corresponds to parameter "object" in function "__new__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:724:21 - warning: Argument type is partially unknown -   Argument corresponds to parameter "args" in function "info" -   Argument type is "Unknown | Literal[0]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:725:21 - warning: Argument type is partially unknown -   Argument corresponds to parameter "args" in function "info" -   Argument type is "Unknown | Literal[0]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langchain.py:726:21 - warning: Argument type is partially unknown -   Argument corresponds to parameter "args" in function "info" -   Argument type is "Unknown | Literal[0]" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langgraph.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langgraph.py:63:15 - warning: Return type, "CompiledStateGraph[Unknown, None, Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langgraph.py:234:9 - warning: Type of parameter "compiled_graph" is partially unknown -   Parameter type is "CompiledStateGraph[Unknown, None, Unknown, Unknown]" 
(reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langgraph.py:297:29 - warning: Argument type is partially unknown -   Argument corresponds to parameter "args" in function "debug" -   Argument type is "Unknown | None" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langgraph.py:310:34 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langgraph.py:311:46 - warning: Argument type is unknown -   Argument corresponds to parameter "o" in function "getattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langgraph.py:312:34 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langgraph.py:313:46 - warning: Argument type is unknown -   Argument corresponds to parameter "o" in function "getattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langgraph.py:317:46 - warning: Argument type is unknown -   Argument corresponds to parameter "object" in function "__new__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langgraph.py:471:46 - warning: Argument type is partially unknown -   Argument corresponds to parameter "input" in function "ainvoke" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/langgraph.py:568:90 - warning: Argument type is partially unknown -   Argument corresponds to parameter "input_state" in function "execute_graph_agent_sync" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/multimodal.py - 
/Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/multimodal.py:134:49 - warning: Argument type is unknown -   Argument corresponds to parameter "s" in function "b64encode" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/multimodal.py:148:49 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/multimodal.py:315:79 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:62:37 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:68:63 - warning: Argument type is unknown -   Argument corresponds to parameter "server_id" in function "async_check_mcp_server_status" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:69:31 - warning: Argument type is unknown -   Argument corresponds to parameter "coros_or_futures" in function "gather" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:75:68 - warning: Argument type is unknown -   Argument corresponds to parameter "server_id" in function "get_mcp_server_by_id" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:90:34 - warning: Argument type is partially unknown -   Argument corresponds to parameter "object" in function "append" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) - 
/Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:92:62 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:179:91 - warning: Argument type is partially unknown -   Argument corresponds to parameter "args_dict" in function "call_mcp_tool" -   Argument type is "Any | dict[Unknown, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:222:68 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:224:76 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:225:45 - warning: Argument type is partially unknown -   Argument corresponds to parameter "iterable" in function "join" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:226:63 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:230:28 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:233:39 - warning: Argument type is unknown -   Argument corresponds to parameter "object" in function "__new__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:240:24 - warning: Argument type is partially unknown -   
Argument corresponds to parameter "obj" in function "len" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:243:32 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "hasattr" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:243:72 - warning: Argument type is partially unknown -   Argument corresponds to parameter "object" in function "__new__" -   Argument type is "type[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:243:77 - warning: Argument type is unknown -   Argument corresponds to parameter "o" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:244:67 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:260:65 - warning: Argument type is unknown -   Argument corresponds to parameter "object" in function "append" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:267:52 - warning: Argument type is partially unknown -   Argument corresponds to parameter "iterable" in function "join" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:284:31 - warning: Argument type is partially unknown -   Argument corresponds to parameter "object" in function "__new__" -   Argument type is "list[Unknown] | Any" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:287:31 - warning: Argument type is partially unknown -   Argument corresponds to parameter "s" in function "loads" -   Argument type is "Unknown | str" 
(reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:299:48 - warning: Argument type is partially unknown -   Argument corresponds to parameter "iterable" in function "__init__" -   Argument type is "dict_items[Unknown, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:301:32 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "dict[Unknown, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:302:57 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "dict[Unknown, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:305:29 - warning: Argument type is partially unknown -   Argument corresponds to parameter "iterable" in function "__init__" -   Argument type is "dict_keys[Unknown, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:308:71 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:313:16 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "Unknown | str" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/core/chat/tools.py:315:48 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "Unknown | str" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/api/v1/knowledge_sets.py - 
/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/api/v1/knowledge_sets.py:218:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/api/v1/knowledge_sets.py:334:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/api/v1/knowledge_sets.py:383:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/api/v1/redemption.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/api/v1/redemption.py:566:11 - warning: Return type, "list[Unknown]", is partially unknown (reportUnknownParameterType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/__init__.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/__init__.py:295:55 - warning: Argument type is unknown -   Argument corresponds to parameter "name" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/__init__.py:295:55 - warning: Argument type is unknown -   Argument corresponds to parameter "description" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/__init__.py:295:55 - warning: Argument type is unknown -   Argument corresponds to parameter "state_schema" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/__init__.py:295:55 - warning: Argument type is unknown -   Argument corresponds to parameter "parent_agent_id" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/__init__.py:295:55 - warning: Argument type is unknown -   Argument corresponds to 
parameter "is_published" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/__init__.py:295:55 - warning: Argument type is unknown -   Argument corresponds to parameter "is_official" in function "__init__" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/base_graph_agent.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/base_graph_agent.py:54:9 - warning: Return type, "CompiledStateGraph[Unknown, None, Unknown, Unknown]", is partially unknown (reportUnknownParameterType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/scientific_figure_agent.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/scientific_figure_agent.py:55:9 - warning: Return type, "CompiledStateGraph[Unknown, None, Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/scientific_figure_agent.py:164:42 - warning: Argument type is partially unknown -   Argument corresponds to parameter "input" in function "ainvoke" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/scientific_figure_agent.py:169:40 - warning: Argument type is partially unknown -   Argument corresponds to parameter "object" in function "__new__" -   Argument type is "list[str | dict[Unknown, Unknown]]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/scientific_figure_agent.py:300:42 - warning: Argument type is partially unknown -   Argument corresponds to parameter "input" in function "ainvoke" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/scientific_figure_agent.py:305:40 - warning: Argument type is partially 
unknown -   Argument corresponds to parameter "object" in function "__new__" -   Argument type is "list[str | dict[Unknown, Unknown]]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/scientific_figure_agent.py:395:42 - warning: Argument type is partially unknown -   Argument corresponds to parameter "input" in function "ainvoke" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/builtin_agents/scientific_figure_agent.py:400:40 - warning: Argument type is partially unknown -   Argument corresponds to parameter "object" in function "__new__" -   Argument type is "list[str | dict[Unknown, Unknown]]" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/dify.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/dify.py:15:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/dify.py:48:18 - warning: Argument type is partially unknown -   Argument corresponds to parameter "json" in function "post" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/dify.py:50:20 - warning: Argument type is unknown -   Argument corresponds to parameter "stream" in function "post" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/dify.py:103:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/dify.py:135:18 - warning: Argument type is partially unknown -   Argument corresponds to parameter "json" in function "post" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/dify.py:137:20 - warning: 
Argument type is unknown -   Argument corresponds to parameter "stream" in function "post" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/dynamic_mcp_server.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/dynamic_mcp_server.py:65:30 - warning: Argument type is unknown -   Argument corresponds to parameter "backend" in function "as_proxy" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/file_handlers.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/file_handlers.py:81:42 - warning: Argument type is unknown -   Argument corresponds to parameter "data" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/file_handlers.py:88:32 - warning: Argument type is partially unknown -   Argument corresponds to parameter "iterable" in function "join" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/file_handlers.py:127:26 - warning: Argument type is partially unknown -   Argument corresponds to parameter "iterable" in function "join" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/file_handlers.py:170:26 - warning: Argument type is partially unknown -   Argument corresponds to parameter "iterable" in function "join" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/file_handlers.py:189:20 - error: "append" is not a known attribute of "None" (reportOptionalMemberAccess) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/file_handlers.py:216:39 - warning: Argument type is unknown -   Argument corresponds to parameter "object" in function "append" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/file_handlers.py:216:45 - 
error: Cannot access attribute "text" for class "BaseShape" -   Attribute "text" is unknown (reportAttributeAccessIssue) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/file_handlers.py:218:26 - warning: Argument type is partially unknown -   Argument corresponds to parameter "iterable" in function "join" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/file_handlers.py:243:25 - error: "text" is not a known attribute of "None" (reportOptionalMemberAccess) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/file_handlers.py:245:28 - error: Cannot assign to attribute "text" for class "BaseShape" -   Attribute "text" is unknown (reportAttributeAccessIssue) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/graph_tools.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/graph_tools.py:528:78 - warning: Argument type is partially unknown -   Argument corresponds to parameter "input_state" in function "execute_graph_agent_sync" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/graph_tools.py:723:34 - warning: Argument type is unknown -   Argument corresponds to parameter "from_node_id" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/graph_tools.py:724:32 - warning: Argument type is unknown -   Argument corresponds to parameter "to_node_id" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/graph_tools.py:1152:48 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "set[Unknown]" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/knowledge.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/knowledge.py:191:29 
- error: Argument of type "TextContent" cannot be assigned to parameter "object" of type "Image" in function "append" -   "TextContent" is not assignable to "Image" (reportArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/lab.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/lab.py:72:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/lab.py:124:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/lab.py:191:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/lab.py:255:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/lab.py:325:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/lab.py:406:55 - warning: Argument type is partially unknown -   Argument corresponds to parameter "iterable" in function "__init__" -   Argument type is "dict_keys[str, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/lab.py:409:61 - warning: Argument type is partially unknown -   Argument corresponds to parameter "json" in function "post" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/lab.py:567:62 - warning: Argument type is partially unknown -   Argument corresponds to parameter "params" in function "get" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) - 
/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/lab.py:732:62 - warning: Argument type is partially unknown -   Argument corresponds to parameter "json" in function "patch" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/osdl.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/osdl.py:58:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/osdl.py:127:61 - warning: Argument type is partially unknown -   Argument corresponds to parameter "json" in function "post" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/osdl.py:152:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/osdl.py:228:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/osdl.py:265:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/osdl.py:306:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/osdl.py:344:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/mcp/osdl.py:360:11 - warning: Return type, "dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:43:35 - warning: Argument 
type is unknown -   Argument corresponds to parameter "object" in function "append" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:44:24 - warning: Argument type is partially unknown -   Argument corresponds to parameter "iterable" in function "join" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:88:76 - warning: Type of parameter "callback" is partially unknown -   Parameter type is "(...) -> Unknown" (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:92:9 - warning: Return type, "Dict[Unknown, Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:183:70 - warning: Argument type is unknown -   Argument corresponds to parameter "tool_calls" in function "execute_tool_calls" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:188:43 - warning: Argument type is unknown -   Argument corresponds to parameter "key" in function "get" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:228:17 - warning: Variable "tool_call_id_inner" is not accessed (reportUnusedVariable) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:242:40 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "len" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:243:37 - warning: Argument type is unknown -   Argument corresponds to parameter "iterable" in function "__new__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:250:69 - warning: Argument type is unknown -   Argument corresponds to parameter "content" in function "__init__" (reportUnknownArgumentType) - 
/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:252:68 - warning: Argument type is unknown -   Argument corresponds to parameter "content" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:254:65 - warning: Argument type is unknown -   Argument corresponds to parameter "content" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:287:42 - warning: Argument type is partially unknown -   Argument corresponds to parameter "iterable" in function "join" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:688:41 - warning: Argument type is unknown -   Argument corresponds to parameter "url" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:689:43 - warning: Argument type is unknown -   Argument corresponds to parameter "title" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:690:48 - warning: Argument type is unknown -   Argument corresponds to parameter "cited_text" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:691:49 - warning: Argument type is unknown -   Argument corresponds to parameter "start_index" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:692:47 - warning: Argument type is unknown -   Argument corresponds to parameter "end_index" in function "__init__" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:693:52 - warning: Argument type is unknown -   Argument corresponds to parameter "search_queries" in function "__init__" (reportUnknownArgumentType) - 
/Users/xinquiry/Projects/ScienceOL/Xyzen/service/handler/ws/v1/chat.py:697:89 - warning: Argument type is partially unknown -   Argument corresponds to parameter "citations_data" in function "bulk_create_citations" -   Argument type is "list[Unknown]" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/auth/__init__.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/auth/__init__.py:92:77 - warning: Argument type is unknown -   Argument corresponds to parameter "obj" in function "len" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/auth/__init__.py:286:12 - warning: Condition will always evaluate to True since the types "BaseAuthProvider" and "None" have no overlap (reportUnnecessaryComparison) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/auth/casdoor.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/auth/casdoor.py:132:48 - warning: Argument type is partially unknown -   Argument corresponds to parameter "data" in function "post" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:33:43 - warning: "_connect" is protected and used outside of the class in which it is declared (reportPrivateUsage) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:35:62 - warning: Type of parameter "call_next" is partially unknown -   Parameter type is "(...) 
-> Unknown" (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:53:49 - warning: "_tool_ownership" is protected and used outside of the class in which it is declared (reportPrivateUsage) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:54:50 - warning: "_tool_ownership" is protected and used outside of the class in which it is declared (reportPrivateUsage) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:100:20 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "hasattr" -   Argument type is "ToolResult | Unknown" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:103:43 - warning: Argument type is partially unknown -   Argument corresponds to parameter "o" in function "getattr" -   Argument type is "TextContent | ImageContent | AudioContent | ResourceLink | EmbeddedResource | Unknown" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:103:74 - warning: Argument type is partially unknown -   Argument corresponds to parameter "o" in function "__init__" -   Argument type is "TextContent | ImageContent | AudioContent | ResourceLink | EmbeddedResource | Unknown" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:104:47 - warning: Argument type is partially unknown -   Argument corresponds to parameter "o" in function "__init__" -   Argument type is "ToolResult | Unknown" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:104:70 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "list[TextContent | ImageContent | AudioContent | ResourceLink | EmbeddedResource] | Unknown" 
(reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:106:47 - warning: Argument type is partially unknown -   Argument corresponds to parameter "o" in function "__init__" -   Argument type is "ToolResult | Unknown" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:106:70 - warning: Argument type is partially unknown -   Argument corresponds to parameter "obj" in function "len" -   Argument type is "list[TextContent | ImageContent | AudioContent | ResourceLink | EmbeddedResource] | Unknown" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:108:43 - warning: Argument type is partially unknown -   Argument corresponds to parameter "o" in function "__init__" -   Argument type is "ToolResult | Unknown" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:111:15 - warning: Return type, "List[Unknown]", is partially unknown (reportUnknownParameterType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/middleware/dynamic_mcp_server.py:111:63 - warning: Type of parameter "call_next" is partially unknown -   Parameter type is "(...) 
-> Unknown" (reportUnknownParameterType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/tests/integration/test_handlers/test_marketplace_features.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/tests/integration/test_handlers/test_marketplace_features.py:36:14 - warning: Argument type is partially unknown -   Argument corresponds to parameter "json" in function "post" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/tests/integration/test_handlers/test_marketplace_features.py:102:14 - warning: Argument type is partially unknown -   Argument corresponds to parameter "json" in function "post" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/tests/integration/test_handlers/test_marketplace_features.py:116:14 - warning: Argument type is partially unknown -   Argument corresponds to parameter "json" in function "patch" -   Argument type is "dict[str, Unknown]" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/tests/integration/test_handlers/test_provider_api.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/tests/integration/test_handlers/test_provider_api.py:44:21 - warning: Argument type is partially unknown -   Argument corresponds to parameter "iterable" in function "any" -   Argument type is "Generator[Unknown, None, None]" (reportUnknownArgumentType) -/Users/xinquiry/Projects/ScienceOL/Xyzen/service/tests/unit/handler/mcp/test_knowledge_limits.py - /Users/xinquiry/Projects/ScienceOL/Xyzen/service/tests/unit/handler/mcp/test_knowledge_limits.py:44:86 - warning: Unnecessary "# type: ignore" comment (reportUnnecessaryTypeIgnoreComment) -6 errors, 182 warnings, 0 informations diff --git a/service/tests/factories/file.py b/service/tests/factories/file.py new file mode 100644 index 00000000..1b586d15 --- /dev/null +++ b/service/tests/factories/file.py @@ -0,0 +1,24 @@ +from 
polyfactory.factories.pydantic_factory import ModelFactory + +from app.models.file import File, FileCreate + + +class FileFactory(ModelFactory[File]): + """Factory for File model.""" + + __model__ = File + + +class FileCreateFactory(ModelFactory[FileCreate]): + """Factory for FileCreate schema.""" + + __model__ = FileCreate + + scope = "private" + category = "documents" + is_deleted = False + status = "pending" + message_id = None + folder_id = None + metainfo = None + file_hash = None diff --git a/service/tests/factories/message.py b/service/tests/factories/message.py new file mode 100644 index 00000000..84613278 --- /dev/null +++ b/service/tests/factories/message.py @@ -0,0 +1,22 @@ +from uuid import uuid4 + +from polyfactory import Use +from polyfactory.factories.pydantic_factory import ModelFactory + +from app.models.message import Message, MessageCreate + + +class MessageFactory(ModelFactory[Message]): + """Factory for Message model.""" + + __model__ = Message + + +class MessageCreateFactory(ModelFactory[MessageCreate]): + """Factory for MessageCreate schema.""" + + __model__ = MessageCreate + + role = "user" + topic_id = Use(uuid4) + thinking_content = None diff --git a/service/tests/factories/session.py b/service/tests/factories/session.py new file mode 100644 index 00000000..08b5f30d --- /dev/null +++ b/service/tests/factories/session.py @@ -0,0 +1,21 @@ +from polyfactory.factories.pydantic_factory import ModelFactory + +from app.models.sessions import Session, SessionCreate + + +class SessionFactory(ModelFactory[Session]): + """Factory for Session model.""" + + __model__ = Session + + +class SessionCreateFactory(ModelFactory[SessionCreate]): + """Factory for SessionCreate schema.""" + + __model__ = SessionCreate + + is_active = True + agent_id = None + provider_id = None + model = None + google_search_enabled = False diff --git a/service/tests/factories/topic.py b/service/tests/factories/topic.py new file mode 100644 index 00000000..3a9a9769 --- /dev/null 
+++ b/service/tests/factories/topic.py @@ -0,0 +1,21 @@ +from uuid import uuid4 + +from polyfactory import Use +from polyfactory.factories.pydantic_factory import ModelFactory + +from app.models.topic import Topic, TopicCreate + + +class TopicFactory(ModelFactory[Topic]): + """Factory for Topic model.""" + + __model__ = Topic + + +class TopicCreateFactory(ModelFactory[TopicCreate]): + """Factory for TopicCreate schema.""" + + __model__ = TopicCreate + + is_active = True + session_id = Use(uuid4) diff --git a/service/tests/integration/test_repo/test_file_repo.py b/service/tests/integration/test_repo/test_file_repo.py new file mode 100644 index 00000000..1c145032 --- /dev/null +++ b/service/tests/integration/test_repo/test_file_repo.py @@ -0,0 +1,301 @@ +from uuid import uuid4 + +import pytest +from sqlmodel.ext.asyncio.session import AsyncSession + +from app.repos.file import FileRepository +from tests.factories.file import FileCreateFactory + + +@pytest.mark.integration +class TestFileRepository: + """Integration tests for FileRepository.""" + + @pytest.fixture + def file_repo(self, db_session: AsyncSession) -> FileRepository: + return FileRepository(db_session) + + def _make_unique_storage_key(self, prefix: str = "test") -> str: + """Generate a unique storage key for tests.""" + return f"{prefix}/{uuid4().hex[:8]}/file.txt" + + async def test_create_and_get_file(self, file_repo: FileRepository): + """Test creating a file and retrieving it.""" + user_id = "test-user-file-create" + storage_key = self._make_unique_storage_key() + file_create = FileCreateFactory.build( + user_id=user_id, + storage_key=storage_key, + original_filename="test.txt", + content_type="text/plain", + file_size=1024, + ) + + # Create + created_file = await file_repo.create_file(file_create) + assert created_file.id is not None + assert created_file.user_id == user_id + assert created_file.original_filename == "test.txt" + assert created_file.storage_key == storage_key + + # Get by ID + 
fetched_file = await file_repo.get_file_by_id(created_file.id) + assert fetched_file is not None + assert fetched_file.id == created_file.id + + async def test_get_file_by_storage_key(self, file_repo: FileRepository): + """Test retrieving file by storage key.""" + user_id = "test-user-file-key" + storage_key = self._make_unique_storage_key("key-test") + file_create = FileCreateFactory.build( + user_id=user_id, + storage_key=storage_key, + ) + + await file_repo.create_file(file_create) + + fetched = await file_repo.get_file_by_storage_key(storage_key) + assert fetched is not None + assert fetched.storage_key == storage_key + + # Non-existent key + not_found = await file_repo.get_file_by_storage_key("non/existent/key") + assert not_found is None + + async def test_get_files_by_user(self, file_repo: FileRepository): + """Test listing files for a user.""" + user_id = "test-user-file-list" + + # Create 3 files + for i in range(3): + await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key(f"list-{i}"), + ) + ) + + # Create file for another user + await file_repo.create_file( + FileCreateFactory.build( + user_id="other-user", + storage_key=self._make_unique_storage_key("other"), + ) + ) + + files = await file_repo.get_files_by_user(user_id) + assert len(files) == 3 + for f in files: + assert f.user_id == user_id + + async def test_get_files_by_user_with_scope_filter(self, file_repo: FileRepository): + """Test filtering files by scope.""" + user_id = "test-user-file-scope" + + await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("public"), + scope="public", + ) + ) + await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("private"), + scope="private", + ) + ) + + public_files = await file_repo.get_files_by_user(user_id, scope="public") + assert len(public_files) == 1 + assert 
public_files[0].scope == "public" + + async def test_update_file(self, file_repo: FileRepository): + """Test updating a file.""" + user_id = "test-user-file-update" + created = await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("update"), + original_filename="old_name.txt", + ) + ) + + from app.models.file import FileUpdate + + update_data = FileUpdate(original_filename="new_name.txt") + updated = await file_repo.update_file(created.id, update_data) + + assert updated is not None + assert updated.original_filename == "new_name.txt" + + # Verify persistence + fetched = await file_repo.get_file_by_id(created.id) + assert fetched is not None + assert fetched.original_filename == "new_name.txt" + + async def test_soft_delete_and_restore_file(self, file_repo: FileRepository): + """Test soft delete and restore functionality.""" + user_id = "test-user-file-soft-delete" + created = await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("soft-del"), + ) + ) + + # Soft delete + success = await file_repo.soft_delete_file(created.id) + assert success is True + + fetched = await file_repo.get_file_by_id(created.id) + assert fetched is not None + assert fetched.is_deleted is True + assert fetched.deleted_at is not None + + # Restore + restored = await file_repo.restore_file(created.id) + assert restored is True + + fetched = await file_repo.get_file_by_id(created.id) + assert fetched is not None + assert fetched.is_deleted is False + assert fetched.deleted_at is None + + async def test_hard_delete_file(self, file_repo: FileRepository): + """Test permanent file deletion.""" + user_id = "test-user-file-hard-delete" + created = await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("hard-del"), + ) + ) + + success = await file_repo.hard_delete_file(created.id) + assert success is True + + 
fetched = await file_repo.get_file_by_id(created.id) + assert fetched is None + + async def test_get_files_by_hash(self, file_repo: FileRepository): + """Test deduplication lookup by file hash.""" + user_id = "test-user-file-hash" + file_hash = "abc123def456" + + # Create 2 files with same hash + await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("hash1"), + file_hash=file_hash, + ) + ) + await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("hash2"), + file_hash=file_hash, + ) + ) + + files = await file_repo.get_files_by_hash(file_hash, user_id) + assert len(files) == 2 + + async def test_get_total_size_by_user(self, file_repo: FileRepository): + """Test calculating total file size for a user.""" + user_id = "test-user-file-size" + + await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("size1"), + file_size=1000, + ) + ) + await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("size2"), + file_size=2000, + ) + ) + + total_size = await file_repo.get_total_size_by_user(user_id) + assert total_size == 3000 + + async def test_get_file_count_by_user(self, file_repo: FileRepository): + """Test counting files for a user.""" + user_id = "test-user-file-count" + + for i in range(4): + await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key(f"count-{i}"), + ) + ) + + count = await file_repo.get_file_count_by_user(user_id) + assert count == 4 + + async def test_bulk_soft_delete_by_user(self, file_repo: FileRepository): + """Test bulk soft delete with user validation.""" + user_id = "test-user-file-bulk-del" + + file1 = await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("bulk1"), + ) + ) + 
file2 = await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("bulk2"), + ) + ) + file3 = await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("bulk3"), + ) + ) + + count = await file_repo.bulk_soft_delete_by_user(user_id, [file1.id, file2.id]) + assert count == 2 + + # file3 should not be deleted + fetched3 = await file_repo.get_file_by_id(file3.id) + assert fetched3 is not None + assert fetched3.is_deleted is False + + async def test_update_files_message_id(self, file_repo: FileRepository): + """Test linking files to a message.""" + user_id = "test-user-file-msg-link" + message_id = uuid4() + + file1 = await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("msg1"), + status="pending", + ) + ) + file2 = await file_repo.create_file( + FileCreateFactory.build( + user_id=user_id, + storage_key=self._make_unique_storage_key("msg2"), + status="pending", + ) + ) + + count = await file_repo.update_files_message_id([file1.id, file2.id], message_id, user_id) + assert count == 2 + + # Verify files are linked and confirmed + fetched1 = await file_repo.get_file_by_id(file1.id) + assert fetched1 is not None + assert fetched1.message_id == message_id + assert fetched1.status == "confirmed" diff --git a/service/tests/integration/test_repo/test_message_repo.py b/service/tests/integration/test_repo/test_message_repo.py new file mode 100644 index 00000000..ee08161e --- /dev/null +++ b/service/tests/integration/test_repo/test_message_repo.py @@ -0,0 +1,146 @@ +import pytest +from sqlmodel.ext.asyncio.session import AsyncSession + +from app.models.topic import Topic +from app.repos.message import MessageRepository +from app.repos.session import SessionRepository +from app.repos.topic import TopicRepository +from tests.factories.message import MessageCreateFactory +from tests.factories.session 
import SessionCreateFactory +from tests.factories.topic import TopicCreateFactory + + +@pytest.mark.integration +class TestMessageRepository: + """Integration tests for MessageRepository.""" + + @pytest.fixture + def message_repo(self, db_session: AsyncSession) -> MessageRepository: + return MessageRepository(db_session) + + @pytest.fixture + def session_repo(self, db_session: AsyncSession) -> SessionRepository: + return SessionRepository(db_session) + + @pytest.fixture + def topic_repo(self, db_session: AsyncSession) -> TopicRepository: + return TopicRepository(db_session) + + @pytest.fixture + async def test_topic(self, session_repo: SessionRepository, topic_repo: TopicRepository): + """Create a test session and topic for message tests.""" + session = await session_repo.create_session(SessionCreateFactory.build(), "test-user-message") + topic = await topic_repo.create_topic(TopicCreateFactory.build(session_id=session.id)) + return topic + + async def test_create_and_get_message(self, message_repo: MessageRepository, test_topic: Topic): + """Test creating a message and retrieving it.""" + message_create = MessageCreateFactory.build(topic_id=test_topic.id, role="user", content="Hello!") + + # Create + created_message = await message_repo.create_message(message_create) + assert created_message.id is not None + assert created_message.content == "Hello!" 
+ assert created_message.role == "user" + assert created_message.topic_id == test_topic.id + + # Get by ID + fetched_message = await message_repo.get_message_by_id(created_message.id) + assert fetched_message is not None + assert fetched_message.id == created_message.id + + async def test_get_messages_by_topic(self, message_repo: MessageRepository, test_topic: Topic): + """Test listing messages for a topic.""" + # Create 3 messages + await message_repo.create_message( + MessageCreateFactory.build(topic_id=test_topic.id, role="user", content="First") + ) + await message_repo.create_message( + MessageCreateFactory.build(topic_id=test_topic.id, role="assistant", content="Second") + ) + await message_repo.create_message( + MessageCreateFactory.build(topic_id=test_topic.id, role="user", content="Third") + ) + + messages = await message_repo.get_messages_by_topic(test_topic.id) + assert len(messages) == 3 + for msg in messages: + assert msg.topic_id == test_topic.id + + async def test_get_messages_by_topic_ordered(self, message_repo: MessageRepository, test_topic: Topic): + """Test messages are ordered by created_at ascending.""" + msg1 = await message_repo.create_message(MessageCreateFactory.build(topic_id=test_topic.id, content="First")) + msg2 = await message_repo.create_message(MessageCreateFactory.build(topic_id=test_topic.id, content="Second")) + + messages = await message_repo.get_messages_by_topic(test_topic.id, order_by_created=True) + assert len(messages) == 2 + assert messages[0].id == msg1.id + assert messages[1].id == msg2.id + + async def test_get_messages_by_topic_with_limit(self, message_repo: MessageRepository, test_topic: Topic): + """Test limiting number of messages returned.""" + for i in range(5): + await message_repo.create_message( + MessageCreateFactory.build(topic_id=test_topic.id, content=f"Message {i}") + ) + + messages = await message_repo.get_messages_by_topic(test_topic.id, limit=3) + assert len(messages) == 3 + + async def 
test_delete_message(self, message_repo: MessageRepository, test_topic: Topic): + """Test deleting a single message.""" + created = await message_repo.create_message(MessageCreateFactory.build(topic_id=test_topic.id)) + + # Delete without cascade (no files to delete) + success = await message_repo.delete_message(created.id, cascade_files=False) + assert success is True + + fetched = await message_repo.get_message_by_id(created.id) + assert fetched is None + + async def test_delete_messages_by_topic(self, message_repo: MessageRepository, test_topic: Topic): + """Test deleting all messages for a topic.""" + # Create 3 messages + for _ in range(3): + await message_repo.create_message(MessageCreateFactory.build(topic_id=test_topic.id)) + + count = await message_repo.delete_messages_by_topic(test_topic.id, cascade_files=False) + assert count == 3 + + messages = await message_repo.get_messages_by_topic(test_topic.id) + assert len(messages) == 0 + + async def test_bulk_delete_messages(self, message_repo: MessageRepository, test_topic: Topic): + """Test deleting multiple messages by ID.""" + msg1 = await message_repo.create_message(MessageCreateFactory.build(topic_id=test_topic.id)) + msg2 = await message_repo.create_message(MessageCreateFactory.build(topic_id=test_topic.id)) + msg3 = await message_repo.create_message(MessageCreateFactory.build(topic_id=test_topic.id)) + + count = await message_repo.bulk_delete_messages([msg1.id, msg2.id], cascade_files=False) + assert count == 2 + + # msg3 should still exist + assert await message_repo.get_message_by_id(msg3.id) is not None + assert await message_repo.get_message_by_id(msg1.id) is None + + @pytest.mark.parametrize("role", ["user", "assistant", "system", "tool"]) + async def test_create_message_with_different_roles( + self, message_repo: MessageRepository, test_topic: Topic, role: str + ): + """Test creating messages with different roles.""" + created = await message_repo.create_message( + 
MessageCreateFactory.build(topic_id=test_topic.id, role=role, content="Test") + ) + assert created.role == role + + async def test_create_message_with_thinking_content(self, message_repo: MessageRepository, test_topic: Topic): + """Test creating a message with thinking content (AI reasoning).""" + created = await message_repo.create_message( + MessageCreateFactory.build( + topic_id=test_topic.id, + role="assistant", + content="Final answer", + thinking_content="Let me think about this...", + ) + ) + assert created.thinking_content == "Let me think about this..." diff --git a/service/tests/integration/test_repo/test_session_repo.py b/service/tests/integration/test_repo/test_session_repo.py new file mode 100644 index 00000000..eb5769a3 --- /dev/null +++ b/service/tests/integration/test_repo/test_session_repo.py @@ -0,0 +1,130 @@ +import pytest +from sqlmodel.ext.asyncio.session import AsyncSession + +from app.repos.session import SessionRepository +from tests.factories.session import SessionCreateFactory + + +@pytest.mark.integration +class TestSessionRepository: + """Integration tests for SessionRepository.""" + + @pytest.fixture + def session_repo(self, db_session: AsyncSession) -> SessionRepository: + return SessionRepository(db_session) + + async def test_create_and_get_session(self, session_repo: SessionRepository): + """Test creating a session and retrieving it.""" + user_id = "test-user-session-create" + session_create = SessionCreateFactory.build() + + # Create + created_session = await session_repo.create_session(session_create, user_id) + assert created_session.id is not None + assert created_session.name == session_create.name + assert created_session.user_id == user_id + + # Get by ID + fetched_session = await session_repo.get_session_by_id(created_session.id) + assert fetched_session is not None + assert fetched_session.id == created_session.id + assert fetched_session.name == created_session.name + + async def test_get_sessions_by_user(self, 
session_repo: SessionRepository): + """Test listing sessions for a user.""" + user_id = "test-user-session-list" + + # Create 2 sessions for the user + await session_repo.create_session(SessionCreateFactory.build(), user_id) + await session_repo.create_session(SessionCreateFactory.build(), user_id) + + # Create session for another user + await session_repo.create_session(SessionCreateFactory.build(), "other-user") + + sessions = await session_repo.get_sessions_by_user(user_id) + assert len(sessions) == 2 + for session in sessions: + assert session.user_id == user_id + + async def test_get_session_by_user_and_agent(self, session_repo: SessionRepository): + """Test fetching session by user and agent combination.""" + user_id = "test-user-session-agent" + session_create = SessionCreateFactory.build(agent_id=None) + + await session_repo.create_session(session_create, user_id) + + # Find session with no agent + found = await session_repo.get_session_by_user_and_agent(user_id, None) + assert found is not None + assert found.user_id == user_id + assert found.agent_id is None + + async def test_update_session(self, session_repo: SessionRepository): + """Test updating a session.""" + user_id = "test-user-session-update" + created = await session_repo.create_session(SessionCreateFactory.build(), user_id) + + from app.models.sessions import SessionUpdate + + update_data = SessionUpdate(name="Updated Session Name", is_active=False) + updated = await session_repo.update_session(created.id, update_data) + + assert updated is not None + assert updated.name == "Updated Session Name" + assert updated.is_active is False + + # Verify persistence + fetched = await session_repo.get_session_by_id(created.id) + assert fetched is not None + assert fetched.name == "Updated Session Name" + + async def test_delete_session(self, session_repo: SessionRepository): + """Test deleting a session.""" + user_id = "test-user-session-delete" + created = await 
session_repo.create_session(SessionCreateFactory.build(), user_id) + + success = await session_repo.delete_session(created.id) + assert success is True + + fetched = await session_repo.get_session_by_id(created.id) + assert fetched is None + + async def test_delete_session_not_found(self, session_repo: SessionRepository): + """Test deleting a non-existent session.""" + from uuid import uuid4 + + success = await session_repo.delete_session(uuid4()) + assert success is False + + async def test_get_sessions_ordered_by_activity(self, session_repo: SessionRepository, db_session: AsyncSession): + """Test fetching sessions ordered by recent topic activity.""" + import asyncio + + from app.repos.topic import TopicRepository + from tests.factories.topic import TopicCreateFactory + + user_id = "test-user-session-ordered" + topic_repo = TopicRepository(db_session) + + # Create 3 sessions + session1 = await session_repo.create_session(SessionCreateFactory.build(name="Session 1"), user_id) + session2 = await session_repo.create_session(SessionCreateFactory.build(name="Session 2"), user_id) + session3 = await session_repo.create_session(SessionCreateFactory.build(name="Session 3"), user_id) + + topic1 = await topic_repo.create_topic(TopicCreateFactory.build(session_id=session1.id, name="Topic 1")) + await asyncio.sleep(0.01) + + _topic2 = await topic_repo.create_topic(TopicCreateFactory.build(session_id=session2.id, name="Topic 2")) + await asyncio.sleep(0.01) + + _topic3 = await topic_repo.create_topic(TopicCreateFactory.build(session_id=session3.id, name="Topic 3")) + + await asyncio.sleep(0.01) + await topic_repo.update_topic_timestamp(topic1.id) + + sessions = await session_repo.get_sessions_by_user_ordered_by_activity(user_id) + + assert len(sessions) == 3 + assert sessions[0].id == session1.id + assert sessions[1].id == session3.id + assert sessions[2].id == session2.id diff --git a/service/tests/integration/test_repo/test_topic_repo.py 
b/service/tests/integration/test_repo/test_topic_repo.py new file mode 100644 index 00000000..5ab316af --- /dev/null +++ b/service/tests/integration/test_repo/test_topic_repo.py @@ -0,0 +1,136 @@ +import pytest +from sqlmodel.ext.asyncio.session import AsyncSession + +from app.models.sessions import Session +from app.repos.session import SessionRepository +from app.repos.topic import TopicRepository +from tests.factories.session import SessionCreateFactory +from tests.factories.topic import TopicCreateFactory + + +@pytest.mark.integration +class TestTopicRepository: + """Integration tests for TopicRepository.""" + + @pytest.fixture + def topic_repo(self, db_session: AsyncSession) -> TopicRepository: + return TopicRepository(db_session) + + @pytest.fixture + def session_repo(self, db_session: AsyncSession) -> SessionRepository: + return SessionRepository(db_session) + + @pytest.fixture + async def test_session(self, session_repo: SessionRepository): + """Create a test session for topic tests.""" + return await session_repo.create_session(SessionCreateFactory.build(), "test-user-topic") + + async def test_create_and_get_topic(self, topic_repo: TopicRepository, test_session: Session): + """Test creating a topic and retrieving it.""" + topic_create = TopicCreateFactory.build(session_id=test_session.id) + + # Create + created_topic = await topic_repo.create_topic(topic_create) + assert created_topic.id is not None + assert created_topic.name == topic_create.name + assert created_topic.session_id == test_session.id + + # Get by ID + fetched_topic = await topic_repo.get_topic_by_id(created_topic.id) + assert fetched_topic is not None + assert fetched_topic.id == created_topic.id + + async def test_get_topics_by_session(self, topic_repo: TopicRepository, test_session: Session): + """Test listing topics for a session.""" + # Create 2 topics + await topic_repo.create_topic(TopicCreateFactory.build(session_id=test_session.id)) + await 
topic_repo.create_topic(TopicCreateFactory.build(session_id=test_session.id)) + + topics = await topic_repo.get_topics_by_session(test_session.id) + assert len(topics) == 2 + for topic in topics: + assert topic.session_id == test_session.id + + async def test_get_topics_by_session_ordered(self, topic_repo: TopicRepository, test_session: Session): + """Test listing topics ordered by updated_at.""" + await topic_repo.create_topic(TopicCreateFactory.build(session_id=test_session.id, name="Topic 1")) + await topic_repo.create_topic(TopicCreateFactory.build(session_id=test_session.id, name="Topic 2")) + + topics = await topic_repo.get_topics_by_session(test_session.id, order_by_updated=True) + assert len(topics) == 2 + # Most recently updated should be first (descending order) + assert topics[0].name == "Topic 2" + + async def test_update_topic(self, topic_repo: TopicRepository, test_session: Session): + """Test updating a topic.""" + created = await topic_repo.create_topic(TopicCreateFactory.build(session_id=test_session.id)) + + from app.models.topic import TopicUpdate + + update_data = TopicUpdate(name="Updated Topic Name", is_active=False) + updated = await topic_repo.update_topic(created.id, update_data) + + assert updated is not None + assert updated.name == "Updated Topic Name" + assert updated.is_active is False + + # Verify persistence + fetched = await topic_repo.get_topic_by_id(created.id) + assert fetched is not None + assert fetched.name == "Updated Topic Name" + + async def test_delete_topic(self, topic_repo: TopicRepository, test_session: Session): + """Test deleting a topic.""" + created = await topic_repo.create_topic(TopicCreateFactory.build(session_id=test_session.id)) + + success = await topic_repo.delete_topic(created.id) + assert success is True + + fetched = await topic_repo.get_topic_by_id(created.id) + assert fetched is None + + async def test_delete_topic_not_found(self, topic_repo: TopicRepository): + """Test deleting a non-existent topic.""" 
+ from uuid import uuid4 + + success = await topic_repo.delete_topic(uuid4()) + assert success is False + + async def test_bulk_delete_topics(self, topic_repo: TopicRepository, test_session: Session): + """Test deleting multiple topics at once.""" + topic1 = await topic_repo.create_topic(TopicCreateFactory.build(session_id=test_session.id)) + topic2 = await topic_repo.create_topic(TopicCreateFactory.build(session_id=test_session.id)) + topic3 = await topic_repo.create_topic(TopicCreateFactory.build(session_id=test_session.id)) + + count = await topic_repo.bulk_delete_topics([topic1.id, topic2.id]) + assert count == 2 + + # topic3 should still exist + assert await topic_repo.get_topic_by_id(topic3.id) is not None + assert await topic_repo.get_topic_by_id(topic1.id) is None + assert await topic_repo.get_topic_by_id(topic2.id) is None + + async def test_update_topic_timestamp(self, topic_repo: TopicRepository, test_session: Session): + """Test updating a topic's timestamp.""" + created = await topic_repo.create_topic(TopicCreateFactory.build(session_id=test_session.id)) + original_updated_at = created.updated_at + + # Small delay to ensure timestamp difference + import asyncio + + await asyncio.sleep(0.01) + + updated = await topic_repo.update_topic_timestamp(created.id) + assert updated is not None + # Verify the updated_at was changed (comparing without timezone awareness) + assert updated.updated_at is not None + # The timestamp should have been updated (either same or later) + assert updated.updated_at.replace(tzinfo=None) >= original_updated_at.replace(tzinfo=None) + + async def test_get_topic_with_details(self, topic_repo: TopicRepository, test_session: Session): + """Test get_topic_with_details (alias for get_topic_by_id in no-FK architecture).""" + created = await topic_repo.create_topic(TopicCreateFactory.build(session_id=test_session.id)) + + fetched = await topic_repo.get_topic_with_details(created.id) + assert fetched is not None + assert fetched.id == 
created.id diff --git a/service/tests/unit/test_core/test_thinking_events.py b/service/tests/unit/test_core/test_thinking_events.py new file mode 100644 index 00000000..a4b5bb51 --- /dev/null +++ b/service/tests/unit/test_core/test_thinking_events.py @@ -0,0 +1,146 @@ +""" +Unit tests for thinking event handling. + +Tests the ThinkingEventHandler class and thinking content extraction +from various provider formats (Anthropic, DeepSeek, etc.). +""" + +from app.core.chat.stream_handlers import ThinkingEventHandler +from app.schemas.chat_events import ChatEventType + + +class MockMessageChunk: + """Mock message chunk for testing thinking content extraction.""" + + def __init__( + self, + content: str | list = "", + additional_kwargs: dict | None = None, + response_metadata: dict | None = None, + ): + self.content = content + self.additional_kwargs = additional_kwargs or {} + self.response_metadata = response_metadata or {} + + +class TestThinkingEventHandler: + """Tests for ThinkingEventHandler event creation methods.""" + + def test_create_thinking_start_event(self) -> None: + """Verify thinking_start event has correct structure.""" + event = ThinkingEventHandler.create_thinking_start("stream_123") + + assert event["type"] == ChatEventType.THINKING_START + assert event["data"]["id"] == "stream_123" + + def test_create_thinking_chunk_event(self) -> None: + """Verify thinking_chunk event has correct structure.""" + event = ThinkingEventHandler.create_thinking_chunk("stream_123", "Let me think...") + + assert event["type"] == ChatEventType.THINKING_CHUNK + assert event["data"]["id"] == "stream_123" + assert event["data"]["content"] == "Let me think..." 
+ + def test_create_thinking_end_event(self) -> None: + """Verify thinking_end event has correct structure.""" + event = ThinkingEventHandler.create_thinking_end("stream_123") + + assert event["type"] == ChatEventType.THINKING_END + assert event["data"]["id"] == "stream_123" + + +class TestExtractThinkingContent: + """Tests for ThinkingEventHandler.extract_thinking_content method.""" + + def test_extract_deepseek_reasoning_content(self) -> None: + """Extract thinking from DeepSeek R1 style additional_kwargs.reasoning_content.""" + chunk = MockMessageChunk( + content="", + additional_kwargs={"reasoning_content": "Step 1: Analyze the problem..."}, + ) + + result = ThinkingEventHandler.extract_thinking_content(chunk) + + assert result == "Step 1: Analyze the problem..." + + def test_extract_anthropic_thinking_block(self) -> None: + """Extract thinking from Anthropic Claude style content blocks.""" + chunk = MockMessageChunk( + content=[ + {"type": "thinking", "thinking": "Let me reason through this..."}, + {"type": "text", "text": "The answer is 42."}, + ] + ) + + result = ThinkingEventHandler.extract_thinking_content(chunk) + + assert result == "Let me reason through this..." + + def test_extract_thinking_from_response_metadata(self) -> None: + """Extract thinking from response_metadata.thinking.""" + chunk = MockMessageChunk( + content="", + response_metadata={"thinking": "I need to consider all factors..."}, + ) + + result = ThinkingEventHandler.extract_thinking_content(chunk) + + assert result == "I need to consider all factors..." + + def test_extract_reasoning_content_from_response_metadata(self) -> None: + """Extract from response_metadata.reasoning_content (alternative key).""" + chunk = MockMessageChunk( + content="", + response_metadata={"reasoning_content": "Analyzing the data..."}, + ) + + result = ThinkingEventHandler.extract_thinking_content(chunk) + + assert result == "Analyzing the data..." 
+ + def test_no_thinking_content_returns_none(self) -> None: + """Return None when no thinking content is present.""" + chunk = MockMessageChunk( + content="Hello, world!", + additional_kwargs={}, + response_metadata={}, + ) + + result = ThinkingEventHandler.extract_thinking_content(chunk) + + assert result is None + + def test_empty_thinking_returns_none(self) -> None: + """Return None for empty reasoning_content string.""" + chunk = MockMessageChunk( + content="", + additional_kwargs={"reasoning_content": ""}, + ) + + result = ThinkingEventHandler.extract_thinking_content(chunk) + + assert result is None + + def test_deepseek_takes_priority_over_response_metadata(self) -> None: + """DeepSeek additional_kwargs should be checked first.""" + chunk = MockMessageChunk( + content="", + additional_kwargs={"reasoning_content": "From additional_kwargs"}, + response_metadata={"thinking": "From response_metadata"}, + ) + + result = ThinkingEventHandler.extract_thinking_content(chunk) + + assert result == "From additional_kwargs" + + def test_handles_missing_attributes_gracefully(self) -> None: + """Handle objects without expected attributes.""" + + class MinimalChunk: + pass + + chunk = MinimalChunk() + + result = ThinkingEventHandler.extract_thinking_content(chunk) + + assert result is None diff --git a/service/uv.lock b/service/uv.lock index 08be285f..9e0d0584 100644 --- a/service/uv.lock +++ b/service/uv.lock @@ -1294,6 +1294,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, ] +[[package]] +name = "json-repair" +version = "0.53.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/b6/4e/f92634077c08208df2f7d5999af3288f29bd4348849ba99fdd34f59689f4/json_repair-0.53.1.tar.gz", hash = "sha256:9eacf70954399c49f29c4723e3b899088dab096795b70c061d0421af720b35fa", size = 35947, upload-time = "2025-11-18T11:23:14.437Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1b/52/afc5ba813be79f3d34c4cb88dea63a91c2e403a899168455d2c6e56cadfb/json_repair-0.53.1-py3-none-any.whl", hash = "sha256:6d80d725f747e55373d098f883bd8c48c55c24f80b034458f5cf7e82834c803f", size = 27601, upload-time = "2025-11-18T11:23:13.012Z" }, +] + [[package]] name = "jsonpatch" version = "1.33" @@ -1496,6 +1505,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d1/11/2b3b4973495fc5f0456ed5c8c88a6ded7ca34c8608c72faafa87088acf5a/langchain_openai-1.1.3-py3-none-any.whl", hash = "sha256:58945d9e87c1ab3a91549c3f3744c6c9571511cdc3cf875b8842aaec5b3e32a6", size = 84585, upload-time = "2025-12-12T22:28:07.066Z" }, ] +[[package]] +name = "langchain-qwq" +version = "0.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "json-repair" }, + { name = "langchain" }, + { name = "langchain-openai" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/24/20d9893d669509caade821322d5c803257806e1b3f284529e4297c384fd4/langchain_qwq-0.3.1.tar.gz", hash = "sha256:9a620cbe82b667784754798ce7ce19cb3ca506a3142d0862037b86a4f0c3257c", size = 16771, upload-time = "2025-11-29T09:36:28.398Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/03/13c78644f1aa23cefef58dc51313cc075d426b7ab1c7cc897f6ce3380eb6/langchain_qwq-0.3.1-py3-none-any.whl", hash = "sha256:afd73d64817e622ba704a223d4451bdb0941e61d4a71d5699432dd7247ad7d04", size = 16007, upload-time = "2025-11-29T09:36:27.16Z" }, +] + [[package]] name = "langgraph" version = "1.0.5" @@ -2812,6 +2835,7 @@ dependencies = [ { name = "langchain-google-genai" }, { name = "langchain-google-vertexai" }, { name = "langchain-openai" }, + { name = 
"langchain-qwq" }, { name = "langgraph" }, { name = "langgraph-checkpoint-postgres" }, { name = "litellm" }, @@ -2849,6 +2873,7 @@ dev = [ { name = "ruff" }, { name = "types-pyjwt" }, { name = "types-requests" }, + { name = "watchdog" }, ] [package.metadata] @@ -2868,6 +2893,7 @@ requires-dist = [ { name = "langchain-google-genai", specifier = ">=3.0.0" }, { name = "langchain-google-vertexai", specifier = ">=3.0.3" }, { name = "langchain-openai", specifier = ">=1.0.1" }, + { name = "langchain-qwq", specifier = ">=0.3.1" }, { name = "langgraph", specifier = ">=1.0.1" }, { name = "langgraph-checkpoint-postgres", specifier = ">=3.0.0" }, { name = "litellm", specifier = ">=1.80.9" }, @@ -2905,6 +2931,7 @@ dev = [ { name = "ruff", specifier = ">=0.14.3" }, { name = "types-pyjwt", specifier = ">=1.7.1" }, { name = "types-requests", specifier = ">=2.32.4.20250611" }, + { name = "watchdog", specifier = ">=3.0.0" }, ] [[package]] @@ -3280,6 +3307,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/79/0c/c05523fa3181fdf0c9c52a6ba91a23fbf3246cc095f26f6516f9c60e6771/virtualenv-20.35.4-py3-none-any.whl", hash = "sha256:c21c9cede36c9753eeade68ba7d523529f228a403463376cf821eaae2b650f1b", size = 6005095, upload-time = "2025-10-29T06:57:37.598Z" }, ] +[[package]] +name = "watchdog" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/ea/3930d07dafc9e286ed356a679aa02d777c06e9bfd1164fa7c19c288a5483/watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948", size = 96471, upload-time = "2024-11-01T14:06:37.745Z" }, + { url = 
"https://files.pythonhosted.org/packages/12/87/48361531f70b1f87928b045df868a9fd4e253d9ae087fa4cf3f7113be363/watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860", size = 88449, upload-time = "2024-11-01T14:06:39.748Z" }, + { url = "https://files.pythonhosted.org/packages/5b/7e/8f322f5e600812e6f9a31b75d242631068ca8f4ef0582dd3ae6e72daecc8/watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0", size = 89054, upload-time = "2024-11-01T14:06:41.009Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" }, + { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" }, + { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" }, + { url = 
"https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" }, + { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" }, + { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" }, + { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" }, + { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" }, + { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, +] + [[package]] name = "watchfiles" version = "1.1.1" diff --git a/web/package.json b/web/package.json index f8fdd58b..8702f7a8 100644 --- a/web/package.json +++ b/web/package.json @@ -152,14 
+152,12 @@ "react-dnd": "^16.0.1", "react-dnd-html5-backend": "^16.0.1", "react-i18next": "^16.5.0", - "react-image-crop": "^11.0.10", "react-lite-youtube-embed": "^3.3.3", "react-markdown": "^10.1.0", "react-player": "3.3.1", "react-textarea-autosize": "^8.5.9", "react-tweet": "^3.3.0", "react-use-measure": "^2.1.7", - "react-use-websocket": "^4.13.0", "rehype-katex": "^7.0.1", "remark-gfm": "^4.0.1", "remark-math": "^6.0.0", diff --git a/web/src/app/marketplace/AgentMarketplaceDetail.tsx b/web/src/app/marketplace/AgentMarketplaceDetail.tsx index 6521a2e2..08f83baa 100644 --- a/web/src/app/marketplace/AgentMarketplaceDetail.tsx +++ b/web/src/app/marketplace/AgentMarketplaceDetail.tsx @@ -6,6 +6,7 @@ import { useMarketplaceRequirements, useToggleLike, } from "@/hooks/useMarketplace"; +import Markdown from "@/lib/Markdown"; import { useIsMarketplaceOwner } from "@/utils/marketplace"; import { ArrowLeftIcon, @@ -20,8 +21,7 @@ import { } from "@heroicons/react/24/outline"; import { HeartIcon as HeartSolidIcon } from "@heroicons/react/24/solid"; import { useState } from "react"; -import ReactMarkdown from "react-markdown"; -import remarkGfm from "remark-gfm"; +import { useTranslation } from "react-i18next"; interface AgentMarketplaceDetailProps { marketplaceId: string; @@ -39,6 +39,7 @@ export default function AgentMarketplaceDetail({ onBack, onManage, }: AgentMarketplaceDetailProps) { + const { t } = useTranslation(); const [showForkModal, setShowForkModal] = useState(false); const [activeTab, setActiveTab] = useState< "readme" | "config" | "requirements" @@ -89,7 +90,7 @@ export default function AgentMarketplaceDetail({ {/* Loading Icon */}

- Loading agent details... + {t("marketplace.detail.loading")}

@@ -103,9 +104,7 @@ export default function AgentMarketplaceDetail({
-
- Failed to load agent details. Please try again. -
+
{t("marketplace.detail.error")}
@@ -113,7 +112,7 @@ export default function AgentMarketplaceDetail({ } return ( -
+
{/* Header with back button */}
@@ -122,14 +121,14 @@ export default function AgentMarketplaceDetail({ className="group mb-4 flex items-center gap-2 rounded-lg border border-neutral-200 bg-white px-4 py-2 text-sm font-medium text-neutral-700 shadow-sm transition-all hover:border-neutral-300 hover:shadow dark:border-neutral-800 dark:bg-neutral-900 dark:text-neutral-300 dark:hover:border-neutral-700" > - Back to Marketplace + {t("marketplace.detail.back")}
{/* Main Content */}
{/* Left Column - Agent Info */} -
+
{/* Agent Header */}
{/* Gradient background */} @@ -153,13 +152,14 @@ export default function AgentMarketplaceDetail({ {listing.name}

- Published by{" "} + {t("marketplace.detail.publishedBy")}{" "} {listing.user_id.split("@")[0] || listing.user_id}

- {listing.description || "No description provided"} + {listing.description || + t("marketplace.detail.noDescription")}

{listing.tags.map((tag, index) => ( @@ -185,7 +185,9 @@ export default function AgentMarketplaceDetail({
{listing.likes_count}
-
Likes
+
+ {t("marketplace.detail.stats.likes")} +
@@ -208,7 +210,9 @@ export default function AgentMarketplaceDetail({
{listing.forks_count}
-
Forks
+
+ {t("marketplace.detail.stats.forks")} +
@@ -219,7 +223,9 @@ export default function AgentMarketplaceDetail({
{listing.views_count}
-
Views
+
+ {t("marketplace.detail.stats.views")} +
@@ -229,39 +235,39 @@ export default function AgentMarketplaceDetail({ {/* Tabbed Content Section */}
{/* Tab Bar */} -
+
@@ -269,21 +275,22 @@ export default function AgentMarketplaceDetail({
{/* README Tab */} {activeTab === "readme" && ( -
+
{listing.readme ? ( - - {listing.readme} - + ) : (
-

No README provided for this agent.

+

{t("marketplace.detail.readme.empty")}

{isOwner && onManage && ( )}
@@ -309,7 +316,7 @@ export default function AgentMarketplaceDetail({ {listing.snapshot.configuration.model && (

- Model + {t("marketplace.detail.config.model")}

{listing.snapshot.configuration.model} @@ -321,7 +328,7 @@ export default function AgentMarketplaceDetail({ {listing.snapshot.configuration.prompt && (

- System Prompt + {t("marketplace.detail.config.systemPrompt")}

@@ -336,8 +343,10 @@ export default function AgentMarketplaceDetail({
                           listing.snapshot.mcp_server_configs.length > 0 && (
                             

- MCP Servers ( - {listing.snapshot.mcp_server_configs.length}) + {t("marketplace.detail.config.mcpServers", { + count: + listing.snapshot.mcp_server_configs.length, + })}

{listing.snapshot.mcp_server_configs.map( @@ -357,7 +366,7 @@ export default function AgentMarketplaceDetail({ ) : (
-

No configuration available.

+

{t("marketplace.detail.config.empty")}

)}
@@ -374,9 +383,14 @@ export default function AgentMarketplaceDetail({
- LLM Provider Required: You'll - need to configure an AI provider (OpenAI, - Anthropic, etc.) to use this agent. + + {t( + "marketplace.detail.requirements.provider.title", + )} + {" "} + {t( + "marketplace.detail.requirements.provider.description", + )}
@@ -386,7 +400,9 @@ export default function AgentMarketplaceDetail({ {requirements.mcp_servers.length > 0 && (

- MCP Servers ({requirements.mcp_servers.length}) + {t("marketplace.detail.requirements.mcpServers", { + count: requirements.mcp_servers.length, + })}

{requirements.mcp_servers.map((mcp, index) => ( @@ -400,7 +416,10 @@ export default function AgentMarketplaceDetail({ {mcp.name} - ✅ Auto-configured + ✅{" "} + {t( + "marketplace.detail.requirements.autoConfigured", + )}
{mcp.description && ( @@ -421,11 +440,18 @@ export default function AgentMarketplaceDetail({
- Knowledge Base: The original - agent uses{" "} - {requirements.knowledge_base.file_count} files. - These files will be copied to your workspace - when you fork this agent. + + {t( + "marketplace.detail.requirements.knowledgeBase.title", + )} + {" "} + {t( + "marketplace.detail.requirements.knowledgeBase.description", + { + count: + requirements.knowledge_base.file_count, + }, + )}
@@ -439,8 +465,7 @@ export default function AgentMarketplaceDetail({
- No special requirements! This agent is ready - to use after forking. + {t("marketplace.detail.requirements.none")}
@@ -449,7 +474,7 @@ export default function AgentMarketplaceDetail({ ) : (
-

Loading requirements...

+

{t("marketplace.detail.requirements.loading")}

)}
@@ -459,12 +484,12 @@ export default function AgentMarketplaceDetail({
{/* Right Column - Actions */} -
+
{/* Action Card */} -
+

- Actions + {t("marketplace.detail.actions.title")}

@@ -518,7 +545,7 @@ export default function AgentMarketplaceDetail({ >
- Manage Agent + {t("marketplace.detail.actions.manage")}
)} @@ -529,7 +556,7 @@ export default function AgentMarketplaceDetail({ {/* Author Info */}

- Published By + {t("marketplace.detail.meta.publishedBy")}

{listing.user_id} @@ -543,14 +570,18 @@ export default function AgentMarketplaceDetail({

{listing.first_published_at && (
- First Published:{" "} + + {t("marketplace.detail.meta.firstPublished")} + {" "} {new Date( listing.first_published_at, ).toLocaleDateString()}
)}
- Last Updated:{" "} + + {t("marketplace.detail.meta.lastUpdated")} + {" "} {new Date(listing.updated_at).toLocaleDateString()}
@@ -565,11 +596,10 @@ export default function AgentMarketplaceDetail({

- About Forking + {t("marketplace.detail.aboutForking.title")}

- Forking creates your own independent copy. Changes won't - affect the original agent. + {t("marketplace.detail.aboutForking.description")}

diff --git a/web/src/components/features/CheckInCalendar.tsx b/web/src/components/features/CheckInCalendar.tsx index 7b6de1d8..7cd0b104 100644 --- a/web/src/components/features/CheckInCalendar.tsx +++ b/web/src/components/features/CheckInCalendar.tsx @@ -552,7 +552,7 @@ export function CheckInCalendar({ onCheckInSuccess }: CheckInCalendarProps) {
{checkInRecord && (
-
+
@@ -577,7 +577,7 @@ export function CheckInCalendar({ onCheckInSuccess }: CheckInCalendarProps) { {consumption && (
-
+
使用统计 diff --git a/web/src/components/layouts/components/ChatBubble.tsx b/web/src/components/layouts/components/ChatBubble.tsx index 9e6c20c3..c102215a 100644 --- a/web/src/components/layouts/components/ChatBubble.tsx +++ b/web/src/components/layouts/components/ChatBubble.tsx @@ -9,6 +9,7 @@ import { useEffect, useMemo, useRef } from "react"; import LoadingMessage from "./LoadingMessage"; import MessageAttachments from "./MessageAttachments"; import { SearchCitations } from "./SearchCitations"; +import ThinkingBubble from "./ThinkingBubble"; import ToolCallCard from "./ToolCallCard"; interface ChatBubbleProps { @@ -30,6 +31,8 @@ function ChatBubble({ message }: ChatBubbleProps) { toolCalls, attachments, citations, + isThinking, + thinkingContent, } = message; // 流式消息打字效果 @@ -204,6 +207,14 @@ function ChatBubble({ message }: ChatBubbleProps) { : "text-sm text-neutral-700 dark:text-neutral-300" }`} > + {/* Thinking content - shown before main response for assistant messages */} + {!isUserMessage && thinkingContent && ( + + )} + {isLoading ? ( ) : ( diff --git a/web/src/components/layouts/components/MessageAttachments.tsx b/web/src/components/layouts/components/MessageAttachments.tsx index 3b006c3a..18878bc5 100644 --- a/web/src/components/layouts/components/MessageAttachments.tsx +++ b/web/src/components/layouts/components/MessageAttachments.tsx @@ -494,7 +494,7 @@ export default function MessageAttachments({ onClick={(e) => e.stopPropagation()} > {/* Header */} -
+
setIsOpen(!isOpen)} > - + {currentSelection.model || "选择模型"}
选择提供商 @@ -344,9 +357,10 @@ export function ModelSelector({ className={`h-2 w-2 shrink-0 rounded-full ${getProviderDotColor(provider.provider_type)}`} /> - {provider.name === "system" - ? "系统默认" - : provider.name} + {/*{provider.is_system + ? getProviderDisplayName(provider.provider_type) + : provider.name}*/} + {getProviderDisplayName(provider.provider_type)}
@@ -380,7 +394,11 @@ export function ModelSelector({ className={`h-2 w-2 rounded-full ${getProviderDotColor(hoveredProvider.provider_type)}`} /> - {hoveredProvider.name} + {hoveredProvider.is_system + ? getProviderDisplayName( + hoveredProvider.provider_type, + ) + : hoveredProvider.name}
)} @@ -447,8 +465,8 @@ export function ModelSelector({ className={`h-2 w-2 shrink-0 rounded-full ${getProviderDotColor(provider.provider_type)}`} /> - {provider.name === "system" - ? "系统默认" + {provider.is_system + ? getProviderDisplayName(provider.provider_type) : provider.name}
diff --git a/web/src/components/layouts/components/ThinkingBubble.tsx b/web/src/components/layouts/components/ThinkingBubble.tsx new file mode 100644 index 00000000..8bd16344 --- /dev/null +++ b/web/src/components/layouts/components/ThinkingBubble.tsx @@ -0,0 +1,189 @@ +import { AnimatePresence, motion } from "framer-motion"; +import { Brain, ChevronDown, ChevronRight, Sparkles } from "lucide-react"; +import { useEffect, useMemo, useRef, useState } from "react"; +import { useTranslation } from "react-i18next"; +import Markdown from "@/lib/Markdown"; + +interface ThinkingBubbleProps { + content: string; + isThinking: boolean; +} + +/** + * ThinkingBubble displays AI thinking/reasoning content. + * + * Two states: + * 1. Active thinking (isThinking=true): Animated scrolling view showing last 5 lines + * 2. Collapsed (isThinking=false): Expandable accordion to view full thinking content + */ +export default function ThinkingBubble({ + content, + isThinking, +}: ThinkingBubbleProps) { + const { t } = useTranslation(); + const [isExpanded, setIsExpanded] = useState(false); + const scrollRef = useRef(null); + + // Split content into lines for display + const lines = useMemo(() => { + return content.split("\n").filter((line) => line.trim()); + }, [content]); + + // Get last 5 lines for active thinking display + const visibleLines = useMemo(() => { + return lines.slice(-5); + }, [lines]); + + // Auto-scroll to bottom during active thinking + useEffect(() => { + if (isThinking && scrollRef.current) { + scrollRef.current.scrollTop = scrollRef.current.scrollHeight; + } + }, [content, isThinking]); + + // Don't render if no content + if (!content) { + return null; + } + + return ( +
+ + {isThinking ? ( + // Active thinking state - animated scrolling view + + {/* Subtle shimmer effect */} + + + {/* Header with animated icon */} +
+ + + + + {t("app.chat.thinking.label")} + + + + +
+ + {/* Scrolling content - max 5 lines visible */} +
+ {visibleLines.map((line, index) => ( + + {line} + + ))} + {/* Blinking cursor */} + +
+ + {/* Fade overlay at top when more content */} + {lines.length > 5 && ( +
+ )} + + ) : ( + // Collapsed state - expandable accordion + + {/* Collapsible header */} + + + {/* Expanded content */} + + {isExpanded && ( + +
+
+ +
+
+
+ )} +
+
+ )} + +
+ ); +} diff --git a/web/src/components/modals/settings/ProviderList.tsx b/web/src/components/modals/settings/ProviderList.tsx index 3309a15e..a4842dbd 100644 --- a/web/src/components/modals/settings/ProviderList.tsx +++ b/web/src/components/modals/settings/ProviderList.tsx @@ -4,6 +4,7 @@ import { GoogleIcon, OpenAIIcon, } from "@/assets/icons"; +import { getProviderDisplayName } from "@/utils/providerDisplayNames"; import { Tabs, TabsHighlight, @@ -53,6 +54,14 @@ export const ProviderList = () => { return ; case "anthropic": return ; + case "gpugeek": + return ( +
X
+ ); + case "qwen": + return ( +
Q
+ ); default: return ; } @@ -120,7 +129,12 @@ export const ProviderList = () => { {getProviderIcon(provider.provider_type)}
-
{provider.name}
+
+ {/*{provider.is_system + ? getProviderDisplayName(provider.provider_type) + : provider.name}*/} + {getProviderDisplayName(provider.provider_type)} +
@@ -181,7 +195,7 @@ export const ProviderList = () => {
- {template.display_name} + {getProviderDisplayName(template.type)}
diff --git a/web/src/components/ui/3d-pin.tsx b/web/src/components/ui/3d-pin.tsx index 9de190f1..fe3736bc 100644 --- a/web/src/components/ui/3d-pin.tsx +++ b/web/src/components/ui/3d-pin.tsx @@ -164,8 +164,8 @@ export const PinPerspective = ({
<> - - + + diff --git a/web/src/i18n/locales/en/translation.json b/web/src/i18n/locales/en/translation.json index 3396b8ee..276abf1b 100644 --- a/web/src/i18n/locales/en/translation.json +++ b/web/src/i18n/locales/en/translation.json @@ -8,7 +8,12 @@ "chat": { "assistantsTitle": "Assistants", "chooseAgentHint": "Choose an agent to start", - "chatLabel": "Chat" + "chatLabel": "Chat", + "thinking": { + "label": "Thinking...", + "showThinking": "Show thinking", + "hideThinking": "Hide thinking" + } } }, "common": { @@ -284,6 +289,63 @@ "by": "by {{author}}", "noDescription": "No description provided", "tagsMore": "+{{count}} more" + }, + "detail": { + "loading": "Loading agent details...", + "error": "Failed to load agent details. Please try again.", + "back": "Back to Marketplace", + "publishedBy": "Published by", + "noDescription": "No description provided", + "stats": { + "likes": "Likes", + "forks": "Forks", + "views": "Views" + }, + "tabs": { + "readme": "README", + "config": "Configuration", + "requirements": "Requirements" + }, + "readme": { + "empty": "No README provided for this agent.", + "manage": "Manage to add a README" + }, + "config": { + "model": "Model", + "systemPrompt": "System Prompt", + "mcpServers": "MCP Servers ({{count}})", + "empty": "No configuration available." + }, + "requirements": { + "provider": { + "title": "LLM Provider Required:", + "description": "You'll need to configure an AI provider (OpenAI, Anthropic, etc.) to use this agent." + }, + "mcpServers": "MCP Servers ({{count}})", + "autoConfigured": "Auto-configured", + "knowledgeBase": { + "title": "Knowledge Base:", + "description": "The original agent uses {{count}} files. These files will be copied to your workspace when you fork this agent." + }, + "none": "No special requirements! This agent is ready to use after forking.", + "loading": "Loading requirements..." 
+ }, + "actions": { + "title": "Actions", + "fork": "Fork This Agent", + "liked": "Liked", + "like": "Like This Agent", + "manage": "Manage Agent" + }, + "meta": { + "publishedBy": "Published By", + "firstPublished": "First Published:", + "lastUpdated": "Last Updated:" + }, + "aboutForking": { + "title": "About Forking", + "description": "Forking creates your own independent copy. Changes won't affect the original agent." + } } }, "knowledge": { diff --git a/web/src/i18n/locales/zh/translation.json b/web/src/i18n/locales/zh/translation.json index 1cf6c44c..3c9317af 100644 --- a/web/src/i18n/locales/zh/translation.json +++ b/web/src/i18n/locales/zh/translation.json @@ -8,7 +8,12 @@ "chat": { "assistantsTitle": "助手", "chooseAgentHint": "选择一个助手开始", - "chatLabel": "聊天" + "chatLabel": "聊天", + "thinking": { + "label": "思考中...", + "showThinking": "显示思考过程", + "hideThinking": "隐藏思考过程" + } } }, "common": { @@ -263,205 +268,63 @@ "by": "来自 {{author}}", "noDescription": "暂无描述", "tagsMore": "+{{count}} 更多" - } - }, - "knowledge": { - "titles": { - "recents": "最近", - "allFiles": "全部文件", - "myKnowledge": "我的知识库", - "knowledgeBase": "知识库", - "trash": "回收站" - }, - "a11y": { - "navTitle": "导航菜单", - "navDescription": "在你的知识库中进行导航" - }, - "prompts": { - "folderName": "请输入文件夹名称:", - "knowledgeSetName": "请输入知识集名称:", - "knowledgeSetDescription": "请输入描述(可选):" - }, - "errors": { - "createFolderFailed": "创建文件夹失败", - "createKnowledgeSetFailed": "创建知识集失败" - }, - "upload": { - "errors": { - "fileTooLarge": "文件“{{name}}”({{fileSizeMB}}MB)超过最大大小限制 {{maxSizeMB}}MB", - "notEnoughStorage": "存储空间不足。文件大小:{{fileSizeMB}}MB,可用:{{availableMB}}MB。请先删除一些文件。", - "uploadFailed": "上传失败" - } }, - "toolbar": { - "home": "主页", - "searchFilesPlaceholder": "搜索文件...", - "searchPlaceholder": "搜索", - "listView": "列表视图", - "gridView": "网格视图", - "newFolder": "新建文件夹", - "emptyTrash": "清空回收站", - "empty": "清空", - "uploadFile": "上传文件", - "upload": "上传", - "refresh": "刷新" - }, - "sidebar": { - "groups": { - "favorites": 
"常用", - "media": "媒体", - "locations": "位置" - }, - "items": { - "images": "图片", - "documents": "文档" - }, - "newKnowledgeSet": "新建知识集", - "noKnowledgeSets": "暂无知识集" - }, - "status": { - "items": "{{count}} 项", - "usedOfTotal": "已用 {{used}} / {{total}}", - "available": "可用 {{available}}" - }, - "contextMenu": { - "download": "下载", - "rename": "重命名", - "moveTo": "移动到...", - "addToKnowledgeSet": "添加到知识集", - "removeFromKnowledgeSet": "从知识集中移除", - "delete": "删除" - }, - "moveModal": { - "title": "移动 \"{{name}}\"", - "home": "主页", - "noSubfolders": "没有子文件夹", - "moveHere": "移动到这里" - }, - "fileList": { - "itemTypes": { - "file": "文件", - "folder": "文件夹" - }, - "columns": { - "name": "名称", - "size": "大小", - "dateModified": "修改时间" - }, - "empty": { - "trash": "回收站为空", - "noItems": "暂无内容" - }, - "actions": { - "preview": "预览", - "download": "下载", - "restore": "还原", - "delete": "删除", - "deleteForever": "永久删除", - "moveToTrash": "移到回收站", - "deleteImmediately": "立即删除", - "deleteFailed": "删除失败", - "restoreFailed": "还原失败", - "downloadFailed": "下载失败" - }, - "deleteItem": { - "title": "删除{{itemType}}", - "message": "确定要删除这个{{itemType}}吗?" - }, - "moveToTrash": { - "message": "确定要将此文件移到回收站吗?" 
- }, - "deleteForever": { - "message": "确定要永久删除此文件吗?此操作无法撤销。" + "detail": { + "loading": "正在加载助手详情...", + "error": "加载助手详情失败,请重试。", + "back": "返回市场", + "publishedBy": "发布者", + "noDescription": "暂无描述", + "stats": { + "likes": "点赞", + "forks": "复刻", + "views": "浏览" }, - "emptyTrash": { - "title": "清空回收站", - "message": "确定要永久删除 {{count}} 个项目吗?此操作无法撤销。", - "confirm": "清空回收站", - "failed": "清空回收站失败,可能仍有部分项目未删除。" + "tabs": { + "readme": "说明文档", + "config": "配置信息", + "requirements": "运行要求" }, - "rename": { - "titleFile": "重命名文件", - "titleFolder": "重命名文件夹", - "placeholder": "输入新名称", - "confirm": "重命名", - "failed": "重命名失败" + "readme": { + "empty": "该助手暂无说明文档。", + "manage": "去添加说明文档" }, - "move": { - "failed": "移动失败" + "config": { + "model": "模型", + "systemPrompt": "系统提示词", + "mcpServers": "MCP 服务 ({{count}})", + "empty": "暂无配置信息。" }, - "knowledgeSet": { - "add": { - "title": "添加到知识集", - "subtitle": "选择一个知识集来添加 \"{{name}}\"" + "requirements": { + "provider": { + "title": "需要 LLM 服务商:", + "description": "你需要配置 AI 服务商(OpenAI, Anthropic 等)才能使用此助手。" }, - "none": "暂无可用知识集,请先创建一个。", - "fileCount": "{{count}} 个文件", - "added": "已成功添加到知识集", - "alreadyInSet": "该文件已在此知识集中。", - "addFailed": "添加到知识集失败", - "remove": { - "title": "从知识集中移除", - "message": "确定要将 \"{{name}}\" 从此知识集中移除吗?", - "confirm": "移除", - "failed": "从知识集中移除失败" - } - }, - "notifications": { - "successTitle": "成功", - "noticeTitle": "提示", - "errorTitle": "错误" - } - }, - "createKnowledgeSetModal": { - "title": "创建知识集", - "fields": { - "name": { - "label": "名称", - "placeholder": "请输入名称" + "mcpServers": "MCP 服务 ({{count}})", + "autoConfigured": "自动配置", + "knowledgeBase": { + "title": "知识库:", + "description": "原助手使用了 {{count}} 个文件。复刻此助手时,这些文件将被复制到你的工作区。" }, - "description": { - "label": "描述(可选)", - "placeholder": "添加简短描述" - } + "none": "无特殊要求!复刻后即可直接使用。", + "loading": "正在加载运行要求..." }, "actions": { - "create": "创建", - "creating": "创建中..." 
+ "title": "操作", + "fork": "复刻此助手", + "liked": "已点赞", + "like": "点赞", + "manage": "管理助手" }, - "validation": { - "nameRequired": "名称不能为空" + "meta": { + "publishedBy": "发布者", + "firstPublished": "首次发布:", + "lastUpdated": "最后更新:" }, - "errors": { - "createFailed": "创建知识集失败" + "aboutForking": { + "title": "关于复刻", + "description": "复刻会创建一个独立的副本。你的修改不会影响原始助手。" } } - }, - "mcp": { - "title": "我的服务", - "subtitle": "浏览市场并管理你的服务", - "refresh": "刷新", - "addCustom": "添加自定义", - "market": { - "title": "MCP 市场", - "quickAdd": "快速添加", - "close": "关闭" - }, - "added": { - "title": "已添加服务", - "online": "在线", - "offline": "离线", - "tools": "工具", - "noDescription": "暂无描述", - "edit": "编辑服务", - "remove": "移除服务", - "test": "测试工具", - "empty": { - "title": "暂无已添加服务", - "description": "在左侧浏览市场添加服务,或创建自定义连接。", - "button": "添加自定义服务" - }, - "loading": "加载 MCP 服务中..." - } } } diff --git a/web/src/service/xyzenService.ts b/web/src/service/xyzenService.ts index e6df76c2..80dff70e 100644 --- a/web/src/service/xyzenService.ts +++ b/web/src/service/xyzenService.ts @@ -19,7 +19,10 @@ interface MessageEvent { | "tool_call_response" | "insufficient_balance" | "error" - | "topic_updated"; + | "topic_updated" + | "thinking_start" + | "thinking_chunk" + | "thinking_end"; data: | Message | { diff --git a/web/src/store/slices/chatSlice.ts b/web/src/store/slices/chatSlice.ts index 2cc388a5..57cd9f64 100644 --- a/web/src/store/slices/chatSlice.ts +++ b/web/src/store/slices/chatSlice.ts @@ -35,14 +35,22 @@ function groupToolMessagesWithAssistant(messages: Message[]): Message[] { arguments: { ...(toolCall.arguments || {}) }, }); - const cloneMessage = (message: Message): Message => ({ - ...message, - toolCalls: message.toolCalls - ? message.toolCalls.map((toolCall) => cloneToolCall(toolCall)) - : undefined, - attachments: message.attachments ? [...message.attachments] : undefined, - citations: message.citations ? 
[...message.citations] : undefined, - }); + const cloneMessage = (message: Message): Message => { + const backendThinkingContent = ( + message as Message & { thinking_content?: string } + ).thinking_content; + + return { + ...message, + toolCalls: message.toolCalls + ? message.toolCalls.map((toolCall) => cloneToolCall(toolCall)) + : undefined, + attachments: message.attachments ? [...message.attachments] : undefined, + citations: message.citations ? [...message.citations] : undefined, + // Map thinking_content from backend to thinkingContent for frontend + thinkingContent: backendThinkingContent ?? message.thinkingContent, + }; + }; for (const msg of messages) { if (msg.role !== "tool") { @@ -581,12 +589,14 @@ export const createChatSlice: StateCreator< } case "streaming_start": { - // Convert loading message to streaming message + // Convert loading or thinking message to streaming message channel.responding = true; + const eventData = event.data as { id: string }; + + // First check for loading message const loadingIndex = channel.messages.findIndex( (m) => m.isLoading, ); - const eventData = event.data as { id: string }; if (loadingIndex !== -1) { // eslint-disable-next-line @typescript-eslint/no-unused-vars const { isLoading: _, ...messageWithoutLoading } = @@ -597,18 +607,33 @@ export const createChatSlice: StateCreator< isStreaming: true, content: "", }; - } else { - // No loading present (backend may skip sending "loading"). Create a streaming message now. 
- channel.messages.push({ - id: eventData.id, - clientId: generateClientId(), - role: "assistant" as const, - content: "", - isNewMessage: true, - created_at: new Date().toISOString(), + break; + } + + // Check for existing message with same ID (e.g., after thinking_end set isThinking=false) + const existingIndex = channel.messages.findIndex( + (m) => m.id === eventData.id, + ); + if (existingIndex !== -1) { + // Convert existing message to streaming - keep thinking content if present + channel.messages[existingIndex] = { + ...channel.messages[existingIndex], + isThinking: false, isStreaming: true, - }); + }; + break; } + + // No loading or existing message found, create a streaming message now + channel.messages.push({ + id: eventData.id, + clientId: generateClientId(), + role: "assistant" as const, + content: "", + isNewMessage: true, + created_at: new Date().toISOString(), + isStreaming: true, + }); break; } @@ -981,6 +1006,68 @@ export const createChatSlice: StateCreator< break; } + case "thinking_start": { + // Start thinking mode - find or create the assistant message + channel.responding = true; + const eventData = event.data as { id: string }; + const loadingIndex = channel.messages.findIndex( + (m) => m.isLoading, + ); + if (loadingIndex !== -1) { + // Convert loading message to thinking message + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const { isLoading: _, ...messageWithoutLoading } = + channel.messages[loadingIndex]; + channel.messages[loadingIndex] = { + ...messageWithoutLoading, + id: eventData.id, + isThinking: true, + thinkingContent: "", + content: "", + }; + } else { + // No loading present, create a thinking message + channel.messages.push({ + id: eventData.id, + clientId: `thinking-${Date.now()}`, + role: "assistant" as const, + content: "", + isNewMessage: true, + created_at: new Date().toISOString(), + isThinking: true, + thinkingContent: "", + }); + } + break; + } + + case "thinking_chunk": { + // Append to thinking 
content + const eventData = event.data as { id: string; content: string }; + const thinkingIndex = channel.messages.findIndex( + (m) => m.id === eventData.id, + ); + if (thinkingIndex !== -1) { + const currentThinking = + channel.messages[thinkingIndex].thinkingContent ?? ""; + channel.messages[thinkingIndex].thinkingContent = + currentThinking + eventData.content; + } + break; + } + + case "thinking_end": { + // End thinking mode + const eventData = event.data as { id: string }; + const endThinkingIndex = channel.messages.findIndex( + (m) => m.id === eventData.id, + ); + if (endThinkingIndex !== -1) { + channel.messages[endThinkingIndex].isThinking = false; + } + break; + } + case "topic_updated": { const eventData = event.data as { id: string; diff --git a/web/src/store/types.ts b/web/src/store/types.ts index 8effeb8c..ae9f5be8 100644 --- a/web/src/store/types.ts +++ b/web/src/store/types.ts @@ -67,6 +67,9 @@ export interface Message { attachments?: MessageAttachment[]; // Search citations from built-in search citations?: SearchCitation[]; + // Thinking/reasoning content from models like Claude, DeepSeek R1, OpenAI o1 + isThinking?: boolean; + thinkingContent?: string; } export interface KnowledgeContext { diff --git a/web/src/utils/providerDisplayNames.ts b/web/src/utils/providerDisplayNames.ts new file mode 100644 index 00000000..34f3708f --- /dev/null +++ b/web/src/utils/providerDisplayNames.ts @@ -0,0 +1,33 @@ +/** + * Provider display name mapping for frontend + * Backend uses internal names (e.g., "gpugeek"), frontend displays user-friendly names + */ +export function getProviderDisplayName(providerType: string): string { + const displayNameMap: Record = { + gpugeek: "Xin", + azure_openai: "Azure OpenAI", + google_vertex: "Google Vertex", + openai: "OpenAI", + google: "Google", + qwen: "Qwen", + }; + + return displayNameMap[providerType] || providerType; +} + +/** + * Get provider badge color based on provider type + * Different colors help distinguish 
between providers visually + */ +export function getProviderBadgeColor(providerType: string): string { + const colorMap: Record = { + openai: "bg-emerald-500", + azure_openai: "bg-blue-500", + google: "bg-red-500", + google_vertex: "bg-purple-500", + gpugeek: "bg-amber-500", // Amber/gold for Xin + qwen: "bg-cyan-500", // Cyan for Qwen (different from GPUGeek) + }; + + return colorMap[providerType] || "bg-neutral-500"; +} diff --git a/web/yarn.lock b/web/yarn.lock index ed19142e..bb444f4f 100644 --- a/web/yarn.lock +++ b/web/yarn.lock @@ -5343,14 +5343,12 @@ __metadata: react-dnd-html5-backend: "npm:^16.0.1" react-dom: "npm:^19.1.0" react-i18next: "npm:^16.5.0" - react-image-crop: "npm:^11.0.10" react-lite-youtube-embed: "npm:^3.3.3" react-markdown: "npm:^10.1.0" react-player: "npm:3.3.1" react-textarea-autosize: "npm:^8.5.9" react-tweet: "npm:^3.3.0" react-use-measure: "npm:^2.1.7" - react-use-websocket: "npm:^4.13.0" rehype-katex: "npm:^7.0.1" remark-gfm: "npm:^4.0.1" remark-math: "npm:^6.0.0" @@ -13385,15 +13383,6 @@ __metadata: languageName: node linkType: hard -"react-image-crop@npm:^11.0.10": - version: 11.0.10 - resolution: "react-image-crop@npm:11.0.10" - peerDependencies: - react: ">=16.13.1" - checksum: 10c0/2c2c7066c3a51838fc074a4a125ee2063170e01c26e3b1c8fc8b66155cf7ef576b5747089b4f365ce9dd2728794c39f57643b37eb78116e9db3f4b6299d3f682 - languageName: node - linkType: hard - "react-is@npm:^16.13.1, react-is@npm:^16.7.0": version: 16.13.1 resolution: "react-is@npm:16.13.1" @@ -13566,13 +13555,6 @@ __metadata: languageName: node linkType: hard -"react-use-websocket@npm:^4.13.0": - version: 4.13.0 - resolution: "react-use-websocket@npm:4.13.0" - checksum: 10c0/92f0941c67984f3b43979a2e5aa9a358d1e2b01591575b09fdcdd638d0c4275f6c5e180d1173632bc0fd564458afc3583643635eaeb4e0b8ce059555576661f3 - languageName: node - linkType: hard - "react@npm:^19.1.0": version: 19.2.3 resolution: "react@npm:19.2.3"