diff --git a/CLAUDE.md b/CLAUDE.md index e0772348..b273e8dd 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -168,12 +168,45 @@ Place it before imports with one blank line after. ### Python Conventions -- Type hints preferred (Pydantic models heavily used) +- Type hints required on all function parameters and return types - Async/await patterns for I/O operations - Use explicit `None` checks: `if x is not None:` not `if x:` - Local imports should be moved to top of file - Return defensive copies of mutable data to protect singletons +### Type Hints - NEVER Use `Any` + +**CRITICAL: Never use `typing.Any` in this codebase.** Using `Any` defeats the purpose of type checking and can hide bugs. Instead: + +1. **Use actual types from external SDKs** - When integrating with external libraries (OpenAI, LangChain, etc.), import and use their actual types: + ```python + from agents.memory import Session + from agents.items import TResponseInputItem + + async def send_chat_history(self, session: Session) -> OperationResult: + ... + ``` + +2. **Use `Union` for known possible types**: + ```python + from typing import Union + MessageType = Union[UserMessage, AssistantMessage, SystemMessage, Dict[str, object]] + ``` + +3. **Use `object` for truly unknown types** that you only pass through: + ```python + def log_item(item: object) -> None: ... + ``` + +4. **Use `Protocol` only as a last resort** - If external types cannot be found or imported, define a Protocol. However, **confirm with the developer first** before proceeding with this approach, as it may indicate a missing dependency or incorrect understanding of the external API. 
+ +**Why this matters:** +- `Any` disables all type checking for that variable +- Bugs that type checkers would catch go unnoticed +- Code readability suffers - developers don't know what types to expect +- Using actual SDK types provides better IDE support and ensures compatibility +- This applies to both production code AND test files + ## CI/CD The `.github/workflows/ci.yml` pipeline: diff --git a/docs/design.md b/docs/design.md index 3ebd5c9f..9f107784 100644 --- a/docs/design.md +++ b/docs/design.md @@ -227,9 +227,51 @@ Framework-specific adapters for MCP tool integration: |---------|---------|------------| | `extensions-agentframework` | Adapt MCP tools to Microsoft Agents SDK | [design.md](../libraries/microsoft-agents-a365-tooling-extensions-agentframework/docs/design.md) | | `extensions-azureaifoundry` | Azure AI Foundry tool integration | [design.md](../libraries/microsoft-agents-a365-tooling-extensions-azureaifoundry/docs/design.md) | -| `extensions-openai` | OpenAI function calling integration | [design.md](../libraries/microsoft-agents-a365-tooling-extensions-openai/docs/design.md) | +| `extensions-openai` | OpenAI function calling integration and chat history | [design.md](../libraries/microsoft-agents-a365-tooling-extensions-openai/docs/design.md) | | `extensions-semantickernel` | Semantic Kernel plugin integration | [design.md](../libraries/microsoft-agents-a365-tooling-extensions-semantickernel/docs/design.md) | +#### OpenAI Extension: Chat History API + +The OpenAI tooling extension provides methods to send chat history to the MCP platform for real-time threat protection: + +**Key Classes:** + +| Class | Purpose | +|-------|---------| +| `McpToolRegistrationService` | MCP tool registration and chat history management | + +**Methods:** + +| Method | Purpose | +|--------|---------| +| `send_chat_history(turn_context, session, limit, options)` | Extract messages from OpenAI Session and send to MCP platform | +| 
`send_chat_history_messages(turn_context, messages, options)` | Send a list of OpenAI TResponseInputItem messages to MCP platform | + +**Usage Example:** + +```python +from agents import Agent, Runner +from microsoft_agents_a365.tooling.extensions.openai import McpToolRegistrationService + +service = McpToolRegistrationService() +agent = Agent(name="my-agent", model="gpt-4") + +# In your agent handler: +async with Runner.run(agent, messages) as result: + session = result.session + + # Option 1: Send from Session object + op_result = await service.send_chat_history(turn_context, session) + + # Option 2: Send from message list + op_result = await service.send_chat_history_messages(turn_context, messages) + + if op_result.succeeded: + print("Chat history sent successfully") +``` + +The methods convert OpenAI message types to `ChatHistoryMessage` format and delegate to the core `McpToolServerConfigurationService.send_chat_history()` method. + ### 6. Notifications (`microsoft-agents-a365-notifications`) > **Detailed documentation**: [libraries/microsoft-agents-a365-notifications/docs/design.md](../libraries/microsoft-agents-a365-notifications/docs/design.md) diff --git a/libraries/microsoft-agents-a365-tooling-extensions-openai/microsoft_agents_a365/tooling/extensions/openai/__init__.py b/libraries/microsoft-agents-a365-tooling-extensions-openai/microsoft_agents_a365/tooling/extensions/openai/__init__.py index c2d53bd0..6e77b7df 100644 --- a/libraries/microsoft-agents-a365-tooling-extensions-openai/microsoft_agents_a365/tooling/extensions/openai/__init__.py +++ b/libraries/microsoft-agents-a365-tooling-extensions-openai/microsoft_agents_a365/tooling/extensions/openai/__init__.py @@ -2,10 +2,21 @@ # Licensed under the MIT License. """ -OpenAI extensions for Microsoft Agent 365 Tooling SDK +OpenAI extensions for Microsoft Agent 365 Tooling SDK. Tooling and utilities specifically for OpenAI framework integration. -Provides OpenAI-specific helper utilities. 
+Provides OpenAI-specific helper utilities including: +- McpToolRegistrationService: Service for MCP tool registration and chat history management + +For type hints, use the types directly from the OpenAI Agents SDK: +- agents.memory.Session: Protocol for session objects +- agents.items.TResponseInputItem: Type for input message items """ +from .mcp_tool_registration_service import McpToolRegistrationService + __version__ = "1.0.0" + +__all__ = [ + "McpToolRegistrationService", +] diff --git a/libraries/microsoft-agents-a365-tooling-extensions-openai/microsoft_agents_a365/tooling/extensions/openai/mcp_tool_registration_service.py b/libraries/microsoft-agents-a365-tooling-extensions-openai/microsoft_agents_a365/tooling/extensions/openai/mcp_tool_registration_service.py index 3e7edc29..7708b4d2 100644 --- a/libraries/microsoft-agents-a365-tooling-extensions-openai/microsoft_agents_a365/tooling/extensions/openai/mcp_tool_registration_service.py +++ b/libraries/microsoft-agents-a365-tooling-extensions-openai/microsoft_agents_a365/tooling/extensions/openai/mcp_tool_registration_service.py @@ -1,31 +1,40 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -from typing import Dict, Optional -from dataclasses import dataclass -import logging +""" +MCP Tool Registration Service for OpenAI. -from agents import Agent +This module provides OpenAI-specific extensions for MCP tool registration, +including methods to send chat history from OpenAI Sessions and message lists. 
+""" -from microsoft_agents.hosting.core import Authorization, TurnContext +import logging +import uuid +from dataclasses import dataclass +from datetime import datetime, timezone +from typing import Dict, List, Optional +from agents import Agent +from agents.items import TResponseInputItem from agents.mcp import ( MCPServerStreamableHttp, MCPServerStreamableHttpParams, ) +from agents.memory import Session +from microsoft_agents.hosting.core import Authorization, TurnContext + +from microsoft_agents_a365.runtime import OperationError, OperationResult from microsoft_agents_a365.runtime.utility import Utility +from microsoft_agents_a365.tooling.models import ChatHistoryMessage, ToolOptions from microsoft_agents_a365.tooling.services.mcp_tool_server_configuration_service import ( McpToolServerConfigurationService, ) - -from microsoft_agents_a365.tooling.models import ToolOptions from microsoft_agents_a365.tooling.utils.constants import Constants from microsoft_agents_a365.tooling.utils.utility import ( get_mcp_platform_authentication_scope, ) -# TODO: This is not needed. Remove this. @dataclass class MCPServerInfo: """Information about an MCP server""" @@ -60,7 +69,7 @@ async def add_tool_servers_to_agent( auth_handler_name: str, context: TurnContext, auth_token: Optional[str] = None, - ): + ) -> Agent: """ Add new MCP servers to the agent by creating a new Agent instance. 
@@ -79,7 +88,7 @@ async def add_tool_servers_to_agent( New Agent instance with all MCP servers, or original agent if no new servers """ - if not auth_token: + if auth_token is None or auth_token.strip() == "": scopes = get_mcp_platform_authentication_scope() authToken = await auth.exchange_token(context, scopes, auth_handler_name) auth_token = authToken.token @@ -185,8 +194,6 @@ async def add_tool_servers_to_agent( all_mcp_servers = existing_mcp_servers + new_mcp_servers # Recreate the agent with all MCP servers - from agents import Agent - new_agent = Agent( name=agent.name, model=agent.model, @@ -218,12 +225,12 @@ async def add_tool_servers_to_agent( # Clean up connected servers if agent creation fails self._logger.error(f"Failed to recreate agent with new MCP servers: {e}") await self._cleanup_servers(connected_servers) - raise e + raise self._logger.info("No new MCP servers to add to agent") return agent - async def _cleanup_servers(self, servers): + async def _cleanup_servers(self, servers: List[MCPServerStreamableHttp]) -> None: """Clean up connected MCP servers""" for server in servers: try: @@ -233,8 +240,430 @@ async def _cleanup_servers(self, servers): # Log cleanup errors but don't raise them self._logger.debug(f"Error during server cleanup: {e}") - async def cleanup_all_servers(self): + async def cleanup_all_servers(self) -> None: """Clean up all connected MCP servers""" if hasattr(self, "_connected_servers"): await self._cleanup_servers(self._connected_servers) self._connected_servers = [] + + # -------------------------------------------------------------------------- + # SEND CHAT HISTORY - OpenAI-specific implementations + # -------------------------------------------------------------------------- + + async def send_chat_history( + self, + turn_context: TurnContext, + session: Session, + limit: Optional[int] = None, + options: Optional[ToolOptions] = None, + ) -> OperationResult: + """ + Extract chat history from an OpenAI Session and send it to 
the MCP platform. + + This method extracts messages from an OpenAI Session object using get_items() + and sends them to the MCP platform for real-time threat protection. + + Args: + turn_context: TurnContext from the Agents SDK containing conversation info. + Must have a valid activity with conversation.id, activity.id, + and activity.text. + session: OpenAI Session instance to extract messages from. Must support + the get_items() method which returns a list of TResponseInputItem. + limit: Optional maximum number of items to retrieve from session. + If None, retrieves all items. + options: Optional ToolOptions for customization. If not provided, + uses default options with orchestrator_name="OpenAI". + + Returns: + OperationResult indicating success or failure. On success, returns + OperationResult.success(). On failure, returns OperationResult.failed() + with error details. + + Raises: + ValueError: If turn_context is None or session is None. + + Example: + >>> from agents import Agent, Runner + >>> from microsoft_agents_a365.tooling.extensions.openai import ( + ... McpToolRegistrationService + ... ) + >>> + >>> service = McpToolRegistrationService() + >>> agent = Agent(name="my-agent", model="gpt-4") + >>> + >>> # In your agent handler: + >>> async with Runner.run(agent, messages) as result: + ... session = result.session + ... op_result = await service.send_chat_history( + ... turn_context, session + ... ) + ... if op_result.succeeded: + ... 
print("Chat history sent successfully") + """ + # Validate inputs + if turn_context is None: + raise ValueError("turn_context cannot be None") + if session is None: + raise ValueError("session cannot be None") + + try: + # Extract messages from session + self._logger.info("Extracting messages from OpenAI session") + if limit is not None: + messages = session.get_items(limit=limit) + else: + messages = session.get_items() + + self._logger.debug(f"Retrieved {len(messages)} items from session") + + # Delegate to the list-based method + return await self.send_chat_history_messages( + turn_context=turn_context, + messages=messages, + options=options, + ) + except ValueError: + # Re-raise validation errors + raise + except Exception as ex: + self._logger.error(f"Failed to send chat history from session: {ex}") + return OperationResult.failed(OperationError(ex)) + + async def send_chat_history_messages( + self, + turn_context: TurnContext, + messages: List[TResponseInputItem], + options: Optional[ToolOptions] = None, + ) -> OperationResult: + """ + Send OpenAI chat history messages to the MCP platform for threat protection. + + This method accepts a list of OpenAI TResponseInputItem messages, converts + them to ChatHistoryMessage format, and sends them to the MCP platform. + + Args: + turn_context: TurnContext from the Agents SDK containing conversation info. + Must have a valid activity with conversation.id, activity.id, + and activity.text. + messages: List of OpenAI TResponseInputItem messages to send. Supports + UserMessage, AssistantMessage, SystemMessage, and other OpenAI + message types. + options: Optional ToolOptions for customization. If not provided, + uses default options with orchestrator_name="OpenAI". + + Returns: + OperationResult indicating success or failure. On success, returns + OperationResult.success(). On failure, returns OperationResult.failed() + with error details. + + Raises: + ValueError: If turn_context is None or messages is None. 
+ + Example: + >>> from microsoft_agents_a365.tooling.extensions.openai import ( + ... McpToolRegistrationService + ... ) + >>> + >>> service = McpToolRegistrationService() + >>> messages = [ + ... {"role": "user", "content": "Hello"}, + ... {"role": "assistant", "content": "Hi there!"}, + ... ] + >>> + >>> result = await service.send_chat_history_messages( + ... turn_context, messages + ... ) + >>> if result.succeeded: + ... print("Chat history sent successfully") + """ + # Validate inputs + if turn_context is None: + raise ValueError("turn_context cannot be None") + if messages is None: + raise ValueError("messages cannot be None") + + # Handle empty list as no-op + if len(messages) == 0: + self._logger.info("Empty message list provided, returning success") + return OperationResult.success() + + self._logger.info(f"Sending {len(messages)} OpenAI messages as chat history") + + # Set default options + if options is None: + options = ToolOptions(orchestrator_name=self._orchestrator_name) + elif options.orchestrator_name is None: + options.orchestrator_name = self._orchestrator_name + + try: + # Convert OpenAI messages to ChatHistoryMessage format + chat_history_messages = self._convert_openai_messages_to_chat_history(messages) + + if len(chat_history_messages) == 0: + self._logger.warning("No messages could be converted to chat history format") + return OperationResult.success() + + self._logger.debug( + f"Converted {len(chat_history_messages)} messages to ChatHistoryMessage format" + ) + + # Delegate to core service + return await self.config_service.send_chat_history( + turn_context=turn_context, + chat_history_messages=chat_history_messages, + options=options, + ) + except ValueError: + # Re-raise validation errors from the core service + raise + except Exception as ex: + self._logger.error(f"Failed to send chat history messages: {ex}") + return OperationResult.failed(OperationError(ex)) + + # 
-------------------------------------------------------------------------- + # PRIVATE HELPER METHODS - Message Conversion + # -------------------------------------------------------------------------- + + def _convert_openai_messages_to_chat_history( + self, messages: List[TResponseInputItem] + ) -> List[ChatHistoryMessage]: + """ + Convert a list of OpenAI messages to ChatHistoryMessage format. + + Args: + messages: List of OpenAI TResponseInputItem messages. + + Returns: + List of ChatHistoryMessage objects. Messages that cannot be converted + are filtered out with a warning log. + """ + chat_history_messages: List[ChatHistoryMessage] = [] + + for idx, message in enumerate(messages): + converted = self._convert_single_message(message, idx) + if converted is not None: + chat_history_messages.append(converted) + + self._logger.info( + f"Converted {len(chat_history_messages)} of {len(messages)} messages " + "to ChatHistoryMessage format" + ) + return chat_history_messages + + def _convert_single_message( + self, message: TResponseInputItem, index: int = 0 + ) -> Optional[ChatHistoryMessage]: + """ + Convert a single OpenAI message to ChatHistoryMessage format. + + Args: + message: Single OpenAI TResponseInputItem message. + index: Index of the message in the list (for logging). + + Returns: + ChatHistoryMessage object or None if conversion fails. 
+ """ + try: + role = self._extract_role(message) + content = self._extract_content(message) + msg_id = self._extract_id(message) + timestamp = self._extract_timestamp(message) + + self._logger.debug( + f"Converting message {index}: role={role}, " + f"has_id={msg_id is not None}, has_timestamp={timestamp is not None}" + ) + + # Skip messages with empty content after extraction + # The ChatHistoryMessage validator requires non-empty content + if not content or not content.strip(): + self._logger.warning(f"Message {index} has empty content, skipping") + return None + + return ChatHistoryMessage( + id=msg_id, + role=role, + content=content, + timestamp=timestamp, + ) + except Exception as ex: + self._logger.error(f"Failed to convert message {index}: {ex}") + return None + + def _extract_role(self, message: TResponseInputItem) -> str: + """ + Extract the role from an OpenAI message. + + Role mapping: + - UserMessage or role="user" -> "user" + - AssistantMessage or role="assistant" -> "assistant" + - SystemMessage or role="system" -> "system" + - ResponseOutputMessage with role="assistant" -> "assistant" + - Unknown types -> "user" (default fallback with warning) + + Args: + message: OpenAI message object. + + Returns: + Role string: "user", "assistant", or "system". 
+ """ + # Check for role attribute directly + if hasattr(message, "role"): + role = message.role + if role in ("user", "assistant", "system"): + return role + + # Check message type by class name + type_name = type(message).__name__ + + if "UserMessage" in type_name or "user" in type_name.lower(): + return "user" + elif "AssistantMessage" in type_name or "assistant" in type_name.lower(): + return "assistant" + elif "SystemMessage" in type_name or "system" in type_name.lower(): + return "system" + elif "ResponseOutputMessage" in type_name: + # ResponseOutputMessage typically has role attribute + if hasattr(message, "role") and message.role == "assistant": + return "assistant" + return "assistant" # Default for response output + + # For dict-like objects + if isinstance(message, dict): + role = message.get("role", "") + if role in ("user", "assistant", "system"): + return role + + # Default fallback with warning + self._logger.warning(f"Unknown message type {type_name}, defaulting to 'user' role") + return "user" + + def _extract_content(self, message: TResponseInputItem) -> str: + """ + Extract text content from an OpenAI message. + + Content extraction priority: + 1. If message has .content as string -> use directly + 2. If message has .content as list -> concatenate all text parts + 3. If message has .text attribute -> use directly + 4. If content is empty/None -> return empty string with warning + + Args: + message: OpenAI message object. + + Returns: + Extracted text content as string. 
+ """ + content = "" + + # Try .content attribute first + if hasattr(message, "content"): + raw_content = message.content + + if isinstance(raw_content, str): + content = raw_content + elif isinstance(raw_content, list): + # Concatenate text parts from content list + text_parts = [] + for part in raw_content: + if isinstance(part, str): + text_parts.append(part) + elif hasattr(part, "text"): + text_parts.append(str(part.text)) + elif isinstance(part, dict): + if "text" in part: + text_parts.append(str(part["text"])) + elif part.get("type") == "text" and "text" in part: + text_parts.append(str(part["text"])) + content = " ".join(text_parts) + + # Try .text attribute as fallback + if not content and hasattr(message, "text"): + content = str(message.text) if message.text else "" + + # Try dict-like access + if not content and isinstance(message, dict): + content = message.get("content", "") or message.get("text", "") or "" + if isinstance(content, list): + text_parts = [] + for part in content: + if isinstance(part, str): + text_parts.append(part) + elif isinstance(part, dict) and "text" in part: + text_parts.append(str(part["text"])) + content = " ".join(text_parts) + + if not content: + self._logger.warning("Message has empty content, using empty string") + + return content + + def _extract_id(self, message: TResponseInputItem) -> str: + """ + Extract or generate a unique ID for the message. + + If the message has an existing ID, it is preserved. Otherwise, + a new UUID is generated. + + Args: + message: OpenAI message object. + + Returns: + Message ID as string. 
+ """ + # Try to get existing ID + existing_id = None + + if hasattr(message, "id") and message.id: + existing_id = str(message.id) + elif isinstance(message, dict) and message.get("id"): + existing_id = str(message["id"]) + + if existing_id: + return existing_id + + # Generate new UUID + generated_id = str(uuid.uuid4()) + self._logger.debug(f"Generated UUID {generated_id} for message without ID") + return generated_id + + def _extract_timestamp(self, message: TResponseInputItem) -> datetime: + """ + Extract or generate a timestamp for the message. + + If the message has an existing timestamp, it is preserved. Otherwise, + the current UTC time is used. + + Args: + message: OpenAI message object. + + Returns: + Timestamp as datetime object. + """ + # Try to get existing timestamp + existing_timestamp = None + + if hasattr(message, "timestamp") and message.timestamp: + existing_timestamp = message.timestamp + elif hasattr(message, "created_at") and message.created_at: + existing_timestamp = message.created_at + elif isinstance(message, dict): + existing_timestamp = message.get("timestamp") or message.get("created_at") + + if existing_timestamp: + # Convert to datetime if needed + if isinstance(existing_timestamp, datetime): + return existing_timestamp + elif isinstance(existing_timestamp, (int, float)): + # Unix timestamp + return datetime.fromtimestamp(existing_timestamp, tz=timezone.utc) + elif isinstance(existing_timestamp, str): + # Try ISO format parsing + try: + return datetime.fromisoformat(existing_timestamp.replace("Z", "+00:00")) + except ValueError: + pass + + # Use current UTC time + self._logger.debug("Using current UTC time for message without timestamp") + return datetime.now(timezone.utc) diff --git a/libraries/microsoft-agents-a365-tooling/microsoft_agents_a365/tooling/extensions/__init__.py b/libraries/microsoft-agents-a365-tooling/microsoft_agents_a365/tooling/extensions/__init__.py new file mode 100644 index 00000000..3e7da8a5 --- /dev/null +++ 
b/libraries/microsoft-agents-a365-tooling/microsoft_agents_a365/tooling/extensions/__init__.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Microsoft Agent 365 Tooling Extensions namespace package. + +This file enables the `microsoft_agents_a365.tooling.extensions` namespace +to span multiple installed packages (e.g., extensions-openai, extensions-agentframework). +""" + +import sys +from pkgutil import extend_path + +# Standard pkgutil-style namespace extension +__path__ = extend_path(__path__, __name__) + +# For editable installs with custom finders, manually discover extension paths +for finder in sys.meta_path: + if hasattr(finder, "find_spec"): + try: + spec = finder.find_spec(__name__, None) + if spec is not None and spec.submodule_search_locations: + for path in spec.submodule_search_locations: + if path not in __path__ and not path.endswith(".__path_hook__"): + __path__.append(path) + except (ImportError, TypeError): + # Some meta path finders may not support this namespace and can raise + # ImportError or TypeError; ignore these and continue discovering paths. + pass diff --git a/pyproject.toml b/pyproject.toml index 63d7c85a..e23deb0b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -122,7 +122,7 @@ min-file-size = 1 [tool.ruff.lint.per-file-ignores] # Tests can use magic values, assertions, and imports -"tests/*" = ["PLR2004", "S101", "TID252"] +"tests/*" = ["PLR2004", "S101", "TID252", "B903"] "samples/*" = ["B903"] [tool.ruff.format] diff --git a/tests/tooling/extensions/__init__.py b/tests/tooling/extensions/__init__.py new file mode 100644 index 00000000..dc3584eb --- /dev/null +++ b/tests/tooling/extensions/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +"""Tests for tooling extensions.""" diff --git a/tests/tooling/extensions/openai/__init__.py b/tests/tooling/extensions/openai/__init__.py new file mode 100644 index 00000000..1793ec2c --- /dev/null +++ b/tests/tooling/extensions/openai/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Tests for OpenAI tooling extensions.""" diff --git a/tests/tooling/extensions/openai/conftest.py b/tests/tooling/extensions/openai/conftest.py new file mode 100644 index 00000000..9a6f108e --- /dev/null +++ b/tests/tooling/extensions/openai/conftest.py @@ -0,0 +1,295 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Shared pytest fixtures for OpenAI extension tests.""" + +from datetime import UTC, datetime +from typing import TypeAlias +from unittest.mock import Mock + +import pytest + +# -------------------------------------------------------------------------- +# TYPE DEFINITIONS +# -------------------------------------------------------------------------- + +# Content can be string, list of content parts, or None (mimics OpenAI SDK) +MessageContent: TypeAlias = str | list[object] | None + + +# -------------------------------------------------------------------------- +# MOCK OPENAI MESSAGE CLASSES +# -------------------------------------------------------------------------- + + +class MockUserMessage: + """Mock OpenAI UserMessage for testing.""" + + def __init__( + self, + content: MessageContent = "Hello", + id: str | None = None, + timestamp: datetime | None = None, + ): + self.role = "user" + self.content = content + self.id = id + self.timestamp = timestamp + + +class MockAssistantMessage: + """Mock OpenAI AssistantMessage for testing.""" + + def __init__( + self, + content: MessageContent = "Hi there!", + id: str | None = None, + timestamp: datetime | None = None, + ): + self.role = "assistant" + self.content = content + self.id = id + self.timestamp = timestamp + + +class 
MockSystemMessage: + """Mock OpenAI SystemMessage for testing.""" + + def __init__( + self, + content: MessageContent = "You are a helpful assistant.", + id: str | None = None, + timestamp: datetime | None = None, + ): + self.role = "system" + self.content = content + self.id = id + self.timestamp = timestamp + + +class MockResponseOutputMessage: + """Mock OpenAI ResponseOutputMessage for testing.""" + + def __init__( + self, + content: MessageContent = "Response from agent", + role: str = "assistant", + id: str | None = None, + timestamp: datetime | None = None, + ): + self.role = role + self.content = content + self.id = id + self.timestamp = timestamp + + +class MockUnknownMessage: + """Mock unknown message type for testing fallback behavior.""" + + def __init__(self, content: MessageContent = "Unknown content"): + self.content = content + + +class MockContentPart: + """Mock content part for list-based content.""" + + def __init__(self, text: str): + self.type = "text" + self.text = text + + +# Type alias for mock messages +MockMessage: TypeAlias = ( + MockUserMessage + | MockAssistantMessage + | MockSystemMessage + | MockResponseOutputMessage + | MockUnknownMessage +) + + +class MockSession: + """Mock OpenAI Session for testing.""" + + def __init__(self, items: list[MockMessage] | None = None): + self._items: list[MockMessage] = items or [] + + def get_items(self, limit: int | None = None) -> list[MockMessage]: + """Get items from the session, optionally limited.""" + if limit is not None: + return self._items[:limit] + return self._items + + +# -------------------------------------------------------------------------- +# PYTEST FIXTURES +# -------------------------------------------------------------------------- + + +@pytest.fixture +def mock_turn_context(): + """Create a mock TurnContext with all required fields.""" + from microsoft_agents.hosting.core import TurnContext + + mock_context = Mock(spec=TurnContext) + mock_activity = Mock() + mock_conversation = 
Mock() + + mock_conversation.id = "conv-123" + mock_activity.conversation = mock_conversation + mock_activity.id = "msg-456" + mock_activity.text = "Hello, how are you?" + + mock_context.activity = mock_activity + return mock_context + + +@pytest.fixture +def mock_turn_context_no_activity(): + """Create a mock TurnContext with no activity.""" + from microsoft_agents.hosting.core import TurnContext + + mock_context = Mock(spec=TurnContext) + mock_context.activity = None + return mock_context + + +@pytest.fixture +def mock_turn_context_no_conversation_id(): + """Create a mock TurnContext with no conversation ID.""" + from microsoft_agents.hosting.core import TurnContext + + mock_context = Mock(spec=TurnContext) + mock_activity = Mock() + mock_activity.conversation = None + mock_activity.id = "msg-456" + mock_activity.text = "Hello" + mock_context.activity = mock_activity + return mock_context + + +@pytest.fixture +def mock_turn_context_no_message_id(): + """Create a mock TurnContext with no message ID.""" + from microsoft_agents.hosting.core import TurnContext + + mock_context = Mock(spec=TurnContext) + mock_activity = Mock() + mock_conversation = Mock() + mock_conversation.id = "conv-123" + mock_activity.conversation = mock_conversation + mock_activity.id = None + mock_activity.text = "Hello" + mock_context.activity = mock_activity + return mock_context + + +@pytest.fixture +def mock_turn_context_no_user_message(): + """Create a mock TurnContext with no user message text.""" + from microsoft_agents.hosting.core import TurnContext + + mock_context = Mock(spec=TurnContext) + mock_activity = Mock() + mock_conversation = Mock() + mock_conversation.id = "conv-123" + mock_activity.conversation = mock_conversation + mock_activity.id = "msg-456" + mock_activity.text = None + mock_context.activity = mock_activity + return mock_context + + +@pytest.fixture +def sample_user_message(): + """Create a sample user message.""" + return MockUserMessage(content="Hello, how are you?") 
+ + +@pytest.fixture +def sample_assistant_message(): + """Create a sample assistant message.""" + return MockAssistantMessage(content="I'm doing great, thank you!") + + +@pytest.fixture +def sample_system_message(): + """Create a sample system message.""" + return MockSystemMessage(content="You are a helpful assistant.") + + +@pytest.fixture +def sample_openai_messages(): + """Create a list of sample OpenAI messages.""" + return [ + MockUserMessage(content="Hello"), + MockAssistantMessage(content="Hi there!"), + MockUserMessage(content="How are you?"), + MockAssistantMessage(content="I'm doing well, thanks for asking!"), + ] + + +@pytest.fixture +def sample_messages_with_ids(): + """Create sample messages with pre-existing IDs.""" + return [ + MockUserMessage(content="Hello", id="user-msg-001"), + MockAssistantMessage(content="Hi!", id="assistant-msg-001"), + ] + + +@pytest.fixture +def sample_messages_with_timestamps(): + """Create sample messages with pre-existing timestamps.""" + timestamp = datetime(2024, 1, 15, 10, 30, 0, tzinfo=UTC) + return [ + MockUserMessage(content="Hello", timestamp=timestamp), + MockAssistantMessage( + content="Hi!", + timestamp=datetime(2024, 1, 15, 10, 30, 5, tzinfo=UTC), + ), + ] + + +@pytest.fixture +def sample_message_with_list_content(): + """Create a message with list-based content.""" + return MockUserMessage(content=[MockContentPart("Hello, "), MockContentPart("how are you?")]) + + +@pytest.fixture +def sample_message_with_empty_content(): + """Create a message with empty content.""" + return MockUserMessage(content="") + + +@pytest.fixture +def sample_message_with_none_content(): + """Create a message with None content.""" + return MockUserMessage(content=None) + + +@pytest.fixture +def sample_unknown_message(): + """Create an unknown message type.""" + return MockUnknownMessage(content="Unknown type content") + + +@pytest.fixture +def mock_session(sample_openai_messages): + """Create a mock OpenAI Session with sample 
messages.""" + return MockSession(items=sample_openai_messages) + + +@pytest.fixture +def mock_empty_session(): + """Create a mock OpenAI Session with no messages.""" + return MockSession(items=[]) + + +@pytest.fixture +def service(): + """Create a McpToolRegistrationService instance for testing.""" + from microsoft_agents_a365.tooling.extensions.openai import McpToolRegistrationService + + return McpToolRegistrationService() diff --git a/tests/tooling/extensions/openai/test_e2e.py b/tests/tooling/extensions/openai/test_e2e.py new file mode 100644 index 00000000..a33ec8e5 --- /dev/null +++ b/tests/tooling/extensions/openai/test_e2e.py @@ -0,0 +1,349 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""End-to-end tests for OpenAI send_chat_history methods with mocked HTTP. + +These tests verify the complete flow from Session/messages through conversion +to the HTTP call, using mocked HTTP responses. They are marked as unit tests +because they use mocks and don't require real external services. +""" + +import json +from datetime import UTC, datetime +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from microsoft_agents_a365.runtime import OperationResult +from microsoft_agents_a365.tooling.models import ChatHistoryMessage + +from .conftest import ( + MockAssistantMessage, + MockSession, + MockSystemMessage, + MockUserMessage, +) + +# ============================================================================= +# END-TO-END TESTS WITH MOCKED HTTP (E2E-01 to E2E-03) +# ============================================================================= + + +class TestEndToEndWithMockedHttp: + """End-to-end tests with mocked HTTP dependencies. + + These tests verify the complete flow through the service but use mocked + HTTP responses. They are marked as unit tests since no real network + calls are made. 
+ """ + + # E2E-01 + @pytest.mark.asyncio + @pytest.mark.unit + async def test_send_chat_history_e2e_success(self, service, mock_turn_context): + """Test full end-to-end flow: Session -> conversion -> HTTP -> success.""" + # Create a session with realistic messages + messages = [ + MockSystemMessage(content="You are a helpful assistant."), + MockUserMessage(content="What is the capital of France?"), + MockAssistantMessage(content="The capital of France is Paris."), + MockUserMessage(content="And what about Germany?"), + MockAssistantMessage(content="The capital of Germany is Berlin."), + ] + session = MockSession(items=messages) + + # Mock aiohttp.ClientSession + mock_response = AsyncMock() + mock_response.status = 200 + mock_response.text = AsyncMock(return_value="OK") + + with patch("aiohttp.ClientSession") as mock_session_class: + mock_session_instance = MagicMock() + mock_post = AsyncMock() + mock_post.__aenter__.return_value = mock_response + mock_session_instance.post.return_value = mock_post + mock_session_class.return_value.__aenter__.return_value = mock_session_instance + + # Execute + result = await service.send_chat_history(mock_turn_context, session) + + # Verify + assert result.succeeded is True + assert len(result.errors) == 0 + + # Verify HTTP call was made + assert mock_session_instance.post.called + + # E2E-02 + @pytest.mark.asyncio + @pytest.mark.unit + async def test_send_chat_history_e2e_server_error(self, service, mock_turn_context): + """Test full end-to-end flow with HTTP 500 error.""" + messages = [ + MockUserMessage(content="Hello"), + MockAssistantMessage(content="Hi there!"), + ] + session = MockSession(items=messages) + + # Mock aiohttp.ClientSession with 500 response + mock_response = AsyncMock() + mock_response.status = 500 + mock_response.text = AsyncMock(return_value="Internal Server Error") + mock_response.request_info = MagicMock() + mock_response.history = () + mock_response.headers = {} + + with patch("aiohttp.ClientSession") as 
mock_session_class: + mock_session_instance = MagicMock() + mock_post = AsyncMock() + mock_post.__aenter__.return_value = mock_response + mock_session_instance.post.return_value = mock_post + mock_session_class.return_value.__aenter__.return_value = mock_session_instance + + # Execute + result = await service.send_chat_history(mock_turn_context, session) + + # Verify failure + assert result.succeeded is False + assert len(result.errors) == 1 + + # E2E-03 + @pytest.mark.asyncio + @pytest.mark.unit + async def test_send_chat_history_e2e_payload_format(self, service, mock_turn_context): + """Test that the JSON payload has the correct structure.""" + messages = [ + MockUserMessage(content="Hello", id="user-001"), + MockAssistantMessage(content="Hi!", id="assistant-001"), + ] + session = MockSession(items=messages) + + mock_response = AsyncMock() + mock_response.status = 200 + mock_response.text = AsyncMock(return_value="OK") + + captured_payload = None + + with patch("aiohttp.ClientSession") as mock_session_class: + mock_session_instance = MagicMock() + + def capture_post(*args, **kwargs): + nonlocal captured_payload + captured_payload = kwargs.get("data") + mock_post = AsyncMock() + mock_post.__aenter__.return_value = mock_response + return mock_post + + mock_session_instance.post.side_effect = capture_post + mock_session_class.return_value.__aenter__.return_value = mock_session_instance + + # Execute + result = await service.send_chat_history(mock_turn_context, session) + + # Verify success + assert result.succeeded is True + + # Verify payload structure + assert captured_payload is not None + payload = json.loads(captured_payload) + + # Check required fields + assert "conversationId" in payload + assert "messageId" in payload + assert "userMessage" in payload + assert "chatHistory" in payload + + # Check conversation context from turn_context + assert payload["conversationId"] == "conv-123" + assert payload["messageId"] == "msg-456" + assert payload["userMessage"] 
== "Hello, how are you?" + + # Check chat history + chat_history = payload["chatHistory"] + assert len(chat_history) == 2 + + # Verify first message (user) + assert chat_history[0]["role"] == "user" + assert chat_history[0]["content"] == "Hello" + assert chat_history[0]["id"] == "user-001" + + # Verify second message (assistant) + assert chat_history[1]["role"] == "assistant" + assert chat_history[1]["content"] == "Hi!" + assert chat_history[1]["id"] == "assistant-001" + + +class TestConversionChainE2E: + """End-to-end tests for the full conversion chain with mocked dependencies.""" + + @pytest.mark.asyncio + @pytest.mark.unit + async def test_messages_converted_to_chat_history_message_type( + self, service, mock_turn_context, sample_openai_messages + ): + """Test that OpenAI messages are converted to ChatHistoryMessage instances.""" + captured_messages = None + + with patch.object( + service.config_service, + "send_chat_history", + new_callable=AsyncMock, + ) as mock_send: + mock_send.return_value = OperationResult.success() + + def capture_args(*args, **kwargs): + nonlocal captured_messages + captured_messages = kwargs.get("chat_history_messages") + return OperationResult.success() + + mock_send.side_effect = capture_args + + await service.send_chat_history_messages(mock_turn_context, sample_openai_messages) + + # Verify all messages are ChatHistoryMessage instances + assert captured_messages is not None + for msg in captured_messages: + assert isinstance(msg, ChatHistoryMessage) + + @pytest.mark.asyncio + @pytest.mark.unit + async def test_session_extraction_and_conversion_chain(self, service, mock_turn_context): + """Test the full chain: session.get_items() -> conversion -> send.""" + timestamp = datetime(2024, 1, 15, 10, 30, 0, tzinfo=UTC) + messages = [ + MockUserMessage(content="Query", id="q-1", timestamp=timestamp), + MockAssistantMessage(content="Response", id="r-1", timestamp=timestamp), + ] + session = MockSession(items=messages) + + captured_messages = 
None + + with patch.object( + service.config_service, + "send_chat_history", + new_callable=AsyncMock, + ) as mock_send: + + def capture_args(*args, **kwargs): + nonlocal captured_messages + captured_messages = kwargs.get("chat_history_messages") + return OperationResult.success() + + mock_send.side_effect = capture_args + + result = await service.send_chat_history(mock_turn_context, session) + + # Verify success + assert result.succeeded is True + + # Verify messages were extracted and converted + assert captured_messages is not None + assert len(captured_messages) == 2 + + # Verify IDs were preserved + assert captured_messages[0].id == "q-1" + assert captured_messages[1].id == "r-1" + + # Verify timestamps were preserved + assert captured_messages[0].timestamp == timestamp + assert captured_messages[1].timestamp == timestamp + + +class TestLimitParameterE2E: + """End-to-end tests for the limit parameter with mocked dependencies.""" + + @pytest.mark.asyncio + @pytest.mark.unit + async def test_limit_restricts_session_items(self, service, mock_turn_context): + """Test that limit parameter correctly restricts items from session.""" + # Create session with many messages + messages = [MockUserMessage(content=f"Msg {i}") for i in range(100)] + session = MockSession(items=messages) + + captured_messages = None + + with patch.object( + service.config_service, + "send_chat_history", + new_callable=AsyncMock, + ) as mock_send: + + def capture_args(*args, **kwargs): + nonlocal captured_messages + captured_messages = kwargs.get("chat_history_messages") + return OperationResult.success() + + mock_send.side_effect = capture_args + + # Send with limit + result = await service.send_chat_history(mock_turn_context, session, limit=10) + + assert result.succeeded is True + assert captured_messages is not None + assert len(captured_messages) == 10 + + @pytest.mark.asyncio + @pytest.mark.unit + async def test_no_limit_sends_all_items(self, service, mock_turn_context): + """Test that 
no limit sends all session items.""" + messages = [MockUserMessage(content=f"Msg {i}") for i in range(50)] + session = MockSession(items=messages) + + captured_messages = None + + with patch.object( + service.config_service, + "send_chat_history", + new_callable=AsyncMock, + ) as mock_send: + + def capture_args(*args, **kwargs): + nonlocal captured_messages + captured_messages = kwargs.get("chat_history_messages") + return OperationResult.success() + + mock_send.side_effect = capture_args + + # Send without limit + result = await service.send_chat_history(mock_turn_context, session) + + assert result.succeeded is True + assert captured_messages is not None + assert len(captured_messages) == 50 + + +class TestHeadersE2E: + """End-to-end tests for HTTP headers with mocked dependencies.""" + + @pytest.mark.asyncio + @pytest.mark.unit + async def test_user_agent_header_includes_orchestrator_name( + self, service, mock_turn_context, sample_openai_messages + ): + """Test that User-Agent header includes orchestrator name.""" + captured_headers = None + + mock_response = AsyncMock() + mock_response.status = 200 + mock_response.text = AsyncMock(return_value="OK") + + with patch("aiohttp.ClientSession") as mock_session_class: + mock_session_instance = MagicMock() + + def capture_post(*args, **kwargs): + nonlocal captured_headers + captured_headers = kwargs.get("headers") + mock_post = AsyncMock() + mock_post.__aenter__.return_value = mock_response + return mock_post + + mock_session_instance.post.side_effect = capture_post + mock_session_class.return_value.__aenter__.return_value = mock_session_instance + + await service.send_chat_history_messages(mock_turn_context, sample_openai_messages) + + # Verify headers + assert captured_headers is not None + assert "User-Agent" in captured_headers + # User agent should contain OpenAI or orchestrator info + user_agent = captured_headers["User-Agent"] + assert "OpenAI" in user_agent or "microsoft-agents" in user_agent.lower() diff 
--git a/tests/tooling/extensions/openai/test_message_conversion.py b/tests/tooling/extensions/openai/test_message_conversion.py new file mode 100644 index 00000000..df2a97f7 --- /dev/null +++ b/tests/tooling/extensions/openai/test_message_conversion.py @@ -0,0 +1,463 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Unit tests for message conversion logic in McpToolRegistrationService.""" + +import uuid +from datetime import UTC, datetime +from unittest.mock import Mock + +import pytest + +from .conftest import ( + MockAssistantMessage, + MockContentPart, + MockResponseOutputMessage, + MockSystemMessage, + MockUnknownMessage, + MockUserMessage, +) + +# ============================================================================= +# CONVERSION TESTS (CV-01 to CV-12) +# ============================================================================= + + +class TestRoleConversion: + """Tests for role extraction and mapping.""" + + # CV-01 + @pytest.mark.unit + def test_convert_user_message_to_chat_history(self, service): + """Test that UserMessage converts with role='user'.""" + message = MockUserMessage(content="Hello from user") + + result = service._convert_single_message(message) + + assert result is not None + assert result.role == "user" + assert result.content == "Hello from user" + + # CV-02 + @pytest.mark.unit + def test_convert_assistant_message_to_chat_history(self, service): + """Test that AssistantMessage converts with role='assistant'.""" + message = MockAssistantMessage(content="Hello from assistant") + + result = service._convert_single_message(message) + + assert result is not None + assert result.role == "assistant" + assert result.content == "Hello from assistant" + + # CV-03 + @pytest.mark.unit + def test_convert_system_message_to_chat_history(self, service): + """Test that SystemMessage converts with role='system'.""" + message = MockSystemMessage(content="System instructions") + + result = 
service._convert_single_message(message) + + assert result is not None + assert result.role == "system" + assert result.content == "System instructions" + + # CV-10 + @pytest.mark.unit + def test_convert_unknown_message_type_defaults_to_user(self, service): + """Test that unknown message type defaults to 'user' role.""" + message = MockUnknownMessage(content="Unknown type content") + + result = service._convert_single_message(message) + + assert result is not None + assert result.role == "user" + assert result.content == "Unknown type content" + + @pytest.mark.unit + def test_convert_response_output_message_to_assistant(self, service): + """Test that ResponseOutputMessage converts with role='assistant'.""" + message = MockResponseOutputMessage(content="Response content", role="assistant") + + result = service._convert_single_message(message) + + assert result is not None + assert result.role == "assistant" + + @pytest.mark.unit + def test_extract_role_from_dict_message(self, service): + """Test role extraction from dict-like message.""" + message = {"role": "user", "content": "Hello"} + + role = service._extract_role(message) + + assert role == "user" + + @pytest.mark.unit + def test_extract_role_from_dict_assistant(self, service): + """Test role extraction from dict with assistant role.""" + message = {"role": "assistant", "content": "Hi"} + + role = service._extract_role(message) + + assert role == "assistant" + + @pytest.mark.unit + def test_extract_role_from_dict_system(self, service): + """Test role extraction from dict with system role.""" + message = {"role": "system", "content": "Instructions"} + + role = service._extract_role(message) + + assert role == "system" + + +class TestContentExtraction: + """Tests for content extraction from messages.""" + + # CV-04 + @pytest.mark.unit + def test_convert_message_with_string_content(self, service): + """Test that string content is extracted directly.""" + message = MockUserMessage(content="Simple string content") + 
+ result = service._convert_single_message(message) + + assert result is not None + assert result.content == "Simple string content" + + # CV-05 + @pytest.mark.unit + def test_convert_message_with_list_content(self, service): + """Test that list content is concatenated.""" + message = MockUserMessage(content=[MockContentPart("Hello, "), MockContentPart("world!")]) + + result = service._convert_single_message(message) + + assert result is not None + assert result.content == "Hello, world!" + + # CV-11 + @pytest.mark.unit + def test_convert_empty_content_skips_message(self, service): + """Test that messages with empty content are skipped during conversion.""" + message = MockUserMessage(content="") + + result = service._convert_single_message(message) + + # Empty content should cause the message to be skipped + assert result is None + + @pytest.mark.unit + def test_extract_content_from_text_attribute(self, service): + """Test content extraction from .text attribute as fallback.""" + message = Mock() + message.content = None + message.text = "Content from text attribute" + + content = service._extract_content(message) + + assert content == "Content from text attribute" + + @pytest.mark.unit + def test_extract_content_from_dict(self, service): + """Test content extraction from dict message.""" + message = {"role": "user", "content": "Dict content"} + + content = service._extract_content(message) + + assert content == "Dict content" + + @pytest.mark.unit + def test_extract_content_from_dict_with_list(self, service): + """Test content extraction from dict with list content.""" + message = { + "role": "user", + "content": [{"type": "text", "text": "Part 1"}, {"text": "Part 2"}], + } + + content = service._extract_content(message) + + assert "Part 1" in content + assert "Part 2" in content + + @pytest.mark.unit + def test_extract_content_concatenates_string_parts(self, service): + """Test that string parts in list are concatenated.""" + message = Mock() + message.content = 
["Hello", " ", "world"] + + content = service._extract_content(message) + + assert content == "Hello world" + + +class TestIdExtraction: + """Tests for ID extraction and generation.""" + + # CV-06 + @pytest.mark.unit + def test_convert_message_generates_uuid_when_id_missing(self, service): + """Test that UUID is generated for messages without ID.""" + message = MockUserMessage(content="No ID message", id=None) + + result = service._convert_single_message(message) + + assert result is not None + assert result.id is not None + # Verify it's a valid UUID format + try: + uuid.UUID(result.id) + is_valid_uuid = True + except ValueError: + is_valid_uuid = False + assert is_valid_uuid + + # CV-08 + @pytest.mark.unit + def test_convert_message_preserves_existing_id(self, service): + """Test that existing ID is preserved.""" + message = MockUserMessage(content="Has ID", id="existing-id-123") + + result = service._convert_single_message(message) + + assert result is not None + assert result.id == "existing-id-123" + + @pytest.mark.unit + def test_extract_id_from_dict(self, service): + """Test ID extraction from dict message.""" + message = {"role": "user", "content": "Hello", "id": "dict-id-456"} + + msg_id = service._extract_id(message) + + assert msg_id == "dict-id-456" + + @pytest.mark.unit + def test_extract_id_generates_uuid_for_dict_without_id(self, service): + """Test UUID generation for dict without ID.""" + message = {"role": "user", "content": "Hello"} + + msg_id = service._extract_id(message) + + # Should be a valid UUID + try: + uuid.UUID(msg_id) + is_valid_uuid = True + except ValueError: + is_valid_uuid = False + assert is_valid_uuid + + +class TestTimestampExtraction: + """Tests for timestamp extraction and generation.""" + + # CV-07 + @pytest.mark.unit + def test_convert_message_uses_utc_when_timestamp_missing(self, service): + """Test that current UTC timestamp is used when missing.""" + message = MockUserMessage(content="No timestamp", timestamp=None) + 
before = datetime.now(UTC) + + result = service._convert_single_message(message) + + after = datetime.now(UTC) + + assert result is not None + assert result.timestamp is not None + assert before <= result.timestamp <= after + + # CV-09 + @pytest.mark.unit + def test_convert_message_preserves_existing_timestamp(self, service): + """Test that existing timestamp is preserved.""" + timestamp = datetime(2024, 1, 15, 10, 30, 0, tzinfo=UTC) + message = MockUserMessage(content="Has timestamp", timestamp=timestamp) + + result = service._convert_single_message(message) + + assert result is not None + assert result.timestamp == timestamp + + @pytest.mark.unit + def test_extract_timestamp_from_created_at(self, service): + """Test timestamp extraction from created_at attribute.""" + timestamp = datetime(2024, 6, 1, 12, 0, 0, tzinfo=UTC) + message = Mock() + message.timestamp = None + message.created_at = timestamp + + result = service._extract_timestamp(message) + + assert result == timestamp + + @pytest.mark.unit + def test_extract_timestamp_from_unix_timestamp(self, service): + """Test timestamp extraction from Unix timestamp.""" + unix_ts = 1704067200 # 2024-01-01 00:00:00 UTC + message = Mock() + message.timestamp = unix_ts + message.created_at = None + + result = service._extract_timestamp(message) + + assert result.year == 2024 + assert result.month == 1 + assert result.day == 1 + + @pytest.mark.unit + def test_extract_timestamp_from_iso_string(self, service): + """Test timestamp extraction from ISO format string.""" + message = Mock() + message.timestamp = "2024-03-15T14:30:00Z" + message.created_at = None + + result = service._extract_timestamp(message) + + assert result.year == 2024 + assert result.month == 3 + assert result.day == 15 + + +class TestBatchConversion: + """Tests for batch message conversion.""" + + # CV-12 + @pytest.mark.unit + def test_convert_multiple_messages(self, service, sample_openai_messages): + """Test that multiple messages are converted 
correctly.""" + result = service._convert_openai_messages_to_chat_history(sample_openai_messages) + + assert len(result) == len(sample_openai_messages) + + # Check alternating roles + assert result[0].role == "user" + assert result[1].role == "assistant" + assert result[2].role == "user" + assert result[3].role == "assistant" + + @pytest.mark.unit + def test_convert_filters_out_empty_content_messages(self, service): + """Test that messages with empty content are filtered out.""" + messages = [ + MockUserMessage(content="Valid content"), + MockUserMessage(content=""), # Should be filtered + MockAssistantMessage(content="Also valid"), + ] + + result = service._convert_openai_messages_to_chat_history(messages) + + # Only 2 messages should be converted (empty one filtered) + assert len(result) == 2 + assert result[0].content == "Valid content" + assert result[1].content == "Also valid" + + @pytest.mark.unit + def test_convert_handles_mixed_message_types(self, service): + """Test conversion of mixed message types.""" + messages = [ + MockSystemMessage(content="System prompt"), + MockUserMessage(content="User query"), + MockAssistantMessage(content="Assistant response"), + MockResponseOutputMessage(content="Output message"), + ] + + result = service._convert_openai_messages_to_chat_history(messages) + + assert len(result) == 4 + assert result[0].role == "system" + assert result[1].role == "user" + assert result[2].role == "assistant" + assert result[3].role == "assistant" + + @pytest.mark.unit + def test_convert_empty_list_returns_empty_list(self, service): + """Test that empty input returns empty output.""" + result = service._convert_openai_messages_to_chat_history([]) + + assert result == [] + + @pytest.mark.unit + def test_all_converted_messages_have_ids(self, service, sample_openai_messages): + """Test that all converted messages have IDs.""" + result = service._convert_openai_messages_to_chat_history(sample_openai_messages) + + for msg in result: + assert msg.id is 
not None + assert len(msg.id) > 0 + + @pytest.mark.unit + def test_all_converted_messages_have_timestamps(self, service, sample_openai_messages): + """Test that all converted messages have timestamps.""" + result = service._convert_openai_messages_to_chat_history(sample_openai_messages) + + for msg in result: + assert msg.timestamp is not None + + +class TestDictMessageConversion: + """Tests for dict-based message conversion.""" + + @pytest.mark.unit + def test_convert_dict_user_message(self, service): + """Test conversion of dict-based user message.""" + message = {"role": "user", "content": "Hello from dict"} + + result = service._convert_single_message(message) + + assert result is not None + assert result.role == "user" + assert result.content == "Hello from dict" + + @pytest.mark.unit + def test_convert_dict_with_id_and_timestamp(self, service): + """Test conversion of dict with ID and timestamp.""" + message = { + "role": "assistant", + "content": "Response", + "id": "dict-msg-id", + "timestamp": "2024-01-15T10:30:00Z", + } + + result = service._convert_single_message(message) + + assert result is not None + assert result.id == "dict-msg-id" + assert result.timestamp.year == 2024 + + +class TestEdgeCases: + """Tests for edge cases and boundary conditions.""" + + @pytest.mark.unit + def test_message_with_only_whitespace_content_skipped(self, service): + """Test that messages with only whitespace are skipped.""" + message = MockUserMessage(content=" ") + + result = service._convert_single_message(message) + + assert result is None + + @pytest.mark.unit + def test_message_with_none_content_skipped(self, service): + """Test that messages with None content are skipped.""" + message = Mock() + message.role = "user" + message.content = None + message.text = None + message.id = None + message.timestamp = None + + result = service._convert_single_message(message) + + assert result is None + + @pytest.mark.unit + def test_conversion_preserves_message_order(self, 
service): + """Test that message order is preserved during conversion.""" + messages = [MockUserMessage(content=f"Message {i}") for i in range(10)] + + result = service._convert_openai_messages_to_chat_history(messages) + + for i, msg in enumerate(result): + assert msg.content == f"Message {i}" diff --git a/tests/tooling/extensions/openai/test_send_chat_history.py b/tests/tooling/extensions/openai/test_send_chat_history.py new file mode 100644 index 00000000..9d7b9f12 --- /dev/null +++ b/tests/tooling/extensions/openai/test_send_chat_history.py @@ -0,0 +1,518 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Unit tests for send_chat_history and send_chat_history_messages methods.""" + +from unittest.mock import AsyncMock, MagicMock, Mock, patch + +import pytest +from microsoft_agents_a365.runtime import OperationResult +from microsoft_agents_a365.tooling.models import ToolOptions + +from .conftest import ( + MockSession, + MockUserMessage, +) + +# ============================================================================= +# INPUT VALIDATION TESTS (UV-01 to UV-09) +# ============================================================================= + + +class TestInputValidation: + """Tests for input validation in send_chat_history methods.""" + + # UV-01 + @pytest.mark.asyncio + @pytest.mark.unit + async def test_send_chat_history_messages_validates_turn_context_none( + self, service, sample_openai_messages + ): + """Test that send_chat_history_messages raises ValueError when turn_context is None.""" + with pytest.raises(ValueError, match="turn_context cannot be None"): + await service.send_chat_history_messages(None, sample_openai_messages) + + # UV-02 + @pytest.mark.asyncio + @pytest.mark.unit + async def test_send_chat_history_messages_validates_messages_none( + self, service, mock_turn_context + ): + """Test that send_chat_history_messages raises ValueError when messages is None.""" + with pytest.raises(ValueError, 
match="messages cannot be None"): + await service.send_chat_history_messages(mock_turn_context, None) + + # UV-03 + @pytest.mark.asyncio + @pytest.mark.unit + async def test_send_chat_history_messages_empty_list_returns_success( + self, service, mock_turn_context + ): + """Test that empty message list returns success (no-op).""" + result = await service.send_chat_history_messages(mock_turn_context, []) + + assert result.succeeded is True + assert len(result.errors) == 0 + + # UV-04 + @pytest.mark.asyncio + @pytest.mark.unit + async def test_send_chat_history_messages_validates_activity_none( + self, service, mock_turn_context_no_activity, sample_openai_messages + ): + """Test that send_chat_history_messages validates turn_context.activity.""" + with patch.object( + service.config_service, + "send_chat_history", + new_callable=AsyncMock, + ) as mock_send: + mock_send.side_effect = ValueError("turn_context.activity cannot be None") + + with pytest.raises(ValueError, match="turn_context.activity cannot be None"): + await service.send_chat_history_messages( + mock_turn_context_no_activity, sample_openai_messages + ) + + # UV-05 + @pytest.mark.asyncio + @pytest.mark.unit + async def test_send_chat_history_messages_validates_conversation_id( + self, service, mock_turn_context_no_conversation_id, sample_openai_messages + ): + """Test that send_chat_history_messages validates conversation_id.""" + with patch.object( + service.config_service, + "send_chat_history", + new_callable=AsyncMock, + ) as mock_send: + mock_send.side_effect = ValueError("conversation_id cannot be empty or None") + + with pytest.raises(ValueError, match="conversation_id cannot be empty"): + await service.send_chat_history_messages( + mock_turn_context_no_conversation_id, sample_openai_messages + ) + + # UV-06 + @pytest.mark.asyncio + @pytest.mark.unit + async def test_send_chat_history_messages_validates_message_id( + self, service, mock_turn_context_no_message_id, sample_openai_messages + ): + 
"""Test that send_chat_history_messages validates message_id.""" + with patch.object( + service.config_service, + "send_chat_history", + new_callable=AsyncMock, + ) as mock_send: + mock_send.side_effect = ValueError("message_id cannot be empty or None") + + with pytest.raises(ValueError, match="message_id cannot be empty"): + await service.send_chat_history_messages( + mock_turn_context_no_message_id, sample_openai_messages + ) + + # UV-07 + @pytest.mark.asyncio + @pytest.mark.unit + async def test_send_chat_history_messages_validates_user_message( + self, service, mock_turn_context_no_user_message, sample_openai_messages + ): + """Test that send_chat_history_messages validates user_message text.""" + with patch.object( + service.config_service, + "send_chat_history", + new_callable=AsyncMock, + ) as mock_send: + mock_send.side_effect = ValueError("user_message cannot be empty or None") + + with pytest.raises(ValueError, match="user_message cannot be empty"): + await service.send_chat_history_messages( + mock_turn_context_no_user_message, sample_openai_messages + ) + + # UV-08 + @pytest.mark.asyncio + @pytest.mark.unit + async def test_send_chat_history_validates_turn_context_none(self, service, mock_session): + """Test that send_chat_history raises ValueError when turn_context is None.""" + with pytest.raises(ValueError, match="turn_context cannot be None"): + await service.send_chat_history(None, mock_session) + + # UV-09 + @pytest.mark.asyncio + @pytest.mark.unit + async def test_send_chat_history_validates_session_none(self, service, mock_turn_context): + """Test that send_chat_history raises ValueError when session is None.""" + with pytest.raises(ValueError, match="session cannot be None"): + await service.send_chat_history(mock_turn_context, None) + + +# ============================================================================= +# SUCCESS PATH TESTS (SP-01 to SP-07) +# ============================================================================= + + 
class TestSuccessPath:
    """Tests for successful execution paths (SP-01 to SP-07)."""

    # SP-01: happy path returns a successful OperationResult.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_send_chat_history_messages_success(
        self, service, mock_turn_context, sample_openai_messages
    ):
        """Test successful send_chat_history_messages call."""
        with patch.object(
            service.config_service,
            "send_chat_history",
            new_callable=AsyncMock,
        ) as mock_send:
            mock_send.return_value = OperationResult.success()

            result = await service.send_chat_history_messages(
                mock_turn_context, sample_openai_messages
            )

            assert result.succeeded is True
            assert len(result.errors) == 0
            mock_send.assert_called_once()

    # SP-02: caller-supplied ToolOptions must be forwarded untouched.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_send_chat_history_messages_with_options(
        self, service, mock_turn_context, sample_openai_messages
    ):
        """Test send_chat_history_messages with custom ToolOptions."""
        opts = ToolOptions(orchestrator_name="CustomOrchestrator")

        with patch.object(
            service.config_service,
            "send_chat_history",
            new_callable=AsyncMock,
        ) as mock_send:
            mock_send.return_value = OperationResult.success()

            result = await service.send_chat_history_messages(
                mock_turn_context, sample_openai_messages, options=opts
            )

            assert result.succeeded is True
            # The options object passed downstream keeps the custom name.
            forwarded = mock_send.call_args
            assert forwarded.kwargs["options"].orchestrator_name == "CustomOrchestrator"

    # SP-03: when no options are given, the orchestrator name defaults.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_send_chat_history_messages_default_orchestrator_name(
        self, service, mock_turn_context, sample_openai_messages
    ):
        """Test that default orchestrator name is set to 'OpenAI'."""
        with patch.object(
            service.config_service,
            "send_chat_history",
            new_callable=AsyncMock,
        ) as mock_send:
            mock_send.return_value = OperationResult.success()

            await service.send_chat_history_messages(mock_turn_context, sample_openai_messages)

            # Verify the default orchestrator name reached the config service.
            forwarded = mock_send.call_args
            assert forwarded.kwargs["options"].orchestrator_name == "OpenAI"

    # SP-04: the method is a thin delegation layer over config_service.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_send_chat_history_messages_delegates_to_config_service(
        self, service, mock_turn_context, sample_openai_messages
    ):
        """Test that send_chat_history_messages delegates to config_service."""
        with patch.object(
            service.config_service,
            "send_chat_history",
            new_callable=AsyncMock,
        ) as mock_send:
            mock_send.return_value = OperationResult.success()

            await service.send_chat_history_messages(mock_turn_context, sample_openai_messages)

            # Exactly one delegated call, carrying the original turn context
            # and one converted entry per input message.
            mock_send.assert_called_once()
            forwarded = mock_send.call_args
            assert forwarded.kwargs["turn_context"] == mock_turn_context
            converted = forwarded.kwargs["chat_history_messages"]
            assert len(converted) == len(sample_openai_messages)

    # SP-05: session-based entry point also succeeds end to end.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_send_chat_history_success(self, service, mock_turn_context, mock_session):
        """Test successful send_chat_history call."""
        with patch.object(
            service.config_service,
            "send_chat_history",
            new_callable=AsyncMock,
        ) as mock_send:
            mock_send.return_value = OperationResult.success()

            result = await service.send_chat_history(mock_turn_context, mock_session)

            assert result.succeeded is True
            mock_send.assert_called_once()

    # SP-06: the limit parameter truncates the forwarded history.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_send_chat_history_with_limit(self, service, mock_turn_context):
        """Test send_chat_history with limit parameter."""
        # A session holding more messages than the requested limit.
        many_messages = [MockUserMessage(content=f"Message {i}") for i in range(10)]
        long_session = MockSession(items=many_messages)

        with patch.object(
            service.config_service,
            "send_chat_history",
            new_callable=AsyncMock,
        ) as mock_send:
            mock_send.return_value = OperationResult.success()

            result = await service.send_chat_history(mock_turn_context, long_session, limit=5)

            assert result.succeeded is True

            # Only the limited slice of messages should be forwarded.
            forwarded = mock_send.call_args
            converted = forwarded.kwargs["chat_history_messages"]
            assert len(converted) == 5

    # SP-07: send_chat_history routes through send_chat_history_messages.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_send_chat_history_delegates_to_send_chat_history_messages(
        self, service, mock_turn_context, mock_session
    ):
        """Test that send_chat_history calls send_chat_history_messages."""
        with patch.object(
            service,
            "send_chat_history_messages",
            new_callable=AsyncMock,
        ) as mock_method:
            mock_method.return_value = OperationResult.success()

            await service.send_chat_history(mock_turn_context, mock_session)

            mock_method.assert_called_once()
            forwarded = mock_method.call_args
            assert forwarded.kwargs["turn_context"] == mock_turn_context


# =============================================================================
# ERROR HANDLING TESTS (EH-01 to EH-05)
# =============================================================================


class TestErrorHandling:
    """Tests for error handling scenarios (EH-01 to EH-05)."""

    # EH-01: a failed OperationResult from the transport is passed through.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_send_chat_history_messages_http_error(
        self, service, mock_turn_context, sample_openai_messages
    ):
        """Test send_chat_history_messages handles HTTP errors."""
        with patch.object(
            service.config_service,
            "send_chat_history",
            new_callable=AsyncMock,
        ) as mock_send:
            mock_send.return_value = OperationResult.failed(
                MagicMock(message="HTTP 500: Internal Server Error")
            )

            result = await service.send_chat_history_messages(
                mock_turn_context, sample_openai_messages
            )

            assert result.succeeded is False

    # EH-02: a raised TimeoutError becomes a failed result with one error.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_send_chat_history_messages_timeout_error(
        self, service, mock_turn_context, sample_openai_messages
    ):
        """Test send_chat_history_messages handles timeout errors."""
        with patch.object(
            service.config_service,
            "send_chat_history",
            new_callable=AsyncMock,
        ) as mock_send:
            mock_send.side_effect = TimeoutError("Request timed out")

            result = await service.send_chat_history_messages(
                mock_turn_context, sample_openai_messages
            )

            assert result.succeeded is False
            assert len(result.errors) == 1

    # EH-03: network-level client errors are caught and reported.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_send_chat_history_messages_client_error(
        self, service, mock_turn_context, sample_openai_messages
    ):
        """Test send_chat_history_messages handles network/client errors."""
        import aiohttp

        with patch.object(
            service.config_service,
            "send_chat_history",
            new_callable=AsyncMock,
        ) as mock_send:
            mock_send.side_effect = aiohttp.ClientError("Connection failed")

            result = await service.send_chat_history_messages(
                mock_turn_context, sample_openai_messages
            )

            assert result.succeeded is False
            assert len(result.errors) == 1

    # EH-04: a failure while converting OpenAI messages is not fatal.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_send_chat_history_messages_conversion_error(self, service, mock_turn_context):
        """Test send_chat_history_messages handles conversion errors gracefully."""
        msgs = [MockUserMessage(content="Hello")]

        with patch.object(service, "_convert_openai_messages_to_chat_history") as mock_convert:
            mock_convert.side_effect = Exception("Conversion failed")

            result = await service.send_chat_history_messages(mock_turn_context, msgs)

            assert result.succeeded is False
            assert len(result.errors) == 1
            assert "Conversion failed" in str(result.errors[0].message)

    # EH-05: an exception from session.get_items() is converted to a failure.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_send_chat_history_get_items_error(self, service, mock_turn_context):
        """Test send_chat_history handles session.get_items() errors."""
        # A session whose item retrieval blows up.
        broken_session = Mock()
        broken_session.get_items.side_effect = Exception("Session error")

        result = await service.send_chat_history(mock_turn_context, broken_session)

        assert result.succeeded is False
        assert len(result.errors) == 1


# =============================================================================
# ORCHESTRATOR NAME HANDLING TESTS
# =============================================================================


class TestOrchestratorNameHandling:
    """Tests for orchestrator name handling in options."""

    # A None orchestrator_name is replaced by the "OpenAI" default.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_options_with_none_orchestrator_name_gets_default(
        self, service, mock_turn_context, sample_openai_messages
    ):
        """Test that options with None orchestrator_name get default value."""
        opts = ToolOptions(orchestrator_name=None)

        with patch.object(
            service.config_service,
            "send_chat_history",
            new_callable=AsyncMock,
        ) as mock_send:
            mock_send.return_value = OperationResult.success()

            await service.send_chat_history_messages(
                mock_turn_context, sample_openai_messages, options=opts
            )

            forwarded = mock_send.call_args
            assert forwarded.kwargs["options"].orchestrator_name == "OpenAI"

    # A caller-provided orchestrator name must never be overwritten.
    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_options_preserves_custom_orchestrator_name(
        self, service, mock_turn_context, sample_openai_messages
    ):
        """Test that custom orchestrator name is preserved."""
        opts = ToolOptions(orchestrator_name="MyCustomOrchestrator")

        with patch.object(
            service.config_service,
            "send_chat_history",
            new_callable=AsyncMock,
        ) as mock_send:
            mock_send.return_value = OperationResult.success()

            await service.send_chat_history_messages(
                mock_turn_context, sample_openai_messages, options=opts
            )

            forwarded = mock_send.call_args
            assert forwarded.kwargs["options"].orchestrator_name == "MyCustomOrchestrator"


# =============================================================================
# THREAD SAFETY / CONCURRENT CALLS TESTS
# =============================================================================


class TestConcurrentCalls:
    """Tests for thread safety and concurrent call isolation."""

    @pytest.mark.asyncio
    @pytest.mark.unit
    async def test_concurrent_calls_do_not_interfere(self, service, mock_turn_context):
        """Test that concurrent calls to send_chat_history_messages are isolated."""
        import asyncio

        messages1 = [MockUserMessage(content="Message set 1")]
        messages2 = [MockUserMessage(content="Message set 2")]

        payloads: list[list[object]] = []

        with patch.object(
            service.config_service,
            "send_chat_history",
            new_callable=AsyncMock,
        ) as mock_send:

            async def record_then_succeed(*args: object, **kwargs: object) -> OperationResult:
                # Capture what each call sent, yield control to interleave
                # the two coroutines, then report success.
                payloads.append(kwargs.get("chat_history_messages"))
                await asyncio.sleep(0.01)  # Simulate async work
                return OperationResult.success()

            mock_send.side_effect = record_then_succeed

            # Fire both calls concurrently.
            results = await asyncio.gather(
                service.send_chat_history_messages(mock_turn_context, messages1),
                service.send_chat_history_messages(mock_turn_context, messages2),
            )

            # Each call succeeds on its own.
            assert all(r.succeeded for r in results)
            assert len(payloads) == 2
            # Each payload kept its own message content — no cross-contamination.
            contents = [p[0].content for p in payloads]
            assert "Message set 1" in contents
            assert "Message set 2" in contents