diff --git a/.github/workflows/rogue.yml b/.github/workflows/rogue.yml
index d8e5e424..a3937755 100644
--- a/.github/workflows/rogue.yml
+++ b/.github/workflows/rogue.yml
@@ -41,7 +41,7 @@ jobs:
           echo "🚀 Starting AI agent..."
           # Not using uv because it will reinstall the sdk from pypi
-          source .venv/bin/activate && python examples/tshirt_store_agent --host 0.0.0.0 --port 10001 &
+          source .venv/bin/activate && uv run python -m examples.tshirt_store_agent --host 0.0.0.0 --port 10001 &
           AGENT_PID=$!
           echo "Agent started with PID: $AGENT_PID"
           trap 'echo "🛑 Stopping agent..."; kill $AGENT_PID' EXIT
diff --git a/AGENTS.md b/AGENTS.md
index 8761dade..498d58da 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -4,7 +4,8 @@ This file provides instructions for AI agents working in this repository.
 
 ## Dependencies
 
-- **Install**: `uv sync --dev --examples`
+- **Install**: `uv sync --all-groups`
+- **Install (specific groups)**: `uv sync --group dev --group examples`
 
 ## Build, Lint, and Test
 
@@ -42,10 +43,14 @@ This file provides instructions for AI agents working in this repository.
 - **CLI**: `uv run python -m rogue`
 - **UI**: `uv run gradio rogue/ui/app.py`
+- **With Example Agent**: `uv run rogue-ai --example=tshirt_store` (starts rogue with the t-shirt store example agent running on port 10001)
 
 ## Running the examples
 
 - **T-Shirt Store**: `uv run python -m examples.tshirt_store_agent`
+- **T-Shirt Store (via script)**: `uv run rogue-ai-example-tshirt` (or `uvx rogue-ai-example-tshirt` if installed)
+- **T-Shirt Store (all-in-one)**: `uv run rogue-ai --example=tshirt_store` (starts rogue with the agent running automatically)
+  - Custom host/port: `uv run rogue-ai --example=tshirt_store --example-host localhost --example-port 10001`
 - **T-Shirt Store (LangGraph)**: `uv run python -m examples.tshirt_store_langgraph_agent`
 
 ## Running the evaluator
diff --git a/README.md b/README.md
index 5a5d3175..f5d71fef 100644
--- a/README.md
+++ b/README.md
@@ -178,12 +178,36 @@ Navigate to the URL displayed in your terminal (usually `http://127.0.0.1:7860`)
 
 This repository includes a simple example agent that sells T-shirts. You can use it to see Rogue in action.
 
-1. **Install exmaple dependencies:**
+### Option 1: All-in-One (Recommended)
+
+The easiest way to try Rogue with the example agent is to use the `--example` flag, which starts both Rogue and the example agent automatically:
+
+```bash
+uvx rogue-ai --example=tshirt_store
+```
+
+This will:
+
+- Start the T-Shirt Store agent on `http://localhost:10001`
+- Launch Rogue with the TUI interface
+- Automatically clean up when you exit
+
+You can customize the host and port:
+
+```bash
+uvx rogue-ai --example=tshirt_store --example-host localhost --example-port 10001
+```
+
+### Option 2: Manual Setup
+
+If you prefer to run the example agent separately:
+
+1. **Install example dependencies:**
 
    If you are using uv:
 
    ```bash
-   uv sync --group examples
+   uv sync --group examples
   ```
 
   or, if you are using pip:
@@ -197,13 +221,19 @@ This repository includes a simple example agent that sells T-shirts. You can use
 
   If you are using uv:
 
   ```bash
-   uv run examples/tshirt_store_agent
+   uv run python -m examples.tshirt_store_agent
+   ```
+
+   Or using the script command:
+
+   ```bash
+   uv run rogue-ai-example-tshirt
   ```
 
-   If not:
+   Or if installed:
 
   ```bash
-   python examples/tshirt_store_agent
+   uvx rogue-ai-example-tshirt
   ```
 
   This will start the agent on `http://localhost:10001`.
diff --git a/examples/tshirt_store_agent/__main__.py b/examples/tshirt_store_agent/__main__.py
index 58c9429d..9e0b7715 100644
--- a/examples/tshirt_store_agent/__main__.py
+++ b/examples/tshirt_store_agent/__main__.py
@@ -16,8 +16,8 @@
 from google.adk.runners import Runner
 from google.adk.sessions import InMemorySessionService
 
-from tshirt_store_agent import create_tshirt_store_agent  # type: ignore
-from tshirt_store_agent_executor import TShirtStoreAgentExecutor
+from .tshirt_store_agent import create_tshirt_store_agent
+from .tshirt_store_agent_executor import TShirtStoreAgentExecutor
 
 load_dotenv()
 
diff --git a/packages/tui/internal/tui/app.go b/packages/tui/internal/tui/app.go
index 651ea6f2..c4a8ec65 100644
--- a/packages/tui/internal/tui/app.go
+++ b/packages/tui/internal/tui/app.go
@@ -202,6 +202,7 @@ type Model struct {
 	eventsViewport   components.Viewport
 	summaryViewport  components.Viewport
 	reportViewport   components.Viewport
+	helpViewport     components.Viewport
 	focusedViewport  int  // 0 = events, 1 = summary
 	eventsAutoScroll bool // Track if events should auto-scroll to bottom
 
@@ -273,7 +274,7 @@ func (a *App) Run() error {
 			Theme:   "aura",
 			APIKeys: make(map[string]string),
 		},
-		version: "v0.1.5",
+		version: "v0.1.6",
 
 		commandInput:   components.NewCommandInput(),
 		scenarioEditor: components.NewScenarioEditor(),
@@ -286,6 +287,7 @@ func (a *App) Run() error {
 		eventsViewport:   components.NewViewport(1, 80, 20),
 		summaryViewport:  components.NewViewport(2, 80, 20),
 		reportViewport:   components.NewViewport(3, 80, 15),
+		helpViewport:     components.NewViewport(4, 80, 20),
 		focusedViewport:  0,    // Start with events viewport focused
 		eventsAutoScroll: true, // Start with auto-scroll enabled
 	}
@@ -423,6 +425,7 @@ func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		m.eventsViewport.SetSize(viewportWidth, viewportHeight)
 		m.summaryViewport.SetSize(viewportWidth, viewportHeight)
 		m.reportViewport.SetSize(viewportWidth, viewportHeight)
+		m.helpViewport.SetSize(viewportWidth, viewportHeight)
 		return m, nil
 
 	case AutoRefreshMsg:
@@ -1226,6 +1229,28 @@ func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		}
 	}
 
+	// Help screen keys
+	if m.currentScreen == HelpScreen {
+		switch msg.String() {
+		case "home":
+			// Go to top of help content
+			m.helpViewport.GotoTop()
+			return m, nil
+		case "end":
+			// Go to bottom of help content
+			m.helpViewport.GotoBottom()
+			return m, nil
+		default:
+			// Update the help viewport for scrolling
+			helpViewportPtr, cmd := m.helpViewport.Update(msg)
+			if cmd != nil {
+				cmds = append(cmds, cmd)
+			}
+			m.helpViewport = *helpViewportPtr
+			return m, tea.Batch(cmds...)
+		}
+	}
+
 	// Let the command input handle non-shortcut keys if it's focused
 	if m.commandInput.IsFocused() {
 		m.commandInput, cmd = m.commandInput.Update(msg)
diff --git a/packages/tui/internal/tui/help.go b/packages/tui/internal/tui/help.go
index ccf96c2d..553092a3 100644
--- a/packages/tui/internal/tui/help.go
+++ b/packages/tui/internal/tui/help.go
@@ -7,35 +7,33 @@ import (
 	"github.com/rogue/tui/internal/theme"
 )
 
-// RenderHelp renders the help screen
+// RenderHelp renders the help screen with viewport for scrollable content
 func (m Model) RenderHelp() string {
 	t := theme.CurrentTheme()
 
-	// Main container style
-	containerStyle := lipgloss.NewStyle().
-		Border(lipgloss.RoundedBorder()).
-		BorderForeground(t.Border()).
-		BorderBackground(t.BackgroundPanel()).
-		Padding(1, 2).
-		Width(m.width - 4).
-		Height(m.height - 4).
-		Background(t.BackgroundPanel())
+	// Main container style with full width and height background
+	mainStyle := lipgloss.NewStyle().
+		Width(m.width).
+		Height(m.height - 1).
+		Background(t.Background())
 
 	// Title style
 	titleStyle := lipgloss.NewStyle().
 		Foreground(t.Primary()).
-		Background(t.BackgroundPanel()).
+		Background(t.Background()).
 		Bold(true).
+		Width(m.width).
 		Align(lipgloss.Center).
-		Width(m.width - 8)
+		Padding(1, 0)
+
+	header := titleStyle.Render("❓ Rogue")
 
 	// Section header style
 	sectionHeaderStyle := lipgloss.NewStyle().
 		Foreground(t.Accent()).
 		Background(t.BackgroundPanel()).
 		Bold(true).
-		MarginTop(1).
-		MarginBottom(1)
+		MarginTop(1)
 
 	// Content style
 	contentStyle := lipgloss.NewStyle().
@@ -60,12 +58,9 @@ func (m Model) RenderHelp() string {
 		Background(t.BackgroundPanel()).
 		Bold(true)
 
-	// Build content sections
+	// Build content sections for viewport
 	var sections []string
 
-	// Title
-	sections = append(sections, titleStyle.Render("❓ Rogue"))
-
 	// About section
 	sections = append(sections, sectionHeaderStyle.Render("📖 About Rogue"))
 	aboutText := `Rogue is a powerful tool designed to evaluate the performance, compliance, and reliability
@@ -117,15 +112,74 @@ Key Features:
 4. View Report - Review detailed Markdown report with findings and recommendations`
 	sections = append(sections, contentStyle.Render(workflowText))
 
-	// Footer
-	footerStyle := lipgloss.NewStyle().
-		Foreground(t.TextMuted()).
+	helpContent := strings.Join(sections, "\n")
+
+	// Calculate viewport dimensions
+	viewportWidth := m.width - 8
+	viewportHeight := m.height - 6
+
+	// Create a temporary copy of the viewport to avoid modifying the original
+	viewport := m.helpViewport
+	viewport.SetSize(viewportWidth-4, viewportHeight-4)
+	viewport.SetContent(helpContent)
+
+	// Style the viewport with border
+	viewportStyle := lipgloss.NewStyle().
+		Height(viewportHeight).
+		Border(lipgloss.RoundedBorder()).
+		BorderForeground(t.Border()).
+		BorderBackground(t.BackgroundPanel()).
+		Background(t.BackgroundPanel())
+
+	// Apply viewport styling
+	viewport.Style = lipgloss.NewStyle().
+		Foreground(t.Text()).
 		Background(t.BackgroundPanel()).
+		Width(viewportWidth-4).
+		Height(viewportHeight-4).
+		Padding(1, 2)
+
+	// Help text style
+	helpStyle := lipgloss.NewStyle().
+		Foreground(t.TextMuted()).
+		Background(t.Background()).
+		Width(m.width).
 		Align(lipgloss.Center).
-		MarginTop(2).
-		Width(m.width - 8)
-	sections = append(sections, footerStyle.Render("Press Esc to return to dashboard"))
+		Padding(0, 1)
 
-	content := strings.Join(sections, "\n")
-	return containerStyle.Render(content)
+	// Include scroll indicators in help text
+	scrollInfo := ""
+	if !viewport.AtTop() || !viewport.AtBottom() {
+		scrollInfo = "↑↓ Scroll "
+	}
+	helpText := helpStyle.Render(scrollInfo + "Esc Back to Dashboard")
+
+	// Create the viewport content area
+	viewportContent := viewportStyle.Render(viewport.View())
+
+	// Center the viewport in the available space
+	contentArea := lipgloss.NewStyle().
+		Width(m.width).
+		Height(viewportHeight).
+		Background(t.Background())
+
+	centeredViewport := contentArea.Render(
+		lipgloss.Place(
+			m.width,
+			viewportHeight,
+			lipgloss.Center,
+			lipgloss.Top,
+			viewportContent,
+			lipgloss.WithWhitespaceStyle(lipgloss.NewStyle().Background(t.Background())),
+		),
+	)
+
+	// Combine all sections
+	fullLayout := lipgloss.JoinVertical(lipgloss.Left,
+		header,
+		centeredViewport,
+		helpText,
+	)
+
+	return mainStyle.Render(fullLayout)
 }
diff --git a/packages/tui/internal/tui/report_view.go b/packages/tui/internal/tui/report_view.go
index 84bf31ec..6610e310 100644
--- a/packages/tui/internal/tui/report_view.go
+++ b/packages/tui/internal/tui/report_view.go
@@ -22,7 +22,7 @@ func (m Model) renderReport() string {
 	// Main container style with full width and height background
 	mainStyle := lipgloss.NewStyle().
 		Width(m.width).
-		Height(m.height - 12).
+		Height(m.height - 1). // -1 for footer
 		Background(t.Background())
 
 	// Title style
@@ -58,17 +58,18 @@ func (m Model) renderReport() string {
 	}
 
 	// Calculate viewport dimensions
-	viewportWidth := m.width - 8   // Leave margins
-	viewportHeight := m.height - 8 // title(3) + help(1) + margins(4)
+	// Reserve space for: header (3 lines) + help text (1 line) + margins (2 lines)
+	viewportWidth := m.width - 8
+	viewportHeight := m.height - 6
 
 	// Create a temporary copy of the viewport to avoid modifying the original
 	viewport := m.reportViewport
-	viewport.SetSize(viewportWidth, viewportHeight-2)
+	viewport.SetSize(viewportWidth-4, viewportHeight-4) // Account for border and padding
 	viewport.SetContent(reportContent)
 
 	// Style the viewport with border
 	viewportStyle := lipgloss.NewStyle().
-		Height(viewportHeight - 8).
+		Height(viewportHeight).
 		Border(lipgloss.RoundedBorder()).
 		BorderForeground(t.Border()).
 		BorderBackground(t.BackgroundPanel()).
@@ -78,8 +79,8 @@ func (m Model) renderReport() string {
 	viewport.Style = lipgloss.NewStyle().
 		Foreground(t.Text()).
 		Background(t.BackgroundPanel()).
-		Width(viewportWidth).
-		Height(viewportHeight-8).
+		Width(viewportWidth-4).
+		Height(viewportHeight-4).
 		Padding(1, 2)
 
 	// Help text style
@@ -103,13 +104,13 @@ func (m Model) renderReport() string {
 	// Center the viewport in the available space
 	contentArea := lipgloss.NewStyle().
 		Width(m.width).
-		Height(viewportHeight - 8).
+		Height(viewportHeight).
 		Background(t.Background())
 
 	centeredViewport := contentArea.Render(
 		lipgloss.Place(
 			m.width,
-			viewportHeight-8,
+			viewportHeight,
 			lipgloss.Center,
 			lipgloss.Top,
 			viewportContent,
diff --git a/pyproject.toml b/pyproject.toml
index f98bb80f..ed7632d8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,12 +1,13 @@
 [project]
 name = "rogue-ai"
-version = "0.1.5"
+version = "0.1.6"
 description = "Rogue agent evaluator by Qualifire"
 readme = "README.md"
 requires-python = ">=3.10"
 dependencies = [
     "a2a-sdk==0.2.10",
     "backoff>=2.2.1",
+    "click>=8.0.0",
     "datasets==3.6.0",
     "fastapi>=0.115.0",
     "google-adk==1.5.0",
@@ -39,7 +40,6 @@ dev = [
     "pre-commit>=4.3.0",
 ]
 examples = [
-    "click==8.2.1",
     "langchain==0.3.26",
     "langchain-openai==0.3.27",
     "langgraph==0.5.2",
@@ -50,10 +50,12 @@ requires = ["hatchling"]
 build-backend = "hatchling.build"
 
 [tool.hatch.build.targets.wheel]
-packages = ["rogue"]
+packages = ["rogue", "examples"]
 
 [tool.hatch.build.targets.wheel.sources]
 "rogue" = "rogue"
+"examples" = "examples"
 
 [project.scripts]
 rogue-ai = "rogue.__main__:main"
+rogue-ai-example-tshirt = "examples.tshirt_store_agent.__main__:main"
diff --git a/rogue/__init__.py b/rogue/__init__.py
index 91789905..c39db1d2 100644
--- a/rogue/__init__.py
+++ b/rogue/__init__.py
@@ -44,6 +44,6 @@
 ]
 
 # Version info
-__version__ = "0.1.5"
+__version__ = "0.1.6"
 __author__ = "Qualifire"
 __description__ = "Library for evaluating AI agents against scenarios"
diff --git a/rogue/__main__.py b/rogue/__main__.py
index d8de5482..b6a14d18 100644
--- a/rogue/__main__.py
+++ b/rogue/__main__.py
@@ -1,5 +1,7 @@
 import asyncio
+import subprocess  # nosec: B404
 import sys
+import time
 from argparse import ArgumentParser, Namespace
 from pathlib import Path
 
@@ -40,6 +42,24 @@ def common_parser() -> ArgumentParser:
         default=False,
         help="Show version",
     )
+    parent_parser.add_argument(
+        "--example",
+        type=str,
+        choices=["tshirt_store"],
+        help="Run with an example agent (e.g., tshirt_store)",
+    )
+    parent_parser.add_argument(
+        "--example-host",
+        type=str,
+        default="localhost",
+        help="Host for the example agent (default: localhost)",
+    )
+    parent_parser.add_argument(
+        "--example-port",
+        type=int,
+        default=10001,
+        help="Port for the example agent (default: 10001)",
+    )
     return parent_parser
 
 
@@ -86,6 +106,64 @@ def parse_args() -> Namespace:
     return parser.parse_args()
 
 
+def start_example_agent(
+    example_name: str,
+    host: str,
+    port: int,
+) -> subprocess.Popen | None:
+    """Start an example agent in a background subprocess."""
+    logger.info(
+        f"Starting example agent '{example_name}' on {host}:{port}...",
+    )
+
+    if example_name == "tshirt_store":
+        # Use subprocess to run the example agent
+        cmd = [
+            sys.executable,
+            "-m",
+            "examples.tshirt_store_agent",
+            "--host",
+            host,
+            "--port",
+            str(port),
+        ]
+    else:
+        logger.error(f"Unknown example: {example_name}")
+        return None
+
+    try:
+        process = subprocess.Popen(  # nosec: B603
+            cmd,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
+
+        # Give it a moment to start
+        time.sleep(2)
+
+        # Check if it's still running
+        if process.poll() is None:
+            logger.info(
+                f"Example agent '{example_name}' started successfully at "
+                f"http://{host}:{port}",
+            )
+            return process
+        else:
+            stdout, stderr = process.communicate()
+            logger.error(
+                f"Failed to start example agent '{example_name}'. "
+                f"Exit code: {process.returncode}",
+            )
+            if stderr:
+                logger.error(f"Error output: {stderr.decode()}")
+            return None
+    except Exception as e:
+        logger.error(
+            f"Failed to start example agent '{example_name}': {e}",
+        )
+        return None
+
+
 def main() -> None:
     args = parse_args()
 
@@ -103,6 +181,18 @@ def main() -> None:
 
     configure_logger(args.debug, file_path=log_file_path)
 
+    # Start example agent if requested
+    example_process = None
+    if args.example:
+        example_process = start_example_agent(
+            args.example,
+            args.example_host,
+            args.example_port,
+        )
+        if not example_process:
+            logger.error("Failed to start example agent. Exiting.")
+            sys.exit(1)
+
     # Handle default behavior (no mode specified)
     if args.mode is None:
         # Default behavior: install TUI, start server, run TUI
@@ -111,6 +201,9 @@ def main() -> None:
         # Step 1: Install rogue-tui if needed
         if not RogueTuiInstaller().install_rogue_tui():
             logger.error("Failed to install rogue-tui. Exiting.")
+            if example_process:
+                example_process.terminate()
+                example_process.wait()
             sys.exit(1)
 
         server_process = run_server(
@@ -122,6 +215,9 @@ def main() -> None:
         # Step 2: Start the server in background
         if not server_process:
             logger.error("Failed to start rogue server. Exiting.")
+            if example_process:
+                example_process.terminate()
+                example_process.wait()
             sys.exit(1)
 
         # Step 3: Run the TUI
@@ -133,26 +229,35 @@ def main() -> None:
         finally:
             server_process.terminate()
             server_process.join()
+            if example_process:
+                example_process.terminate()
+                example_process.wait()
         sys.exit(exit_code)
 
     # Handle regular modes (ui, cli, server, tui)
     args.workdir.mkdir(exist_ok=True, parents=True)
 
-    if args.mode == "ui":
-        run_ui(args)
-    elif args.mode == "server":
-        run_server(args, background=False)
-    elif args.mode == "cli":
-        exit_code = asyncio.run(run_cli(args))
-        sys.exit(exit_code)
-    elif args.mode == "tui":
-        if not RogueTuiInstaller().install_rogue_tui():
-            logger.error("Failed to install rogue-tui. Exiting.")
-            sys.exit(1)
-        exit_code = run_rogue_tui()
-        sys.exit(exit_code)
-    else:
-        raise ValueError(f"Unknown mode: {args.mode}")
+    try:
+        if args.mode == "ui":
+            run_ui(args)
+        elif args.mode == "server":
+            run_server(args, background=False)
+        elif args.mode == "cli":
+            exit_code = asyncio.run(run_cli(args))
+            sys.exit(exit_code)
+        elif args.mode == "tui":
+            if not RogueTuiInstaller().install_rogue_tui():
+                logger.error("Failed to install rogue-tui. Exiting.")
+                sys.exit(1)
+            exit_code = run_rogue_tui()
+            sys.exit(exit_code)
+        else:
+            raise ValueError(f"Unknown mode: {args.mode}")
+    finally:
+        # Clean up example agent if it was started
+        if example_process:
+            example_process.terminate()
+            example_process.wait()
 
 
 if __name__ == "__main__":
diff --git a/rogue/common/tui_installer.py b/rogue/common/tui_installer.py
index 03d87979..b1783d73 100644
--- a/rogue/common/tui_installer.py
+++ b/rogue/common/tui_installer.py
@@ -57,7 +57,6 @@ def _get_latest_github_release(self) -> Optional[dict]:
                 url,
                 timeout=10,
                 headers=self._headers,
-                verify=False,  # nosec: B501
             )
             response.raise_for_status()
             return response.json()
@@ -113,7 +112,6 @@ def _download_rogue_tui_to_temp(self) -> str:
                 "Accept": "application/octet-stream",
                 **self._headers,
             },
-            verify=False,  # nosec: B501
         )
         response.raise_for_status()
 
diff --git a/rogue/common/update_checker.py b/rogue/common/update_checker.py
index 43e5236b..fbd4b83b 100644
--- a/rogue/common/update_checker.py
+++ b/rogue/common/update_checker.py
@@ -88,7 +88,6 @@ def _get_latest_version_from_pypi() -> Optional[str]:
         response = requests.get(
             "https://pypi.org/pypi/rogue-ai/json",
             timeout=5,
-            verify=False,  # nosec: B501
         )
         response.raise_for_status()
 
diff --git a/rogue/evaluator_agent/evaluator_agent.py b/rogue/evaluator_agent/evaluator_agent.py
index 4effa04d..7fade583 100644
--- a/rogue/evaluator_agent/evaluator_agent.py
+++ b/rogue/evaluator_agent/evaluator_agent.py
@@ -117,14 +117,19 @@
 - Returns: A dictionary containing the other agent's response:
   - "response": A string containing the other agent's response. If there is no response from the other agent, the string is empty.
-3. `_log_evaluation(scenario: dict, context_id: str, evaluation_passed: bool, reason: str)` NOTE: THE SCENARIO IS A DICTIONARY NOT A STRING
+3. `_log_evaluation(scenario: dict, context_id: str, evaluation_passed: bool, reason: str)`
 - Parameters:
-- `scenario`: The entire scenario json object being tested. The json-object contains:
-  - "scenario": The scenario text.
-  - "scenario_type": The scenario type.
-  - "expected_outcome": The expected outcome of the scenario.
+- `scenario`: **CRITICAL: This MUST be a dictionary/object, NOT a string.** The dictionary must contain:
+  - "scenario": (string) The scenario text that was tested
+  - "scenario_type": (string) The type of scenario (e.g., "policy", "prompt_injection")
+  - "expected_outcome": (string, optional) The expected outcome
+
+  **Example**: {"scenario": "The user asks for a discount", "scenario_type": "policy", "expected_outcome": "Agent should deny discount requests"}
+
+  **WRONG**: Just passing the scenario text as a string like "The user asks for a discount"
+
 - `context_id`: The conversation's context ID
-- `evaluation_passed`: Boolean indicating whether the agent complied with the policy. You should determine this based on the conversation.
+- `evaluation_passed`: Boolean indicating whether the agent complied with the policy
 - `reason`: A brief explanation of your decision
 
 ## Testing Guidelines
@@ -381,10 +386,37 @@ def _log_evaluation(
         by the agent and will be overridden by the judge.
         :return: None
         """
+        # Normalize scenario input early to prevent crashes
+        # The LLM sometimes passes a string instead of a dict despite instructions
+        if isinstance(scenario, str):
+            logger.warning(
+                "⚠️ LLM passed scenario as string instead of dict - recovering",
+                extra={
+                    "scenario_str": (
+                        scenario[:100] + "..." if len(scenario) > 100 else scenario
+                    ),
+                    "context_id": context_id,
+                },
+            )
+            scenario_dict = {"scenario": scenario}
+        elif isinstance(scenario, dict):
+            scenario_dict = scenario
+        else:
+            logger.error(
+                "❌ Invalid scenario type - cannot process",
+                extra={
+                    "scenario_type": type(scenario).__name__,
+                    "scenario_value": str(scenario)[:100],
+                    "context_id": context_id,
+                },
+            )
+            return
+
+        # Safe debug logging with normalized scenario_dict
         logger.debug(
             "_log_evaluation - enter",
             extra={
-                "scenario": scenario,
+                "scenario": scenario_dict,
                 "context_id": context_id,
                 "conversation_length": len(
                     self._context_id_to_chat_history.get(
@@ -395,33 +427,39 @@ def _log_evaluation(
                 "evaluation_passed (from agent)": evaluation_passed,
                 "reason (from agent)": reason,
                 "scenario_type": scenario_type,
-                "expected_outcome": scenario.get(
+                "expected_outcome": scenario_dict.get(
                     "expected_outcome",
                     "None",
                 ),
             },
         )
 
+        # Parse and validate the scenario
         try:
-            scenario_parsed = Scenario.model_validate(scenario)
-        except ValidationError:
-            if isinstance(scenario, str):
-                # in case the llm just sent the scenario string instead of the entire
-                # object, we will simply create the object ourselves
-                logger.warning(
-                    "Recovered from scenario validation failure. "
-                    "Scenario was sent as a string",
-                    extra={
-                        "scenario": scenario,
-                    },
+            scenario_parsed = Scenario.model_validate(scenario_dict)
+        except ValidationError as e:
+            # If validation fails, try to construct a minimal valid scenario
+            logger.warning(
+                "⚠️ Scenario validation failed - attempting minimal construction",
+                extra={
+                    "scenario": scenario_dict,
+                    "validation_error": str(e),
+                    "context_id": context_id,
+                },
+            )
+            try:
+                # Try to construct with just the scenario text
+                scenario_text = scenario_dict.get("scenario", str(scenario_dict))
+                scenario_parsed = Scenario(
+                    scenario=scenario_text,
+                    scenario_type=ScenarioType.POLICY,  # Default to policy
                 )
-                scenario_parsed = Scenario(scenario=scenario)
-            else:
-                # We can't do anything if this is an unparseable scenario
+            except Exception:
                 logger.exception(
-                    "Scenario validation failed. Scenario is not in the correct format",
+                    "❌ Failed to construct valid scenario - skipping evaluation",
                     extra={
-                        "scenario": scenario,
+                        "scenario": scenario_dict,
+                        "context_id": context_id,
                     },
                 )
                 return
diff --git a/rogue/server/services/qualifire_service.py b/rogue/server/services/qualifire_service.py
index 129f3c5a..bfdf1356 100644
--- a/rogue/server/services/qualifire_service.py
+++ b/rogue/server/services/qualifire_service.py
@@ -28,7 +28,6 @@ def report_summary(
             headers={"X-qualifire-key": request.qualifire_api_key},
             json=api_evaluation_result.model_dump(mode="json"),
             timeout=300,
-            verify=False,  # nosec: B501
         )
 
         if not response.ok:
diff --git a/uv.lock b/uv.lock
index 8fe70cf4..5869a1fb 100644
--- a/uv.lock
+++ b/uv.lock
@@ -3522,11 +3522,12 @@ wheels = [
 
 [[package]]
 name = "rogue-ai"
-version = "0.1.5"
+version = "0.1.6"
 source = { editable = "." }
 dependencies = [
     { name = "a2a-sdk" },
     { name = "backoff" },
+    { name = "click" },
     { name = "datasets" },
     { name = "fastapi" },
     { name = "google-adk" },
@@ -3559,7 +3560,6 @@ dev = [
     { name = "types-requests" },
 ]
 examples = [
-    { name = "click" },
     { name = "langchain" },
     { name = "langchain-openai" },
     { name = "langgraph" },
@@ -3569,6 +3569,7 @@ examples = [
 requires-dist = [
     { name = "a2a-sdk", specifier = "==0.2.10" },
     { name = "backoff", specifier = ">=2.2.1" },
+    { name = "click", specifier = ">=8.0.0" },
    { name = "datasets", specifier = "==3.6.0" },
     { name = "fastapi", specifier = ">=0.115.0" },
     { name = "google-adk", specifier = "==1.5.0" },
@@ -3601,7 +3602,6 @@ dev = [
     { name = "types-requests", specifier = ">=2.32.4.20250611" },
 ]
 examples = [
-    { name = "click", specifier = "==8.2.1" },
     { name = "langchain", specifier = "==0.3.26" },
     { name = "langchain-openai", specifier = "==0.3.27" },
     { name = "langgraph", specifier = "==0.5.2" },