diff --git a/alembic/versions/e38f2f6f395a_add_mock_manual_approval_required.py b/alembic/versions/e38f2f6f395a_add_mock_manual_approval_required.py
index b4097d0bb..3edd12381 100644
--- a/alembic/versions/e38f2f6f395a_add_mock_manual_approval_required.py
+++ b/alembic/versions/e38f2f6f395a_add_mock_manual_approval_required.py
@@ -5,32 +5,33 @@
Create Date: 2025-10-23 20:06:20.766732
"""
-from typing import Sequence, Union
-from alembic import op
+from collections.abc import Sequence
+
import sqlalchemy as sa
+from alembic import op
# revision identifiers, used by Alembic.
-revision: str = 'e38f2f6f395a'
-down_revision: Union[str, Sequence[str], None] = 'faaed3b71428'
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
+revision: str = "e38f2f6f395a"
+down_revision: str | Sequence[str] | None = "faaed3b71428"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
"""Upgrade schema."""
# Add mock_manual_approval_required column to adapter_config table
- op.add_column('adapter_config', sa.Column('mock_manual_approval_required', sa.Boolean(), nullable=True))
+ op.add_column("adapter_config", sa.Column("mock_manual_approval_required", sa.Boolean(), nullable=True))
# Set default value to False for ALL existing rows (not just mock adapters)
op.execute("UPDATE adapter_config SET mock_manual_approval_required = false")
# Make the column non-nullable after setting defaults
- op.alter_column('adapter_config', 'mock_manual_approval_required', nullable=False, server_default=sa.false())
+ op.alter_column("adapter_config", "mock_manual_approval_required", nullable=False, server_default=sa.false())
def downgrade() -> None:
"""Downgrade schema."""
# Remove mock_manual_approval_required column
- op.drop_column('adapter_config', 'mock_manual_approval_required')
+ op.drop_column("adapter_config", "mock_manual_approval_required")
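Note: the migration above follows the standard three-step pattern for adding a NOT NULL column to a populated table: add it as nullable, backfill, then tighten the constraint with a server default. A minimal sketch of the same pattern for a hypothetical table and column (names illustrative, not from this patch):

    import sqlalchemy as sa
    from alembic import op

    def upgrade() -> None:
        # 1. Add as nullable so existing rows don't violate NOT NULL.
        op.add_column("some_table", sa.Column("flag", sa.Boolean(), nullable=True))
        # 2. Backfill every existing row.
        op.execute("UPDATE some_table SET flag = false")
        # 3. Tighten the constraint; server_default covers future inserts that omit the column.
        op.alter_column("some_table", "flag", nullable=False, server_default=sa.false())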
diff --git a/examples/upstream_product_catalog_server.py b/examples/upstream_product_catalog_server.py
index 18c174715..7ffae7dd3 100755
--- a/examples/upstream_product_catalog_server.py
+++ b/examples/upstream_product_catalog_server.py
@@ -164,15 +164,23 @@ async def match_products(self, brief: str, all_products: list[dict[str, Any]]) -
prompt = f"""Given this advertising brief: "{brief}"
And these available products:
-{json.dumps([{
- 'id': p['product_id'],
- 'name': p['name'],
- 'description': p['description'],
- 'formats': [f['type'] for f in p['formats']],
- 'targeting': p.get('targeting_template', {}),
- 'price': p.get('cpm') or p.get('price_guidance', {}).get('p50', 'variable'),
- 'special_features': p.get('availability', {})
-} for p in all_products], indent=2)}
+{
+ json.dumps(
+ [
+ {
+ "id": p["product_id"],
+ "name": p["name"],
+ "description": p["description"],
+ "formats": [f["type"] for f in p["formats"]],
+ "targeting": p.get("targeting_template", {}),
+ "price": p.get("cpm") or p.get("price_guidance", {}).get("p50", "variable"),
+ "special_features": p.get("availability", {}),
+ }
+ for p in all_products
+ ],
+ indent=2,
+ )
+ }
Select the most relevant products (up to 3) and return as JSON:
{{
diff --git a/scripts/audit_e2e_tests.py b/scripts/audit_e2e_tests.py
index 47292304d..714ce826f 100755
--- a/scripts/audit_e2e_tests.py
+++ b/scripts/audit_e2e_tests.py
@@ -6,6 +6,7 @@
3. Redundant tests
4. Tests with excessive tool calls
"""
+
import re
import sys
from collections import defaultdict
diff --git a/scripts/deploy/fly-proxy.py b/scripts/deploy/fly-proxy.py
index 3cec67718..a8a257270 100644
--- a/scripts/deploy/fly-proxy.py
+++ b/scripts/deploy/fly-proxy.py
@@ -2,6 +2,7 @@
"""
Simple HTTP proxy for Fly.io deployment to route between MCP server and Admin UI
"""
+
import asyncio
import logging
diff --git a/scripts/maintenance/audit_all_production_products.py b/scripts/maintenance/audit_all_production_products.py
index eb057ace5..82f2d7d1f 100644
--- a/scripts/maintenance/audit_all_production_products.py
+++ b/scripts/maintenance/audit_all_production_products.py
@@ -45,18 +45,18 @@ def audit_all_products():
if not data["pricing_options"]:
missing_pricing.append(data["product"])
- print(f"\n{'='*80}")
+ print(f"\n{'=' * 80}")
print("PRODUCTION PRODUCT PRICING AUDIT")
- print(f"{'='*80}\n")
+ print(f"{'=' * 80}\n")
print(f"Total products in database: {len(products_by_id)}")
print(f"Products WITH pricing_options: {len(products_by_id) - len(missing_pricing)}")
print(f"Products MISSING pricing_options: {len(missing_pricing)}")
if missing_pricing:
- print(f"\n{'='*80}")
+ print(f"\n{'=' * 80}")
print("⚠️ PRODUCTS MISSING PRICING_OPTIONS (BLOCKING MIGRATION)")
- print(f"{'='*80}\n")
+ print(f"{'=' * 80}\n")
for product in missing_pricing:
print(f"❌ {product.product_id}")
@@ -65,9 +65,9 @@ def audit_all_products():
print(f" Delivery Type: {product.delivery_type}")
print()
- print(f"{'='*80}")
+ print(f"{'=' * 80}")
print("⚠️ ACTION REQUIRED")
- print(f"{'='*80}\n")
+ print(f"{'=' * 80}\n")
print("These products MUST have pricing_options added before migration can proceed.")
print("\nRecommended fix (run in Fly.io SSH console):")
print("\n```python")
diff --git a/scripts/ops/aggregate_format_metrics.py b/scripts/ops/aggregate_format_metrics.py
index 2ae8439bc..39b2a72ed 100755
--- a/scripts/ops/aggregate_format_metrics.py
+++ b/scripts/ops/aggregate_format_metrics.py
@@ -51,7 +51,7 @@ def main():
args = parser.parse_args()
logger.info(
- f"Starting format metrics aggregation (period_days={args.period_days}, " f"tenant_id={args.tenant_id or 'all'})"
+ f"Starting format metrics aggregation (period_days={args.period_days}, tenant_id={args.tenant_id or 'all'})"
)
try:
diff --git a/scripts/ops/gam_helper.py b/scripts/ops/gam_helper.py
index d936d4be8..e7fdc8cf8 100644
--- a/scripts/ops/gam_helper.py
+++ b/scripts/ops/gam_helper.py
@@ -122,7 +122,7 @@ def test_gam_connection(tenant_id: str) -> dict:
return {
"success": True,
- "message": f'Successfully connected to GAM network: {network["displayName"]} (ID: {network["id"]})',
+ "message": f"Successfully connected to GAM network: {network['displayName']} (ID: {network['id']})",
}
except Exception as e:
diff --git a/scripts/ops/get_tokens.py b/scripts/ops/get_tokens.py
index 47ab8c4f3..8831c51b5 100644
--- a/scripts/ops/get_tokens.py
+++ b/scripts/ops/get_tokens.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python3
"""Quick script to get tokens from the database."""
-
from src.core.database.database_session import get_db_session
from src.core.database.models import Principal, Tenant
diff --git a/scripts/ops/show_principal_mappings.py b/scripts/ops/show_principal_mappings.py
index aee529322..9517c5eba 100644
--- a/scripts/ops/show_principal_mappings.py
+++ b/scripts/ops/show_principal_mappings.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
"""Show detailed platform mappings for all principals in a tenant."""
+
import json
import sys
@@ -19,7 +20,7 @@ def show_mappings(tenant_name=None):
principals = session.query(Principal).all()
for principal in principals:
- print(f"\n{'='*80}")
+ print(f"\n{'=' * 80}")
print(f"Name: {principal.name}")
print(f"Principal ID: {principal.principal_id}")
print(f"Tenant ID: {principal.tenant_id}")
diff --git a/scripts/ops/sync_all_tenants.py b/scripts/ops/sync_all_tenants.py
index 330175611..1ce7cfbbb 100644
--- a/scripts/ops/sync_all_tenants.py
+++ b/scripts/ops/sync_all_tenants.py
@@ -56,7 +56,7 @@ def sync_all_gam_tenants():
try:
# Call sync API
response = requests.post(
- f'http://localhost:{os.environ.get("ADMIN_UI_PORT", 8001)}/api/v1/sync/trigger/{tenant_id}',
+ f"http://localhost:{os.environ.get('ADMIN_UI_PORT', 8001)}/api/v1/sync/trigger/{tenant_id}",
headers={"X-API-Key": api_key},
json={"sync_type": "full"},
timeout=300, # 5 minute timeout per tenant
diff --git a/scripts/run_tests.py b/scripts/run_tests.py
index 84b87692b..942d5a3a0 100644
--- a/scripts/run_tests.py
+++ b/scripts/run_tests.py
@@ -123,7 +123,7 @@ def run_tests(categories, verbose=False, failfast=False, coverage=False, specifi
print(f"Available categories: {', '.join(TEST_CATEGORIES.keys())}")
continue
- print(f"\n{'='*60}")
+ print(f"\n{'=' * 60}")
print(f"Running {category} tests: {TEST_CATEGORIES[category]['description']}")
print("=" * 60)
@@ -155,7 +155,7 @@ def run_tests(categories, verbose=False, failfast=False, coverage=False, specifi
break
# Summary
- print(f"\n{'='*60}")
+ print(f"\n{'=' * 60}")
print("TEST SUMMARY")
print("=" * 60)
print(f"✅ Passed: {len(passed_categories)} categories")
diff --git a/scripts/setup/setup_tenant.py b/scripts/setup/setup_tenant.py
index affdd6c28..b73ce7a85 100644
--- a/scripts/setup/setup_tenant.py
+++ b/scripts/setup/setup_tenant.py
@@ -148,7 +148,7 @@ def create_tenant(args):
{f"Login as {admin_email} to manage this publisher" if admin_email else "Login with your Google account to manage this publisher"}
📝 Next Steps:
-1. {'Access the Admin UI with your admin account' if admin_email else 'Access the Admin UI to complete setup'}
+1. {"Access the Admin UI with your admin account" if admin_email else "Access the Admin UI to complete setup"}
2. Configure your ad server integration (if not done)
3. Add more authorized domains/emails in the Users & Access section
4. Create principals for each advertiser who will buy inventory
diff --git a/scripts/test_gam_automation_dry_run.py b/scripts/test_gam_automation_dry_run.py
index a55722e15..35f539c6d 100644
--- a/scripts/test_gam_automation_dry_run.py
+++ b/scripts/test_gam_automation_dry_run.py
@@ -339,7 +339,7 @@ def main():
total = len(results)
failed = total - passed
- print(f"\n{'='*40}")
+ print(f"\n{'=' * 40}")
print(f"📊 RESULTS: {passed}/{total} tests passed")
if failed == 0:
diff --git a/scripts/test_mcp_client.py b/scripts/test_mcp_client.py
index 86e09ba2f..589720f3a 100755
--- a/scripts/test_mcp_client.py
+++ b/scripts/test_mcp_client.py
@@ -14,9 +14,9 @@
async def test_mcp_endpoint(url: str, auth_token: str = None):
"""Test MCP endpoint with actual MCP client."""
- print(f"\n{'='*80}")
+ print(f"\n{'=' * 80}")
print(f"Testing MCP endpoint: {url}")
- print(f"{'='*80}")
+ print(f"{'=' * 80}")
headers = {}
if auth_token:
@@ -79,16 +79,16 @@ async def main():
results = []
for name, url, _needs_auth in test_cases:
- print(f"\n{'='*80}")
+ print(f"\n{'=' * 80}")
print(f"TEST: {name}")
- print(f"{'='*80}")
+ print(f"{'=' * 80}")
success = await test_mcp_endpoint(url)
results.append((name, success))
# Print summary
- print(f"\n{'='*80}")
+ print(f"\n{'=' * 80}")
print("SUMMARY")
- print(f"{'='*80}")
+ print(f"{'=' * 80}")
for name, success in results:
status = "✅ PASS" if success else "❌ FAIL"
diff --git a/scripts/test_nginx_routing.py b/scripts/test_nginx_routing.py
index b9d8af8ff..1be951721 100755
--- a/scripts/test_nginx_routing.py
+++ b/scripts/test_nginx_routing.py
@@ -61,12 +61,12 @@ def simulate_approximated_request(self, domain: str, path: str, extra_headers: d
def run_test(self, test: TestCase) -> bool:
"""Run a single test case."""
- print(f"\n{'='*80}")
+ print(f"\n{'=' * 80}")
print(f"TEST: {test.name}")
print(f"Domain: {test.domain}{test.path}")
if test.description:
print(f"Description: {test.description}")
- print(f"{'='*80}")
+ print(f"{'=' * 80}")
# Set headers based on routing path
if test.via_approximated:
@@ -150,26 +150,26 @@ def _error(self, test: TestCase, error: str):
def print_summary(self):
"""Print test summary."""
- print(f"\n{'='*80}")
+ print(f"\n{'=' * 80}")
print("TEST SUMMARY")
- print(f"{'='*80}")
+ print(f"{'=' * 80}")
print(f"Passed: {self.passed}")
print(f"Failed: {self.failed}")
print(f"Total: {self.passed + self.failed}")
if self.errors:
- print(f"\n{'='*80}")
+ print(f"\n{'=' * 80}")
print("FAILURES")
- print(f"{'='*80}")
+ print(f"{'=' * 80}")
for error in self.errors:
print(error)
- print(f"\n{'='*80}")
+ print(f"\n{'=' * 80}")
if self.failed == 0:
print("✅ ALL TESTS PASSED")
else:
print(f"❌ {self.failed} TESTS FAILED")
- print(f"{'='*80}")
+ print(f"{'=' * 80}")
def get_test_cases() -> list[TestCase]:
diff --git a/scripts/test_service_account_auth.py b/scripts/test_service_account_auth.py
index 3e4a056ca..3bb575ff3 100755
--- a/scripts/test_service_account_auth.py
+++ b/scripts/test_service_account_auth.py
@@ -21,10 +21,11 @@
sys.path.insert(0, str(project_root))
from sqlalchemy import select
+
+from src.adapters.gam import build_gam_config_from_adapter
+from src.adapters.google_ad_manager import GoogleAdManager
from src.core.database.database_session import get_db_session
from src.core.database.models import Tenant
-from src.adapters.google_ad_manager import GoogleAdManager
-from src.adapters.gam import build_gam_config_from_adapter
from src.core.schemas import Principal
@@ -132,6 +133,7 @@ def test_service_account_auth(tenant_name: str):
except Exception as e:
print(f"❌ Failed to create adapter: {e}")
import traceback
+
traceback.print_exc()
return False
@@ -153,6 +155,7 @@ def test_service_account_auth(tenant_name: str):
except Exception as e:
print(f"❌ Failed to fetch advertisers: {e}")
import traceback
+
traceback.print_exc()
return False
diff --git a/scripts/validate_pydantic_against_adcp_schemas.py b/scripts/validate_pydantic_against_adcp_schemas.py
index 1c0eec8c5..b609f9e7a 100644
--- a/scripts/validate_pydantic_against_adcp_schemas.py
+++ b/scripts/validate_pydantic_against_adcp_schemas.py
@@ -243,9 +243,9 @@ def validate_all(self) -> bool:
print() # Blank line between validations
# Summary
- print(f"\n{Colors.BOLD}{'='*60}{Colors.RESET}")
+ print(f"\n{Colors.BOLD}{'=' * 60}{Colors.RESET}")
print(f"{Colors.BOLD}Validation Summary{Colors.RESET}")
- print(f"{'='*60}")
+ print(f"{'=' * 60}")
print(f"Models validated: {validated_count}")
print(f"{Colors.RED}Errors: {len(self.errors)}{Colors.RESET}")
print(f"{Colors.YELLOW}Warnings: {len(self.warnings)}{Colors.RESET}")
diff --git a/src/a2a_server/adcp_a2a_server.py b/src/a2a_server/adcp_a2a_server.py
index 131ee5642..33f910151 100644
--- a/src/a2a_server/adcp_a2a_server.py
+++ b/src/a2a_server/adcp_a2a_server.py
@@ -529,7 +529,7 @@ async def on_message_send(
task.artifacts = task.artifacts or []
task.artifacts.append(
Artifact(
- artifactId=f"skill_result_{i+1}",
+ artifactId=f"skill_result_{i + 1}",
name=f"{'error' if not res['success'] else res['skill']}_result",
description=description, # Human-readable message
parts=[Part(type="data", data=artifact_data)],
@@ -2274,7 +2274,9 @@ async def dynamic_agent_card_endpoint(request):
logger.info("Replaced /.well-known/agent.json with dynamic version")
elif route.path == "/.well-known/agent-card.json":
# Replace with our dynamic endpoint (primary A2A discovery)
- new_routes.append(Route("/.well-known/agent-card.json", dynamic_agent_discovery, methods=["GET", "OPTIONS"]))
+ new_routes.append(
+ Route("/.well-known/agent-card.json", dynamic_agent_discovery, methods=["GET", "OPTIONS"])
+ )
logger.info("Replaced /.well-known/agent-card.json with dynamic version")
elif route.path == "/agent.json":
# Replace with our dynamic endpoint
diff --git a/src/adapters/gam/managers/inventory.py b/src/adapters/gam/managers/inventory.py
index 0d042f748..791e72143 100644
--- a/src/adapters/gam/managers/inventory.py
+++ b/src/adapters/gam/managers/inventory.py
@@ -169,8 +169,7 @@ def sync_all_inventory(self, custom_targeting_limit: int = 1000, fetch_values: b
discovery = self._get_discovery()
return discovery.sync_all(
- fetch_custom_targeting_values=fetch_values,
- max_custom_targeting_values_per_key=custom_targeting_limit
+ fetch_custom_targeting_values=fetch_values, max_custom_targeting_values_per_key=custom_targeting_limit
)
def build_ad_unit_tree(self) -> dict[str, Any]:
diff --git a/src/adapters/gam/managers/reporting.py b/src/adapters/gam/managers/reporting.py
index 9bb28a5eb..ffa1fd7d6 100644
--- a/src/adapters/gam/managers/reporting.py
+++ b/src/adapters/gam/managers/reporting.py
@@ -84,7 +84,7 @@ def start_delivery_reporting(
self._active_reports[media_buy_id] = thread
thread.start()
- logger.info(f"✅ Started delivery reporting for {media_buy_id} " f"(interval: {reporting_interval_hours}h)")
+ logger.info(f"✅ Started delivery reporting for {media_buy_id} (interval: {reporting_interval_hours}h)")
def stop_delivery_reporting(self, media_buy_id: str):
"""Stop delivery reporting for a media buy.
diff --git a/src/adapters/gam/managers/sync.py b/src/adapters/gam/managers/sync.py
index e8ba20531..61ba2000e 100644
--- a/src/adapters/gam/managers/sync.py
+++ b/src/adapters/gam/managers/sync.py
@@ -61,7 +61,13 @@ def __init__(
logger.info(f"Initialized GAMSyncManager for tenant {tenant_id} (dry_run: {dry_run})")
- def sync_inventory(self, db_session: Session, force: bool = False, fetch_custom_targeting_values: bool = False, custom_targeting_limit: int = 1000) -> dict[str, Any]:
+ def sync_inventory(
+ self,
+ db_session: Session,
+ force: bool = False,
+ fetch_custom_targeting_values: bool = False,
+ custom_targeting_limit: int = 1000,
+ ) -> dict[str, Any]:
"""Synchronize inventory data from GAM to database.
Args:
@@ -108,8 +114,7 @@ def sync_inventory(self, db_session: Session, force: bool = False, fetch_custom_
else:
# Perform actual inventory sync with custom targeting parameters
summary = self.inventory_manager.sync_all_inventory(
- custom_targeting_limit=custom_targeting_limit,
- fetch_values=fetch_custom_targeting_values
+ custom_targeting_limit=custom_targeting_limit, fetch_values=fetch_custom_targeting_values
)
# Save inventory to database - this would be delegated to inventory service
@@ -249,7 +254,9 @@ def sync_full(self, db_session: Session, force: bool = False, custom_targeting_l
}
# Sync inventory first with custom targeting limit
- inventory_result = self.sync_inventory(db_session, force=True, custom_targeting_limit=custom_targeting_limit)
+ inventory_result = self.sync_inventory(
+ db_session, force=True, custom_targeting_limit=custom_targeting_limit
+ )
combined_summary["inventory"] = inventory_result.get("summary", {})
# Then sync orders
diff --git a/src/adapters/gam/utils/error_handler.py b/src/adapters/gam/utils/error_handler.py
index 0f549868c..967fc6aac 100644
--- a/src/adapters/gam/utils/error_handler.py
+++ b/src/adapters/gam/utils/error_handler.py
@@ -328,9 +328,7 @@ def rollback(self) -> list[dict[str, Any]]:
for step in reversed(self.steps):
if step["rollback_action"]:
try:
- logger.info(
- f"Rolling back {step['step_name']} for {step['resource_type']} " f"{step['resource_id']}"
- )
+ logger.info(f"Rolling back {step['step_name']} for {step['resource_type']} {step['resource_id']}")
result = step["rollback_action"]()
rollback_results.append({"step": step["step_name"], "success": True, "result": result})
diff --git a/src/adapters/gam/utils/timeout_handler.py b/src/adapters/gam/utils/timeout_handler.py
index 06cc4570a..7e1f7b765 100644
--- a/src/adapters/gam/utils/timeout_handler.py
+++ b/src/adapters/gam/utils/timeout_handler.py
@@ -59,7 +59,7 @@ def wrapper(*args, **kwargs) -> T:
except concurrent.futures.TimeoutError:
# Log timeout for debugging
logger.error(
- f"⏰ {func.__name__} timed out after {seconds}s. " f"This usually means the GAM API is hanging."
+ f"⏰ {func.__name__} timed out after {seconds}s. This usually means the GAM API is hanging."
)
raise TimeoutError(f"{func.__name__} timed out after {seconds} seconds")
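Note: the handler above runs the GAM call in a worker thread and abandons it on timeout; concurrent.futures cannot kill the thread, so a hung call keeps running in the background. A minimal sketch of a decorator with this shape, assuming the wrapped call is blocking (not the file's actual implementation):

    import concurrent.futures
    import functools

    def with_timeout(seconds: float):
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
                future = pool.submit(func, *args, **kwargs)
                try:
                    return future.result(timeout=seconds)
                except concurrent.futures.TimeoutError:
                    raise TimeoutError(f"{func.__name__} timed out after {seconds} seconds")
                finally:
                    # wait=False: don't block on the (possibly hung) worker thread.
                    pool.shutdown(wait=False)
            return wrapper
        return decorator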
diff --git a/src/adapters/gam/utils/validation.py b/src/adapters/gam/utils/validation.py
index 6d6755e52..b89434fce 100644
--- a/src/adapters/gam/utils/validation.py
+++ b/src/adapters/gam/utils/validation.py
@@ -225,8 +225,7 @@ def validate_technical_requirements(self, asset: dict[str, Any]) -> list[str]:
valid_ratios = [16 / 9, 4 / 3, 1 / 1, 9 / 16] # Common video aspect ratios
if not any(abs(aspect_ratio - ratio) < 0.01 for ratio in valid_ratios):
issues.append(
- f"Video aspect ratio {aspect_ratio:.2f} is not standard. "
- f"Recommended: 16:9, 4:3, 1:1, or 9:16"
+ f"Video aspect ratio {aspect_ratio:.2f} is not standard. Recommended: 16:9, 4:3, 1:1, or 9:16"
)
return issues
@@ -322,8 +321,7 @@ def _validate_file_extension(self, url: str, format_type: str) -> list[str]:
allowed_extensions = self.ALLOWED_EXTENSIONS.get(creative_type, [])
if not any(file_path.endswith(ext) for ext in allowed_extensions):
issues.append(
- f"File extension not allowed for {creative_type} creatives. "
- f"Allowed: {', '.join(allowed_extensions)}"
+ f"File extension not allowed for {creative_type} creatives. Allowed: {', '.join(allowed_extensions)}"
)
return issues
diff --git a/src/adapters/google_ad_manager.py b/src/adapters/google_ad_manager.py
index 318d73b2f..d59385184 100644
--- a/src/adapters/google_ad_manager.py
+++ b/src/adapters/google_ad_manager.py
@@ -108,7 +108,9 @@ def __init__(
# advertiser_id is only required for order/campaign operations, not inventory sync
if not self.key_file and not self.service_account_json and not self.refresh_token:
- raise ValueError("GAM config requires either 'service_account_key_file', 'service_account_json', or 'refresh_token'")
+ raise ValueError(
+ "GAM config requires either 'service_account_key_file', 'service_account_json', or 'refresh_token'"
+ )
# Initialize modular components
if not self.dry_run:
@@ -314,7 +316,7 @@ def create_media_buy(
# Validate that advertiser_id and trafficker_id are configured
if not self.advertiser_id or not self.trafficker_id:
- error_msg = "GAM adapter is not fully configured for order creation. " "Missing required configuration: "
+ error_msg = "GAM adapter is not fully configured for order creation. Missing required configuration: "
missing = []
if not self.advertiser_id:
missing.append("advertiser_id (company_id)")
@@ -340,7 +342,8 @@ def create_media_buy(
from sqlalchemy import select
stmt = select(Product).filter_by(
- tenant_id=self.tenant_id, product_id=package.package_id # package_id is actually product_id
+ tenant_id=self.tenant_id,
+ product_id=package.package_id, # package_id is actually product_id
)
product = db_session.scalars(stmt).first()
if product:
@@ -379,11 +382,21 @@ def create_media_buy(
request, packages, start_time, end_time, media_buy_id
)
+ # Build package responses with package_ids (no line_item_ids yet - order not created)
+ package_responses = []
+ for package in packages:
+ package_responses.append(
+ {
+ "package_id": package.package_id,
+ }
+ )
+
if step_id:
return CreateMediaBuyResponse(
buyer_ref=request.buyer_ref,
media_buy_id=media_buy_id,
workflow_step_id=step_id,
+ packages=package_responses,
)
else:
error_msg = "Failed to create manual order workflow step"
@@ -391,6 +404,7 @@ def create_media_buy(
buyer_ref=request.buyer_ref,
media_buy_id=media_buy_id,
errors=[Error(code="workflow_creation_failed", message=error_msg)],
+ packages=package_responses,
)
# Automatic mode - create order directly
@@ -476,10 +490,21 @@ def create_media_buy(
step_id = self.workflow_manager.create_activation_workflow_step(order_id, packages)
+ # Build package responses with line_item_ids for creative association
+ package_responses = []
+ for package, line_item_id in zip(packages, line_item_ids, strict=False):
+ package_responses.append(
+ {
+ "package_id": package.package_id,
+ "platform_line_item_id": str(line_item_id), # GAM line item ID for creative association
+ }
+ )
+
return CreateMediaBuyResponse(
buyer_ref=request.buyer_ref,
media_buy_id=order_id,
workflow_step_id=step_id,
+ packages=package_responses,
)
# Build package responses with line_item_ids for creative association
@@ -518,9 +543,7 @@ def add_creative_assets(
# Validate that creatives manager is initialized
if not self.creatives_manager:
- error_msg = (
- "GAM adapter is not fully configured for creative operations. " "Missing required configuration: "
- )
+ error_msg = "GAM adapter is not fully configured for creative operations. Missing required configuration: "
missing = []
if not self.advertiser_id:
missing.append("advertiser_id (company_id)")
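Note: both branches above now return per-package responses: the manual-approval path emits only package_id (no line items exist yet), while the automatic path pairs each package with its created line item. The loop-and-append could equally be a comprehension; a sketch, assuming packages and line_item_ids are parallel lists as in the hunk:

    package_responses = [
        {"package_id": pkg.package_id, "platform_line_item_id": str(li_id)}
        for pkg, li_id in zip(packages, line_item_ids, strict=False)
    ]

strict=False tolerates partial creation (fewer line items than packages); strict=True would instead raise on a mismatch.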
diff --git a/src/adapters/kevel.py b/src/adapters/kevel.py
index c8ce6f3c6..d70276c83 100644
--- a/src/adapters/kevel.py
+++ b/src/adapters/kevel.py
@@ -276,7 +276,8 @@ def create_media_buy(
campaign_id = campaign_data["Id"]
self.audit_logger.log_success(f"Created Kevel Campaign ID: {campaign_id}")
- # Create flights for each package
+ # Create flights for each package and track flight IDs
+ package_responses = []
for package in packages:
flight_payload = {
"Name": package.name,
@@ -310,15 +311,35 @@ def create_media_buy(
flight_response = requests.post(f"{self.base_url}/flight", headers=self.headers, json=flight_payload)
flight_response.raise_for_status()
+ flight_data = flight_response.json()
+ flight_id = flight_data.get("Id")
+
+ # Build package response with package_id and the Kevel flight ID as platform_line_item_id

+ package_responses.append(
+ {
+ "package_id": package.package_id,
+ "platform_line_item_id": str(flight_id) if flight_id else None,
+ }
+ )
# Use the actual campaign ID from Kevel
media_buy_id = f"kevel_{campaign_id}"
+ # For dry_run, build package responses without flight IDs
+ if self.dry_run:
+ package_responses = []
+ for package in packages:
+ package_responses.append(
+ {
+ "package_id": package.package_id,
+ }
+ )
+
return CreateMediaBuyResponse(
+ buyer_ref=request.buyer_ref,
media_buy_id=media_buy_id,
- status="pending_activation",
- detail=f"Created Kevel campaign with {len(packages)} flight(s)",
creative_deadline=datetime.now() + timedelta(days=2),
+ packages=package_responses,
)
def add_creative_assets(
@@ -344,7 +365,7 @@ def add_creative_assets(
self.log(" Creative Payload: {")
self.log(f" 'Name': '{asset['name']}',")
self.log(
- f" 'Body': '',"
+ f' \'Body\': \'\','
)
self.log(f" 'Url': '{asset['click_url']}'")
self.log(" }")
diff --git a/src/adapters/mock_ad_server.py b/src/adapters/mock_ad_server.py
index 8984f29af..06c2a8c01 100644
--- a/src/adapters/mock_ad_server.py
+++ b/src/adapters/mock_ad_server.py
@@ -872,7 +872,7 @@ def _add_creative_assets_immediate(
if self.dry_run:
for i, asset in enumerate(assets):
self.log("Would call: MockAdServer.uploadCreative()")
- self.log(f" Creative {i+1}:")
+ self.log(f" Creative {i + 1}:")
self.log(f" 'creative_id': '{asset['id']}',")
self.log(f" 'name': '{asset['name']}',")
self.log(f" 'format': '{asset['format']}',")
@@ -1129,11 +1129,12 @@ def update_media_buy(
) -> UpdateMediaBuyResponse:
"""Update media buy in database (Mock adapter implementation)."""
import logging
+
from sqlalchemy import select
from sqlalchemy.orm import attributes
from src.core.database.database_session import get_db_session
- from src.core.database.models import MediaBuy, MediaPackage
+ from src.core.database.models import MediaPackage
logger = logging.getLogger(__name__)
@@ -1141,8 +1142,7 @@ def update_media_buy(
if action == "update_package_budget" and package_id and budget is not None:
# Update package budget in MediaPackage.package_config JSON
stmt = select(MediaPackage).where(
- MediaPackage.package_id == package_id,
- MediaPackage.media_buy_id == media_buy_id
+ MediaPackage.package_id == package_id, MediaPackage.media_buy_id == media_buy_id
)
media_package = session.scalars(stmt).first()
if media_package:
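Note: the hunk imports sqlalchemy.orm.attributes, which matters for the JSON update it performs: SQLAlchemy does not detect in-place mutation of a plain dict column, so the attribute must be flagged dirty explicitly or the commit emits no UPDATE. A sketch of that step (field and variable names assumed from context):

    from sqlalchemy.orm import attributes

    media_package.package_config["budget"] = new_budget        # in-place JSON edit
    attributes.flag_modified(media_package, "package_config")  # mark the column dirty
    session.commit()                                           # UPDATE is now emitted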
diff --git a/src/adapters/triton_digital.py b/src/adapters/triton_digital.py
index 9996cf1e1..12073b30e 100644
--- a/src/adapters/triton_digital.py
+++ b/src/adapters/triton_digital.py
@@ -200,7 +200,8 @@ def create_media_buy(
campaign_data = response.json()
campaign_id = campaign_data["id"]
- # Create flights for each package
+ # Create flights for each package and track flight IDs
+ package_responses = []
for package in packages:
flight_payload = {
"name": package.name,
@@ -223,15 +224,35 @@ def create_media_buy(
flight_response = requests.post(f"{self.base_url}/flights", headers=self.headers, json=flight_payload)
flight_response.raise_for_status()
+ flight_data = flight_response.json()
+ flight_id = flight_data.get("id")
+
+ # Build package response with package_id and the Triton flight ID as platform_line_item_id
+ package_responses.append(
+ {
+ "package_id": package.package_id,
+ "platform_line_item_id": str(flight_id) if flight_id else None,
+ }
+ )
# Use the actual campaign ID from Triton
media_buy_id = f"triton_{campaign_id}"
+ # For dry_run, build package responses without flight IDs
+ if self.dry_run:
+ package_responses = []
+ for package in packages:
+ package_responses.append(
+ {
+ "package_id": package.package_id,
+ }
+ )
+
return CreateMediaBuyResponse(
+ buyer_ref=request.buyer_ref,
media_buy_id=media_buy_id,
- status="pending_activation",
- detail=f"Created Triton campaign with {len(packages)} flight(s)",
creative_deadline=datetime.now() + timedelta(days=2),
+ packages=package_responses,
)
def add_creative_assets(
diff --git a/src/adapters/xandr.py b/src/adapters/xandr.py
index 74bcabb46..4ddd2927f 100644
--- a/src/adapters/xandr.py
+++ b/src/adapters/xandr.py
@@ -15,25 +15,18 @@
from src.core.schemas import (
CreateMediaBuyRequest,
CreateMediaBuyResponse,
- CreativeAsset,
- CreativeDelivery,
- DeliveryMetrics,
- HourlyDelivery,
- MediaBuyDeliveryData,
- MediaBuyDetails,
- MediaBuyStatus,
- PacingAnalysis,
- Package,
- PackageStatus,
- PerformanceAlert,
+ MediaPackage,
Principal,
Product,
- ReportingPeriod,
extract_budget_amount,
)
+# NOTE: The Xandr adapter needs a full refactor - it still uses old schemas and patterns.
+# The other methods (get_media_buy_status, get_media_buy_delivery, etc.) reference schemas
+# that no longer exist. Only create_media_buy has been updated to match the current API.
-# Import the actual MediaBuy model for this adapter's incorrect usage
+
+# Temporary stubs for old schemas until Xandr adapter is properly refactored
class MediaBuy:
"""Temporary stub for MediaBuy until xandr.py is properly refactored."""
@@ -42,6 +35,94 @@ def __init__(self, **kwargs):
setattr(self, k, v)
+class MediaBuyDetails:
+ """Temporary stub for MediaBuyDetails until xandr.py is properly refactored."""
+
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
+class MediaBuyStatus:
+ """Temporary stub for MediaBuyStatus until xandr.py is properly refactored."""
+
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
+class PackageStatus:
+ """Temporary stub for PackageStatus until xandr.py is properly refactored."""
+
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
+class MediaBuyDeliveryData:
+ """Temporary stub for MediaBuyDeliveryData until xandr.py is properly refactored."""
+
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
+class ReportingPeriod:
+ """Temporary stub for ReportingPeriod until xandr.py is properly refactored."""
+
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
+class HourlyDelivery:
+ """Temporary stub for HourlyDelivery until xandr.py is properly refactored."""
+
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
+class CreativeDelivery:
+ """Temporary stub for CreativeDelivery until xandr.py is properly refactored."""
+
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
+class PacingAnalysis:
+ """Temporary stub for PacingAnalysis until xandr.py is properly refactored."""
+
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
+class PerformanceAlert:
+ """Temporary stub for PerformanceAlert until xandr.py is properly refactored."""
+
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
+class DeliveryMetrics:
+ """Temporary stub for DeliveryMetrics until xandr.py is properly refactored."""
+
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
+class CreativeAsset:
+ """Temporary stub for CreativeAsset until xandr.py is properly refactored."""
+
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
logger = logging.getLogger(__name__)
@@ -246,7 +327,14 @@ def get_products(self) -> list[Product]:
logger.error(f"Error fetching Xandr products: {e}")
return []
- def create_media_buy(self, request: CreateMediaBuyRequest) -> CreateMediaBuyResponse:
+ def create_media_buy(
+ self,
+ request: CreateMediaBuyRequest,
+ packages: list[MediaPackage],
+ start_time: datetime,
+ end_time: datetime,
+ package_pricing_info: dict[str, dict] | None = None,
+ ) -> CreateMediaBuyResponse:
"""Create insertion order and line items in Xandr."""
if self._requires_manual_approval("create_media_buy"):
task_id = self._create_human_task(
@@ -254,38 +342,41 @@ def create_media_buy(self, request: CreateMediaBuyRequest) -> CreateMediaBuyResp
{"request": request.dict(), "principal": self.principal.name, "advertiser_id": self.advertiser_id},
)
- # Return pending status
- media_buy = MediaBuy(
+ # Build package responses
+ package_responses = []
+ for package in packages:
+ package_responses.append(
+ {
+ "package_id": package.package_id,
+ }
+ )
+
+ return CreateMediaBuyResponse(
+ buyer_ref=request.buyer_ref,
media_buy_id=f"xandr_pending_{task_id}",
- platform_id=f"pending_{task_id}",
- order_name=request.order_name,
- status="pending_approval",
- details=request,
+ packages=package_responses,
)
- response = CreateMediaBuyResponse(media_buy=media_buy, packages=[], manual_approval_required=True)
-
- return response
-
try:
+ # Extract total budget
+ total_budget, _ = extract_budget_amount(request.budget, request.currency or "USD")
+ days = (end_time.date() - start_time.date()).days
+ if days == 0:
+ days = 1
+
# Create insertion order
io_data = {
"insertion-order": {
- "name": request.order_name,
+ "name": request.campaign_name or f"AdCP Campaign {request.buyer_ref}",
"advertiser_id": int(self.advertiser_id),
- "start_date": request.flight_start_date.isoformat(),
- "end_date": request.flight_end_date.isoformat(),
+ "start_date": start_time.date().isoformat(),
+ "end_date": end_time.date().isoformat(),
"budget_intervals": [
{
- "start_date": request.flight_start_date.isoformat(),
- "end_date": request.flight_end_date.isoformat(),
- "daily_budget": float(
- extract_budget_amount(request.budget, request.currency or "USD")[0]
- / (request.flight_end_date - request.flight_start_date).days
- ),
- "lifetime_budget": float(
- extract_budget_amount(request.budget, request.currency or "USD")[0]
- ),
+ "start_date": start_time.date().isoformat(),
+ "end_date": end_time.date().isoformat(),
+ "daily_budget": float(total_budget / days),
+ "lifetime_budget": float(total_budget),
}
],
"currency": "USD",
@@ -296,66 +387,53 @@ def create_media_buy(self, request: CreateMediaBuyRequest) -> CreateMediaBuyResp
io_response = self._make_request("POST", "/insertion-order", io_data)
io_id = io_response["response"]["insertion-order"]["id"]
- packages = []
+ package_responses = []
# Create line items for each package
- if request.packages:
- for package_req in request.packages:
- li_data = {
- "line-item": {
- "name": package_req.name,
- "insertion_order_id": io_id,
- "advertiser_id": int(self.advertiser_id),
- "start_date": request.flight_start_date.isoformat(),
- "end_date": request.flight_end_date.isoformat(),
- "revenue_type": "cpm",
- "revenue_value": package_req.budget / package_req.impressions * 1000,
- "lifetime_budget": float(package_req.budget),
- "daily_budget": float(
- package_req.budget / (request.flight_end_date - request.flight_start_date).days
- ),
- "currency": "USD",
- "state": "inactive", # Start inactive
- "inventory_type": self._map_inventory_type(package_req.product_id),
- }
+ for package in packages:
+ li_data = {
+ "line-item": {
+ "name": package.name,
+ "insertion_order_id": io_id,
+ "advertiser_id": int(self.advertiser_id),
+ "start_date": start_time.date().isoformat(),
+ "end_date": end_time.date().isoformat(),
+ "revenue_type": "cpm",
+ "revenue_value": package.cpm,
+ "lifetime_budget": float(package.cpm * package.impressions / 1000),
+ "daily_budget": float(package.cpm * package.impressions / 1000 / days),
+ "currency": "USD",
+ "state": "inactive", # Start inactive
+ "inventory_type": "display",
}
+ }
- # Apply targeting
- if request.targeting_overlay:
- li_data["line-item"]["profile_id"] = self._create_targeting_profile(request.targeting_overlay)
-
- li_response = self._make_request("POST", "/line-item", li_data)
- li_id = li_response["response"]["line-item"]["id"]
-
- package = Package(
- package_id=f"xandr_li_{li_id}",
- platform_id=str(li_id),
- name=package_req.name,
- product_id=package_req.product_id,
- budget=package_req.budget,
- impressions=package_req.impressions,
- start_date=request.flight_start_date,
- end_date=request.flight_end_date,
- status=PackageStatus(state="inactive", is_editable=True, delivery_percentage=0.0),
- )
- packages.append(package)
+ # Apply targeting
+ if request.targeting_overlay:
+ li_data["line-item"]["profile_id"] = self._create_targeting_profile(request.targeting_overlay)
- media_buy = MediaBuy(
- media_buy_id=f"xandr_io_{io_id}",
- platform_id=str(io_id),
- order_name=request.order_name,
- status="inactive",
- details=request,
- )
+ li_response = self._make_request("POST", "/line-item", li_data)
+ li_id = li_response["response"]["line-item"]["id"]
- response = CreateMediaBuyResponse(media_buy=media_buy, packages=packages, manual_approval_required=False)
+ # Build package response with package_id and platform_line_item_id
+ package_responses.append(
+ {
+ "package_id": package.package_id,
+ "platform_line_item_id": str(li_id),
+ }
+ )
# Log the operation
self._log_operation(
- "create_media_buy", True, {"insertion_order_id": io_id, "line_item_count": len(packages)}
+ "create_media_buy", True, {"insertion_order_id": io_id, "line_item_count": len(package_responses)}
)
- return response
+ return CreateMediaBuyResponse(
+ buyer_ref=request.buyer_ref,
+ media_buy_id=f"xandr_io_{io_id}",
+ creative_deadline=datetime.now(UTC) + timedelta(days=2),
+ packages=package_responses,
+ )
except Exception as e:
logger.error(f"Failed to create Xandr media buy: {e}")
@@ -698,7 +776,7 @@ def pause_media_buy(self, media_buy_id: str) -> bool:
# Also pause all line items
li_response = self._make_request("GET", f"/line-item?insertion_order_id={io_id}")
for li in li_response["response"]["line-items"]:
- self._make_request("PUT", f'/line-item?id={li["id"]}', {"line-item": {"state": "inactive"}})
+ self._make_request("PUT", f"/line-item?id={li['id']}", {"line-item": {"state": "inactive"}})
return True
@@ -780,7 +858,7 @@ def update_package(self, media_buy_id: str, packages: list[dict[str, Any]]) -> d
# Remove existing associations
current_creatives = self._make_request("GET", f"/line-item/{li_id}/creative")
for creative in current_creatives.get("response", {}).get("creatives", []):
- self._make_request("DELETE", f'/line-item/{li_id}/creative/{creative["id"]}')
+ self._make_request("DELETE", f"/line-item/{li_id}/creative/{creative['id']}")
# Add new associations
for creative_id in package_update["creative_ids"]:
@@ -814,7 +892,7 @@ def resume_media_buy(self, media_buy_id: str) -> bool:
# Also resume all line items
li_response = self._make_request("GET", f"/line-item?insertion_order_id={io_id}")
for li in li_response["response"]["line-items"]:
- self._make_request("PUT", f'/line-item?id={li["id"]}', {"line-item": {"state": "active"}})
+ self._make_request("PUT", f"/line-item?id={li['id']}", {"line-item": {"state": "active"}})
return True
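Note: the eleven stubs added above are structurally identical **kwargs bags. Keeping them as separate classes makes each name greppable and individually replaceable during the planned refactor; if that weren't a goal, a factory could generate them. A sketch of that alternative (hypothetical, not part of this patch):

    def _make_stub(name: str) -> type:
        """Build a placeholder class that stores arbitrary keyword arguments."""
        def __init__(self, **kwargs):
            for k, v in kwargs.items():
                setattr(self, k, v)
        return type(name, (), {"__init__": __init__, "__doc__": f"Temporary stub for {name}."})

    MediaBuyDetails = _make_stub("MediaBuyDetails")
    PacingAnalysis = _make_stub("PacingAnalysis")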
diff --git a/src/admin/blueprints/authorized_properties.py b/src/admin/blueprints/authorized_properties.py
index a1d80289f..75d07165c 100644
--- a/src/admin/blueprints/authorized_properties.py
+++ b/src/admin/blueprints/authorized_properties.py
@@ -184,8 +184,8 @@ def _save_properties_batch(properties_data: list[dict[str, Any]], tenant_id: str
except Exception as e:
error_count += 1
- errors.append(f"Property {i+1}: {str(e)}")
- logger.error(f"Error processing property {i+1}: {e}")
+ errors.append(f"Property {i + 1}: {str(e)}")
+ logger.error(f"Error processing property {i + 1}: {e}")
# Commit all changes
if success_count > 0:
diff --git a/src/admin/blueprints/creatives.py b/src/admin/blueprints/creatives.py
index 03e9584af..baf18263f 100644
--- a/src/admin/blueprints/creatives.py
+++ b/src/admin/blueprints/creatives.py
@@ -333,15 +333,16 @@ def approve_creative(tenant_id, creative_id, **kwargs):
# Send webhook notification to principal
from src.core.database.models import PushNotificationConfig
- stmt_webhook = select(PushNotificationConfig).filter_by(
- tenant_id=tenant_id,
- principal_id=creative.principal_id,
- is_active=True
- ).order_by(PushNotificationConfig.created_at.desc())
+ stmt_webhook = (
+ select(PushNotificationConfig)
+ .filter_by(tenant_id=tenant_id, principal_id=creative.principal_id, is_active=True)
+ .order_by(PushNotificationConfig.created_at.desc())
+ )
webhook_config = db_session.scalars(stmt_webhook).first()
if webhook_config:
import requests
+
webhook_payload = {
"event": "creative_approved",
"creative_id": creative.creative_id,
@@ -480,15 +481,16 @@ def reject_creative(tenant_id, creative_id, **kwargs):
# Send webhook notification to principal
from src.core.database.models import PushNotificationConfig
- stmt_webhook = select(PushNotificationConfig).filter_by(
- tenant_id=tenant_id,
- principal_id=creative.principal_id,
- is_active=True
- ).order_by(PushNotificationConfig.created_at.desc())
+ stmt_webhook = (
+ select(PushNotificationConfig)
+ .filter_by(tenant_id=tenant_id, principal_id=creative.principal_id, is_active=True)
+ .order_by(PushNotificationConfig.created_at.desc())
+ )
webhook_config = db_session.scalars(stmt_webhook).first()
if webhook_config:
import requests
+
webhook_payload = {
"event": "creative_rejected",
"creative_id": creative.creative_id,
diff --git a/src/admin/blueprints/inventory.py b/src/admin/blueprints/inventory.py
index 67a8e22b1..3cf538692 100644
--- a/src/admin/blueprints/inventory.py
+++ b/src/admin/blueprints/inventory.py
@@ -197,7 +197,6 @@ def sync_orders(tenant_id):
"""Sync GAM orders for a tenant."""
try:
with get_db_session() as db_session:
-
tenant = db_session.scalars(select(Tenant).filter_by(tenant_id=tenant_id)).first()
if not tenant:
diff --git a/src/admin/blueprints/operations.py b/src/admin/blueprints/operations.py
index 64f90e00f..739fc1c9e 100644
--- a/src/admin/blueprints/operations.py
+++ b/src/admin/blueprints/operations.py
@@ -160,7 +160,15 @@ def media_buy_detail(tenant_id, media_buy_id):
from src.core.context_manager import ContextManager
from src.core.database.database_session import get_db_session
- from src.core.database.models import Creative, CreativeAssignment, MediaBuy, MediaPackage, Principal, Product, WorkflowStep
+ from src.core.database.models import (
+ Creative,
+ CreativeAssignment,
+ MediaBuy,
+ MediaPackage,
+ Principal,
+ Product,
+ WorkflowStep,
+ )
try:
with get_db_session() as db_session:
@@ -190,10 +198,12 @@ def media_buy_detail(tenant_id, media_buy_id):
stmt = select(Product).filter_by(tenant_id=tenant_id, product_id=product_id)
product = db_session.scalars(stmt).first()
- packages.append({
- "package": media_pkg,
- "product": product,
- })
+ packages.append(
+ {
+ "package": media_pkg,
+ "product": product,
+ }
+ )
# Get creative assignments for this media buy
stmt = (
@@ -233,6 +243,7 @@ def media_buy_detail(tenant_id, media_buy_id):
# Get computed readiness state (not just raw database status)
from src.admin.services.media_buy_readiness_service import MediaBuyReadinessService
+
readiness = MediaBuyReadinessService.get_readiness_state(media_buy_id, tenant_id, db_session)
computed_state = readiness["state"]
@@ -333,8 +344,7 @@ def approve_media_buy(tenant_id, media_buy_id, **kwargs):
from src.core.database.models import Creative, CreativeAssignment
stmt_assignments = select(CreativeAssignment).filter_by(
- tenant_id=tenant_id,
- media_buy_id=media_buy_id
+ tenant_id=tenant_id, media_buy_id=media_buy_id
)
assignments = db_session.scalars(stmt_assignments).all()
@@ -342,8 +352,7 @@ def approve_media_buy(tenant_id, media_buy_id, **kwargs):
if assignments:
creative_ids = [a.creative_id for a in assignments]
stmt_creatives = select(Creative).filter(
- Creative.tenant_id == tenant_id,
- Creative.creative_id.in_(creative_ids)
+ Creative.tenant_id == tenant_id, Creative.creative_id.in_(creative_ids)
)
creatives = db_session.scalars(stmt_creatives).all()
@@ -369,15 +378,16 @@ def approve_media_buy(tenant_id, media_buy_id, **kwargs):
db_session.commit()
# Send webhook notification to buyer
- stmt_webhook = select(PushNotificationConfig).filter_by(
- tenant_id=tenant_id,
- principal_id=media_buy.principal_id,
- is_active=True
- ).order_by(PushNotificationConfig.created_at.desc())
+ stmt_webhook = (
+ select(PushNotificationConfig)
+ .filter_by(tenant_id=tenant_id, principal_id=media_buy.principal_id, is_active=True)
+ .order_by(PushNotificationConfig.created_at.desc())
+ )
webhook_config = db_session.scalars(stmt_webhook).first()
if webhook_config:
import requests
+
webhook_payload = {
"event": "media_buy_approved",
"media_buy_id": media_buy_id,
diff --git a/src/admin/blueprints/principals.py b/src/admin/blueprints/principals.py
index c9d9c056e..3dd15968b 100644
--- a/src/admin/blueprints/principals.py
+++ b/src/admin/blueprints/principals.py
@@ -366,6 +366,7 @@ def get_gam_advertisers(tenant_id):
# Use build_gam_config_from_adapter to handle both OAuth and service account
from src.adapters.gam import build_gam_config_from_adapter
+
gam_config = build_gam_config_from_adapter(tenant.adapter_config)
adapter = GoogleAdManager(
diff --git a/src/admin/blueprints/products.py b/src/admin/blueprints/products.py
index 6ff90ec5b..24a55ef80 100644
--- a/src/admin/blueprints/products.py
+++ b/src/admin/blueprints/products.py
@@ -317,7 +317,9 @@ def list_products(tenant_id):
formats_data = (
product.formats
if isinstance(product.formats, list)
- else json.loads(product.formats) if product.formats else []
+ else json.loads(product.formats)
+ if product.formats
+ else []
)
# Debug: Log raw formats data
@@ -437,12 +439,16 @@ def list_products(tenant_id):
"countries": (
product.countries
if isinstance(product.countries, list)
- else json.loads(product.countries) if product.countries else []
+ else json.loads(product.countries)
+ if product.countries
+ else []
),
"implementation_config": (
product.implementation_config
if isinstance(product.implementation_config, dict)
- else json.loads(product.implementation_config) if product.implementation_config else {}
+ else json.loads(product.implementation_config)
+ if product.implementation_config
+ else {}
),
"created_at": product.created_at if hasattr(product, "created_at") else None,
}
@@ -1095,17 +1101,23 @@ def edit_product(tenant_id, product_id):
"formats": (
product.formats
if isinstance(product.formats, list)
- else json.loads(product.formats) if product.formats else []
+ else json.loads(product.formats)
+ if product.formats
+ else []
),
"countries": (
product.countries
if isinstance(product.countries, list)
- else json.loads(product.countries) if product.countries else []
+ else json.loads(product.countries)
+ if product.countries
+ else []
),
"implementation_config": (
product.implementation_config
if isinstance(product.implementation_config, dict)
- else json.loads(product.implementation_config) if product.implementation_config else {}
+ else json.loads(product.implementation_config)
+ if product.implementation_config
+ else {}
),
}
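Note: the reflowed three-way ternary (already deserialized → use as-is; truthy string → json.loads; else empty default) recurs six times across these two views. A small helper would state the rule once; a sketch of such a helper (name hypothetical, not in the patch):

    import json

    def _coerce_json(value, default):
        """Return value if already deserialized, parse a JSON string, else the default."""
        if isinstance(value, type(default)):
            return value
        return json.loads(value) if value else default

    formats_data = _coerce_json(product.formats, [])
    config = _coerce_json(product.implementation_config, {})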
diff --git a/src/admin/sync_api.py b/src/admin/sync_api.py
index 4de881a47..8bd401942 100644
--- a/src/admin/sync_api.py
+++ b/src/admin/sync_api.py
@@ -179,24 +179,12 @@ def trigger_sync(tenant_id: str):
# Use the sync manager to perform the sync
if sync_type == "full":
# Pass custom targeting limit to prevent timeouts
- result = adapter.sync_full(
- db_session,
- force=force,
- custom_targeting_limit=custom_targeting_limit
- )
+ result = adapter.sync_full(db_session, force=force, custom_targeting_limit=custom_targeting_limit)
elif sync_type == "inventory":
- result = adapter.sync_inventory(
- db_session,
- force=force,
- custom_targeting_limit=custom_targeting_limit
- )
+ result = adapter.sync_inventory(db_session, force=force, custom_targeting_limit=custom_targeting_limit)
elif sync_type == "targeting":
# Targeting sync can be mapped to inventory sync for now
- result = adapter.sync_inventory(
- db_session,
- force=force,
- custom_targeting_limit=custom_targeting_limit
- )
+ result = adapter.sync_inventory(db_session, force=force, custom_targeting_limit=custom_targeting_limit)
elif sync_type == "selective":
# Selective sync - only sync specified inventory types
if not sync_types:
@@ -205,7 +193,7 @@ def trigger_sync(tenant_id: str):
db_session,
sync_types=sync_types,
custom_targeting_limit=custom_targeting_limit,
- audience_segment_limit=audience_segment_limit
+ audience_segment_limit=audience_segment_limit,
)
else:
raise ValueError(f"Unsupported sync type: {sync_type}")
@@ -483,7 +471,6 @@ def sync_tenant_orders(tenant_id):
"""Trigger orders and line items sync for a tenant."""
db_session.remove() # Clean start
try:
-
# Get tenant and adapter config
stmt = select(Tenant).filter_by(tenant_id=tenant_id)
tenant = db_session.scalars(stmt).first()
@@ -600,7 +587,7 @@ def get_tenant_orders(tenant_id):
# Validate status is one of allowed values
valid_statuses = ["DRAFT", "PENDING_APPROVAL", "APPROVED", "PAUSED", "CANCELED", "DELETED"]
if status not in valid_statuses:
- return jsonify({"error": f'Invalid status. Must be one of: {", ".join(valid_statuses)}'}), 400
+ return jsonify({"error": f"Invalid status. Must be one of: {', '.join(valid_statuses)}"}), 400
filters["status"] = status
advertiser_id = request.args.get("advertiser_id")
diff --git a/src/admin/tenant_management_api.py b/src/admin/tenant_management_api.py
index 8a20f1893..ffafac5ef 100644
--- a/src/admin/tenant_management_api.py
+++ b/src/admin/tenant_management_api.py
@@ -161,8 +161,7 @@ def create_tenant():
# Auto-add creator as fallback with warning
email_list.append(creator_email)
logger.warning(
- f"No access control specified for tenant {data['name']}, "
- f"auto-adding creator {creator_email}"
+ f"No access control specified for tenant {data['name']}, auto-adding creator {creator_email}"
)
else:
return (
diff --git a/src/admin/tests/integration/test_admin_app.py b/src/admin/tests/integration/test_admin_app.py
index 1ce55e968..e3949b431 100644
--- a/src/admin/tests/integration/test_admin_app.py
+++ b/src/admin/tests/integration/test_admin_app.py
@@ -182,9 +182,7 @@ def test_tenant_dashboard(self, mock_get_db_session, mock_require_tenant_access,
mock_session.query.return_value.filter_by.return_value.first.return_value = mock_tenant
mock_session.query.return_value.filter_by.return_value.count.return_value = 0
mock_session.query.return_value.filter_by.return_value.filter.return_value.all.return_value = []
- mock_session.query.return_value.join.return_value.filter.return_value.order_by.return_value.limit.return_value.all.return_value = (
- []
- )
+ mock_session.query.return_value.join.return_value.filter.return_value.order_by.return_value.limit.return_value.all.return_value = []
response = client.get("/tenant/tenant_123")
# Will redirect due to decorator, but shows route exists
diff --git a/src/core/config_loader.py b/src/core/config_loader.py
index d0604992c..837a8dea5 100644
--- a/src/core/config_loader.py
+++ b/src/core/config_loader.py
@@ -186,6 +186,7 @@ def get_tenant_by_id(tenant_id: str) -> dict[str, Any] | None:
def get_tenant_by_virtual_host(virtual_host: str) -> dict[str, Any] | None:
"""Get tenant by virtual host."""
from rich.console import Console
+
console = Console()
console.print(f"[blue]🔍 get_tenant_by_virtual_host called with: {virtual_host}[/blue]")
@@ -198,13 +199,16 @@ def get_tenant_by_virtual_host(virtual_host: str) -> dict[str, Any] | None:
if tenant:
from src.core.utils.tenant_utils import serialize_tenant_to_dict
+
result = serialize_tenant_to_dict(tenant)
- console.print(f"[green] ✅ Found tenant: {result.get('tenant_id')} (subdomain: {result.get('subdomain')})[/green]")
+ console.print(
+ f"[green] ✅ Found tenant: {result.get('tenant_id')} (subdomain: {result.get('subdomain')})[/green]"
+ )
return result
console.print(f"[yellow] ⚠️ No tenant found with virtual_host={virtual_host}[/yellow]")
# Debug: Check what tenants exist
- all_tenants_stmt = select(Tenant).where(Tenant.is_active == True)
+ all_tenants_stmt = select(Tenant).where(Tenant.is_active)
all_tenants = db_session.scalars(all_tenants_stmt).all()
console.print(f"[blue] Total active tenants in database: {len(all_tenants)}[/blue]")
for t in all_tenants:
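Note: the Tenant.is_active == True → Tenant.is_active change silences the E712 lint without changing the SQL, since SQLAlchemy accepts a bare boolean column as a criterion. When NULL handling should be explicit, .is_(True) is the spelled-out form; both shown for comparison:

    from sqlalchemy import select

    select(Tenant).where(Tenant.is_active)            # WHERE tenant.is_active
    select(Tenant).where(Tenant.is_active.is_(True))  # WHERE tenant.is_active IS true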
diff --git a/src/core/context_manager.py b/src/core/context_manager.py
index b619f7847..b70047df3 100644
--- a/src/core/context_manager.py
+++ b/src/core/context_manager.py
@@ -552,9 +552,7 @@ def link_workflow_to_object(
)
session.add(obj_mapping)
session.commit()
- console.print(
- f"[green]✅ Linked {object_type} {object_id} to workflow step {step_id}[/green]"
- )
+ console.print(f"[green]✅ Linked {object_type} {object_id} to workflow step {step_id}[/green]")
except Exception as e:
session.rollback()
console.print(f"[red]Failed to link object to workflow: {e}[/red]")
diff --git a/src/core/database/db_config.py b/src/core/database/db_config.py
index 79858398a..05cf83bcf 100644
--- a/src/core/database/db_config.py
+++ b/src/core/database/db_config.py
@@ -39,8 +39,7 @@ def _parse_database_url(url: str) -> dict[str, Any]:
if parsed.scheme not in ["postgres", "postgresql"]:
raise ValueError(
- f"Unsupported database scheme: {parsed.scheme}. "
- f"Only PostgreSQL is supported. Use 'postgresql://' URLs."
+ f"Unsupported database scheme: {parsed.scheme}. Only PostgreSQL is supported. Use 'postgresql://' URLs."
)
return {
@@ -64,10 +63,7 @@ def get_connection_string() -> str:
else:
auth = config["user"]
- return (
- f"postgresql://{auth}@{config['host']}:{config['port']}"
- f"/{config['database']}?sslmode={config['sslmode']}"
- )
+ return f"postgresql://{auth}@{config['host']}:{config['port']}/{config['database']}?sslmode={config['sslmode']}"
class DatabaseConnection:
diff --git a/src/core/database/models.py b/src/core/database/models.py
index 2c5086010..229b16afc 100644
--- a/src/core/database/models.py
+++ b/src/core/database/models.py
@@ -510,6 +510,7 @@ class MediaPackage(Base):
Stores packages separately from MediaBuy.raw_request for efficient lookups
by package_id, which is needed for creative assignments.
"""
+
__tablename__ = "media_packages"
media_buy_id: Mapped[str] = mapped_column(
diff --git a/src/core/main.py b/src/core/main.py
index a69e29b52..b2fc8ab94 100644
--- a/src/core/main.py
+++ b/src/core/main.py
@@ -240,7 +240,7 @@ def format_validation_error(validation_error: ValidationError, context: str = "r
)
elif "string_type" in error_type:
error_details.append(
- f" • {field_path}: Expected string, got {type(input_val).__name__}. " f"Please provide a string value."
+ f" • {field_path}: Expected string, got {type(input_val).__name__}. Please provide a string value."
)
elif "missing" in error_type:
error_details.append(f" • {field_path}: Required field is missing")
@@ -867,9 +867,7 @@ def load_tasks_from_db():
# --- Adapter Configuration ---
# Get adapter from config, fallback to mock
-SELECTED_ADAPTER = (
- (config.get("ad_server", {}).get("adapter") or "mock") if config else "mock"
-).lower() # noqa: F841 - used below for adapter selection
+SELECTED_ADAPTER = ((config.get("ad_server", {}).get("adapter") or "mock") if config else "mock").lower() # noqa: F841 - used below for adapter selection
AVAILABLE_ADAPTERS = ["mock", "gam", "kevel", "triton", "triton_digital"]
# --- In-Memory State (already initialized above, just adding context_map) ---
@@ -1285,7 +1283,7 @@ async def _get_products_impl(req: GetProductsRequestGenerated, context: Context)
logger.error(f"[GET_PRODUCTS] Principal found but no tenant context: principal_id={principal_id}")
print("❌ [GET_PRODUCTS DEBUG] Principal found but no tenant context", flush=True)
raise ToolError(
- f"Authentication succeeded but tenant context missing. " f"This is a bug. principal_id={principal_id}"
+ f"Authentication succeeded but tenant context missing. This is a bug. principal_id={principal_id}"
)
# else: No auth provided, which is OK for discovery endpoints
@@ -4282,8 +4280,7 @@ def _validate_pricing_model_selection(
if bid_decimal < floor_price:
raise ToolError(
"PRICING_ERROR",
- f"Bid price {package.bid_price} is below floor price {floor_price} "
- f"for {package.pricing_model} pricing",
+ f"Bid price {package.bid_price} is below floor price {floor_price} for {package.pricing_model} pricing",
)
# Validate fixed pricing has rate
@@ -4959,7 +4956,7 @@ async def _create_media_buy_impl(
ctx_manager.update_workflow_step(
step.step_id,
status="requires_approval",
- add_comment={"user": "system", "comment": "Manual approval required for media buy creation"}
+ add_comment={"user": "system", "comment": "Manual approval required for media buy creation"},
)
# Workflow step already created above - no need for separate task
@@ -5019,6 +5016,7 @@ async def _create_media_buy_impl(
# Generate permanent package ID using product_id and index
# Format: pkg_{product_id}_{timestamp_part}_{idx}
import secrets
+
package_id = f"pkg_{pkg.product_id}_{secrets.token_hex(4)}_{idx}"
# Use product_id or buyer_ref for package name since Package schema doesn't have 'name'
@@ -5038,13 +5036,15 @@ async def _create_media_buy_impl(
pkg_dict = {}
# Build response with complete package data (matching auto-approval path)
- pending_packages.append({
- **pkg_dict, # Include all package fields (budget, targeting_overlay, creative_ids, etc.)
- "package_id": package_id,
- "name": pkg_name,
- "buyer_ref": pkg.buyer_ref, # Include buyer_ref from request package
- "status": TaskStatus.INPUT_REQUIRED, # Consistent with TaskStatus enum (requires approval)
- })
+ pending_packages.append(
+ {
+ **pkg_dict, # Include all package fields (budget, targeting_overlay, creative_ids, etc.)
+ "package_id": package_id,
+ "name": pkg_name,
+ "buyer_ref": pkg.buyer_ref, # Include buyer_ref from request package
+ "status": TaskStatus.INPUT_REQUIRED, # Consistent with TaskStatus enum (requires approval)
+ }
+ )
# Update the package in raw_request with the generated package_id so UI can find it
raw_request_dict["packages"][idx - 1]["package_id"] = package_id
@@ -5087,13 +5087,17 @@ async def _create_media_buy_impl(
# Add full package data from raw_request
for idx, req_pkg in enumerate(req.packages):
if idx == pending_packages.index(pkg_data):
- package_config.update({
- "product_id": req_pkg.product_id,
- "budget": req_pkg.budget.model_dump() if req_pkg.budget else None,
- "targeting_overlay": req_pkg.targeting_overlay.model_dump() if req_pkg.targeting_overlay else None,
- "creative_ids": req_pkg.creative_ids,
- "format_ids_to_provide": req_pkg.format_ids_to_provide,
- })
+ package_config.update(
+ {
+ "product_id": req_pkg.product_id,
+ "budget": req_pkg.budget.model_dump() if req_pkg.budget else None,
+ "targeting_overlay": req_pkg.targeting_overlay.model_dump()
+ if req_pkg.targeting_overlay
+ else None,
+ "creative_ids": req_pkg.creative_ids,
+ "format_ids_to_provide": req_pkg.format_ids_to_provide,
+ }
+ )
break
db_package = DBMediaPackage(
@@ -5109,11 +5113,9 @@ async def _create_media_buy_impl(
# Link the workflow step to the media buy so the approval button shows in UI
with get_db_session() as session:
from src.core.database.models import ObjectWorkflowMapping
+
mapping = ObjectWorkflowMapping(
- object_type="media_buy",
- object_id=media_buy_id,
- step_id=step.step_id,
- action="create"
+ object_type="media_buy", object_id=media_buy_id, step_id=step.step_id, action="create"
)
session.add(mapping)
session.commit()
@@ -5207,6 +5209,7 @@ async def _create_media_buy_impl(
for idx, pkg in enumerate(req.packages, 1):
# Generate permanent package ID
import secrets
+
package_id = f"pkg_{pkg.product_id}_{secrets.token_hex(4)}_{idx}"
# Serialize the full package to include all fields (budget, targeting, etc.)
@@ -5219,13 +5222,15 @@ async def _create_media_buy_impl(
pkg_dict = {}
# Build response with complete package data (matching auto-approval path)
- response_packages.append({
- **pkg_dict, # Include all package fields (budget, targeting_overlay, creative_ids, etc.)
- "package_id": package_id,
- "name": f"{pkg.product_id} - Package {idx}",
- "buyer_ref": pkg.buyer_ref, # Include buyer_ref from request
- "status": TaskStatus.INPUT_REQUIRED, # Consistent with TaskStatus enum (requires approval)
- })
+ response_packages.append(
+ {
+ **pkg_dict, # Include all package fields (budget, targeting_overlay, creative_ids, etc.)
+ "package_id": package_id,
+ "name": f"{pkg.product_id} - Package {idx}",
+ "buyer_ref": pkg.buyer_ref, # Include buyer_ref from request
+ "status": TaskStatus.INPUT_REQUIRED, # Consistent with TaskStatus enum (requires approval)
+ }
+ )
# Send Slack notification for configuration-based approval requirement
try:
@@ -5397,15 +5402,16 @@ def format_display(url: str | None, fid: str) -> str:
# Generate permanent package ID (not product_id)
import secrets
+
package_id = f"pkg_{product.product_id}_{secrets.token_hex(4)}_{idx}"
# Get buyer_ref and budget from matching request package if available
buyer_ref = None
budget = None
if matching_package:
- if hasattr(matching_package, 'buyer_ref'):
+ if hasattr(matching_package, "buyer_ref"):
buyer_ref = matching_package.buyer_ref
- if hasattr(matching_package, 'budget'):
+ if hasattr(matching_package, "budget"):
budget = matching_package.budget
packages.append(
@@ -5416,7 +5422,9 @@ def format_display(url: str | None, fid: str) -> str:
cpm=cpm,
impressions=int(total_budget / cpm * 1000),
format_ids=format_ids_to_use,
- targeting_overlay=matching_package.targeting_overlay if matching_package and hasattr(matching_package, 'targeting_overlay') else None,
+ targeting_overlay=matching_package.targeting_overlay
+ if matching_package and hasattr(matching_package, "targeting_overlay")
+ else None,
buyer_ref=buyer_ref,
product_id=product.product_id, # Include product_id
budget=budget, # Include budget from request
@@ -5438,7 +5446,9 @@ def format_display(url: str | None, fid: str) -> str:
# Pass package_pricing_info for pricing model support (AdCP PR #88)
try:
response = adapter.create_media_buy(req, packages, start_time, end_time, package_pricing_info)
- logger.info(f"[DEBUG] create_media_buy: Adapter returned response with {len(response.packages) if response.packages else 0} packages")
+ logger.info(
+ f"[DEBUG] create_media_buy: Adapter returned response with {len(response.packages) if response.packages else 0} packages"
+ )
if response.packages:
for i, pkg in enumerate(response.packages):
logger.info(f"[DEBUG] create_media_buy: Response package {i} = {pkg}")
@@ -5501,7 +5511,9 @@ def format_display(url: str | None, fid: str) -> str:
logger.info(f"[DEBUG] Package {i}: resp_package.get('package_id') = {package_id}")
if not package_id:
- error_msg = f"Adapter did not return package_id for package {i}. This is a critical bug in the adapter."
+ error_msg = (
+ f"Adapter did not return package_id for package {i}. This is a critical bug in the adapter."
+ )
logger.error(error_msg)
raise ValueError(error_msg)
@@ -5528,7 +5540,9 @@ def format_display(url: str | None, fid: str) -> str:
session.add(db_package)
session.commit()
- logger.info(f"Saved {len(packages_to_save)} packages to media_packages table for media_buy {response.media_buy_id}")
+ logger.info(
+ f"Saved {len(packages_to_save)} packages to media_packages table for media_buy {response.media_buy_id}"
+ )
# Handle creative_ids in packages if provided (immediate association)
if req.packages:
@@ -5560,10 +5574,7 @@ def format_display(url: str | None, fid: str) -> str:
error_msg = f"Creative IDs not found: {', '.join(sorted(missing_ids))}"
logger.error(error_msg)
ctx_manager.update_workflow_step(step.step_id, status="failed", error_message=error_msg)
- raise ToolError(
- "CREATIVES_NOT_FOUND",
- error_msg
- )
+ raise ToolError("CREATIVES_NOT_FOUND", error_msg)
for i, package in enumerate(req.packages):
if package.creative_ids:
@@ -5686,7 +5697,9 @@ def format_display(url: str | None, fid: str) -> str:
# Start with adapter response package (has package_id)
if i < len(adapter_packages):
# Get package_id and other fields from adapter response
- response_package_dict = adapter_packages[i] if isinstance(adapter_packages[i], dict) else adapter_packages[i].model_dump()
+ response_package_dict = (
+ adapter_packages[i] if isinstance(adapter_packages[i], dict) else adapter_packages[i].model_dump()
+ )
else:
# Fallback if adapter didn't return enough packages
logger.warning(f"Adapter returned fewer packages than request. Using request package {i}")
@@ -5730,7 +5743,7 @@ def format_display(url: str | None, fid: str) -> str:
package_status = TaskStatus.WORKING
if package.creative_ids and len(package.creative_ids) > 0:
package_status = TaskStatus.COMPLETED
- elif hasattr(package, 'format_ids_to_provide') and package.format_ids_to_provide:
+ elif hasattr(package, "format_ids_to_provide") and package.format_ids_to_provide:
package_status = TaskStatus.WORKING
# Add status
@@ -6276,6 +6289,7 @@ def _update_media_buy_impl(
if pkg_update.budget:
# Extract budget amount - handle both Budget object and legacy float
from src.core.schemas import Budget, extract_budget_amount
+
if isinstance(pkg_update.budget, Budget):
pkg_budget_amount, _ = extract_budget_amount(pkg_update.budget, request_currency)
else:
@@ -6373,12 +6387,7 @@ def _update_media_buy_impl(
req._affected_packages.append(
{
"buyer_package_ref": pkg_update.package_id,
- "changes_applied": {
- "budget": {
- "updated": budget_amount,
- "currency": currency
- }
- },
+ "changes_applied": {"budget": {"updated": budget_amount, "currency": currency}},
}
)
@@ -6404,16 +6413,14 @@ def _update_media_buy_impl(
with get_db_session() as session:
# Resolve media_buy_id (might be buyer_ref)
mb_stmt = select(MediaBuyModel).where(
- MediaBuyModel.media_buy_id == req.media_buy_id,
- MediaBuyModel.tenant_id == tenant["tenant_id"]
+ MediaBuyModel.media_buy_id == req.media_buy_id, MediaBuyModel.tenant_id == tenant["tenant_id"]
)
media_buy_obj = session.scalars(mb_stmt).first()
# Try buyer_ref if not found
if not media_buy_obj:
mb_stmt = select(MediaBuyModel).where(
- MediaBuyModel.buyer_ref == req.media_buy_id,
- MediaBuyModel.tenant_id == tenant["tenant_id"]
+ MediaBuyModel.buyer_ref == req.media_buy_id, MediaBuyModel.tenant_id == tenant["tenant_id"]
)
media_buy_obj = session.scalars(mb_stmt).first()
@@ -6534,6 +6541,7 @@ def _update_media_buy_impl(
# Persist top-level budget update to database
from sqlalchemy import update
+
from src.core.database.models import MediaBuy
with get_db_session() as db_session:
@@ -6544,13 +6552,15 @@ def _update_media_buy_impl(
)
db_session.execute(stmt)
db_session.commit()
- logger.info(f"[update_media_buy] Updated MediaBuy {req.media_buy_id} budget to {total_budget} {currency}")
-
+ logger.info(
+ f"[update_media_buy] Updated MediaBuy {req.media_buy_id} budget to {total_budget} {currency}"
+ )
+
# Track top-level budget update in affected_packages
# When top-level budget changes, all packages are affected
if not hasattr(req, "_affected_packages"):
req._affected_packages = []
-
+
# Get all packages for this media buy to report them as affected
if hasattr(existing_req, "packages") and existing_req.packages:
for pkg in existing_req.packages:
@@ -6559,12 +6569,7 @@ def _update_media_buy_impl(
req._affected_packages.append(
{
"buyer_package_ref": package_ref,
- "changes_applied": {
- "budget": {
- "updated": total_budget,
- "currency": currency
- }
- },
+ "changes_applied": {"budget": {"updated": total_budget, "currency": currency}},
}
)
@@ -8059,7 +8064,7 @@ async def handle_landing_page(request: Request):
content=f"""
This is a sales agent for advertising inventory.
Domain: {apx_host}
@@ -8094,7 +8099,7 @@ async def handle_landing_page(request: Request):
        content=f"""
-Subdomain: {apx_host}
diff --git a/src/core/mcp_context_wrapper.py b/src/core/mcp_context_wrapper.py
index 53105648a..61e69304b 100644
--- a/src/core/mcp_context_wrapper.py
+++ b/src/core/mcp_context_wrapper.py
@@ -189,7 +189,7 @@ def _create_tool_context(self, fastmcp_context: FastMCPContext, tool_name: str)
if not principal_id:
# Determine if header is missing or just invalid
if auth_header == "NOT_PRESENT":
- raise ValueError(f"Missing x-adcp-auth header. " f"Apx-Incoming-Host: {apx_host}")
+ raise ValueError(f"Missing x-adcp-auth header. Apx-Incoming-Host: {apx_host}")
else:
# Header present but invalid (token not found in DB)
raise ValueError(
@@ -200,9 +200,7 @@ def _create_tool_context(self, fastmcp_context: FastMCPContext, tool_name: str)
# Set tenant context (tenant was returned from get_principal_with_tenant)
if not tenant:
- raise ValueError(
- f"No tenant context available. " f"Principal: {principal_id}, " f"Apx-Incoming-Host: {apx_host}"
- )
+ raise ValueError(f"No tenant context available. Principal: {principal_id}, Apx-Incoming-Host: {apx_host}")
# Set the tenant context in the ContextVar
set_current_tenant(tenant)
diff --git a/src/core/schemas.py b/src/core/schemas.py
index d882c37e4..83c549e9e 100644
--- a/src/core/schemas.py
+++ b/src/core/schemas.py
@@ -1384,7 +1384,7 @@ def format_id(self) -> str:
This property will be removed in a future version.
"""
warnings.warn(
- "format_id is deprecated and will be removed in a future version. " "Use format instead.",
+ "format_id is deprecated and will be removed in a future version. Use format instead.",
DeprecationWarning,
stacklevel=2,
)
@@ -1398,7 +1398,7 @@ def content_uri(self) -> str:
This property will be removed in a future version.
"""
warnings.warn(
- "content_uri is deprecated and will be removed in a future version. " "Use url instead.",
+ "content_uri is deprecated and will be removed in a future version. Use url instead.",
DeprecationWarning,
stacklevel=2,
)
@@ -1412,7 +1412,7 @@ def click_through_url(self) -> str | None:
This property will be removed in a future version.
"""
warnings.warn(
- "click_through_url is deprecated and will be removed in a future version. " "Use click_url instead.",
+ "click_through_url is deprecated and will be removed in a future version. Use click_url instead.",
DeprecationWarning,
stacklevel=2,
)
@@ -2657,8 +2657,8 @@ class MediaPackage(BaseModel):
impressions: int
format_ids: list[FormatId]  # FormatId objects per AdCP spec
targeting_overlay: Optional["Targeting"] = None
- buyer_ref: Optional[str] = None  # Optional buyer reference from request package
- product_id: Optional[str] = None  # Product ID for this package
+ buyer_ref: str | None = None  # Optional buyer reference from request package
+ product_id: str | None = None  # Product ID for this package
budget: Optional["Budget"] = None  # Budget information from request
@@ -3070,7 +3070,7 @@ def signal_id(self) -> str:
This property will be removed in a future version.
"""
warnings.warn(
- "signal_id is deprecated and will be removed in a future version. " "Use signal_agent_segment_id instead.",
+ "signal_id is deprecated and will be removed in a future version. Use signal_agent_segment_id instead.",
DeprecationWarning,
stacklevel=2,
)
@@ -3084,7 +3084,7 @@ def type(self) -> str:
This property will be removed in a future version.
"""
warnings.warn(
- "type is deprecated and will be removed in a future version. " "Use signal_type instead.",
+ "type is deprecated and will be removed in a future version. Use signal_type instead.",
DeprecationWarning,
stacklevel=2,
)
diff --git a/src/core/tools.py b/src/core/tools.py
index d29de0703..92c1fc6d2 100644
--- a/src/core/tools.py
+++ b/src/core/tools.py
@@ -22,6 +22,19 @@
get_current_tenant,
)
+# Import all implementation functions from main.py at the top
+from src.core.main import (
+ _create_media_buy_impl,
+ _get_media_buy_delivery_impl,
+ _get_products_impl,
+ _list_authorized_properties_impl,
+ _list_creative_formats_impl,
+ _list_creatives_impl,
+ _sync_creatives_impl,
+ _update_media_buy_impl,
+ update_performance_index,  # Note: This one doesn't follow _impl pattern yet
+)
+
# Schema models (explicit imports to avoid collisions)
# Using adapters for models that need to stay in sync with AdCP spec
from src.core.schema_adapters import (
@@ -41,19 +54,6 @@
GetSignalsRequest,
)
-# Import all implementation functions from main.py at the top
-from src.core.main import (
- _create_media_buy_impl,
- _get_media_buy_delivery_impl,
- _get_products_impl,
- _list_authorized_properties_impl,
- _list_creative_formats_impl,
- _list_creatives_impl,
- _sync_creatives_impl,
- _update_media_buy_impl,
- update_performance_index,  # Note: This one doesn't follow _impl pattern yet
-)
-
def get_principal_from_context(context: Context | None) -> str | None:
"""Extract principal ID from the FastMCP context or ToolContext.
diff --git a/src/services/ai_product_service.py b/src/services/ai_product_service.py
index d57a64a5a..3d0ccfad7 100644
--- a/src/services/ai_product_service.py
+++ b/src/services/ai_product_service.py
@@ -483,13 +483,13 @@ async def _generate_product_configuration(
Product Description:
- Name: {description.name}
- External (buyer-facing): {description.external_description}
- - Internal details: {description.internal_details or 'None provided'}
+ - Internal details: {description.internal_details or "None provided"}
Inventory Analysis Results:
- - Premium Level: {inventory_analysis['premium_level']}
- - Best Matching Placements: {json.dumps(inventory_analysis['matched_placements'][:3], indent=2)}
- - Suggested CPM Range: ${inventory_analysis['suggested_cpm_range']['min']:.2f} - ${inventory_analysis['suggested_cpm_range']['max']:.2f}
- - Recommended Ad Sizes: {inventory_analysis['recommended_formats']}
+ - Premium Level: {inventory_analysis["premium_level"]}
+ - Best Matching Placements: {json.dumps(inventory_analysis["matched_placements"][:3], indent=2)}
+ - Suggested CPM Range: ${inventory_analysis["suggested_cpm_range"]["min"]:.2f} - ${inventory_analysis["suggested_cpm_range"]["max"]:.2f}
+ - Recommended Ad Sizes: {inventory_analysis["recommended_formats"]}
Available Ad Server Inventory:
{json.dumps(inventory.placements[:10], indent=2)}
@@ -498,7 +498,7 @@ async def _generate_product_configuration(
{json.dumps(inventory.targeting_options, indent=2)}
Creative Formats Available:
- {json.dumps([f for f in creative_formats if f['type'] in ['display', 'video', 'native']][:20], indent=2)}
+ {json.dumps([f for f in creative_formats if f["type"] in ["display", "video", "native"]][:20], indent=2)}
Generate a product configuration following these guidelines:
1. Use the matched placements from the analysis as placement_targets
diff --git a/src/services/delivery_simulator.py b/src/services/delivery_simulator.py
index 9f8c62d11..1def3b228 100644
--- a/src/services/delivery_simulator.py
+++ b/src/services/delivery_simulator.py
@@ -43,7 +43,6 @@ def restart_active_simulations(self):
before a server restart. Daemon threads don't survive restarts.
"""
try:
- from sqlalchemy import select
from src.core.database.database_session import get_db_session
@@ -113,7 +112,7 @@ def restart_active_simulations(self):
continue
logger.info(
- f"🚀 Restarting simulation for {media_buy.media_buy_id} " f"(product: {product.product_id})"
+ f"🚀 Restarting simulation for {media_buy.media_buy_id} (product: {product.product_id})"
)
try:
diff --git a/src/services/dynamic_pricing_service.py b/src/services/dynamic_pricing_service.py
index e57c00bc3..5dc194027 100644
--- a/src/services/dynamic_pricing_service.py
+++ b/src/services/dynamic_pricing_service.py
@@ -72,7 +72,7 @@ def enrich_products_with_pricing(
)
except Exception as e:
- logger.warning(f"Failed to calculate pricing for product {product.product_id}: {e}. " "Using defaults.")
+ logger.warning(f"Failed to calculate pricing for product {product.product_id}: {e}. Using defaults.")
# Leave defaults (floor_cpm, recommended_cpm, estimated_exposures remain None)
return products
diff --git a/src/services/gam_inventory_service.py b/src/services/gam_inventory_service.py
index 633e9ded7..2ceb53fe4 100644
--- a/src/services/gam_inventory_service.py
+++ b/src/services/gam_inventory_service.py
@@ -410,7 +410,8 @@ def _streaming_sync_all_inventory(self, tenant_id: str, discovery: "GAMInventory
logger.info("Streaming custom targeting keys (values lazy loaded)...")
try:
custom_targeting = discovery.discover_custom_targeting(
- max_values_per_key=None, fetch_values=False  # Don't fetch values # Lazy load values on demand
+ max_values_per_key=None,
+ fetch_values=False,  # Don't fetch values # Lazy load values on demand
)
self._write_custom_targeting_keys(tenant_id, discovery.custom_targeting_keys.values(), sync_time)
counts["custom_targeting_keys"] = len(discovery.custom_targeting_keys)
diff --git a/src/services/push_notification_service.py b/src/services/push_notification_service.py
index 20054ef44..35edfca4d 100644
--- a/src/services/push_notification_service.py
+++ b/src/services/push_notification_service.py
@@ -220,7 +220,7 @@ async def _deliver_webhook(
except httpx.TimeoutException:
logger.warning(
- f"Webhook delivery to {config.url} timed out " f"(attempt: {attempt + 1}/{self.max_retries})"
+ f"Webhook delivery to {config.url} timed out (attempt: {attempt + 1}/{self.max_retries})"
)
except httpx.RequestError as e:
logger.warning(
diff --git a/src/services/webhook_delivery_service.py b/src/services/webhook_delivery_service.py
index 87d296200..85fef2671 100644
--- a/src/services/webhook_delivery_service.py
+++ b/src/services/webhook_delivery_service.py
@@ -147,7 +147,7 @@ def enqueue(self, webhook_data: dict[str, Any]) -> bool:
if len(self.queue) >= self.max_size:
self._dropped_count += 1
logger.warning(
- f"Webhook queue full ({self.max_size}), " f"dropping webhook (total dropped: {self._dropped_count})"
+ f"Webhook queue full ({self.max_size}), dropping webhook (total dropped: {self._dropped_count})"
)
return False
@@ -394,7 +394,7 @@ def _send_webhook_enhanced(
# Check circuit breaker
if not circuit_breaker.can_attempt():
- logger.warning(f"⚠️ Circuit breaker OPEN for {config.url}, " f"skipping webhook delivery")
+ logger.warning(f"⚠️ Circuit breaker OPEN for {config.url}, skipping webhook delivery")
continue
# Add to queue (bounded)
@@ -460,7 +460,7 @@ def _deliver_with_backoff(
if webhook_secret:
if not self._verify_secret_strength(webhook_secret):
- logger.warning(f"⚠️ Webhook secret for {config.url} is too weak " f"(min 32 characters required)")
+ logger.warning(f"⚠️ Webhook secret for {config.url} is too weak (min 32 characters required)")
else:
signature = self._generate_hmac_signature(payload, webhook_secret, timestamp)
headers["X-ADCP-Signature"] = signature
@@ -476,9 +476,7 @@ def _deliver_with_backoff(
if attempt > 0:
# Base delay * 2^attempt + random jitter (0-1 seconds)
delay = (base_delay * (2**attempt)) + random.uniform(0, 1)
- logger.debug(
- f"Retrying webhook delivery after {delay:.2f}s " f"(attempt {attempt + 1}/{max_retries})"
- )
+ logger.debug(f"Retrying webhook delivery after {delay:.2f}s (attempt {attempt + 1}/{max_retries})")
time.sleep(delay)
# Send webhook
@@ -490,7 +488,7 @@ def _deliver_with_backoff(
)
if 200 <= response.status_code < 300:
- logger.debug(f"Webhook delivered to {config.url} " f"(status: {response.status_code})")
+ logger.debug(f"Webhook delivered to {config.url} (status: {response.status_code})")
circuit_breaker.record_success()
return True
@@ -501,11 +499,9 @@ def _deliver_with_backoff(
)
except httpx.TimeoutException:
- logger.warning(f"Webhook delivery to {config.url} timed out " f"(attempt: {attempt + 1}/{max_retries})")
+ logger.warning(f"Webhook delivery to {config.url} timed out (attempt: {attempt + 1}/{max_retries})")
except httpx.RequestError as e:
- logger.warning(
- f"Webhook delivery to {config.url} failed: {e} " f"(attempt: {attempt + 1}/{max_retries})"
- )
+ logger.warning(f"Webhook delivery to {config.url} failed: {e} (attempt: {attempt + 1}/{max_retries})")
except Exception as e:
logger.error(f"Unexpected error delivering to {config.url}: {e}", exc_info=True)
break
diff --git a/src/services/webhook_verification.py b/src/services/webhook_verification.py
index 1a874ba49..61864200a 100644
--- a/src/services/webhook_verification.py
+++ b/src/services/webhook_verification.py
@@ -86,7 +86,7 @@ def _verify_timestamp(self, timestamp: str):
if age_seconds > self.replay_window_seconds:
raise WebhookVerificationError(
- f"Timestamp too old ({age_seconds:.0f}s > " f"{self.replay_window_seconds}s window)"
+ f"Timestamp too old ({age_seconds:.0f}s > {self.replay_window_seconds}s window)"
)
def _verify_signature(
diff --git a/tests/benchmarks/benchmark_ai_review_async.py b/tests/benchmarks/benchmark_ai_review_async.py
index 5674fc7c2..658b3fb42 100755
--- a/tests/benchmarks/benchmark_ai_review_async.py
+++ b/tests/benchmarks/benchmark_ai_review_async.py
@@ -50,7 +50,7 @@ def benchmark_sync_mode(creative_count: int) -> dict:
results = []
for i in range(creative_count):
- creative_id = f"creative_{i+1}"
+ creative_id = f"creative_{i + 1}"
print(f"  Processing {creative_id}...", end=" ", flush=True)
result = simulate_ai_review_sync(creative_id)
results.append(result)
@@ -61,8 +61,8 @@ def benchmark_sync_mode(creative_count: int) -> dict:
print("\n📊 Results:")
print(f"  Total time: {total_time:.2f}s")
- print(f"  Average per creative: {total_time/creative_count:.2f}s")
- print(f"  Throughput: {creative_count/total_time:.1f} creatives/second")
+ print(f"  Average per creative: {total_time / creative_count:.2f}s")
+ print(f"  Throughput: {creative_count / total_time:.1f} creatives/second")
# Check for timeout (>120 seconds is typical API timeout)
timeout_threshold = 60.0  # 60 seconds for demo (120s in real system)
@@ -86,7 +86,7 @@ def benchmark_async_mode(creative_count: int) -> dict:
# Submit all reviews (non-blocking)
tasks = []
for i in range(creative_count):
- creative_id = f"creative_{i+1}"
+ creative_id = f"creative_{i + 1}"
result = simulate_ai_review_async(creative_id, executor)
tasks.append(result)
diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py
index e12e2621e..d68c04230 100644
--- a/tests/e2e/conftest.py
+++ b/tests/e2e/conftest.py
@@ -120,7 +120,9 @@ def docker_services_e2e(request):
# Build first with output visible, then start detached
print("Step 1/2: Building Docker images...")
build_result = subprocess.run(
- ["docker-compose", "build", "--progress=plain"], env=env, capture_output=False  # Show build output
+ ["docker-compose", "build", "--progress=plain"],
+ env=env,
+ capture_output=False,  # Show build output
)
if build_result.returncode != 0:
print(f"❌ Docker build failed with exit code {build_result.returncode}")
diff --git a/tests/e2e/test_adcp_schema_compliance.py b/tests/e2e/test_adcp_schema_compliance.py
index 9a5a195af..d9b62fb58 100644
--- a/tests/e2e/test_adcp_schema_compliance.py
+++ b/tests/e2e/test_adcp_schema_compliance.py
@@ -155,12 +155,12 @@ async def test_get_products_compliance(
for i, request in enumerate(valid_requests):
try:
await schema_validator.validate_request("get-products", request)
- compliance_report.add_result("get-products", f"request-{i+1}", "pass", "Valid request structure")
+ compliance_report.add_result("get-products", f"request-{i + 1}", "pass", "Valid request structure")
except SchemaValidationError as e:
error_details = f"{str(e)} | Errors: {'; '.join(e.validation_errors)}"
- compliance_report.add_result("get-products", f"request-{i+1}", "fail", error_details)
+ compliance_report.add_result("get-products", f"request-{i + 1}", "fail", error_details)
except Exception as e:
- compliance_report.add_result("get-products", f"request-{i+1}", "warning", f"Validation error: {e}")
+ compliance_report.add_result("get-products", f"request-{i + 1}", "warning", f"Validation error: {e}")
# Test valid response patterns per AdCP schema (only 'products' field allowed)
valid_responses = [
@@ -182,12 +182,12 @@ async def test_get_products_compliance(
for i, response in enumerate(valid_responses):
try:
await schema_validator.validate_response("get-products", response)
- compliance_report.add_result("get-products", f"response-{i+1}", "pass", "Valid response structure")
+ compliance_report.add_result("get-products", f"response-{i + 1}", "pass", "Valid response structure")
except SchemaValidationError as e:
error_details = f"{str(e)} | Errors: {'; '.join(e.validation_errors)}"
- compliance_report.add_result("get-products", f"response-{i+1}", "fail", error_details)
+ compliance_report.add_result("get-products", f"response-{i + 1}", "fail", error_details)
except Exception as e:
- compliance_report.add_result("get-products", f"response-{i+1}", "warning", f"Validation error: {e}")
+ compliance_report.add_result("get-products", f"response-{i + 1}", "warning", f"Validation error: {e}")
@pytest.mark.asyncio
async def test_create_media_buy_compliance(
@@ -238,11 +238,11 @@ async def test_targeting_schema_compliance(
try:
await schema_validator.validate_request("create-media-buy", request_with_targeting)
- compliance_report.add_result("targeting", f"example-{i+1}", "pass", "Valid targeting structure")
+ compliance_report.add_result("targeting", f"example-{i + 1}", "pass", "Valid targeting structure")
except SchemaValidationError as e:
- compliance_report.add_result("targeting", f"example-{i+1}", "fail", str(e))
+ compliance_report.add_result("targeting", f"example-{i + 1}", "fail", str(e))
except Exception as e:
- compliance_report.add_result("targeting", f"example-{i+1}", "warning", f"Validation error: {e}")
+ compliance_report.add_result("targeting", f"example-{i + 1}", "warning", f"Validation error: {e}")
@pytest.mark.asyncio
async def test_format_compliance(
@@ -299,14 +299,14 @@ async def test_error_response_compliance(
try:
await schema_validator.validate_request("create-media-buy", invalid_request)
compliance_report.add_result(
- "error-handling", f"invalid-{i+1}", "fail", "Should have failed validation"
+ "error-handling", f"invalid-{i + 1}", "fail", "Should have failed validation"
)
except SchemaValidationError:
compliance_report.add_result(
- "error-handling", f"invalid-{i+1}", "pass", "Correctly rejected invalid request"
+ "error-handling", f"invalid-{i + 1}", "pass", "Correctly rejected invalid request"
)
except Exception as e:
- compliance_report.add_result("error-handling", f"invalid-{i+1}", "warning", f"Unexpected error: {e}")
+ compliance_report.add_result("error-handling", f"invalid-{i + 1}", "warning", f"Unexpected error: {e}")
@pytest.mark.asyncio
async def test_required_fields_compliance(
@@ -325,15 +325,15 @@ async def test_required_fields_compliance(
try:
await schema_validator.validate_request("create-media-buy", incomplete)
compliance_report.add_result(
- "required-fields", f"incomplete-{i+1}", "fail", "Should require missing fields"
+ "required-fields", f"incomplete-{i + 1}", "fail", "Should require missing fields"
)
except SchemaValidationError:
compliance_report.add_result(
- "required-fields", f"incomplete-{i+1}", "pass", "Correctly required missing fields"
+ "required-fields", f"incomplete-{i + 1}", "pass", "Correctly required missing fields"
)
except Exception as e:
compliance_report.add_result(
- "required-fields", f"incomplete-{i+1}", "warning", f"Validation error: {e}"
+ "required-fields", f"incomplete-{i + 1}", "warning", f"Validation error: {e}"
)
@pytest.mark.asyncio
diff --git a/tests/helpers/a2a_response_validator.py b/tests/helpers/a2a_response_validator.py
index d824ea680..6e86a62d6 100644
--- a/tests/helpers/a2a_response_validator.py
+++ b/tests/helpers/a2a_response_validator.py
@@ -172,8 +172,7 @@ def check_response_type_safety(self, response_class: type) -> ValidationResult:
)
elif not has_str_method:
warnings.append(
- f"{class_name} only has .message field, no __str__() method. "
- f"Consider adding __str__() for consistency."
+ f"{class_name} only has .message field, no __str__() method. Consider adding __str__() for consistency."
)
return ValidationResult(is_valid=len(errors) == 0, errors=errors, warnings=warnings)
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index f62554558..e976064f6 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -60,8 +60,7 @@ def integration_db():
else:
# Fallback to defaults if URL parsing fails
pytest.fail(
- f"Failed to parse DATABASE_URL: {postgres_url}\n"
- f"Expected format: postgresql://user:pass@host:port/dbname"
+ f"Failed to parse DATABASE_URL: {postgres_url}\nExpected format: postgresql://user:pass@host:port/dbname"
)
user, password, host, postgres_port = "adcp_user", "test_password", "localhost", 5432
diff --git a/tests/integration/test_a2a_error_responses.py b/tests/integration/test_a2a_error_responses.py
index 33de1ebee..2b756ebac 100644
--- a/tests/integration/test_a2a_error_responses.py
+++ b/tests/integration/test_a2a_error_responses.py
@@ -287,9 +287,9 @@ async def test_create_media_buy_success_has_no_errors_field(self, handler, test_
# CRITICAL ASSERTIONS: Success response
assert artifact_data["success"] is True, "success must be True for successful operation"
- assert (
- artifact_data.get("errors") is None or len(artifact_data.get("errors", [])) == 0
- ), "errors field must be None or empty array for success"
+ assert artifact_data.get("errors") is None or len(artifact_data.get("errors", [])) == 0, (
+ "errors field must be None or empty array for success"
+ )
assert "media_buy_id" in artifact_data, "Success response must include media_buy_id"
assert artifact_data["media_buy_id"] is not None, "media_buy_id must not be None for success"
@@ -340,9 +340,9 @@ async def test_create_media_buy_response_includes_all_adcp_fields(self, handler,
# Optional but important AdCP fields
assert "packages" in artifact_data, "Must include packages (AdCP spec field)"
- assert (
- "creative_deadline" in artifact_data or artifact_data.get("creative_deadline") is None
- ), "Must include creative_deadline (AdCP spec field)"
+ assert "creative_deadline" in artifact_data or artifact_data.get("creative_deadline") is None, (
+ "Must include creative_deadline (AdCP spec field)"
+ )
# A2A-specific augmentation fields
assert "success" in artifact_data, "A2A wrapper must add success field"
@@ -374,7 +374,8 @@ async def test_error_response_has_consistent_structure(self, handler):
# Call handler directly with invalid params
result = await handler._handle_create_media_buy_skill(
- parameters={"brand_manifest": {"name": "test"}}, auth_token="test_token"  # Missing required fields
+ parameters={"brand_manifest": {"name": "test"}},
+ auth_token="test_token",  # Missing required fields
)
# Verify error response structure
diff --git a/tests/integration/test_a2a_regression_prevention.py b/tests/integration/test_a2a_regression_prevention.py
index 74c8f9c16..6596d8eae 100644
--- a/tests/integration/test_a2a_regression_prevention.py
+++ b/tests/integration/test_a2a_regression_prevention.py
@@ -207,9 +207,9 @@ def test_tool_context_creation_method_exists(self):
handler = AdCPRequestHandler()
# Method should exist
- assert hasattr(
- handler, "_create_tool_context_from_a2a"
- ), "Handler should have _create_tool_context_from_a2a method"
+ assert hasattr(handler, "_create_tool_context_from_a2a"), (
+ "Handler should have _create_tool_context_from_a2a method"
+ )
assert callable(handler._create_tool_context_from_a2a), "_create_tool_context_from_a2a should be callable"
@@ -242,9 +242,9 @@ def test_no_redirect_on_agent_card_endpoints(self):
if response.status_code == 200:
# Should be 200, not a redirect (301, 302, etc.)
- assert (
- 200 <= response.status_code < 300
- ), f"Endpoint {endpoint} returned redirect: {response.status_code}"
+ assert 200 <= response.status_code < 300, (
+ f"Endpoint {endpoint} returned redirect: {response.status_code}"
+ )
# Should return JSON
assert response.headers.get("content-type", "").startswith("application/json")
diff --git a/tests/integration/test_a2a_response_compliance.py b/tests/integration/test_a2a_response_compliance.py
index d17ada11b..915f8e960 100644
--- a/tests/integration/test_a2a_response_compliance.py
+++ b/tests/integration/test_a2a_response_compliance.py
@@ -334,9 +334,9 @@ def test_all_response_types_have_str_method(self):
for response_cls in response_types:
# All our response adapters should have __str__
- assert hasattr(
- response_cls, "__str__"
- ), f"{response_cls.__name__} must have __str__() for human-readable messages"
+ assert hasattr(response_cls, "__str__"), (
+ f"{response_cls.__name__} must have __str__() for human-readable messages"
+ )
@pytest.mark.integration
diff --git a/tests/integration/test_a2a_response_message_fields.py b/tests/integration/test_a2a_response_message_fields.py
index 9df8fe6ec..b7fb8d39f 100644
--- a/tests/integration/test_a2a_response_message_fields.py
+++ b/tests/integration/test_a2a_response_message_fields.py
@@ -256,9 +256,9 @@ def test_all_response_types_have_str_or_message(self):
# For now, just check the class definition
has_message_field = "message" in response_cls.model_fields
- assert (
- has_str_method or has_message_field
- ), f"{response_cls.__name__} must have either __str__ method or .message field for A2A compatibility"
+ assert has_str_method or has_message_field, (
+ f"{response_cls.__name__} must have either __str__ method or .message field for A2A compatibility"
+ )
@pytest.mark.integration
@@ -289,6 +289,6 @@ async def test_skill_error_has_message_field(self, handler, sample_principal):
assert "message" in result or "error" in result, "Error response must have message or error field"
except Exception as e:
# Errors are expected for invalid params
- assert "message" not in str(e) or "AttributeError" not in str(
- e
- ), "Should not get AttributeError when handling skill errors"
+ assert "message" not in str(e) or "AttributeError" not in str(e), (
+ "Should not get AttributeError when handling skill errors"
+ )
diff --git a/tests/integration/test_adapter_factory.py b/tests/integration/test_adapter_factory.py
index 16fdff55a..53fbab97c 100644
--- a/tests/integration/test_adapter_factory.py
+++ b/tests/integration/test_adapter_factory.py
@@ -252,9 +252,9 @@ def test_get_adapter_instantiates_all_adapter_types(self, setup_adapters):
# Verify correct adapter type
expected_class = adapter_type_map[adapter_type]
- assert isinstance(
- adapter, expected_class
- ), f"Expected {expected_class.__name__}, got {type(adapter).__name__}"
+ assert isinstance(adapter, expected_class), (
+ f"Expected {expected_class.__name__}, got {type(adapter).__name__}"
+ )
# Verify dry_run mode was set
assert adapter.dry_run is True, f"dry_run not set correctly for {adapter_type}"
@@ -311,9 +311,9 @@ def test_gam_adapter_requires_network_code(self, setup_adapters):
# Verify it's actually a GAM adapter, not mock fallback
from src.adapters.google_ad_manager import GoogleAdManager
- assert isinstance(
- adapter, GoogleAdManager
- ), f"Expected GAM adapter, got {type(adapter).__name__}. Check tenant/adapter_config setup."
+ assert isinstance(adapter, GoogleAdManager), (
+ f"Expected GAM adapter, got {type(adapter).__name__}. Check tenant/adapter_config setup."
+ )
# Verify network_code was passed correctly
assert hasattr(adapter, "network_code"), "GAM adapter missing network_code attribute"
diff --git a/tests/integration/test_admin_ui_data_validation.py b/tests/integration/test_admin_ui_data_validation.py
index ca9092638..527f30ca7 100644
--- a/tests/integration/test_admin_ui_data_validation.py
+++ b/tests/integration/test_admin_ui_data_validation.py
@@ -217,9 +217,9 @@ def test_principals_list_no_duplicates_with_relationships(
# Principals page renders successfully
# Actual display depends on template and filters
# Just verify page contains principal-related content
- assert (
- "principal" in html.lower() or "advertiser" in html.lower()
- ), "Principals page should contain principal/advertiser-related content"
+ assert "principal" in html.lower() or "advertiser" in html.lower(), (
+ "Principals page should contain principal/advertiser-related content"
+ )
class TestInventoryDataValidation:
@@ -261,9 +261,9 @@ def test_inventory_browser_no_duplicate_ad_units(
# Inventory page renders successfully even if empty
# This test just verifies the page loads without errors
# The actual inventory sync would require GAM adapter integration
- assert (
- "inventory" in html.lower() or "ad units" in html.lower()
- ), "Inventory page should contain inventory-related content"
+ assert "inventory" in html.lower() or "ad units" in html.lower(), (
+ "Inventory page should contain inventory-related content"
+ )
class TestDashboardDataValidation:
@@ -386,7 +386,7 @@ def test_media_buys_list_no_duplicates_with_packages(
# Media buy should appear exactly once (not 3 times for 3 packages)
count = html.count("test_mb_duplicate_check")
assert count == 1, (
- f"Media buy appears {count} times in HTML (expected 1). " f"Check for joinedload() without .unique() bug."
+ f"Media buy appears {count} times in HTML (expected 1). Check for joinedload() without .unique() bug."
)
def test_media_buys_list_shows_all_statuses(
@@ -503,9 +503,9 @@ def test_workflows_list_no_duplicate_steps(
# Workflows page renders successfully
# Actual workflow display depends on filters/status
# Just verify page contains workflow-related content
- assert (
- "workflow" in html.lower() or "step" in html.lower() or "task" in html.lower()
- ), "Workflows page should contain workflow-related content"
+ assert "workflow" in html.lower() or "step" in html.lower() or "task" in html.lower(), (
+ "Workflows page should contain workflow-related content"
+ )
class TestAuthorizedPropertiesDataValidation:
@@ -546,9 +546,7 @@ def test_authorized_properties_no_duplicates_with_tags(
# Property should appear exactly once
count = html.count("Test Property")
- assert count == 1, (
- f"Property appears {count} times (expected 1). " f"Check for joinedload() without .unique() bug."
- )
+ assert count == 1, f"Property appears {count} times (expected 1). Check for joinedload() without .unique() bug."
def test_authorized_properties_shows_all_properties(
self, authenticated_admin_session, test_tenant_with_data, integration_db
diff --git a/tests/integration/test_context_persistence.py b/tests/integration/test_context_persistence.py
index 716c93bc5..ef5223f85 100644
--- a/tests/integration/test_context_persistence.py
+++ b/tests/integration/test_context_persistence.py
@@ -22,7 +22,6 @@ def test_simplified_context(integration_db):
ctx_manager = ContextManager()
try:
-
# Test 1: Create a simple context for async operation
console.print("\n[yellow]Test 1: Creating context for async operation[/yellow]")
ctx = ctx_manager.create_context(
diff --git a/tests/integration/test_creative_lifecycle_mcp.py b/tests/integration/test_creative_lifecycle_mcp.py
index e527b5678..4b7de18e9 100644
--- a/tests/integration/test_creative_lifecycle_mcp.py
+++ b/tests/integration/test_creative_lifecycle_mcp.py
@@ -155,7 +155,6 @@ def test_sync_creatives_create_new_creatives(self, mock_context, sample_creative
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
# Call sync_creatives tool (uses default patch=False for full upsert)
response = core_sync_creatives_tool(creatives=sample_creatives, context=mock_context)
@@ -238,7 +237,6 @@ def test_sync_creatives_upsert_existing_creative(self, mock_context):
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
# Upsert with patch=False (default): full replacement
response = core_sync_creatives_tool(creatives=updated_creative_data, context=mock_context)
@@ -272,7 +270,6 @@ def test_sync_creatives_with_package_assignments(self, mock_context, sample_crea
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
# Use spec-compliant assignments dict: creative_id → package_ids
response = core_sync_creatives_tool(
creatives=creative_data,
@@ -308,7 +305,6 @@ def test_sync_creatives_with_assignments_lookup(self, mock_context, sample_creat
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
# Use spec-compliant assignments dict
response = core_sync_creatives_tool(
creatives=creative_data,
@@ -350,7 +346,6 @@ def test_sync_creatives_validation_failures(self, mock_context):
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
response = core_sync_creatives_tool(creatives=invalid_creatives, context=mock_context)
# Should sync valid creative but fail on invalid one
@@ -398,7 +393,6 @@ def test_list_creatives_no_filters(self, mock_context):
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
response = core_list_creatives_tool(context=mock_context)
# Verify response structure
@@ -446,7 +440,6 @@ def test_list_creatives_with_status_filter(self, mock_context):
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
# Test approved filter
response = core_list_creatives_tool(status="approved", context=mock_context)
assert len(response.creatives) == 3
@@ -491,7 +484,6 @@ def test_list_creatives_with_format_filter(self, mock_context):
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
# Test display format filter
response = core_list_creatives_tool(format="display_300x250", context=mock_context)
assert len(response.creatives) == 2
@@ -539,7 +531,6 @@ def test_list_creatives_with_date_filters(self, mock_context):
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
# Test created_after filter
created_after = (now - timedelta(days=5)).isoformat()
response = core_list_creatives_tool(created_after=created_after, context=mock_context)
@@ -588,7 +579,6 @@ def test_list_creatives_with_search(self, mock_context):
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
# Search for "Holiday"
response = core_list_creatives_tool(search="Holiday", context=mock_context)
assert len(response.creatives) == 2
@@ -622,7 +612,6 @@ def test_list_creatives_pagination_and_sorting(self, mock_context):
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
# Test first page
response = core_list_creatives_tool(page=1, limit=10, context=mock_context)
assert len(response.creatives) == 10
@@ -690,7 +679,6 @@ def test_list_creatives_with_media_buy_assignments(self, mock_context):
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
# Filter by media_buy_id - should only return assigned creative
response = core_list_creatives_tool(media_buy_id=self.test_media_buy_id, context=mock_context)
assert len(response.creatives) == 1
@@ -707,7 +695,6 @@ def test_sync_creatives_authentication_required(self, sample_creatives):
mock_context = MockContext("invalid-token")
with patch("src.core.main._get_principal_id_from_context", side_effect=Exception("Invalid auth token")):
-
with pytest.raises(Exception) as exc_info:
core_sync_creatives_tool(creatives=sample_creatives, context=mock_context)
@@ -738,7 +725,6 @@ def test_sync_creatives_missing_tenant(self, mock_context, sample_creatives):
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value=None),
):
-
with pytest.raises(Exception) as exc_info:
core_sync_creatives_tool(creatives=sample_creatives, context=mock_context)
@@ -751,7 +737,6 @@ def test_list_creatives_empty_results(self, mock_context):
patch("src.core.main._get_principal_id_from_context", return_value=self.test_principal_id),
patch("src.core.main.get_current_tenant", return_value={"tenant_id": self.test_tenant_id}),
):
-
# Query with filters that match nothing
response = core_list_creatives_tool(status="rejected", context=mock_context)
# No rejected creatives exist
diff --git a/tests/integration/test_database_health_integration.py b/tests/integration/test_database_health_integration.py
index 01bc816d5..77db69455 100644
--- a/tests/integration/test_database_health_integration.py
+++ b/tests/integration/test_database_health_integration.py
@@ -10,7 +10,6 @@
to improve test coverage and catch real bugs.
"""
-
import pytest
from src.core.database.models import Base, Product, Tenant
@@ -115,9 +114,9 @@ def test_health_check_database_access_errors(self, integration_db):
health = check_database_health()
# Should handle error gracefully
- assert (
- health["status"] == "error"
- ), f"Should report error status for database connection failure, got: {health['status']}"
+ assert health["status"] == "error", (
+ f"Should report error status for database connection failure, got: {health['status']}"
+ )
assert len(health["schema_issues"]) > 0, "Should report schema issues for failed connection"
# Error should be descriptive
diff --git a/tests/integration/test_database_integration.py b/tests/integration/test_database_integration.py
index 23ae730d8..03909d266 100755
--- a/tests/integration/test_database_integration.py
+++ b/tests/integration/test_database_integration.py
@@ -17,9 +17,9 @@
def test_settings_queries():
"""Test the actual SQL queries used in the settings page."""
- print(f"\n{'='*60}")
+ print(f"\n{'=' * 60}")
print("TESTING REAL DATABASE QUERIES")
- print(f"{'='*60}\n")
+ print(f"{'=' * 60}\n")
print(f"Database: {DATABASE_URL.split('@')[1] if '@' in DATABASE_URL else DATABASE_URL}\n")
@@ -129,9 +129,9 @@ def test_settings_queries():
return False
# Summary
- print(f"\n{'='*60}")
+ print(f"\n{'=' * 60}")
print("SUMMARY")
- print(f"{'='*60}")
+ print(f"{'=' * 60}")
if errors:
print(f"\n❌ {len(errors)} queries failed:")
diff --git a/tests/integration/test_gam_country_adunit.py b/tests/integration/test_gam_country_adunit.py
index fc3f07732..48f947aae 100644
--- a/tests/integration/test_gam_country_adunit.py
+++ b/tests/integration/test_gam_country_adunit.py
@@ -138,7 +138,9 @@ def test_ad_unit_breakdown():
# Test get_ad_unit_breakdown
result = service.get_ad_unit_breakdown(
- date_range="this_month", advertiser_id="12345", country="United States"  # Filter by US
+ date_range="this_month",
+ advertiser_id="12345",
+ country="United States",  # Filter by US
)
# Assertions for test validation
diff --git a/tests/integration/test_get_products_database_integration.py b/tests/integration/test_get_products_database_integration.py
index ceab04c4c..cd062c226 100644
--- a/tests/integration/test_get_products_database_integration.py
+++ b/tests/integration/test_get_products_database_integration.py
@@ -705,12 +705,12 @@ async def test_parallel_database_isolation(self, integration_db, test_id):
product_id=f"{test_id}_product_{i}",
name=f"Parallel Product {test_id}_{i}",
description=f"Product {i} for parallel test {test_id}",
- formats=[f"display_{300+i*50}x{250+i*25}"],
+ formats=[f"display_{300 + i * 50}x{250 + i * 25}"],
targeting_template={},
delivery_type="non_guaranteed",
is_fixed_price=False,
cpm=Decimal(f"{5 + i}.00"),
- min_spend=Decimal(f"{1000 + i*100}.00"),
+ min_spend=Decimal(f"{1000 + i * 100}.00"),
)
session.add(product)
diff --git a/tests/integration/test_mcp_tools_audit.py b/tests/integration/test_mcp_tools_audit.py
index 9bd3930d5..f49f53fc4 100644
--- a/tests/integration/test_mcp_tools_audit.py
+++ b/tests/integration/test_mcp_tools_audit.py
@@ -222,9 +222,9 @@ def test_media_buy_delivery_data_field_consistency(self):
if field_name in internal_dict:
internal_value = internal_dict[field_name]
# Values should be compatible (allowing for type conversions)
- assert type(external_value) is type(
- internal_value
- ), f"Field '{field_name}' type mismatch: {type(external_value)} vs {type(internal_value)}"
+ assert type(external_value) is type(internal_value), (
+ f"Field '{field_name}' type mismatch: {type(external_value)} vs {type(internal_value)}"
+ )
else:
# MediaBuyDeliveryData doesn't have model_dump_internal, so model_dump() is used
# This means we need to ensure model_dump() produces reconstruction-compatible output
@@ -267,7 +267,10 @@ def test_budget_nested_object_roundtrip(self):
buyer_ref=f"budget_ref_{i}",
status="active",
totals=DeliveryTotals(
- impressions=25000.0, spend=budget.total, clicks=750.0, ctr=0.03  # Use budget total as spend amount
+ impressions=25000.0,
+ spend=budget.total,
+ clicks=750.0,
+ ctr=0.03,  # Use budget total as spend amount
),
by_package=[
PackageDelivery(
@@ -457,8 +460,8 @@ def test_testing_hooks_data_preservation(self):
else:
assert modified_value == original_value, f"Date value changed for '{key}'"
else:
- assert (
- modified_value == original_value
- ), f"Value changed for '{key}': {original_value} → {modified_value}"
+ assert modified_value == original_value, (
+ f"Value changed for '{key}': {original_value} → {modified_value}"
+ )
print("✅ Testing hooks preserve essential data correctly")
diff --git a/tests/integration/test_notification_urls_exist.py b/tests/integration/test_notification_urls_exist.py
index c5f89dc4b..64a278ef9 100644
--- a/tests/integration/test_notification_urls_exist.py
+++ b/tests/integration/test_notification_urls_exist.py
@@ -83,7 +83,9 @@ def test_all_slack_notification_urls_are_valid_routes(self, app_routes, slack_no
# Check if route exists (exact match or as a prefix)
route_exists = any(
- route == flask_route or route.startswith(flask_route + "/") or
+ route == flask_route
+ or route.startswith(flask_route + "/")
+ or
# Handle both /tenant/