diff --git a/src/sentry/api/urls.py b/src/sentry/api/urls.py index ad3741b3586d93..5b8b846df8b26e 100644 --- a/src/sentry/api/urls.py +++ b/src/sentry/api/urls.py @@ -525,6 +525,7 @@ ProjectRuleGroupHistoryIndexEndpoint, ) from sentry.rules.history.endpoints.project_rule_stats import ProjectRuleStatsIndexEndpoint +from sentry.seer.code_review.endpoints.code_review_local import OrganizationCodeReviewLocalEndpoint from sentry.seer.endpoints.group_ai_autofix import GroupAutofixEndpoint from sentry.seer.endpoints.group_ai_summary import GroupAiSummaryEndpoint from sentry.seer.endpoints.group_autofix_setup_check import GroupAutofixSetupCheck @@ -2398,6 +2399,11 @@ def create_group_urls(name_prefix: str) -> list[URLPattern | URLResolver]: OrganizationAutofixAutomationSettingsEndpoint.as_view(), name="sentry-api-0-organization-autofix-automation-settings", ), + re_path( + r"^(?P<organization_id_or_slug>[^/]+)/code-review/local-review/$", + OrganizationCodeReviewLocalEndpoint.as_view(), + name="sentry-api-0-organization-code-review-local", + ), re_path( r"^(?P<organization_id_or_slug>[^/]+)/seer-rpc/(?P<method_name>\w+)/$", OrganizationSeerRpcEndpoint.as_view(), diff --git a/src/sentry/conf/server.py b/src/sentry/conf/server.py index 536ec50fce1012..d1bc04e6176b4b 100644 --- a/src/sentry/conf/server.py +++ b/src/sentry/conf/server.py @@ -2855,6 +2855,13 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: # For encrypting the access token for the GHE integration SEER_GHE_ENCRYPT_KEY: str | None = os.getenv("SEER_GHE_ENCRYPT_KEY") +# Code Review Local (sentry-cli review command) +CODE_REVIEW_LOCAL_ENABLED = True +CODE_REVIEW_LOCAL_TIMEOUT = 600 # 10 minutes in seconds +CODE_REVIEW_LOCAL_POLL_INTERVAL = 2 # seconds between Seer polls +CODE_REVIEW_LOCAL_USER_RATE_LIMIT = (10, 3600) # 10 per hour +CODE_REVIEW_LOCAL_ORG_RATE_LIMIT = (100, 3600) # 100 per hour + # Used to validate RPC requests from the Overwatch service OVERWATCH_RPC_SHARED_SECRET: list[str] | None = None if (val := os.environ.get("OVERWATCH_RPC_SHARED_SECRET")) is not None: diff --git a/src/sentry/features/temporary.py b/src/sentry/features/temporary.py index 7141db1235592e..12b6d864cba25e 100644 --- a/src/sentry/features/temporary.py +++ b/src/sentry/features/temporary.py @@ -68,6 +68,8 @@ def register_temporary_features(manager: FeatureManager) -> None: manager.add("organizations:detailed-data-for-seer", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Enable GenAI features such as Autofix and Issue Summary manager.add("organizations:autofix-seer-preferences", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Enable local code review for sentry-cli review command + manager.add("organizations:code-review-local", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False, default=False) # Enables Route Preloading manager.add("organizations:route-intent-preloading", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable Prevent AI code review to run per commit diff --git a/src/sentry/seer/code_review/endpoints/code_review_local.py b/src/sentry/seer/code_review/endpoints/code_review_local.py new file mode 100644 index 00000000000000..c9eff04f9b4bcd --- /dev/null +++ b/src/sentry/seer/code_review/endpoints/code_review_local.py @@ -0,0 +1,400 @@ +import logging +import time + +from django.conf import settings +from rest_framework.request import Request +from rest_framework.response import Response +from urllib3.exceptions import MaxRetryError +from urllib3.exceptions import TimeoutError as
UrllibTimeoutError + +from sentry import features, ratelimits +from sentry.api.api_owners import ApiOwner +from sentry.api.api_publish_status import ApiPublishStatus +from sentry.api.base import region_silo_endpoint +from sentry.api.bases.organization import OrganizationEndpoint, OrganizationIntegrationsPermission +from sentry.models.organization import Organization +from sentry.models.repository import Repository +from sentry.seer.code_review.endpoints.serializers.code_review_local import ( + CodeReviewLocalRequestSerializer, +) +from sentry.seer.code_review_local import get_code_review_local_status, trigger_code_review_local +from sentry.utils import metrics + +logger = logging.getLogger(__name__) + + +@region_silo_endpoint +class OrganizationCodeReviewLocalEndpoint(OrganizationEndpoint): + """ + Handle local code review requests from sentry-cli. + + Synchronously polls Seer and returns results. + """ + + owner = ApiOwner.ML_AI + publish_status = { + "POST": ApiPublishStatus.PRIVATE, + } + permission_classes = (OrganizationIntegrationsPermission,) + + def post(self, request: Request, organization: Organization) -> Response: + """ + Trigger local code review for a git diff from sentry-cli. + + This endpoint: + 1. Validates the request (diff size, file count, etc.) + 2. Resolves the repository and checks permissions + 3. Triggers Seer analysis + 4. Polls Seer for completion (up to 10 minutes) + 5. Returns predictions or error + + Returns 200 with predictions on success, various error codes on failure. + """ + # Check if feature is globally enabled + if not settings.CODE_REVIEW_LOCAL_ENABLED: + return Response( + {"detail": "Local code review is not enabled"}, + status=503, + ) + + # Check feature flag + if not features.has("organizations:code-review-local", organization): + return Response( + {"detail": "Local code review is not enabled for this organization"}, + status=403, + ) + + # Rate limiting + user_key = f"code_review_local:user:{request.user.id}" + org_key = f"code_review_local:org:{organization.id}" + + user_limit, user_window = settings.CODE_REVIEW_LOCAL_USER_RATE_LIMIT + org_limit, org_window = settings.CODE_REVIEW_LOCAL_ORG_RATE_LIMIT + + if ratelimits.backend.is_limited(user_key, limit=user_limit, window=user_window): + metrics.incr("code_review_local.rate_limited", tags={"type": "user"}) + return Response( + { + "detail": f"Rate limit exceeded. Maximum {user_limit} requests per {user_window // 3600} hour(s) per user" + }, + status=429, + ) + + if ratelimits.backend.is_limited(org_key, limit=org_limit, window=org_window): + metrics.incr("code_review_local.rate_limited", tags={"type": "org"}) + return Response( + { + "detail": f"Organization rate limit exceeded. 
Maximum {org_limit} requests per {org_window // 3600} hour(s)" + }, + status=429, + ) + + # Validate request + serializer = CodeReviewLocalRequestSerializer(data=request.data) + if not serializer.is_valid(): + return Response({"detail": serializer.errors}, status=400) + + validated_data = serializer.validated_data + repo_data = validated_data["repository"] + diff = validated_data["diff"] + commit_message = validated_data.get("commit_message") + + # Resolve repository + # Repository names in the database are stored as "owner/name" (e.g., "getsentry/sentry") + full_repo_name = f"{repo_data['owner']}/{repo_data['name']}" + try: + repository = self._resolve_repository( + organization=organization, + repo_name=full_repo_name, + repo_provider=repo_data["provider"], + ) + except Repository.DoesNotExist: + return Response( + { + "detail": f"Repository {full_repo_name} not found. " + "Please ensure the repository is connected to Sentry via an integration." + }, + status=404, + ) + + # Log request + logger.info( + "code_review_local.request", + extra={ + "organization_id": organization.id, + "user_id": request.user.id, + "repository_id": repository.id, + "diff_size_bytes": len(diff), + }, + ) + + metrics.incr("code_review_local.request", tags={"org": organization.slug}) + + # Trigger Seer + # user.id is guaranteed to be non-None since this endpoint requires authentication + user_id = request.user.id + assert user_id is not None + user_name = request.user.username or getattr(request.user, "email", None) or str(user_id) + + try: + trigger_response = trigger_code_review_local( + repo_provider=repo_data["provider"], + repo_owner=repo_data["owner"], + repo_name=repo_data["name"], + repo_external_id=repository.external_id or "", + base_commit_sha=repo_data["base_commit_sha"], + diff=diff, + organization_id=organization.id, + organization_slug=organization.slug, + user_id=user_id, + user_name=user_name, + commit_message=commit_message, + ) + except (UrllibTimeoutError, MaxRetryError): + logger.exception( + "code_review_local.trigger.timeout", + extra={ + "organization_id": organization.id, + "user_id": request.user.id, + }, + ) + return Response( + {"detail": "Code review service is temporarily unavailable"}, status=503 + ) + except ValueError as e: + logger.exception( + "code_review_local.trigger.error", + extra={ + "organization_id": organization.id, + "user_id": request.user.id, + "error": str(e), + }, + ) + # Include the error message from Seer if available + error_msg = str(e) + if "Seer error" in error_msg: + return Response({"detail": error_msg}, status=502) + return Response({"detail": "Failed to start code review analysis"}, status=502) + except Exception as e: + # Catch-all for unexpected errors + logger.exception( + "code_review_local.trigger.unexpected_error", + extra={ + "organization_id": organization.id, + "user_id": request.user.id, + "error_type": type(e).__name__, + "error": str(e), + }, + ) + return Response( + {"detail": f"Unexpected error during code review: {type(e).__name__}"}, + status=500, + ) + + run_id = trigger_response["run_id"] + + logger.info( + "code_review_local.seer_triggered", + extra={ + "seer_run_id": run_id, + "organization_id": organization.id, + "user_id": request.user.id, + }, + ) + + # Poll for results + try: + final_response = self._poll_seer_for_results( + run_id=run_id, + timeout_seconds=settings.CODE_REVIEW_LOCAL_TIMEOUT, + poll_interval_seconds=settings.CODE_REVIEW_LOCAL_POLL_INTERVAL, + ) + except TimeoutError: + logger.exception( + 
"code_review_local.timeout", + extra={ + "seer_run_id": run_id, + "organization_id": organization.id, + "user_id": request.user.id, + }, + ) + metrics.incr("code_review_local.timeout") + return Response( + { + "detail": "Analysis exceeded maximum processing time (10 minutes). Please try again with a smaller diff." + }, + status=504, + ) + except ValueError as e: + # Seer returned error status + status_code, error_code, error_message = self._map_seer_error_to_response(str(e)) + logger.exception( + "code_review_local.seer_error", + extra={ + "seer_run_id": run_id, + "organization_id": organization.id, + "user_id": request.user.id, + "mapped_status": status_code, + }, + ) + metrics.incr("code_review_local.seer_error", tags={"error_code": error_code}) + return Response({"detail": error_message}, status=status_code) + + # Success + predictions = final_response.get("predictions", []) + diagnostics = final_response.get("diagnostics", {}) + + logger.info( + "code_review_local.completed", + extra={ + "seer_run_id": run_id, + "organization_id": organization.id, + "user_id": request.user.id, + "predictions_count": len(predictions), + "status": final_response.get("status"), + }, + ) + + metrics.incr("code_review_local.completed", tags={"status": "success"}) + metrics.incr("code_review_local.predictions", amount=len(predictions)) + + response_data = { + "status": final_response.get("status"), + "predictions": predictions, + "diagnostics": diagnostics, + "seer_run_id": run_id, + } + + return Response(response_data, status=200) + + def _resolve_repository( + self, organization: Organization, repo_name: str, repo_provider: str + ) -> Repository: + """ + Resolve repository by name and provider. + + Args: + organization: Organization object + repo_name: Repository name (e.g., "sentry") + repo_provider: Provider name (e.g., "github") + + Returns: + Repository object + + Raises: + Repository.DoesNotExist: If repository not found + """ + # Map simple provider names to integration provider names + # Repositories created via integrations use "integrations:github" format + provider_variants = [repo_provider] + if not repo_provider.startswith("integrations:"): + provider_variants.append(f"integrations:{repo_provider}") + + return Repository.objects.get( + organization_id=organization.id, + name=repo_name, + provider__in=provider_variants, + ) + + def _poll_seer_for_results( + self, run_id: int, timeout_seconds: int = 600, poll_interval_seconds: int = 2 + ) -> dict: + """ + Poll Seer until completion, error, or timeout. 
+ + Args: + run_id: Seer run ID to poll + timeout_seconds: Maximum time to wait (default 10 minutes) + poll_interval_seconds: Time between polls (default 2 seconds) + + Returns: + Final response from Seer + + Raises: + TimeoutError: If timeout exceeded + ValueError: If Seer returns error status + """ + start_time = time.time() + attempt = 0 + + while True: + elapsed = time.time() - start_time + if elapsed >= timeout_seconds: + raise TimeoutError("CLI review processing exceeded timeout") + + attempt += 1 + logger.debug( + "code_review_local.polling", + extra={ + "seer_run_id": run_id, + "attempt": attempt, + "elapsed_seconds": elapsed, + }, + ) + + try: + response = get_code_review_local_status(run_id) + except (UrllibTimeoutError, MaxRetryError): + # If status check times out, wait and retry + logger.warning( + "code_review_local.poll.timeout", + extra={"seer_run_id": run_id, "attempt": attempt}, + ) + time.sleep(poll_interval_seconds) + continue + + status = response.get("status") + + if status == "completed": + return response + elif status == "errored": + error_message = response.get("error_message", "Unknown error from Seer") + raise ValueError(error_message) + elif status in ("pending", "in_progress"): + time.sleep(poll_interval_seconds) + continue + else: + raise ValueError(f"Unknown status from Seer: {status}") + + def _map_seer_error_to_response(self, seer_error_message: str) -> tuple[int, str, str]: + """ + Map Seer error messages to HTTP status codes and friendly messages. + + Args: + seer_error_message: Error message from Seer + + Returns: + Tuple of (status_code, error_code, user_message) + """ + error_lower = seer_error_message.lower() + + if "base commit not found" in error_lower or "commit not found" in error_lower: + return ( + 400, + "base_commit_not_found", + "Base commit must be pushed to the remote repository before running CLI review", + ) + + if "exceeds 500kb" in error_lower or "diff too large" in error_lower: + return ( + 400, + "diff_too_large", + "Diff exceeds the 500KB size limit. Please reduce the number of changes.", + ) + + if "exceeds 50 files" in error_lower or "too many files" in error_lower: + return ( + 400, + "too_many_files", + "Diff contains more than 50 files. Please reduce the number of files changed.", + ) + + if "failed to clone" in error_lower or "repository not accessible" in error_lower: + return ( + 502, + "repository_clone_failed", + "Unable to access the repository. 
Please check repository permissions.", + ) + + # Default to bad gateway for unknown Seer errors + return (502, "bad_gateway", "Code review service encountered an error") diff --git a/src/sentry/seer/code_review/endpoints/serializers/code_review_local.py b/src/sentry/seer/code_review/endpoints/serializers/code_review_local.py new file mode 100644 index 00000000000000..b1084f16b7158b --- /dev/null +++ b/src/sentry/seer/code_review/endpoints/serializers/code_review_local.py @@ -0,0 +1,40 @@ +from rest_framework import serializers + + +class RepositoryInfoSerializer(serializers.Serializer): + owner = serializers.CharField(required=True) + name = serializers.CharField(required=True) + provider = serializers.CharField(required=True) + base_commit_sha = serializers.CharField(required=True, min_length=40, max_length=40) + + def validate_base_commit_sha(self, value): + """Validate that base_commit_sha is a valid 40-character hex string""" + if not all(c in "0123456789abcdefABCDEF" for c in value): + raise serializers.ValidationError( + "base_commit_sha must be a valid 40-character hexadecimal string" + ) + return value + + +class CodeReviewLocalRequestSerializer(serializers.Serializer): + repository = RepositoryInfoSerializer(required=True) + diff = serializers.CharField(required=True, max_length=500_000) + current_branch = serializers.CharField(required=False, max_length=255) + commit_message = serializers.CharField(required=False, max_length=1000) + + def validate_diff(self, value): + """Validate diff constraints from Seer requirements""" + # Check size in bytes + size_bytes = len(value.encode("utf-8")) + if size_bytes > 500_000: + raise serializers.ValidationError("Diff exceeds 500KB limit") + + # Count files changed + file_count = value.count("diff --git") + if file_count > 50: + raise serializers.ValidationError("Diff contains too many files (max 50)") + + if file_count == 0: + raise serializers.ValidationError("Diff appears to be empty or invalid") + + return value diff --git a/src/sentry/seer/code_review_local.py b/src/sentry/seer/code_review_local.py new file mode 100644 index 00000000000000..a7890364ad26dd --- /dev/null +++ b/src/sentry/seer/code_review_local.py @@ -0,0 +1,218 @@ +import logging +from typing import Any + +from django.conf import settings +from urllib3.exceptions import MaxRetryError, TimeoutError + +from sentry.net.http import connection_from_url +from sentry.seer.signed_seer_api import make_signed_seer_api_request +from sentry.utils import json +from sentry.utils.json import JSONDecodeError + +logger = logging.getLogger(__name__) + +# Connection pool for CLI bug prediction requests +seer_cli_bug_prediction_connection_pool = connection_from_url( + settings.SEER_DEFAULT_URL, + timeout=settings.CODE_REVIEW_LOCAL_TIMEOUT, +) + + +def trigger_code_review_local( + repo_provider: str, + repo_owner: str, + repo_name: str, + repo_external_id: str, + base_commit_sha: str, + diff: str, + organization_id: int, + organization_slug: str, + user_id: int, + user_name: str, + commit_message: str | None = None, +) -> dict[str, Any]: + """ + Trigger CLI bug prediction analysis in Seer. 
+ + Args: + repo_provider: Repository provider (e.g., "github", "gitlab") + repo_owner: Repository owner/organization + repo_name: Repository name + repo_external_id: External ID from integration + base_commit_sha: Base commit SHA (40 chars) + diff: Git diff content + organization_id: Sentry organization ID + organization_slug: Sentry organization slug + user_id: User ID making the request + user_name: Username making the request + commit_message: Optional commit message + + Returns: + dict with "run_id" and "status" keys + + Raises: + TimeoutError: If request times out + MaxRetryError: If max retries exceeded + ValueError: If response is invalid + """ + body_dict: dict[str, Any] = { + "repo": { + "provider": repo_provider, + "owner": repo_owner, + "name": repo_name, + "external_id": repo_external_id, + "base_commit_sha": base_commit_sha, + }, + "diff": diff, + "organization_id": organization_id, + "organization_slug": organization_slug, + "user_id": user_id, + "user_name": user_name, + } + + if commit_message: + body_dict["commit_message"] = commit_message + + logger.info( + "seer.cli_bug_prediction.trigger", + extra={ + "organization_id": organization_id, + "user_id": user_id, + "repo_provider": repo_provider, + "repo_external_id": repo_external_id, + "diff_size": len(diff), + }, + ) + + try: + response = make_signed_seer_api_request( + connection_pool=seer_cli_bug_prediction_connection_pool, + path="/v1/automation/codegen/cli-bug-prediction", + body=json.dumps(body_dict).encode("utf-8"), + timeout=10, # Initial trigger should be fast + ) + except (TimeoutError, MaxRetryError): + logger.exception( + "seer.cli_bug_prediction.trigger.timeout", + extra={ + "organization_id": organization_id, + "user_id": user_id, + }, + ) + raise + + if response.status >= 400: + # Try to extract error message from Seer's response + error_detail = "" + try: + error_data = json.loads(response.data) + error_detail = error_data.get("detail", error_data.get("message", str(error_data))) + except (JSONDecodeError, TypeError): + error_detail = response.data.decode("utf-8") if response.data else "Unknown error" + + logger.error( + "seer.cli_bug_prediction.trigger.error", + extra={ + "organization_id": organization_id, + "user_id": user_id, + "status_code": response.status, + "error_detail": error_detail, + }, + ) + raise ValueError(f"Seer error ({response.status}): {error_detail}") + + try: + response_data = json.loads(response.data) + except JSONDecodeError: + logger.exception( + "seer.cli_bug_prediction.trigger.invalid_response", + extra={ + "organization_id": organization_id, + "user_id": user_id, + }, + ) + raise ValueError("Invalid JSON response from Seer") + + if "run_id" not in response_data: + logger.error( + "seer.cli_bug_prediction.trigger.missing_run_id", + extra={ + "organization_id": organization_id, + "user_id": user_id, + "response_data": response_data, + }, + ) + raise ValueError("Missing run_id in Seer response") + + logger.info( + "seer.cli_bug_prediction.trigger.success", + extra={ + "organization_id": organization_id, + "user_id": user_id, + "run_id": response_data["run_id"], + }, + ) + + return response_data + + +def get_code_review_local_status(run_id: int) -> dict[str, Any]: + """ + Get the status of a CLI bug prediction run. 
+ + Args: + run_id: The Seer run ID from trigger_code_review_local + + Returns: + dict with "status" key and optionally "predictions" and "diagnostics" + + Raises: + TimeoutError: If request times out + MaxRetryError: If max retries exceeded + ValueError: If response is invalid + """ + logger.debug("seer.cli_bug_prediction.status.check", extra={"run_id": run_id}) + + try: + # Seer status endpoint uses GET method + response = seer_cli_bug_prediction_connection_pool.urlopen( + "GET", + f"/v1/automation/codegen/cli-bug-prediction/{run_id}", + headers={"content-type": "application/json;charset=utf-8"}, + timeout=5, + ) + except (TimeoutError, MaxRetryError): + logger.exception( + "seer.cli_bug_prediction.status.timeout", + extra={"run_id": run_id}, + ) + raise + + if response.status >= 400: + logger.error( + "seer.cli_bug_prediction.status.error", + extra={ + "run_id": run_id, + "status_code": response.status, + "response_data": response.data, + }, + ) + raise ValueError(f"Seer returned error status: {response.status}") + + try: + response_data = json.loads(response.data) + except JSONDecodeError: + logger.exception( + "seer.cli_bug_prediction.status.invalid_response", + extra={"run_id": run_id}, + ) + raise ValueError("Invalid JSON response from Seer") + + if "status" not in response_data: + logger.error( + "seer.cli_bug_prediction.status.missing_status", + extra={"run_id": run_id, "response_data": response_data}, + ) + raise ValueError("Missing status in Seer response") + + return response_data diff --git a/static/app/utils/api/knownSentryApiUrls.generated.ts b/static/app/utils/api/knownSentryApiUrls.generated.ts index d46b667c1497b7..f235bec26d043b 100644 --- a/static/app/utils/api/knownSentryApiUrls.generated.ts +++ b/static/app/utils/api/knownSentryApiUrls.generated.ts @@ -112,6 +112,7 @@ export type KnownSentryApiUrls = | '/internal/mail/' | '/internal/notifications/registered-templates/' | '/internal/options/' + | '/internal/org-cell-mappings/' | '/internal/packages/' | '/internal/preprod-artifact/$headArtifactId/info/' | '/internal/preprod-artifact/batch-delete/' @@ -212,6 +213,7 @@ export type KnownSentryApiUrls = | '/organizations/$organizationIdOrSlug/code-mappings/' | '/organizations/$organizationIdOrSlug/code-mappings/$configId/' | '/organizations/$organizationIdOrSlug/code-mappings/$configId/codeowners/' + | '/organizations/$organizationIdOrSlug/code-review/local-review/' | '/organizations/$organizationIdOrSlug/codeowners-associations/' | '/organizations/$organizationIdOrSlug/combined-rules/' | '/organizations/$organizationIdOrSlug/conduit-demo/' @@ -231,8 +233,6 @@ export type KnownSentryApiUrls = | '/organizations/$organizationIdOrSlug/data-secrecy/' | '/organizations/$organizationIdOrSlug/derive-code-mappings/' | '/organizations/$organizationIdOrSlug/detector-types/' - | '/organizations/$organizationIdOrSlug/detector-workflow/' - | '/organizations/$organizationIdOrSlug/detector-workflow/$detectorWorkflowId/' | '/organizations/$organizationIdOrSlug/detectors/' | '/organizations/$organizationIdOrSlug/detectors/$detectorId/' | '/organizations/$organizationIdOrSlug/detectors/$detectorId/anomaly-data/' @@ -541,6 +541,8 @@ export type KnownSentryApiUrls = | '/organizations/$organizationIdOrSlug/sdk-deprecations/' | '/organizations/$organizationIdOrSlug/sdk-updates/' | '/organizations/$organizationIdOrSlug/sdks/' + | '/organizations/$organizationIdOrSlug/search-agent/start/' + | '/organizations/$organizationIdOrSlug/search-agent/state/$runId/' | 
'/organizations/$organizationIdOrSlug/search-agent/translate/' | '/organizations/$organizationIdOrSlug/searches/' | '/organizations/$organizationIdOrSlug/searches/$searchId/' diff --git a/tests/sentry/seer/code_review/endpoints/test_code_review_local.py b/tests/sentry/seer/code_review/endpoints/test_code_review_local.py new file mode 100644 index 00000000000000..8d3258bce584e6 --- /dev/null +++ b/tests/sentry/seer/code_review/endpoints/test_code_review_local.py @@ -0,0 +1,390 @@ +from unittest.mock import patch + +import pytest +from django.test import override_settings + +from sentry.testutils.cases import APITestCase +from sentry.testutils.helpers.features import with_feature + + +@pytest.mark.django_db +class OrganizationCodeReviewLocalTest(APITestCase): + endpoint = "sentry-api-0-organization-code-review-local" + method = "post" + + def setUp(self): + super().setUp() + self.organization = self.create_organization(owner=self.user) + self.project = self.create_project(organization=self.organization) + self.repository = self.create_repo( + project=self.project, + name="getsentry/test-repo", + provider="github", + external_id="12345", + ) + self.valid_payload = { + "repository": { + "owner": "getsentry", + "name": "test-repo", + "provider": "github", + "base_commit_sha": "a" * 40, + }, + "diff": "diff --git a/file.py b/file.py\n+print('hello')\n", + "current_branch": "feature/test", + "commit_message": "Add feature", + } + self.login_as(user=self.user) + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + def test_happy_path(self, mock_status, mock_trigger): + """Test successful prediction request""" + mock_trigger.return_value = {"run_id": 123, "status": "pending"} + mock_status.return_value = { + "status": "completed", + "run_id": 123, + "predictions": [ + { + "location": "file.py#L10", + "short_description": "Potential bug", + "explanation": "Detailed explanation", + "severity": "high", + "source": "code", + } + ], + "diagnostics": {"files_analyzed": 1, "execution_time_seconds": 30.0}, + } + + response = self.get_success_response( + self.organization.slug, + **self.valid_payload, + status_code=200, + ) + + assert response.data["status"] == "completed" + assert len(response.data["predictions"]) == 1 + assert response.data["predictions"][0]["location"] == "file.py#L10" + assert response.data["seer_run_id"] == 123 + assert response.data["diagnostics"]["files_analyzed"] == 1 + + def test_feature_flag_disabled(self): + """Test that request fails when feature flag is disabled""" + response = self.get_error_response( + self.organization.slug, + **self.valid_payload, + status_code=403, + ) + + assert "not enabled" in response.data["detail"] + + @with_feature("organizations:code-review-local") + @override_settings(CODE_REVIEW_LOCAL_ENABLED=False) + def test_killswitch_disabled(self): + """Test that request fails when killswitch is disabled""" + response = self.get_error_response( + self.organization.slug, + **self.valid_payload, + status_code=503, + ) + + assert "not enabled" in response.data["detail"] + + @with_feature("organizations:code-review-local") + def test_invalid_diff_too_large(self): + """Test validation fails for diff exceeding 500KB""" + payload = self.valid_payload.copy() + payload["diff"] = "x" * 600_000 # 600KB + + response = self.get_error_response( + self.organization.slug, + **payload, + 
status_code=400, + ) + + assert "detail" in response.data + + @with_feature("organizations:code-review-local") + def test_invalid_diff_too_many_files(self): + """Test validation fails for diff with too many files""" + payload = self.valid_payload.copy() + # Create diff with 51 files + payload["diff"] = "\n".join([f"diff --git a/file{i}.py b/file{i}.py" for i in range(51)]) + + response = self.get_error_response( + self.organization.slug, + **payload, + status_code=400, + ) + + assert "detail" in response.data + + @with_feature("organizations:code-review-local") + def test_invalid_diff_empty(self): + """Test validation fails for empty diff""" + payload = self.valid_payload.copy() + payload["diff"] = "no diff markers here" + + response = self.get_error_response( + self.organization.slug, + **payload, + status_code=400, + ) + + assert "detail" in response.data + + @with_feature("organizations:code-review-local") + def test_invalid_commit_sha_format(self): + """Test validation fails for invalid commit SHA format""" + payload = self.valid_payload.copy() + payload["repository"]["base_commit_sha"] = "invalid_sha" + + response = self.get_error_response( + self.organization.slug, + **payload, + status_code=400, + ) + + assert "detail" in response.data + + @with_feature("organizations:code-review-local") + def test_invalid_commit_sha_length(self): + """Test validation fails for wrong length commit SHA""" + payload = self.valid_payload.copy() + payload["repository"]["base_commit_sha"] = "a" * 20 # Too short + + response = self.get_error_response( + self.organization.slug, + **payload, + status_code=400, + ) + + assert "detail" in response.data + + @with_feature("organizations:code-review-local") + def test_repository_not_found(self): + """Test error when repository not found""" + payload = self.valid_payload.copy() + payload["repository"]["name"] = "nonexistent-repo" + + response = self.get_error_response( + self.organization.slug, + **payload, + status_code=404, + ) + + assert "not found" in response.data["detail"] + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + def test_seer_trigger_timeout(self, mock_trigger): + """Test handling of Seer trigger timeout""" + from urllib3.exceptions import TimeoutError + + mock_trigger.side_effect = TimeoutError("Request timed out") + + response = self.get_error_response( + self.organization.slug, + **self.valid_payload, + status_code=503, + ) + + assert "unavailable" in response.data["detail"] + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + def test_seer_trigger_error(self, mock_trigger): + """Test handling of Seer trigger error""" + mock_trigger.side_effect = ValueError("Seer error") + + response = self.get_error_response( + self.organization.slug, + **self.valid_payload, + status_code=502, + ) + + assert "error" in response.data["detail"].lower() + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + @patch("sentry.seer.code_review.endpoints.code_review_local.time") + def test_seer_polling_timeout(self, mock_time_module, mock_status, mock_trigger): + """Test handling of polling timeout""" + mock_trigger.return_value = {"run_id": 123, "status": "pending"} + # Simulate timeout: first call 
returns 0 (start_time), second returns 700 (elapsed > 600) + call_count = [0] + + def fake_time(): + call_count[0] += 1 + if call_count[0] == 1: + return 0 # start_time + return 700 # elapsed check - past timeout + + mock_time_module.time.side_effect = fake_time + mock_time_module.sleep = lambda x: None # Don't actually sleep + mock_status.return_value = {"status": "in_progress"} + + response = self.get_error_response( + self.organization.slug, + **self.valid_payload, + status_code=504, + ) + + assert "exceeded maximum processing time" in response.data["detail"] + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + def test_seer_error_base_commit_not_found(self, mock_status, mock_trigger): + """Test mapping of base commit not found error""" + mock_trigger.return_value = {"run_id": 123, "status": "pending"} + mock_status.return_value = { + "status": "errored", + "error_message": "Base commit not found in repository", + } + + response = self.get_error_response( + self.organization.slug, + **self.valid_payload, + status_code=400, + ) + + assert "pushed to the remote" in response.data["detail"] + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + def test_seer_error_diff_too_large(self, mock_status, mock_trigger): + """Test mapping of diff too large error""" + mock_trigger.return_value = {"run_id": 123, "status": "pending"} + mock_status.return_value = { + "status": "errored", + "error_message": "Diff exceeds 500kb limit", + } + + response = self.get_error_response( + self.organization.slug, + **self.valid_payload, + status_code=400, + ) + + assert "500KB" in response.data["detail"] + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + def test_seer_error_too_many_files(self, mock_status, mock_trigger): + """Test mapping of too many files error""" + mock_trigger.return_value = {"run_id": 123, "status": "pending"} + mock_status.return_value = { + "status": "errored", + "error_message": "Diff exceeds 50 files limit", + } + + response = self.get_error_response( + self.organization.slug, + **self.valid_payload, + status_code=400, + ) + + assert "50 files" in response.data["detail"] + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + def test_seer_error_clone_failed(self, mock_status, mock_trigger): + """Test mapping of repository clone failed error""" + mock_trigger.return_value = {"run_id": 123, "status": "pending"} + mock_status.return_value = { + "status": "errored", + "error_message": "Failed to clone repository", + } + + response = self.get_error_response( + self.organization.slug, + **self.valid_payload, + status_code=502, + ) + + assert "permissions" in response.data["detail"] + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + 
@patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + def test_seer_error_unknown(self, mock_status, mock_trigger): + """Test mapping of unknown Seer error""" + mock_trigger.return_value = {"run_id": 123, "status": "pending"} + mock_status.return_value = { + "status": "errored", + "error_message": "Some unknown error", + } + + response = self.get_error_response( + self.organization.slug, + **self.valid_payload, + status_code=502, + ) + + assert "error" in response.data["detail"] + + @with_feature("organizations:code-review-local") + @patch("sentry.ratelimits.backend.is_limited") + def test_rate_limit_user(self, mock_is_limited): + """Test user rate limiting""" + mock_is_limited.side_effect = [True, False] # User limited, org not + + response = self.get_error_response( + self.organization.slug, + **self.valid_payload, + status_code=429, + ) + + assert "Rate limit exceeded" in response.data["detail"] + assert "per user" in response.data["detail"] + + @with_feature("organizations:code-review-local") + @patch("sentry.ratelimits.backend.is_limited") + def test_rate_limit_org(self, mock_is_limited): + """Test organization rate limiting""" + mock_is_limited.side_effect = [False, True] # User not limited, org limited + + response = self.get_error_response( + self.organization.slug, + **self.valid_payload, + status_code=429, + ) + + assert "Organization rate limit exceeded" in response.data["detail"] + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + def test_optional_fields(self, mock_status, mock_trigger): + """Test that optional fields are not required""" + mock_trigger.return_value = {"run_id": 123, "status": "pending"} + mock_status.return_value = { + "status": "completed", + "predictions": [], + "diagnostics": {}, + } + + # Remove optional fields + payload = { + "repository": { + "owner": "getsentry", + "name": "test-repo", + "provider": "github", + "base_commit_sha": "a" * 40, + }, + "diff": "diff --git a/file.py b/file.py\n+print('hello')\n", + # No current_branch or commit_message + } + + response = self.get_success_response( + self.organization.slug, + **payload, + status_code=200, + ) + + assert response.data["status"] == "completed" diff --git a/tests/sentry/seer/code_review/test_code_review_local_integration.py b/tests/sentry/seer/code_review/test_code_review_local_integration.py new file mode 100644 index 00000000000000..c53723289cca24 --- /dev/null +++ b/tests/sentry/seer/code_review/test_code_review_local_integration.py @@ -0,0 +1,307 @@ +from unittest.mock import patch + +import pytest + +from sentry.testutils.cases import APITestCase +from sentry.testutils.helpers.features import with_feature + + +@pytest.mark.django_db +class CodeReviewLocalIntegrationTest(APITestCase): + """ + Integration tests for CLI bug prediction end-to-end flow. + + These tests verify the full request-response cycle including polling logic. 
+ """ + + endpoint = "sentry-api-0-organization-code-review-local" + method = "post" + + def setUp(self): + super().setUp() + self.organization = self.create_organization(owner=self.user) + self.project = self.create_project(organization=self.organization) + self.repository = self.create_repo( + project=self.project, + name="getsentry/test-repo", + provider="github", + external_id="12345", + ) + self.valid_payload = { + "repository": { + "owner": "getsentry", + "name": "test-repo", + "provider": "github", + "base_commit_sha": "a" * 40, + }, + "diff": "diff --git a/file.py b/file.py\n+print('hello')\n", + } + self.login_as(user=self.user) + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + def test_end_to_end_single_poll(self, mock_status, mock_trigger): + """Test end-to-end flow with immediate completion""" + mock_trigger.return_value = {"run_id": 123, "status": "pending"} + # First poll returns completed + mock_status.return_value = { + "status": "completed", + "run_id": 123, + "predictions": [ + { + "location": "file.py#L10", + "short_description": "Bug found", + "explanation": "Details", + "severity": "medium", + "source": "code", + } + ], + "diagnostics": {"files_analyzed": 1, "execution_time_seconds": 15.0}, + } + + response = self.get_success_response( + self.organization.slug, + **self.valid_payload, + status_code=200, + ) + + # Verify trigger was called + assert mock_trigger.call_count == 1 + # Verify status was checked once + assert mock_status.call_count == 1 + # Verify response contains predictions + assert response.data["status"] == "completed" + assert len(response.data["predictions"]) == 1 + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + @patch("time.sleep") + def test_end_to_end_multiple_polls(self, mock_sleep, mock_status, mock_trigger): + """Test end-to-end flow with multiple polling cycles""" + mock_trigger.return_value = {"run_id": 456, "status": "pending"} + + # Simulate state transitions: pending -> in_progress -> completed + mock_status.side_effect = [ + {"status": "pending", "run_id": 456}, + {"status": "in_progress", "run_id": 456}, + {"status": "in_progress", "run_id": 456}, + { + "status": "completed", + "run_id": 456, + "predictions": [ + { + "location": "test.py#L5", + "short_description": "Issue detected", + "explanation": "Full explanation", + "severity": "high", + "source": "ml", + } + ], + "diagnostics": {"files_analyzed": 3, "execution_time_seconds": 120.0}, + }, + ] + + response = self.get_success_response( + self.organization.slug, + **self.valid_payload, + status_code=200, + ) + + # Verify trigger was called once + assert mock_trigger.call_count == 1 + # Verify status was checked 4 times (3 pending/in_progress, 1 completed) + assert mock_status.call_count == 4 + # Verify sleep was called between polls (3 times for 4 polls) + assert mock_sleep.call_count == 3 + # Verify final response + assert response.data["status"] == "completed" + assert response.data["predictions"][0]["severity"] == "high" + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + 
@patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + @patch("time.sleep") + def test_status_check_network_error_recovery(self, mock_sleep, mock_status, mock_trigger): + """Test that network errors during status check are retried""" + from urllib3.exceptions import TimeoutError + + mock_trigger.return_value = {"run_id": 789, "status": "pending"} + + # First status check times out, second succeeds + mock_status.side_effect = [ + TimeoutError("Network timeout"), + { + "status": "completed", + "run_id": 789, + "predictions": [], + "diagnostics": {}, + }, + ] + + response = self.get_success_response( + self.organization.slug, + **self.valid_payload, + status_code=200, + ) + + # Verify status was called twice (once failed, once succeeded) + assert mock_status.call_count == 2 + # Verify we still got a successful response + assert response.data["status"] == "completed" + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + def test_empty_predictions_response(self, mock_status, mock_trigger): + """Test handling of completed status with no predictions""" + mock_trigger.return_value = {"run_id": 999, "status": "pending"} + mock_status.return_value = { + "status": "completed", + "run_id": 999, + "predictions": [], # No bugs found + "diagnostics": {"files_analyzed": 5, "execution_time_seconds": 30.0}, + } + + response = self.get_success_response( + self.organization.slug, + **self.valid_payload, + status_code=200, + ) + + assert response.data["status"] == "completed" + assert response.data["predictions"] == [] + assert response.data["diagnostics"]["files_analyzed"] == 5 + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + @patch("time.sleep") + def test_multiple_predictions(self, mock_sleep, mock_status, mock_trigger): + """Test handling of multiple predictions in response""" + mock_trigger.return_value = {"run_id": 111, "status": "pending"} + mock_status.return_value = { + "status": "completed", + "run_id": 111, + "predictions": [ + { + "location": "file1.py#L10", + "short_description": "Bug 1", + "explanation": "First bug", + "severity": "high", + "source": "code", + }, + { + "location": "file2.py#L20", + "short_description": "Bug 2", + "explanation": "Second bug", + "severity": "medium", + "source": "ml", + }, + { + "location": "file3.py#L30", + "short_description": "Bug 3", + "explanation": "Third bug", + "severity": "low", + "source": "code", + }, + ], + "diagnostics": {"files_analyzed": 3, "execution_time_seconds": 90.0}, + } + + response = self.get_success_response( + self.organization.slug, + **self.valid_payload, + status_code=200, + ) + + assert len(response.data["predictions"]) == 3 + assert response.data["predictions"][0]["severity"] == "high" + assert response.data["predictions"][1]["severity"] == "medium" + assert response.data["predictions"][2]["severity"] == "low" + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + @patch("time.sleep") + def test_seer_state_transition_pending_to_completed( + self, 
mock_sleep, mock_status, mock_trigger + ): + """Test state transition from pending directly to completed""" + mock_trigger.return_value = {"run_id": 222, "status": "pending"} + mock_status.side_effect = [ + {"status": "pending", "run_id": 222}, + { + "status": "completed", + "run_id": 222, + "predictions": [], + "diagnostics": {}, + }, + ] + + response = self.get_success_response( + self.organization.slug, + **self.valid_payload, + status_code=200, + ) + + assert mock_status.call_count == 2 + assert response.data["status"] == "completed" + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + @patch("time.sleep") + def test_seer_state_transition_with_in_progress(self, mock_sleep, mock_status, mock_trigger): + """Test state transition: pending -> in_progress -> completed""" + mock_trigger.return_value = {"run_id": 333, "status": "pending"} + mock_status.side_effect = [ + {"status": "pending", "run_id": 333}, + {"status": "in_progress", "run_id": 333}, + { + "status": "completed", + "run_id": 333, + "predictions": [], + "diagnostics": {}, + }, + ] + + response = self.get_success_response( + self.organization.slug, + **self.valid_payload, + status_code=200, + ) + + assert mock_status.call_count == 3 + assert response.data["status"] == "completed" + + @with_feature("organizations:code-review-local") + @patch("sentry.seer.code_review.endpoints.code_review_local.trigger_code_review_local") + @patch("sentry.seer.code_review.endpoints.code_review_local.get_code_review_local_status") + def test_diagnostics_included_in_response(self, mock_status, mock_trigger): + """Test that diagnostics are properly included in response""" + mock_trigger.return_value = {"run_id": 444, "status": "pending"} + mock_status.return_value = { + "status": "completed", + "run_id": 444, + "predictions": [], + "diagnostics": { + "files_analyzed": 10, + "execution_time_seconds": 145.5, + "total_lines_analyzed": 5000, + "model_version": "v2.0", + }, + } + + response = self.get_success_response( + self.organization.slug, + **self.valid_payload, + status_code=200, + ) + + diagnostics = response.data["diagnostics"] + assert diagnostics["files_analyzed"] == 10 + assert diagnostics["execution_time_seconds"] == 145.5 + assert diagnostics["total_lines_analyzed"] == 5000 + assert diagnostics["model_version"] == "v2.0" diff --git a/tests/sentry/seer/test_code_review_local.py b/tests/sentry/seer/test_code_review_local.py new file mode 100644 index 00000000000000..9277f0c7e89b09 --- /dev/null +++ b/tests/sentry/seer/test_code_review_local.py @@ -0,0 +1,334 @@ +from unittest.mock import Mock, patch + +import pytest +from urllib3.exceptions import MaxRetryError, TimeoutError + +from sentry.seer.code_review_local import get_code_review_local_status, trigger_code_review_local +from sentry.utils import json + + +@pytest.fixture +def mock_connection_pool(): + """Create a mock connection pool for testing""" + mock = Mock() + mock.host = "localhost" + mock.port = 9091 + mock.scheme = "http" + return mock + + +@pytest.mark.django_db +class TestTriggerCodeReviewLocal: + def test_trigger_success(self, mock_connection_pool): + """Test successful trigger of CLI bug prediction""" + # Mock successful response + mock_response = Mock() + mock_response.status = 200 + mock_response.data = json.dumps({"run_id": 123, "status": "pending"}).encode("utf-8") + 
mock_connection_pool.urlopen.return_value = mock_response + + with patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ): + result = trigger_code_review_local( + repo_provider="github", + repo_owner="getsentry", + repo_name="sentry", + repo_external_id="123456", + base_commit_sha="a" * 40, + diff="diff --git a/file.py b/file.py\n...", + organization_id=1, + organization_slug="test-org", + user_id=1, + user_name="test-user", + ) + + assert result["run_id"] == 123 + assert result["status"] == "pending" + + def test_trigger_with_commit_message(self, mock_connection_pool): + """Test trigger with optional commit message""" + mock_response = Mock() + mock_response.status = 200 + mock_response.data = json.dumps({"run_id": 456, "status": "pending"}).encode("utf-8") + mock_connection_pool.urlopen.return_value = mock_response + + with patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ): + result = trigger_code_review_local( + repo_provider="github", + repo_owner="getsentry", + repo_name="sentry", + repo_external_id="123456", + base_commit_sha="b" * 40, + diff="diff --git a/file.py b/file.py\n...", + organization_id=1, + organization_slug="test-org", + user_id=1, + user_name="test-user", + commit_message="Fix bug", + ) + + assert result["run_id"] == 456 + + def test_trigger_timeout(self, mock_connection_pool): + """Test timeout handling""" + mock_connection_pool.urlopen.side_effect = TimeoutError("Request timed out") + + with ( + patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ), + pytest.raises(TimeoutError), + ): + trigger_code_review_local( + repo_provider="github", + repo_owner="getsentry", + repo_name="sentry", + repo_external_id="123456", + base_commit_sha="c" * 40, + diff="diff --git a/file.py b/file.py\n...", + organization_id=1, + organization_slug="test-org", + user_id=1, + user_name="test-user", + ) + + def test_trigger_max_retry_error(self, mock_connection_pool): + """Test max retry error handling""" + mock_connection_pool.urlopen.side_effect = MaxRetryError( + pool=mock_connection_pool, url="/test" + ) + + with ( + patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ), + pytest.raises(MaxRetryError), + ): + trigger_code_review_local( + repo_provider="github", + repo_owner="getsentry", + repo_name="sentry", + repo_external_id="123456", + base_commit_sha="d" * 40, + diff="diff --git a/file.py b/file.py\n...", + organization_id=1, + organization_slug="test-org", + user_id=1, + user_name="test-user", + ) + + def test_trigger_error_response(self, mock_connection_pool): + """Test handling of error status codes""" + mock_response = Mock() + mock_response.status = 500 + mock_response.data = b"Internal server error" + mock_connection_pool.urlopen.return_value = mock_response + + with ( + patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ), + pytest.raises(ValueError, match="Seer error \\(500\\): Internal server error"), + ): + trigger_code_review_local( + repo_provider="github", + repo_owner="getsentry", + repo_name="sentry", + repo_external_id="123456", + base_commit_sha="e" * 40, + diff="diff --git a/file.py b/file.py\n...", + organization_id=1, + organization_slug="test-org", + user_id=1, + user_name="test-user", + ) + + def test_trigger_invalid_json_response(self, mock_connection_pool): + """Test handling of 
invalid JSON in response""" + mock_response = Mock() + mock_response.status = 200 + mock_response.data = b"not valid json" + mock_connection_pool.urlopen.return_value = mock_response + + with ( + patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ), + pytest.raises(ValueError, match="Invalid JSON response from Seer"), + ): + trigger_code_review_local( + repo_provider="github", + repo_owner="getsentry", + repo_name="sentry", + repo_external_id="123456", + base_commit_sha="f" * 40, + diff="diff --git a/file.py b/file.py\n...", + organization_id=1, + organization_slug="test-org", + user_id=1, + user_name="test-user", + ) + + def test_trigger_missing_run_id(self, mock_connection_pool): + """Test handling of response missing run_id""" + mock_response = Mock() + mock_response.status = 200 + mock_response.data = json.dumps({"status": "pending"}).encode("utf-8") + mock_connection_pool.urlopen.return_value = mock_response + + with ( + patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ), + pytest.raises(ValueError, match="Missing run_id in Seer response"), + ): + trigger_code_review_local( + repo_provider="github", + repo_owner="getsentry", + repo_name="sentry", + repo_external_id="123456", + base_commit_sha="0" * 40, + diff="diff --git a/file.py b/file.py\n...", + organization_id=1, + organization_slug="test-org", + user_id=1, + user_name="test-user", + ) + + +@pytest.mark.django_db +class TestGetCodeReviewLocalStatus: + def test_status_pending(self, mock_connection_pool): + """Test getting pending status""" + mock_response = Mock() + mock_response.status = 200 + mock_response.data = json.dumps({"status": "pending", "run_id": 123}).encode("utf-8") + mock_connection_pool.urlopen.return_value = mock_response + + with patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ): + result = get_code_review_local_status(run_id=123) + + assert result["status"] == "pending" + assert result["run_id"] == 123 + + def test_status_completed_with_predictions(self, mock_connection_pool): + """Test getting completed status with predictions""" + mock_response = Mock() + mock_response.status = 200 + mock_response.data = json.dumps( + { + "status": "completed", + "run_id": 123, + "predictions": [ + { + "location": "file.py#L10", + "short_description": "Potential bug", + "explanation": "...", + "severity": "high", + "source": "code", + } + ], + "diagnostics": {"files_analyzed": 3, "execution_time_seconds": 45.2}, + } + ).encode("utf-8") + mock_connection_pool.urlopen.return_value = mock_response + + with patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ): + result = get_code_review_local_status(run_id=123) + + assert result["status"] == "completed" + assert len(result["predictions"]) == 1 + assert result["predictions"][0]["location"] == "file.py#L10" + assert result["diagnostics"]["files_analyzed"] == 3 + + def test_status_timeout(self, mock_connection_pool): + """Test timeout handling for status check""" + mock_connection_pool.urlopen.side_effect = TimeoutError("Request timed out") + + with ( + patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ), + pytest.raises(TimeoutError), + ): + get_code_review_local_status(run_id=123) + + def test_status_max_retry_error(self, mock_connection_pool): + """Test max retry error handling for status 
check""" + mock_connection_pool.urlopen.side_effect = MaxRetryError( + pool=mock_connection_pool, url="/test" + ) + + with ( + patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ), + pytest.raises(MaxRetryError), + ): + get_code_review_local_status(run_id=123) + + def test_status_error_response(self, mock_connection_pool): + """Test handling of error status codes for status check""" + mock_response = Mock() + mock_response.status = 404 + mock_response.data = b"Not found" + mock_connection_pool.urlopen.return_value = mock_response + + with ( + patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ), + pytest.raises(ValueError, match="Seer returned error status: 404"), + ): + get_code_review_local_status(run_id=123) + + def test_status_invalid_json_response(self, mock_connection_pool): + """Test handling of invalid JSON in status response""" + mock_response = Mock() + mock_response.status = 200 + mock_response.data = b"not valid json" + mock_connection_pool.urlopen.return_value = mock_response + + with ( + patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ), + pytest.raises(ValueError, match="Invalid JSON response from Seer"), + ): + get_code_review_local_status(run_id=123) + + def test_status_missing_status_field(self, mock_connection_pool): + """Test handling of response missing status field""" + mock_response = Mock() + mock_response.status = 200 + mock_response.data = json.dumps({"run_id": 123}).encode("utf-8") + mock_connection_pool.urlopen.return_value = mock_response + + with ( + patch( + "sentry.seer.code_review_local.seer_cli_bug_prediction_connection_pool", + mock_connection_pool, + ), + pytest.raises(ValueError, match="Missing status in Seer response"), + ): + get_code_review_local_status(run_id=123)