feat: new ta tasks #976

Merged (3 commits) on Jan 15, 2025
3 changes: 2 additions & 1 deletion requirements.in
@@ -1,4 +1,4 @@
-https://github.com/codecov/test-results-parser/archive/996ecb2aaf7767bf4c2944c75835c1ee1eb2b566.tar.gz#egg=test-results-parser
+https://github.com/codecov/test-results-parser/archive/190bbc8a911099749928e13d5fe57f6027ca1e74.tar.gz#egg=test-results-parser
https://github.com/codecov/shared/archive/de4b37bc5a736317c6e7c93f9c58e9ae07f8c96b.tar.gz#egg=shared
https://github.com/codecov/timestring/archive/d37ceacc5954dff3b5bd2f887936a98a668dda42.tar.gz#egg=timestring
asgiref>=3.7.2
@@ -37,6 +37,7 @@ pytest-celery
pytest-cov
pytest-django
pytest-freezegun
+pytest-insta
pytest-mock
pytest-sqlalchemy
python-dateutil
6 changes: 5 additions & 1 deletion requirements.txt
@@ -301,6 +301,7 @@ pytest==8.1.1
# pytest-cov
# pytest-django
# pytest-freezegun
+# pytest-insta
# pytest-mock
# pytest-sqlalchemy
pytest-asyncio==0.14.0
@@ -313,6 +314,8 @@ pytest-django==4.7.0
# via -r requirements.in
pytest-freezegun==0.4.2
# via -r requirements.in
+pytest-insta==0.3.0
+# via -r requirements.in
pytest-mock==1.13.0
# via -r requirements.in
pytest-sqlalchemy==0.2.1
@@ -402,7 +405,7 @@ statsd==3.3.0
# via -r requirements.in
stripe==11.4.1
# via -r requirements.in
-test-results-parser @ https://github.com/codecov/test-results-parser/archive/996ecb2aaf7767bf4c2944c75835c1ee1eb2b566.tar.gz#egg=test-results-parser
+test-results-parser @ https://github.com/codecov/test-results-parser/archive/190bbc8a911099749928e13d5fe57f6027ca1e74.tar.gz#egg=test-results-parser
# via -r requirements.in
text-unidecode==1.3
# via faker
@@ -442,6 +445,7 @@ wcwidth==0.2.5
wrapt==1.16.0
# via
# deprecated
+# pytest-insta
# vcrpy
yarl==1.9.4
# via vcrpy
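pytest-insta (a snapshot-testing plugin) is the new test dependency here. A minimal, hypothetical example of the style of test it enables, not taken from this PR:

    # Hypothetical snapshot test; pytest-insta provides the `snapshot` fixture.
    def test_outcome_summary(snapshot):
        summary = "testrun: test_name -> pass"
        # The first run records the value; later runs fail if it changes.
        assert snapshot() == summary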
2 changes: 2 additions & 0 deletions rollouts/__init__.py
@@ -12,3 +12,5 @@
SHOW_IMPACT_ANALYSIS_DEPRECATION_MSG = Feature(
    "show_impact_analysis_deprecation_message"
)
+
+NEW_TA_TASKS = Feature("new_ta_tasks")
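The new flag follows the existing rollout pattern. A rough sketch of how it would be consulted before routing work to the new tasks; the check_value signature is assumed from the shared rollouts helper and should be verified there:

    from rollouts import NEW_TA_TASKS

    def use_new_ta_tasks(repoid: int) -> bool:
        # Assumes the shared Feature helper exposes check_value(identifier, default);
        # verify the exact signature in the shared rollouts module.
        return NEW_TA_TASKS.check_value(identifier=repoid, default=False)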
3 changes: 2 additions & 1 deletion services/processing/flake_processing.py
@@ -22,8 +22,9 @@ def process_flake_for_repo_commit(
):
    uploads = ReportSession.objects.filter(
        report__report_type=CommitReport.ReportType.TEST_RESULTS.value,
+        report__commit__repository__repoid=repo_id,
        report__commit__commitid=commit_id,
Review comment from a Contributor on lines +25 to 26:

I believe commit_id already uniquely identifies the report, so no need for an additional repository join.

-        state="processed",
+        state__in=["processed", "v2_finished"],
    ).all()

    curr_flakes = fetch_curr_flakes(repo_id)
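For illustration, the simplification the reviewer suggests would drop the repository join and filter on the commit alone; a sketch, assuming commit_id is a sufficient filter in this context:

    # Sketch of the reviewer's suggestion: same query as above, minus the
    # repository join. Assumes commit_id alone identifies the report here.
    uploads = ReportSession.objects.filter(
        report__report_type=CommitReport.ReportType.TEST_RESULTS.value,
        report__commit__commitid=commit_id,
        state__in=["processed", "v2_finished"],
    ).all()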
10 changes: 10 additions & 0 deletions services/test_results.py
@@ -13,6 +13,7 @@
from database.models import (
    Commit,
    CommitReport,
+    Flake,
    Repository,
    RepositoryFlag,
    TestInstance,
@@ -410,3 +411,12 @@ def should_do_flaky_detection(repo: Repository, commit_yaml: UserYaml) -> bool:
    )
    has_valid_plan_repo_or_owner = not_private_and_free_or_team(repo)
    return has_flaky_configured and (feature_enabled or has_valid_plan_repo_or_owner)


+def get_flake_set(db_session: Session, repoid: int) -> set[str]:
+    repo_flakes: list[Flake] = (
+        db_session.query(Flake.testid)
+        .filter(Flake.repoid == repoid, Flake.end_date.is_(None))
+        .all()
+    )
+    return {flake.testid for flake in repo_flakes}
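A small usage sketch of how the returned set of active-flake test IDs might be consumed; the call site and attribute handling are hypothetical, not part of this PR:

    def flag_flaky_failures(db_session, repoid: int, test_instances) -> None:
        # Hypothetical call site: use the active-flake set to decide whether a
        # failing test instance should be treated as a known flake.
        flaky_test_ids = get_flake_set(db_session, repoid)
        for instance in test_instances:
            if instance.outcome == "failure" and instance.test_id in flaky_test_ids:
                # How a caller reacts to a known flake is pipeline-specific;
                # this print is purely illustrative.
                print(f"test {instance.test_id} failed but is a known flake")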
6 changes: 4 additions & 2 deletions ta_storage/base.py
@@ -1,6 +1,8 @@
+from __future__ import annotations
+
from abc import ABC, abstractmethod

-from test_results_parser import Testrun
+import test_results_parser

from database.models.reports import Upload

@@ -15,6 +17,6 @@ def write_testruns(
        branch_name: str,
        upload: Upload,
        framework: str | None,
-        testruns: list[Testrun],
+        testruns: list[test_results_parser.Testrun],
    ):
        pass
6 changes: 4 additions & 2 deletions ta_storage/bq.py
@@ -1,8 +1,10 @@
+from __future__ import annotations
+
from datetime import datetime
from typing import Literal, TypedDict, cast

+import test_results_parser
from shared.config import get_config
-from test_results_parser import Testrun

import generated_proto.testrun.ta_testrun_pb2 as ta_testrun_pb2
from database.models.reports import Upload
@@ -52,7 +54,7 @@ def write_testruns(
        branch_name: str,
        upload: Upload,
        framework: str | None,
-        testruns: list[Testrun],
+        testruns: list[test_results_parser.Testrun],
    ):
        bq_service = get_bigquery_service()

23 changes: 10 additions & 13 deletions ta_storage/pg.py
@@ -1,9 +1,11 @@
+from __future__ import annotations
+
from datetime import date, datetime
from typing import Any, Literal, TypedDict

+import test_results_parser
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import Session
-from test_results_parser import Testrun

from database.models import (
    DailyTestRollup,
@@ -51,7 +53,7 @@ def modify_structures(
    test_instances_to_write: list[dict[str, Any]],
    test_flag_bridge_data: list[dict],
    daily_totals: dict[str, DailyTotals],
-    testrun: Testrun,
+    testrun: test_results_parser.Testrun,
    upload: Upload,
    repoid: int,
    branch: str | None,
@@ -104,7 +106,7 @@ def modify_structures(
def generate_test_dict(
    test_id: str,
    repoid: int,
-    testrun: Testrun,
+    testrun: test_results_parser.Testrun,
    flags_hash: str,
    framework: str | None,
) -> dict[str, Any]:
@@ -123,7 +125,7 @@ def generate_test_dict(
def generate_test_instance_dict(
    test_id: str,
    upload: Upload,
-    testrun: Testrun,
+    testrun: test_results_parser.Testrun,
    commit_sha: str,
    branch: str | None,
    repoid: int,
@@ -142,13 +144,11 @@ def generate_test_instance_dict(


def update_daily_totals(
-    daily_totals: dict,
+    daily_totals: dict[str, DailyTotals],
    test_id: str,
    duration_seconds: float | None,
    outcome: Literal["pass", "failure", "error", "skip"],
):
-    daily_totals[test_id]["last_duration_seconds"] = duration_seconds
-
    # logic below is a little complicated but we're basically doing:

    # (old_avg * num of values used to compute old avg) + new value
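For context, the collapsed comment describes the standard incremental-mean update. A standalone sketch of that arithmetic, assuming the denominator is the old count plus one:

    def incremental_average(old_avg: float, old_count: int, new_value: float) -> float:
        # (old_avg * number of values used to compute old_avg) + new_value,
        # divided by the new total number of values.
        return (old_avg * old_count + new_value) / (old_count + 1)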
@@ -192,8 +192,8 @@ def create_daily_totals(
    daily_totals[test_id] = {
        "test_id": test_id,
        "repoid": repoid,
-        "last_duration_seconds": duration_seconds,
-        "avg_duration_seconds": duration_seconds,
+        "last_duration_seconds": duration_seconds or 0.0,
+        "avg_duration_seconds": duration_seconds or 0.0,
        "pass_count": 1 if outcome == "pass" else 0,
        "fail_count": 1 if outcome == "failure" or outcome == "error" else 0,
        "skip_count": 1 if outcome == "skip" else 0,
@@ -290,7 +290,7 @@ def write_testruns(
        branch_name: str,
        upload: Upload,
        framework: str | None,
-        testruns: list[Testrun],
+        testruns: list[test_results_parser.Testrun],
    ):
        tests_to_write: dict[str, dict[str, Any]] = {}
        test_instances_to_write: list[dict[str, Any]] = []
@@ -326,6 +326,3 @@ def write_testruns(

        if len(test_instances_to_write) > 0:
            save_test_instances(self.db_session, test_instances_to_write)
-
-        upload.state = "v2_persisted"
-        self.db_session.commit()
6 changes: 4 additions & 2 deletions ta_storage/tests/test_bq.py
@@ -1,8 +1,10 @@
+from __future__ import annotations
+
from datetime import datetime
from unittest.mock import MagicMock, patch

import pytest
-from test_results_parser import Testrun
+import test_results_parser

import generated_proto.testrun.ta_testrun_pb2 as ta_testrun_pb2
from database.tests.factories import RepositoryFlagFactory, UploadFactory
@@ -38,7 +40,7 @@ def test_bigquery_driver(dbsession, mock_bigquery_service):
    upload.flags.append(repo_flag_2)
    dbsession.flush()

-    test_data: list[Testrun] = [
+    test_data: list[test_results_parser.Testrun] = [
        {
            "name": "test_name",
            "classname": "test_class",
2 changes: 2 additions & 0 deletions tasks/__init__.py
@@ -49,6 +49,8 @@
from tasks.sync_repo_languages_gql import sync_repo_languages_gql_task
from tasks.sync_repos import sync_repos_task
from tasks.sync_teams import sync_teams_task
+from tasks.ta_finisher import ta_finisher_task
+from tasks.ta_processor import ta_processor_task
from tasks.test_results_finisher import test_results_finisher_task
from tasks.test_results_processor import test_results_processor_task
from tasks.timeseries_backfill import (
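Importing the two new tasks here registers them with the Celery worker. A rough sketch of how a processor/finisher pair is typically dispatched with Celery; the helper and its keyword arguments are hypothetical, so check the task definitions for the real signatures:

    from celery import chord

    from tasks.ta_finisher import ta_finisher_task
    from tasks.ta_processor import ta_processor_task


    def dispatch_ta_pipeline(repoid: int, commitid: str, upload_arguments: list[dict]) -> None:
        # Hypothetical dispatch helper (not part of this PR): fan out one
        # ta_processor per upload, then run ta_finisher once all complete.
        chord(
            [
                ta_processor_task.s(repoid=repoid, commitid=commitid, argument=arg)
                for arg in upload_arguments
            ]
        )(ta_finisher_task.s(repoid=repoid, commitid=commitid))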