Add dry-run to labelanalysis
This change comes at the request of the Sentry infra team, to increase our chances of merging ATS into getsentry initially.
The `--dry-run` option simply stops label analysis once the labels have been collected and analyzed, printing the result
in the terminal instead of passing it to the runner. Yes, there are other ways to achieve the same result (e.g. a custom
runner), but this is more straightforward. It might be marginally useful when setting up label analysis or building
confidence in it for customers interested in the feature, so why not?

I decided to add a brief explanation of what each label group is, since we have that opportunity here.
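
For illustration, a dry run might be invoked roughly like this (the `codecovcli` entry point name and the token/sha values are placeholders; the subcommand and flag come from this change):

codecovcli label-analysis --token=<repo-token> --base-sha=<base-sha> --dry-run

Instead of executing tests, the command logs a one-line description of each of the four label groups and then prints the selected labels as a single JSON object.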
giovanni-guidini committed Jun 22, 2023
1 parent de3cb3c commit bad7bc9
Showing 2 changed files with 130 additions and 8 deletions.
49 changes: 41 additions & 8 deletions codecov_cli/commands/labelanalysis.py
@@ -1,3 +1,4 @@
import json
import logging
import time
from typing import List
@@ -47,6 +48,12 @@
default=None,
type=int,
)
@click.option(
"--dry-run",
"dry_run",
help="Userful during setup. This will run the label analysis, but will print the result to stdout and terminate instead of calling the runner.process_labelanalysis_result",
is_flag=True,
)
@click.pass_context
def label_analysis(
ctx: click.Context,
@@ -55,6 +62,7 @@ def label_analysis(
base_commit_sha: str,
runner_name: str,
max_wait_time: str,
dry_run: bool,
):
enterprise_url = ctx.obj.get("enterprise_url")
logger.debug(
@@ -115,7 +123,7 @@ def label_analysis(
"Sorry. Codecov is having problems",
extra=dict(extra_log_attributes=dict(status_code=response.status_code)),
)
_fallback_to_collected_labels(requested_labels, runner)
_fallback_to_collected_labels(requested_labels, runner, dry_run=dry_run)
return
if response.status_code >= 400:
logger.warning(
@@ -143,33 +151,55 @@
)
resp_json = resp_data.json()
if resp_json["state"] == "finished":
runner.process_labelanalysis_result(
LabelAnalysisRequestResult(resp_data.json()["result"])
)
if not dry_run:
runner.process_labelanalysis_result(
LabelAnalysisRequestResult(resp_data.json()["result"])
)
else:
_dry_run_output(LabelAnalysisRequestResult(resp_data.json()["result"]))
return
if resp_json["state"] == "error":
logger.error(
"Request had problems calculating",
extra=dict(extra_log_attributes=dict(resp_json=resp_json)),
)
_fallback_to_collected_labels(
collected_labels=requested_labels, runner=runner
collected_labels=requested_labels, runner=runner, dry_run=dry_run
)
return
if max_wait_time and (time.monotonic() - start_wait) > max_wait_time:
logger.error(
f"Exceeded max waiting time of {max_wait_time} seconds",
)
_fallback_to_collected_labels(
collected_labels=requested_labels, runner=runner
collected_labels=requested_labels, runner=runner, dry_run=dry_run
)
return
logger.info("Waiting more time for result")
time.sleep(5)


def _dry_run_output(result: LabelAnalysisRequestResult):
logger.info(
"Not executing tests because '--dry-run' is on. List of labels selected for running below."
)
logger.info("")
logger.info("Label groups:")
logger.info(
"- absent_labels: Set of new labels found in HEAD that are not present in BASE"
)
logger.info("- present_diff_labels: Set of labels affected by the git diff")
logger.info("- global_level_labels: Set of labels that possibly touch global code")
logger.info("- present_report_labels: Set of labels previously uploaded")
logger.info("")
logger.info(json.dumps(result))


def _fallback_to_collected_labels(
collected_labels: List[str], runner: LabelAnalysisRunnerInterface
collected_labels: List[str],
runner: LabelAnalysisRunnerInterface,
*,
dry_run: bool = False,
) -> dict:
logger.info("Trying to fallback on collected labels")
if collected_labels:
@@ -182,6 +212,9 @@ def _fallback_to_collected_labels(
"global_level_labels": [],
}
)
return runner.process_labelanalysis_result(fake_response)
if not dry_run:
return runner.process_labelanalysis_result(fake_response)
else:
return _dry_run_output(LabelAnalysisRequestResult(fake_response))
logger.error("Cannot fallback to collected labels because no labels were collected")
raise click.ClickException("Failed to get list of labels to run")
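
For readers skimming the diff, here is a minimal, self-contained sketch of the pattern this change introduces: a click flag that either hands the result to a processor or prints it as JSON and stops. The names and the canned result below are simplified stand-ins for illustration, not the actual CLI internals:

import json
import logging

import click

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def process_result(result: dict) -> None:
    # Stand-in for runner.process_labelanalysis_result, which would run the selected tests.
    logger.info("running %d labels from the diff", len(result["present_diff_labels"]))


@click.command()
@click.option("--dry-run", "dry_run", is_flag=True, help="Print the result instead of running tests.")
def label_analysis(dry_run: bool) -> None:
    # Canned result with the four label groups this change documents.
    result = {
        "absent_labels": ["test_absent"],  # new labels in HEAD, not present in BASE
        "present_diff_labels": ["test_in_diff"],  # labels affected by the git diff
        "global_level_labels": ["test_global"],  # labels that possibly touch global code
        "present_report_labels": ["test_present"],  # labels previously uploaded
    }
    if not dry_run:
        process_result(result)
    else:
        # Mirrors _dry_run_output: emit the selection as JSON and stop.
        logger.info(json.dumps(result))


if __name__ == "__main__":
    label_analysis()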
89 changes: 89 additions & 0 deletions tests/commands/test_invoke_labelanalysis.py
@@ -1,3 +1,5 @@
import json

import pytest
import responses
from click.testing import CliRunner
@@ -64,6 +66,10 @@ def test_labelanalysis_help(self, mocker, fake_ci_provider):
" --max-wait-time INTEGER Max time (in seconds) to wait for the label",
" analysis result before falling back to running",
" all tests. Default is to wait forever.",
" --dry-run Userful during setup. This will run the label",
" analysis, but will print the result to stdout",
" and terminate instead of calling the",
" runner.process_labelanalysis_result",
" --help Show this message and exit.",
"",
]
@@ -145,6 +151,49 @@ def test_invoke_label_analysis(self, get_labelanalysis_deps, mocker):
print(result.output)
assert result.exit_code == 0

def test_invoke_label_analysis_dry_run(self, get_labelanalysis_deps, mocker):
mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
fake_runner = get_labelanalysis_deps["fake_runner"]

label_analysis_result = {
"present_report_labels": ["test_present"],
"absent_labels": ["test_absent"],
"present_diff_labels": ["test_in_diff"],
"global_level_labels": ["test_global"],
}

with responses.RequestsMock() as rsps:
rsps.add(
responses.POST,
"https://api.codecov.io/labels/labels-analysis",
json={"external_id": "label-analysis-request-id"},
status=201,
match=[
matchers.header_matcher({"Authorization": "Repotoken STATIC_TOKEN"})
],
)
rsps.add(
responses.GET,
"https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
json={"state": "finished", "result": label_analysis_result},
)
cli_runner = CliRunner()
result = cli_runner.invoke(
cli,
[
"label-analysis",
"--token=STATIC_TOKEN",
"--base-sha=BASE_SHA",
"--dry-run",
],
obj={},
)
mock_get_runner.assert_called()
fake_runner.process_labelanalysis_result.assert_not_called()
assert result.exit_code == 0
print(result.output)
assert json.dumps(label_analysis_result) in result.output

def test_fallback_to_collected_labels(self, mocker):
mock_runner = mocker.MagicMock()
collected_labels = ["label_1", "label_2", "label_3"]
@@ -199,6 +248,46 @@ def test_fallback_collected_labels_covecov_500_error(
print(result.output)
assert result.exit_code == 0

def test_fallback_dry_run(self, get_labelanalysis_deps, mocker):
mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
fake_runner = get_labelanalysis_deps["fake_runner"]
collected_labels = get_labelanalysis_deps["collected_labels"]
mock_dry_run = mocker.patch(
"codecov_cli.commands.labelanalysis._dry_run_output"
)
with responses.RequestsMock() as rsps:
rsps.add(
responses.POST,
"https://api.codecov.io/labels/labels-analysis",
status=500,
match=[
matchers.header_matcher({"Authorization": "Repotoken STATIC_TOKEN"})
],
)
cli_runner = CliRunner()
result = cli_runner.invoke(
cli,
[
"-v",
"label-analysis",
"--token=STATIC_TOKEN",
"--base-sha=BASE_SHA",
"--dry-run",
],
obj={},
)
mock_get_runner.assert_called()
fake_runner.process_labelanalysis_result.assert_not_called()
mock_dry_run.assert_called_with(
{
"present_report_labels": [],
"absent_labels": collected_labels,
"present_diff_labels": [],
"global_level_labels": [],
}
)
assert result.exit_code == 0

def test_fallback_collected_labels_codecov_error_processing_label_analysis(
self, get_labelanalysis_deps, mocker
):
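To exercise just the new tests locally, something like the following should work (the -k selector matching both new test names is an assumption about your local setup, not part of this change):

python -m pytest tests/commands/test_invoke_labelanalysis.py -k dry_run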
