Merge pull request #20 from ESA-APEx/apex5-s3-upload
Benchmarks: add pytest plugin to automatically upload generated assets to S3
soxofaan authored Jul 26, 2024
2 parents 1f54af2 + f673cc9 commit c0cd18b
Showing 11 changed files with 436 additions and 35 deletions.
17 changes: 16 additions & 1 deletion .github/workflows/benchmarks.yaml
@@ -22,13 +22,28 @@ jobs:
run: |
cd qa/benchmarks
mkdir report
mkdir tmp_path_root
pytest \
-vv \
--log-cli-level=INFO \
--random-subset=1 \
--html report/report.html --self-contained-html \
--track-metrics-report=report/metrics.json
--track-metrics-report=report/metrics.json \
--basetemp=tmp_path_root \
--upload-assets-run-id="gh-$GITHUB_RUN_ID" \
--upload-assets-endpoint-url="https://s3.waw3-1.cloudferro.com" \
--upload-assets-bucket="APEx-benchmarks"
env:
OPENEO_AUTH_METHOD: client_credentials
OPENEO_AUTH_CLIENT_CREDENTIALS_CDSEFED: ${{ secrets.OPENEO_AUTH_CLIENT_CREDENTIALS_CDSEFED }}
UPLOAD_ASSETS_ACCESS_KEY_ID: ${{ secrets.UPLOAD_ASSETS_ACCESS_KEY_ID }}
UPLOAD_ASSETS_SECRET_ACCESS_KEY: ${{ secrets.UPLOAD_ASSETS_SECRET_ACCESS_KEY }}
- name: List local reports
if: always()
run: ls -alR qa/benchmarks/report
- name: List local results
if: always()
run: ls -alR qa/benchmarks/tmp_path_root
- name: upload report
uses: actions/upload-artifact@v4
if: always()
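The new `--upload-assets-run-id`, `--upload-assets-endpoint-url` and `--upload-assets-bucket` options, together with the `UPLOAD_ASSETS_*` secrets, configure the S3 upload plugin that is registered in `conftest.py` below. The plugin module itself (`apex_algorithm_qa_tools.pytest_upload_assets`) is not expanded in this diff, so purely as an illustration of how such options and secrets could be wired to an S3 client, here is a minimal, hypothetical sketch; only the option and environment variable names are taken from the workflow above, everything else is assumed.

# Hypothetical sketch, not the actual pytest_upload_assets module:
# how the workflow's options and secrets could be wired to an S3 client.
import os

import boto3
import pytest


def pytest_addoption(parser: pytest.Parser):
    # Option names mirror the pytest invocation in the workflow above.
    parser.addoption("--upload-assets-run-id", help="Run id used as S3 key prefix.")
    parser.addoption("--upload-assets-endpoint-url", help="S3 endpoint URL.")
    parser.addoption("--upload-assets-bucket", help="Target S3 bucket.")


def _build_s3_client(config: pytest.Config):
    # Credentials come from the UPLOAD_ASSETS_* environment variables set in
    # the workflow; the endpoint comes from the corresponding CLI option.
    return boto3.client(
        "s3",
        endpoint_url=config.getoption("--upload-assets-endpoint-url"),
        aws_access_key_id=os.environ["UPLOAD_ASSETS_ACCESS_KEY_ID"],
        aws_secret_access_key=os.environ["UPLOAD_ASSETS_SECRET_ACCESS_KEY"],
    )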
1 change: 1 addition & 0 deletions qa/benchmarks/.gitignore
@@ -1 +1,2 @@
report/
tmp_path_root/
38 changes: 33 additions & 5 deletions qa/benchmarks/tests/conftest.py
@@ -12,6 +12,7 @@

pytest_plugins = [
"apex_algorithm_qa_tools.pytest_track_metrics",
"apex_algorithm_qa_tools.pytest_upload_assets",
]


@@ -24,21 +25,48 @@ def pytest_addoption(parser):
type=int,
help="Only run random selected subset benchmarks.",
)
parser.addoption(
"--dummy",
action="store_true",
help="Toggle to only run dummy benchmarks/tests (instead of skipping them)",
)


def pytest_collection_modifyitems(session, config, items):
def pytest_ignore_collect(collection_path, config):
"""
Pytest plugin to select a random subset of benchmarks to run.
Pytest hook to ignore certain directories/files during test collection.
"""
# Note: there are some subtleties about the return values of this hook,
# which make the logic slightly more complex than a naive approach would suggest:
# - `True` means to ignore the path,
# - `False` means to forcefully include it regardless of other plugins,
# - `None` means to keep it for now, but allow other plugins to still ignore.
dummy_mode = bool(config.getoption("--dummy"))
is_dummy_path = bool("dummy" in collection_path.name)
if dummy_mode and not is_dummy_path:
return True
elif not dummy_mode and is_dummy_path:
return True
else:
return None

based on https://alexwlchan.net/til/2024/run-random-subset-of-tests-in-pytest/

@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(session, config, items):
"""
Pytest hook to filter/reorder collected test items.
"""
# Optionally, select a random subset of benchmarks to run.
# based on https://alexwlchan.net/til/2024/run-random-subset-of-tests-in-pytest/
# Note that with current pytest versions the collection/summary stats might be messed up,
# see https://github.com/pytest-dev/pytest/issues/12663
subset_size = config.getoption("--random-subset")

if subset_size >= 0:
_log.warning(
f"Selecting random subset of {subset_size} from {len(items)} benchmarks."
)
items[:] = random.sample(items, k=subset_size)
if subset_size < len(items):
items[:] = random.sample(items, k=subset_size)


def _get_client_credentials_env_var(url: str) -> str:
14 changes: 11 additions & 3 deletions qa/benchmarks/tests/test_benchmarks.py
@@ -19,7 +19,11 @@
],
)
def test_run_benchmark(
scenario: BenchmarkScenario, connection_factory, tmp_path: Path, track_metric
scenario: BenchmarkScenario,
connection_factory,
tmp_path: Path,
track_metric,
upload_assets_on_fail,
):
connection: openeo.Connection = connection_factory(url=scenario.backend)

Expand All @@ -39,8 +43,12 @@ def test_run_benchmark(

# Download actual results
actual_dir = tmp_path / "actual"
job.get_results().download_files(target=actual_dir, include_stac_metadata=True)
# TODO: upload actual results to somewhere?
paths = job.get_results().download_files(
target=actual_dir, include_stac_metadata=True
)

# Upload assets on failure
upload_assets_on_fail(*paths)

# Compare actual results with reference data
reference_dir = download_reference_data(
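In the updated test above, the downloaded result paths are handed to the new `upload_assets_on_fail` fixture. Its implementation lives in the `pytest_upload_assets` module, which is not expanded in this diff, so the following is a speculative sketch of how such a fixture can defer work to teardown and only act when the test failed, using the standard `pytest_runtest_makereport` hookwrapper pattern. The S3 key layout and all names other than the fixture are assumptions; only `--upload-assets-run-id` is taken from the workflow.

# Speculative sketch only, not the actual pytest_upload_assets implementation.
from pathlib import Path

import pytest


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Store each phase's report on the test item, so fixtures can check the
    # outcome during their teardown.
    outcome = yield
    report = outcome.get_result()
    setattr(item, f"report_{report.when}", report)


@pytest.fixture
def upload_assets_on_fail(request, pytestconfig):
    collected: list[Path] = []

    def collect(*paths: Path):
        collected.extend(paths)

    yield collect

    # Teardown: only act if the test's "call" phase failed.
    report = getattr(request.node, "report_call", None)
    if report is not None and report.failed:
        run_id = pytestconfig.getoption("--upload-assets-run-id", default="local")
        for path in collected:
            key = f"{run_id}/{request.node.nodeid}/{path.name}"
            # e.g. s3_client.upload_file(str(path), bucket, key)
            print(f"Would upload {path} to {key}")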
46 changes: 46 additions & 0 deletions qa/benchmarks/tests/test_dummy.py
@@ -0,0 +1,46 @@
"""
Dummy tests to help with tooling development.
Note that this test module will be skipped by default.
Use the `--dummy` runtime option to run these tests
and skip all other non-dummy tests.
"""

import pytest


@pytest.mark.parametrize("y", [4, 5, 6])
def test_tracking(track_metric, y):
x = 3
track_metric("x squared", x * x)
track_metric("y", y)
assert x + y == 8


def test_simple_success():
x = 3
assert x + 5 == 8


def test_simple_fail():
x = 3
assert x + 5 == "eight"


def test_produce_files_success(tmp_path):
path = tmp_path / "hello.txt"
path.write_text("Hello, world.\n")


def test_produce_files_fail(tmp_path):
path = tmp_path / "hello.txt"
path.write_text("Hello, world.\n")
assert 1 == 2


@pytest.mark.parametrize("x", [3, 5])
def test_upload_assets(tmp_path, upload_assets_on_fail, x):
path = tmp_path / "hello.txt"
path.write_text("Hello, world.\n")
upload_assets_on_fail(path)
assert x == 5
38 changes: 15 additions & 23 deletions qa/tools/apex_algorithm_qa_tools/pytest_track_metrics.py
@@ -31,39 +31,30 @@ def test_dummy(track_metric):

import pytest

_TRACK_METRICS_PATH = "track_metrics_path"
_TRACK_METRICS_NAME = "track_metrics"
_TRACK_METRICS_PLUGIN_NAME = "track_metrics"


def pytest_addoption(parser):
def pytest_addoption(parser: pytest.Parser):
parser.addoption(
"--track-metrics-report",
metavar="PATH",
action="store",
dest=_TRACK_METRICS_PATH,
default=None,
help="Path to JSON file to store test/benchmark metrics.",
)


def pytest_configure(config):
track_metrics_path = config.getoption(_TRACK_METRICS_PATH)
if (
track_metrics_path
# Don't register on xdist worker nodes
and not hasattr(config, "workerinput")
):
if hasattr(config, "workerinput"):
warnings.warn("`track_metrics` plugin is not supported on xdist worker nodes.")
return

track_metrics_path = config.getoption("track_metrics_report")
if track_metrics_path:
config.pluginmanager.register(
TrackMetricsReporter(path=track_metrics_path),
name=_TRACK_METRICS_NAME,
name=_TRACK_METRICS_PLUGIN_NAME,
)


def pytest_unconfigure(config):
if config.pluginmanager.hasplugin(_TRACK_METRICS_NAME):
config.pluginmanager.unregister(name=_TRACK_METRICS_NAME)


class TrackMetricsReporter:
def __init__(
self, path: Union[str, Path], user_properties_key: str = "track_metrics"
@@ -91,6 +82,9 @@ def pytest_sessionfinish(self, session):
with self.path.open("w", encoding="utf8") as f:
json.dump(self.metrics, f, indent=2)

def pytest_report_header(self):
return f"Plugin `track_metrics` is active, reporting to {self.path}"

def pytest_terminal_summary(self, terminalreporter):
terminalreporter.write_sep("-", f"Generated track_metrics report: {self.path}")

@@ -121,18 +115,16 @@ def track_metric(
Returns a callable that expects a metric name and value
"""

reporter: Union[TrackMetricsReporter, None] = pytestconfig.pluginmanager.get_plugin(
_TRACK_METRICS_NAME
reporter: TrackMetricsReporter | None = pytestconfig.pluginmanager.get_plugin(
_TRACK_METRICS_PLUGIN_NAME
)

if reporter:

def append(name: str, value: Any):
reporter.get_metrics(request.node.user_properties).append((name, value))
else:
warnings.warn(
"The `track_metric` fixture is requested, but no output file is defined (e.g. with `--metrics-tracker-report=path/to/metrics.json`."
)
warnings.warn("Fixture `track_metric` is a no-op (incomplete set up).")

def append(name: str, value: Any):
pass
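The `track_metric` fixture above appends `(name, value)` pairs via `reporter.get_metrics(request.node.user_properties)`: metrics are parked on the test item's `user_properties` under the reporter's `user_properties_key` (default `"track_metrics"`) and picked up again when the JSON report is written. The snippet below is a tiny, self-contained illustration of that data flow; its `get_metrics` is an assumed, simplified stand-in for the reporter method of the same name, whose real definition sits in an unchanged part of the file that is not shown here.

# Simplified illustration (assumed) of how tracked metrics accumulate.
user_properties = []  # stand-in for request.node.user_properties


def get_metrics(props, key="track_metrics"):
    # Find or create the metrics list stored under `key`.
    for name, value in props:
        if name == key:
            return value
    metrics = []
    props.append((key, metrics))
    return metrics


# Equivalent of track_metric("x squared", 9) and track_metric("y", 4):
get_metrics(user_properties).append(("x squared", 9))
get_metrics(user_properties).append(("y", 4))
assert user_properties == [("track_metrics", [("x squared", 9), ("y", 4)])]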
(Diffs for the remaining changed files are not expanded in this view.)
