# Add Python version of the basic benchmarks (#4411)
### What

This PR adds the 3 basic benchmarks for Python. One notable difference w.r.t. the Rust version is that the Python benchmarks use a single recording but create a fresh memory sink for each iteration (the Rust version creates a fresh recording for each iteration as well). This is due to #4410.
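
For reference, here is a minimal sketch of that pattern (names are illustrative; the actual benchmark code is in the diff below): a single recording is created once with `rr.init`, and each benchmarked call starts by attaching a fresh, empty memory sink via `rr.memory_recording()`.

```python
import rerun as rr

rr.init("rerun_example_benchmark")  # one recording for the whole benchmark run


def one_iteration() -> None:
    # fresh, empty memory sink for the current recording, so iterations don't accumulate data
    rr.memory_recording()
    rr.log("points", rr.Points3D(positions=[[0.0, 0.0, 0.0]]))
```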

To run:
```
just py-bench
```
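
Extra arguments to the `py-bench` recipe are forwarded to pytest, so a subset of benchmarks can be selected with pytest's standard `-k` filter, e.g.:

```
just py-bench -k many_individual
```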

**IMPORTANT**: the Python version of `many_individual` runs with 100k points, instead of the 1M used by the other benchmarks!

* Part of #4100

On my machine:

<img width="1590" alt="image"
src="https://github.com/rerun-io/rerun/assets/49431240/99a74354-aa09-4267-a0fa-6587ecd9f8e5">

### Checklist
* [x] I have read and agree to [Contributor
Guide](https://github.com/rerun-io/rerun/blob/main/CONTRIBUTING.md) and
the [Code of
Conduct](https://github.com/rerun-io/rerun/blob/main/CODE_OF_CONDUCT.md)
* [x] I've included a screenshot or gif (if applicable)
* [x] I have tested [app.rerun.io](https://app.rerun.io/pr/4411) (if
applicable)
* [x] The PR title and labels are set such as to maximize their
usefulness for the next release's CHANGELOG

- [PR Build Summary](https://build.rerun.io/pr/4411)
- [Docs
preview](https://rerun.io/preview/0f8403061c76b2147bebef25cfebd1b0c5e47c73/docs)
<!--DOCS-PREVIEW-->
- [Examples
preview](https://rerun.io/preview/0f8403061c76b2147bebef25cfebd1b0c5e47c73/examples)
<!--EXAMPLES-PREVIEW-->
- [Recent benchmark results](https://build.rerun.io/graphs/crates.html)
- [Wasm size tracking](https://build.rerun.io/graphs/sizes.html)
abey79 authored Dec 1, 2023
1 parent 2f8b31f commit 6fe6d11
Showing 5 changed files with 96 additions and 0 deletions.
5 changes: 5 additions & 0 deletions justfile
@@ -130,6 +130,11 @@ py-test:
py-test-allpy:
    nox -s tests

# Run all Python benchmarks
py-bench *ARGS:
    python -m pytest -c rerun_py/pyproject.toml --benchmark-only {{ARGS}}


# Serve the python docs locally
py-docs-serve:
    mkdocs serve -f rerun_py/mkdocs.yml -w rerun_py
1 change: 1 addition & 0 deletions rerun_py/pyproject.toml
@@ -130,3 +130,4 @@ python-packages = ["rerun_sdk/rerun"]
filterwarnings = """
error
"""
norecursedirs = ".* venv* target* build"
26 changes: 26 additions & 0 deletions tests/python/log_benchmark/__init__.py
@@ -0,0 +1,26 @@
from __future__ import annotations

import dataclasses

import numpy as np

MAX_INT64 = 2**63 - 1
MAX_INT32 = 2**31 - 1


@dataclasses.dataclass
class Point3DInput:
    positions: np.ndarray
    colors: np.ndarray
    radii: np.ndarray
    label: str = "some label"

    @classmethod
    def prepare(cls, seed: int, num_points: int):
        rng = np.random.default_rng(seed=seed)

        return cls(
            positions=rng.integers(0, MAX_INT64, (num_points, 3)).astype(dtype=np.float32),
            colors=rng.integers(0, MAX_INT32, num_points, dtype=np.uint32),
            radii=rng.integers(0, MAX_INT64, num_points).astype(dtype=np.float32),
        )
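
For illustration only (not part of the diff), `prepare` builds all the random input arrays up front, so the benchmarks measure logging rather than data generation:

```python
# hypothetical usage, assuming tests/python/log_benchmark is importable as `log_benchmark`
from log_benchmark import Point3DInput

data = Point3DInput.prepare(seed=42, num_points=1_000)
# data.positions: shape (1000, 3), float32
# data.colors:    shape (1000,),   uint32
# data.radii:     shape (1000,),   float32
```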
63 changes: 63 additions & 0 deletions tests/python/log_benchmark/test_log_benchmark.py
@@ -0,0 +1,63 @@
"""Python logging benchmarks. Use `just py-bench` to run."""

from __future__ import annotations

import numpy as np
import pytest
import rerun as rr

from . import Point3DInput


def log_points3d_large_batch(data: Point3DInput):
    # create a new, empty memory sink for the current recording
    rr.memory_recording()

    rr.log(
        "large_batch",
        rr.Points3D(positions=data.positions, colors=data.colors, radii=data.radii, labels=data.label),
    )


@pytest.mark.parametrize("num_points", [50_000_000])
def test_bench_points3d_large_batch(benchmark, num_points):
    rr.init("rerun_example_benchmark_points3d_large_batch")
    data = Point3DInput.prepare(42, num_points)
    benchmark(log_points3d_large_batch, data)


def log_points3d_many_individual(data: Point3DInput):
    # create a new, empty memory sink for the current recording
    rr.memory_recording()

    for i in range(data.positions.shape[0]):
        rr.log(
            "single_point",
            rr.Points3D(positions=data.positions[i], colors=data.colors[i], radii=data.radii[i]),
        )


@pytest.mark.parametrize("num_points", [100_000])
def test_bench_points3d_many_individual(benchmark, num_points):
    rr.init("rerun_example_benchmark_points3d_many_individual")
    data = Point3DInput.prepare(1337, num_points)
    benchmark(log_points3d_many_individual, data)


def log_image(image: np.ndarray, num_log_calls):
    # create a new, empty memory sink for the current recording
    rr.memory_recording()

    for i in range(num_log_calls):
        rr.log("test_image", rr.Tensor(image))


@pytest.mark.parametrize(
    ["image_dimension", "image_channels", "num_log_calls"],
    [pytest.param(16_384, 4, 4, id="16384^2px-4channels-4calls")],
)
def test_bench_image(benchmark, image_dimension, image_channels, num_log_calls):
    rr.init("rerun_example_benchmark_image")

    image = np.zeros((image_dimension, image_dimension, image_channels), dtype=np.uint8)
    benchmark(log_image, image, num_log_calls)
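
For context, here is a simplified, hypothetical stand-in for what the pytest-benchmark `benchmark` fixture does with these functions (this is not the fixture's actual implementation): call the target repeatedly and collect per-round wall-clock timings.

```python
import time


def naive_benchmark(func, *args, rounds: int = 5) -> list[float]:
    """Rough stand-in for the pytest-benchmark fixture: time repeated calls to `func`."""
    timings = []
    for _ in range(rounds):
        start = time.perf_counter()
        func(*args)
        timings.append(time.perf_counter() - start)
    return timings


# e.g. naive_benchmark(log_image, image, 4) returns five per-round timings in seconds
```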
1 change: 1 addition & 0 deletions tests/python/requirements.txt
@@ -1,2 +1,3 @@
-r test_api/requirements.txt
-r nv12image/requirements.txt
pytest-benchmark
