(Re)-add internal profiler #459

Closed
wants to merge 1 commit
4 changes: 4 additions & 0 deletions tested/configs.py
@@ -64,6 +64,10 @@ class Options:
Longer exercises, or exercises where the solution might depend on optimization
may need this option.
"""
profile: bool = False
"""
Whether to profile the execution or not.
"""


@fallback_field(get_converter(), {"testplan": "test_suite", "plan_name": "test_suite"})
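For illustration only (not part of this diff): a minimal sketch of switching the new flag on when constructing the options programmatically, assuming the remaining Options fields keep their defaults.

from tested.configs import Options

# Hypothetical sketch: enable the internal profiler (the field defaults to False).
options = Options(profile=True)
assert options.profile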
27 changes: 24 additions & 3 deletions tested/judge/core.py
@@ -28,6 +28,7 @@
execute_unit,
set_up_unit,
)
from tested.judge.internal_profiler import DummyProfiler, Profiler
from tested.judge.linter import run_linter
from tested.judge.planning import (
CompilationResult,
@@ -73,7 +74,7 @@
)


def judge(bundle: Bundle):
def judge(bundle: Bundle, profiler: Profiler | DummyProfiler):
"""
Evaluate a solution for an exercise. Execute the tests present in the
test suite. The result (the judgment) is sent to stdout, so Dodona can pick it
@@ -92,6 +93,7 @@
c. Process the results.

:param bundle: The configuration bundle.
:param profiler: The (dummy) profiler to measure timings.
"""
# Begin by checking if the given test suite is executable in this language.
_logger.info("Checking supported features...")
@@ -116,17 +118,21 @@
max_time = float(bundle.config.time_limit) * 0.9
start = time.perf_counter()

profiler.start("Linter")
# Run the linter.
# TODO: do this in parallel
run_linter(bundle, collector, max_time)
if time.perf_counter() - start > max_time:
terminate(bundle, collector, Status.TIME_LIMIT_EXCEEDED)
return
profiler.stop("Linter")

planned_units = plan_test_suite(bundle, strategy=PlanStrategy.OPTIMAL)

profiler.start("Supporting file generation")
# Attempt to precompile everything.
common_dir, dependencies, selector = _generate_files(bundle, planned_units)
profiler.stop("Supporting file generation")

# Create an execution plan.
plan = ExecutionPlan(
@@ -138,8 +144,10 @@
start_time=start,
)

profiler.start("Precompilation")
_logger.debug("Attempting precompilation")
compilation_results = precompile(bundle, plan)
profiler.stop("Precompilation")

# If something went horribly wrong, and the compilation itself caused a timeout or memory issue, bail now.
if _is_fatal_compilation_error(compilation_results):
@@ -161,9 +169,12 @@
def _process_one_unit(
index: int,
) -> tuple[CompilationResult, ExecutionResult | None, Path]:
return _execute_one_unit(bundle, plan, compilation_results, index)
profiler.start(f"Running execution unit {index}")
result = _execute_one_unit(bundle, plan, compilation_results, index, profiler)
profiler.stop(f"Running execution unit {index}")
return result

if bundle.config.options.parallel:
if bundle.config.options.parallel and not bundle.config.options.profile:
max_workers = None
else:
max_workers = 1
Expand Down Expand Up @@ -206,6 +217,9 @@
terminate(bundle, collector, Status.TIME_LIMIT_EXCEEDED)
return

if isinstance(profiler, Profiler):
collector.add_messages([profiler.to_message()])

# Close the last tab.
terminate(bundle, collector, Status.CORRECT)

@@ -215,25 +229,32 @@
plan: ExecutionPlan,
compilation_results: CompilationResult | None,
index: int,
profiler: Profiler | DummyProfiler,
) -> tuple[CompilationResult, ExecutionResult | None, Path]:
planned_unit = plan.units[index]
profiler.start("Preparing")
# Prepare the unit.
execution_dir, dependencies = set_up_unit(bundle, plan, index)
profiler.stop("Preparing")

# If compilation is necessary, do it.
if compilation_results is None:
profiler.start("Compiling")
local_compilation_results, dependencies = compile_unit(
bundle, plan, index, execution_dir, dependencies
)
profiler.stop("Compiling")
else:
local_compilation_results = compilation_results

# Execute the unit.
if local_compilation_results.status == Status.CORRECT:
remaining_time = plan.remaining_time()
profiler.start("Executing")
execution_result, status = execute_unit(
bundle, planned_unit, execution_dir, dependencies, remaining_time
)
profiler.stop("Executing")
local_compilation_results.status = status
else:
execution_result = None
79 changes: 79 additions & 0 deletions tested/judge/internal_profiler.py
@@ -0,0 +1,79 @@
from time import monotonic

from attr import define, field

from tested.dodona import ExtendedMessage, Permission


@define
class Measurement:
name: str
start: float
end: float | None = None
nested: list["Measurement"] = field(factory=list)

def to_message(self, default_open: bool = False) -> str:
assert self.end
if self.nested:
nested_result = ""
for n in self.nested:
nested_result += n.to_message()
nested_result += "\n"
return f"""
<details {"open" if default_open else ""}>
<summary>{self.name}: {self.end - self.start:.2f}s</summary>
<p>{nested_result}</p>
</details>
"""
else:
return f"{self.name}: {self.end - self.start:.2f}s<br>"


class Profiler:
root_measurement: Measurement
currently_open: list[Measurement]

def __init__(self):
self.root_measurement = Measurement(name="Execution", start=monotonic())
self.currently_open = [self.root_measurement]

def start(self, name: str):
measurement = Measurement(name=name, start=monotonic())
self.currently_open[-1].nested.append(measurement)
self.currently_open.append(measurement)

def stop(self, name: str):
assert (
self.currently_open[-1].name == name
), f"Expected {self.currently_open[-1].name}, got {name}"
m = self.currently_open.pop()
m.end = monotonic()

def to_message(self) -> ExtendedMessage:
if not self.root_measurement.end:
self.root_measurement.end = monotonic()
return ExtendedMessage(
description=f"""
<h2>Time measurements</h2>
Note that enabling profiling disables parallel execution, if that was enabled.<br>
These measurements can be used to check which part of TESTed is using a lot of time.<br>
Measurements are rounded to .2f, so they might not add up.<br>
Not everything is measured: expect Dodona-reported execution to be longer.
<style>
details > *:not(summary) {{
margin-left: 2em;
}}
</style>
<p>{self.root_measurement.to_message(True)}</p>
""",
format="html",
permission=Permission.STAFF,
)


class DummyProfiler:
def start(self, name: str):
pass

def stop(self, name: str):
pass
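
A minimal usage sketch (not part of this PR) of how the nested start/stop calls build the report; the phase names here simply mirror the ones used in tested/judge/core.py.

from tested.judge.internal_profiler import Profiler

profiler = Profiler()
profiler.start("Linter")
profiler.stop("Linter")
profiler.start("Running execution unit 0")
profiler.start("Compiling")
profiler.stop("Compiling")
profiler.stop("Running execution unit 0")
# to_message() closes the root "Execution" measurement and returns an
# ExtendedMessage (format="html", staff-only permission) with one collapsible
# <details> block per nested Measurement.
message = profiler.to_message()

Mismatched labels trip the assertion in stop(), which is why every start() call in core.py has a matching stop() with the same label.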
11 changes: 10 additions & 1 deletion tested/main.py
@@ -6,6 +6,7 @@

from tested.configs import DodonaConfig, create_bundle
from tested.dsl import parse_dsl
from tested.judge.internal_profiler import DummyProfiler, Profiler
from tested.testsuite import parse_test_suite


@@ -16,6 +17,11 @@
:param config: The configuration, as received from Dodona.
:param judge_output: Where the judge output will be written to.
"""
if config.options.profile:
profiler = Profiler()
else:
profiler = DummyProfiler()
profiler.start("Reading test suite")
try:
with open(f"{config.resources}/{config.test_suite}", "r") as t:
textual_suite = t.read()
@@ -25,14 +31,17 @@
"Remember that the test suite is a path relative to the 'evaluation' folder of your exercise."
)
raise e
profiler.stop("Reading test suite")

_, ext = os.path.splitext(config.test_suite)
is_yaml = ext.lower() in (".yaml", ".yml")
profiler.start("Parsing test suite")
if is_yaml:
suite = parse_dsl(textual_suite)
else:
suite = parse_test_suite(textual_suite)
profiler.stop("Parsing test suite")
pack = create_bundle(config, judge_output, suite)
from .judge import judge

judge(pack)
judge(pack, profiler)