Add internal profiler
Add a simplified version of #252.

The profiling code was removed in #323, as most issues could be found locally with a profiler. Recently, however, discrepancies have appeared that make this module necessary again.
niknetniko committed Nov 22, 2023
1 parent 52c8dcb commit 10123a1
Showing 4 changed files with 117 additions and 4 deletions.
4 changes: 4 additions & 0 deletions tested/configs.py
@@ -64,6 +64,10 @@ class Options:
Longer exercises, or exercises where the solution might depend on optimization,
may need this option.
"""
profile: bool = False
"""
Whether to profile the execution. Enabling this also disables parallel execution.
"""


@fallback_field(get_converter(), {"testplan": "test_suite", "plan_name": "test_suite"})
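The new profile option defaults to off. A minimal sketch of switching it on, assuming Options can be instantiated directly with its remaining fields left at their defaults (in a real run the value comes from the exercise's evaluation configuration on Dodona rather than from hand-written code):

from tested.configs import Options

# Hypothetical local construction; Dodona normally supplies these options.
options = Options(profile=True)
assert options.profile  # profiling is on; TESTed will then also run execution units sequentially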
27 changes: 24 additions & 3 deletions tested/judge/core.py
@@ -28,6 +28,7 @@
execute_unit,
set_up_unit,
)
from tested.judge.internal_profiler import DummyProfiler, Profiler
from tested.judge.linter import run_linter
from tested.judge.planning import (
CompilationResult,
@@ -73,7 +74,7 @@ def _handle_time_or_memory_compilation(
)


def judge(bundle: Bundle):
def judge(bundle: Bundle, profiler: Profiler | DummyProfiler):
"""
Evaluate a solution for an exercise. Execute the tests present in the
test suite. The result (the judgment) is sent to stdout, so Dodona can pick it
@@ -92,6 +93,7 @@ def judge(bundle: Bundle):
c. Process the results.
:param bundle: The configuration bundle.
:param profiler: The profiler (or a dummy) used to measure timings.
"""
# Begin by checking if the given test suite is executable in this language.
_logger.info("Checking supported features...")
@@ -116,17 +118,21 @@ def judge(bundle: Bundle):
max_time = float(bundle.config.time_limit) * 0.9
start = time.perf_counter()

profiler.start("Linter")
# Run the linter.
# TODO: do this in parallel
run_linter(bundle, collector, max_time)
if time.perf_counter() - start > max_time:
terminate(bundle, collector, Status.TIME_LIMIT_EXCEEDED)
return
profiler.stop("Linter")

planned_units = plan_test_suite(bundle, strategy=PlanStrategy.OPTIMAL)

profiler.start("Supporting file generation")
# Attempt to precompile everything.
common_dir, dependencies, selector = _generate_files(bundle, planned_units)
profiler.stop("Supporting file generation")

# Create an execution plan.
plan = ExecutionPlan(
@@ -138,8 +144,10 @@
start_time=start,
)

profiler.start("Precompilation")
_logger.debug("Attempting precompilation")
compilation_results = precompile(bundle, plan)
profiler.stop("Precompilation")

# If something went horribly wrong, and the compilation itself caused a timeout or memory issue, bail now.
if _is_fatal_compilation_error(compilation_results):
@@ -161,9 +169,12 @@ def judge(bundle: Bundle):
def _process_one_unit(
index: int,
) -> tuple[CompilationResult, ExecutionResult | None, Path]:
return _execute_one_unit(bundle, plan, compilation_results, index)
profiler.start(f"Running execution unit {index}")
result = _execute_one_unit(bundle, plan, compilation_results, index, profiler)
profiler.stop(f"Running execution unit {index}")
return result

if bundle.config.options.parallel:
if bundle.config.options.parallel and not bundle.config.options.profile:
max_workers = None
else:
max_workers = 1
@@ -206,6 +217,9 @@ def _process_one_unit(
terminate(bundle, collector, Status.TIME_LIMIT_EXCEEDED)
return

if isinstance(profiler, Profiler):
collector.add_messages([profiler.to_message()])

# Close the last tab.
terminate(bundle, collector, Status.CORRECT)

@@ -215,25 +229,32 @@ def _execute_one_unit(
plan: ExecutionPlan,
compilation_results: CompilationResult | None,
index: int,
profiler: Profiler | DummyProfiler,
) -> tuple[CompilationResult, ExecutionResult | None, Path]:
planned_unit = plan.units[index]
profiler.start("Preparing")
# Prepare the unit.
execution_dir, dependencies = set_up_unit(bundle, plan, index)
profiler.stop("Preparing")

# If compilation is necessary, do it.
if compilation_results is None:
profiler.start("Compiling")
local_compilation_results, dependencies = compile_unit(
bundle, plan, index, execution_dir, dependencies
)
profiler.stop("Compiling")
else:
local_compilation_results = compilation_results

# Execute the unit.
if local_compilation_results.status == Status.CORRECT:
remaining_time = plan.remaining_time()
profiler.start("Executing")
execution_result, status = execute_unit(
bundle, planned_unit, execution_dir, dependencies, remaining_time
)
profiler.stop("Executing")
local_compilation_results.status = status
else:
execution_result = None
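The hunks above thread the profiler through judge() and _execute_one_unit(), and force max_workers = 1 whenever profiling is enabled (or parallel execution is off): the Profiler keeps a single stack of open measurements, so interleaved start/stop calls from concurrent units would trip the name check in stop(). The executor call that consumes max_workers is outside the shown hunks; the following standalone sketch, with _run_unit as a hypothetical stand-in for _process_one_unit, only illustrates the effect of the worker count:

from concurrent.futures import ThreadPoolExecutor

def _run_unit(index: int) -> str:
    # stand-in for _process_one_unit in judge/core.py
    return f"unit {index} done"

parallel = True
profile = True
# mirrors the decision in judge(): profiling forces a single worker, i.e. sequential execution
max_workers = None if parallel and not profile else 1

with ThreadPoolExecutor(max_workers=max_workers) as pool:
    print(list(pool.map(_run_unit, range(3))))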
79 changes: 79 additions & 0 deletions tested/judge/internal_profiler.py
@@ -0,0 +1,79 @@
from time import monotonic

from attr import define, field

from tested.dodona import ExtendedMessage, Permission


@define
class Measurement:
    name: str
    start: float
    end: float | None = None
    nested: list["Measurement"] = field(factory=list)

    def to_message(self, default_open: bool = False) -> str:
        assert self.end
        if self.nested:
            nested_result = ""
            for n in self.nested:
                nested_result += n.to_message()
                nested_result += "\n"
            return f"""
            <details {"open" if default_open else ""}>
            <summary>{self.name}: {self.end - self.start:.2f}s</summary>
            <p>{nested_result}</p>
            </details>
            """
        else:
            return f"{self.name}: {self.end - self.start:.2f}s<br>"


class Profiler:
    root_measurement: Measurement
    currently_open: list[Measurement]

    def __init__(self):
        self.root_measurement = Measurement(name="Execution", start=monotonic())
        self.currently_open = [self.root_measurement]

    def start(self, name: str):
        measurement = Measurement(name=name, start=monotonic())
        self.currently_open[-1].nested.append(measurement)
        self.currently_open.append(measurement)

    def stop(self, name: str):
        assert (
            self.currently_open[-1].name == name
        ), f"Expected {self.currently_open[-1].name}, got {name}"
        m = self.currently_open.pop()
        m.end = monotonic()

    def to_message(self) -> ExtendedMessage:
        if not self.root_measurement.end:
            self.root_measurement.end = monotonic()
        return ExtendedMessage(
            description=f"""
            <h2>Time measurements</h2>
            Note that enabling profiling disables parallel execution.<br>
            These measurements can be used to check which parts of TESTed take the most time.<br>
            Measurements are rounded to two decimal places, so they might not add up exactly.<br>
            Not everything is measured: expect the execution time reported by Dodona to be longer.
            <style>
            details > *:not(summary) {{
                margin-left: 2em;
            }}
            </style>
            <p>{self.root_measurement.to_message(True)}</p>
            """,
            format="html",
            permission=Permission.STAFF,
        )


class DummyProfiler:
    def start(self, name: str):
        pass

    def stop(self, name: str):
        pass
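Profiler and DummyProfiler expose the same start/stop surface, so call sites never branch; only the final message is guarded by an isinstance check in judge/core.py. A small usage sketch, not part of the commit, showing how nested sections are recorded and rendered:

from tested.judge.internal_profiler import DummyProfiler, Profiler

profiler = Profiler()  # DummyProfiler() accepts the same start/stop calls but produces no message
profiler.start("Compilation")
profiler.start("Linking")  # nested under "Compilation"
profiler.stop("Linking")   # the name must match the most recently opened section
profiler.stop("Compilation")
message = profiler.to_message()  # ExtendedMessage with a collapsible HTML tree, visible to staff only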
11 changes: 10 additions & 1 deletion tested/main.py
@@ -6,6 +6,7 @@

from tested.configs import DodonaConfig, create_bundle
from tested.dsl import parse_dsl
from tested.judge.internal_profiler import DummyProfiler, Profiler
from tested.testsuite import parse_test_suite


@@ -16,6 +17,11 @@ def run(config: DodonaConfig, judge_output: IO):
:param config: The configuration, as received from Dodona.
:param judge_output: Where the judge output will be written to.
"""
if config.options.profile:
profiler = Profiler()
else:
profiler = DummyProfiler()
profiler.start("Reading test suite")
try:
with open(f"{config.resources}/{config.test_suite}", "r") as t:
textual_suite = t.read()
@@ -25,14 +31,17 @@ def run(config: DodonaConfig, judge_output: IO):
"Remember that the test suite is a path relative to the 'evaluation' folder of your exercise."
)
raise e
profiler.stop("Reading test suite")

_, ext = os.path.splitext(config.test_suite)
is_yaml = ext.lower() in (".yaml", ".yml")
profiler.start("Parsing test suite")
if is_yaml:
suite = parse_dsl(textual_suite)
else:
suite = parse_test_suite(textual_suite)
profiler.stop("Parsing test suite")
pack = create_bundle(config, judge_output, suite)
from .judge import judge

judge(pack)
judge(pack, profiler)
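In run(), the concrete profiler is chosen once from the new option and then handed to judge(). An equivalent, slightly more compact form of that selection, purely illustrative since the commit uses the explicit if/else shown above:

profiler = Profiler() if config.options.profile else DummyProfiler()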
