diff --git a/tested/configs.py b/tested/configs.py
index 3cf9e01e..85448fdf 100644
--- a/tested/configs.py
+++ b/tested/configs.py
@@ -64,6 +64,10 @@ class Options:
Longer exercises, or exercises where the solution might depend on optimization
may need this option.
"""
+    profile: bool = False
+    """
+    Whether to profile the execution.
+    """
@fallback_field(get_converter(), {"testplan": "test_suite", "plan_name": "test_suite"})
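
The new `profile` option defaults to `False`, so existing exercise configurations are unaffected. A minimal sketch of toggling it, assuming `Options` remains constructible from keyword arguments (as its other defaulted fields suggest):

```python
from tested.configs import Options

# Sketch only: assumes all Options fields keep their defaults, so the
# class can be instantiated without arguments.
assert not Options().profile          # profiling is off by default
assert Options(profile=True).profile  # and must be opted into explicitly
```
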
diff --git a/tested/judge/core.py b/tested/judge/core.py
index a960129d..fcdfaba4 100644
--- a/tested/judge/core.py
+++ b/tested/judge/core.py
@@ -28,6 +28,7 @@
execute_unit,
set_up_unit,
)
+from tested.judge.internal_profiler import DummyProfiler, Profiler
from tested.judge.linter import run_linter
from tested.judge.planning import (
CompilationResult,
@@ -73,7 +74,7 @@ def _handle_time_or_memory_compilation(
)
-def judge(bundle: Bundle):
+def judge(bundle: Bundle, profiler: Profiler | DummyProfiler):
"""
Evaluate a solution for an exercise. Execute the tests present in the
test suite. The result (the judgment) is sent to stdout, so Dodona can pick it
@@ -92,6 +93,7 @@ def judge(bundle: Bundle):
c. Process the results.
:param bundle: The configuration bundle.
+    :param profiler: The profiler (or dummy profiler) used to measure timings.
"""
# Begin by checking if the given test suite is executable in this language.
_logger.info("Checking supported features...")
@@ -116,17 +118,21 @@ def judge(bundle: Bundle):
max_time = float(bundle.config.time_limit) * 0.9
start = time.perf_counter()
+ profiler.start("Linter")
# Run the linter.
# TODO: do this in parallel
run_linter(bundle, collector, max_time)
if time.perf_counter() - start > max_time:
terminate(bundle, collector, Status.TIME_LIMIT_EXCEEDED)
return
+ profiler.stop("Linter")
planned_units = plan_test_suite(bundle, strategy=PlanStrategy.OPTIMAL)
+ profiler.start("Supporting file generation")
# Attempt to precompile everything.
common_dir, dependencies, selector = _generate_files(bundle, planned_units)
+ profiler.stop("Supporting file generation")
# Create an execution plan.
plan = ExecutionPlan(
@@ -138,8 +144,10 @@ def judge(bundle: Bundle):
start_time=start,
)
+ profiler.start("Precompilation")
_logger.debug("Attempting precompilation")
compilation_results = precompile(bundle, plan)
+ profiler.stop("Precompilation")
# If something went horribly wrong, and the compilation itself caused a timeout or memory issue, bail now.
if _is_fatal_compilation_error(compilation_results):
@@ -161,9 +169,12 @@ def judge(bundle: Bundle):
def _process_one_unit(
index: int,
) -> tuple[CompilationResult, ExecutionResult | None, Path]:
- return _execute_one_unit(bundle, plan, compilation_results, index)
+ profiler.start(f"Running execution unit {index}")
+ result = _execute_one_unit(bundle, plan, compilation_results, index, profiler)
+ profiler.stop(f"Running execution unit {index}")
+ return result
- if bundle.config.options.parallel:
+    if bundle.config.options.parallel and not bundle.config.options.profile:
max_workers = None
else:
max_workers = 1
@@ -206,6 +217,9 @@ def _process_one_unit(
terminate(bundle, collector, Status.TIME_LIMIT_EXCEEDED)
return
+ if isinstance(profiler, Profiler):
+ collector.add_messages([profiler.to_message()])
+
# Close the last tab.
terminate(bundle, collector, Status.CORRECT)
@@ -215,25 +229,32 @@ def _execute_one_unit(
plan: ExecutionPlan,
compilation_results: CompilationResult | None,
index: int,
+ profiler: Profiler | DummyProfiler,
) -> tuple[CompilationResult, ExecutionResult | None, Path]:
planned_unit = plan.units[index]
+ profiler.start("Preparing")
# Prepare the unit.
execution_dir, dependencies = set_up_unit(bundle, plan, index)
+ profiler.stop("Preparing")
# If compilation is necessary, do it.
if compilation_results is None:
+ profiler.start("Compiling")
local_compilation_results, dependencies = compile_unit(
bundle, plan, index, execution_dir, dependencies
)
+ profiler.stop("Compiling")
else:
local_compilation_results = compilation_results
# Execute the unit.
if local_compilation_results.status == Status.CORRECT:
remaining_time = plan.remaining_time()
+ profiler.start("Executing")
execution_result, status = execute_unit(
bundle, planned_unit, execution_dir, dependencies, remaining_time
)
+ profiler.stop("Executing")
local_compilation_results.status = status
else:
execution_result = None
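
`judge` and `_execute_one_unit` receive either a real `Profiler` or a `DummyProfiler` (a null object), so call sites never need to branch on `options.profile`. The new module below enforces a strict nesting discipline: sections close in LIFO order with matching names, otherwise the `assert` in `Profiler.stop` fires. A hedged sketch of that contract:

```python
from tested.judge.internal_profiler import Profiler

profiler = Profiler()
profiler.start("Precompilation")
profiler.start("Compiling")      # nested one level deeper
profiler.stop("Compiling")       # innermost section must close first
profiler.stop("Precompilation")
# Closing a section that is not the innermost one raises, e.g.:
# profiler.stop("Linter")  ->  AssertionError: Expected Execution, got Linter
```

This also explains why profiling forces `max_workers = 1` above: the profiler keeps a single stack of open measurements, which interleaved threads would corrupt.
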
diff --git a/tested/judge/internal_profiler.py b/tested/judge/internal_profiler.py
new file mode 100644
index 00000000..3780e29a
--- /dev/null
+++ b/tested/judge/internal_profiler.py
@@ -0,0 +1,79 @@
+from time import monotonic
+
+from attr import define, field
+
+from tested.dodona import ExtendedMessage, Permission
+
+
+@define
+class Measurement:
+ name: str
+ start: float
+ end: float | None = None
+ nested: list["Measurement"] = field(factory=list)
+
+    def to_message(self, default_open: bool = False) -> str:
+        assert self.end
+        nested_result = ""
+        if self.nested:
+            for n in self.nested:
+                nested_result += n.to_message()
+                nested_result += "\n"
+        return f"""
+        <details {'open' if default_open else ''}>
+        <summary>{self.name}: {self.end - self.start:.2f}s</summary>
+        {nested_result}
+        </details>
+        """
+
+
+class Profiler:
+ root_measurement: Measurement
+ currently_open: list[Measurement]
+
+ def __init__(self):
+ self.root_measurement = Measurement(name="Execution", start=monotonic())
+ self.currently_open = [self.root_measurement]
+
+ def start(self, name: str):
+ measurement = Measurement(name=name, start=monotonic())
+ self.currently_open[-1].nested.append(measurement)
+ self.currently_open.append(measurement)
+
+ def stop(self, name: str):
+ assert (
+ self.currently_open[-1].name == name
+ ), f"Expected {self.currently_open[-1].name}, got {name}"
+ m = self.currently_open.pop()
+ m.end = monotonic()
+
+ def to_message(self) -> ExtendedMessage:
+ if not self.root_measurement.end:
+ self.root_measurement.end = monotonic()
+ return ExtendedMessage(
+ description=f"""
+            {self.root_measurement.to_message(True)}
+            """,
+            format="html",
+            permission=Permission.STAFF,
+        )
+
+
+class DummyProfiler:
+    def start(self, name: str):
+        pass
+
+    def stop(self, name: str):
+        pass
diff --git a/tested/main.py b/tested/main.py
index 43041184..db0ec765 100644
--- a/tested/main.py
+++ b/tested/main.py
@@ -6,6 +6,7 @@
 from tested.configs import DodonaConfig, create_bundle
 from tested.dsl import parse_dsl
+from tested.judge.internal_profiler import DummyProfiler, Profiler
 from tested.testsuite import parse_test_suite
@@ -16,6 +17,11 @@ def run(config: DodonaConfig, judge_output: IO):
     :param config: The configuration, as received from Dodona.
     :param judge_output: Where the judge output will be written to.
     """
+    if config.options.profile:
+        profiler = Profiler()
+    else:
+        profiler = DummyProfiler()
+    profiler.start("Reading test suite")
     try:
         with open(f"{config.resources}/{config.test_suite}", "r") as t:
             textual_suite = t.read()
@@ -25,14 +31,17 @@ def run(config: DodonaConfig, judge_output: IO):
             "Remember that the test suite is a path relative to the 'evaluation' folder of your exercise."
         )
         raise e
+    profiler.stop("Reading test suite")

     _, ext = os.path.splitext(config.test_suite)
     is_yaml = ext.lower() in (".yaml", ".yml")
+    profiler.start("Parsing test suite")
     if is_yaml:
         suite = parse_dsl(textual_suite)
     else:
         suite = parse_test_suite(textual_suite)
+    profiler.stop("Parsing test suite")

     pack = create_bundle(config, judge_output, suite)
     from .judge import judge
-    judge(pack)
+    judge(pack, profiler)
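
Putting it together, a profiled run adds exactly one staff-only message at the end of the judgement. A small usage sketch built only from what this diff defines:

```python
from tested.dodona import Permission
from tested.judge.internal_profiler import Profiler

profiler = Profiler()
profiler.start("Parsing test suite")
profiler.stop("Parsing test suite")

message = profiler.to_message()  # closes the root "Execution" measurement
assert message.format == "html"                # nested <details> blocks
assert message.permission == Permission.STAFF  # hidden from students
```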