One last find and replace
Signed-off-by: Jeremy Fowers <jeremy.fowers@amd.com>
jeremyfowers committed Dec 4, 2023
1 parent 6351442 commit 47ade1f
Showing 4 changed files with 22 additions and 22 deletions.
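The diff below finishes renaming the per-evaluation statistics attribute from stats.build_stats to stats.evaluation_stats at its remaining call sites. A minimal sketch of the accessor shape callers migrate to (the Stats stand-in class and the example values are illustrative assumptions, not the real turnkeyml implementation):

# Stand-in for the turnkeyml stats object; only the attribute name matters here.
class Stats:
    def __init__(self, evaluation_stats: dict):
        # Formerly exposed as `build_stats`; the rename presumably reflects that
        # these values describe a whole evaluation, not just a single build.
        self.evaluation_stats = evaluation_stats

stats = Stats({"mean_latency": 1.8, "throughput": 556.0, "iterations": 100})

# Call sites updated in this commit now read:
latency = stats.evaluation_stats["mean_latency"]
throughput = stats.evaluation_stats["throughput"]
print(latency, throughput)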
2 changes: 1 addition & 1 deletion src/turnkeyml/analyze/status.py
@@ -198,7 +198,7 @@ def print_invocation(
if unique_invocation.stats_keys is not None:
for key in unique_invocation.stats_keys:
nice_key = _pretty_print_key(key)
- value = unique_invocation.stats.build_stats[key]
+ value = unique_invocation.stats.evaluation_stats[key]
printing.logn(f"{ident}\t\t\t{nice_key}:\t{value}")
print()
else:
34 changes: 17 additions & 17 deletions src/turnkeyml/cli/report.py
@@ -47,7 +47,7 @@ def summary_spreadsheets(args) -> None:
Path(report_dir).mkdir(parents=True, exist_ok=True)

report: List[Dict] = []
- all_build_stats = []
+ all_evaluation_stats = []

# Add results from all user-provided cache folders
for cache_dir in cache_dirs:
@@ -69,20 +69,20 @@ def summary_spreadsheets(args) -> None:

# create a separate dict for each build
for build in model_stats[fs.Keys.EVALUATIONS].values():
- build_stats = {}
+ evaluation_stats = {}

# Copy all of the stats for the model that are common across builds
for key, value in model_stats.items():
if key != fs.Keys.EVALUATIONS:
- build_stats[key] = value
+ evaluation_stats[key] = value

# Copy the build-specific stats
for key, value in build.items():
# Break each value in "completed build stages" into its own column
# to make analysis easier
if key == fs.Keys.COMPLETED_BUILD_STAGES:
for subkey, subvalue in value.items():
- build_stats[subkey] = subvalue
+ evaluation_stats[subkey] = subvalue

# If a build is still marked as "running" at reporting time, it
# must have been killed by a time out, out-of-memory (OOM), or some
@@ -93,30 +93,30 @@
):
value = fs.BenchmarkStatus.KILLED

- build_stats[key] = value
+ evaluation_stats[key] = value

- all_build_stats.append(build_stats)
+ all_evaluation_stats.append(evaluation_stats)
except yaml.scanner.ScannerError:
continue

# Scan the build stats to determine the set of columns for the CSV file.
# The CSV will have one column for every key in any build stats dict.
column_headers = []
- for build_stats in all_build_stats:
+ for evaluation_stats in all_evaluation_stats:
# Add any key that isn't already in column_headers
- for header in build_stats.keys():
+ for header in evaluation_stats.keys():
if header not in column_headers:
column_headers.append(header)

# Add each build to the report
- for build_stats in all_build_stats:
+ for evaluation_stats in all_evaluation_stats:
# Start with a dictionary where all of the values are "-". If a build
# has a value for each key we will fill it in, and otherwise the "-"
# will indicate that no value was available
result = {k: "-" for k in column_headers}

for key in column_headers:
- result[key] = _good_get(build_stats, key)
+ result[key] = _good_get(evaluation_stats, key)

report.append(result)

@@ -133,13 +133,13 @@ def summary_spreadsheets(args) -> None:

# Save the unique errors and counts to a file
errors = []
- for build_stats in all_build_stats:
+ for evaluation_stats in all_evaluation_stats:
if (
"compilation_error" in build_stats.keys()
and "compilation_error_id" in build_stats.keys()
"compilation_error" in evaluation_stats.keys()
and "compilation_error_id" in evaluation_stats.keys()
):
- error = build_stats["compilation_error"]
- id = build_stats["compilation_error_id"]
+ error = evaluation_stats["compilation_error"]
+ id = evaluation_stats["compilation_error_id"]
if id != "":
unique_error = True
for reported_error in errors:
@@ -148,13 +148,13 @@
reported_error["count"] = reported_error["count"] + 1
reported_error["models_impacted"] = reported_error[
"models_impacted"
- ] + [build_stats["model_name"]]
+ ] + [evaluation_stats["model_name"]]

if unique_error:
reported_error = {
"id": id,
"count": 1,
"models_impacted": [build_stats["model_name"]],
"models_impacted": [evaluation_stats["model_name"]],
"example": error,
}
errors.append(reported_error)
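For context, the report.py logic touched above assembles the CSV as a union of columns: every key that appears in any evaluation's stats dict becomes a column, and each row defaults to "-" for keys that evaluation did not report. A rough, self-contained sketch of that pattern (the sample data, output file name, and the plain dict update standing in for _good_get are assumptions for illustration):

import csv
from typing import Dict, List

all_evaluation_stats: List[Dict] = [
    {"model_name": "bert", "mean_latency": 2.1},
    {"model_name": "resnet50", "throughput": 930.0},
]

# One column for every key that appears in any evaluation's stats dict
column_headers: List[str] = []
for evaluation_stats in all_evaluation_stats:
    for header in evaluation_stats.keys():
        if header not in column_headers:
            column_headers.append(header)

# Each row starts as all "-" so missing values stay visible in the spreadsheet
report: List[Dict] = []
for evaluation_stats in all_evaluation_stats:
    row = {k: "-" for k in column_headers}
    row.update(evaluation_stats)
    report.append(row)

with open("report.csv", "w", newline="") as spreadsheet:
    writer = csv.DictWriter(spreadsheet, fieldnames=column_headers)
    writer.writeheader()
    writer.writerows(report)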
6 changes: 3 additions & 3 deletions test/cli.py
@@ -161,11 +161,11 @@ def assert_success_of_builds(
), f"{build_state.info.__dict__[info_property[0]]} == {info_property[1]}"

if check_perf:
- assert stats.build_stats["mean_latency"] > 0
- assert stats.build_stats["throughput"] > 0
+ assert stats.evaluation_stats["mean_latency"] > 0
+ assert stats.evaluation_stats["throughput"] > 0

if check_iteration_count:
- iterations = stats.build_stats["iterations"]
+ iterations = stats.evaluation_stats["iterations"]
assert iterations == check_iteration_count

if check_opset:
2 changes: 1 addition & 1 deletion test/helpers/common.py
@@ -144,6 +144,6 @@ def get_stats_and_state(
build_state.config.build_name,
build_state.evaluation_id,
)
- return stats.build_stats, build_state
+ return stats.evaluation_stats, build_state

raise Exception(f"Stats not found for {test_script}")
