Changes needed for Deepsparse.Analyze #304

Merged: 9 commits, Apr 18, 2023
src/sparsezoo/analyze/analysis.py: 25 changes (23 additions & 2 deletions)

@@ -38,6 +38,7 @@
     NamedEntry,
     NodeCounts,
     NodeIO,
+    NodeTimingEntry,
     OperationSummary,
     OpsSummary,
     ParameterComponent,
@@ -268,6 +269,11 @@ class BenchmarkResult(YAMLSerializableBaseModel):
         description="Node level inference results",
     )

+    supported_graph_percentage: Optional[float] = Field(
+        default=None,
+        description="Percentage of model graph supported by the runtime engine",
+    )
+

 class NodeAnalysis(YAMLSerializableBaseModel):
     """
@@ -825,17 +831,32 @@ def from_model_analysis(
             section_name="Overall",
             entries=[
                 PerformanceEntry(
-                    model=idx,
+                    model=analysis.model_name,
                     sparsity=overall_count_summary.sparsity_percent,
                     quantized=overall_count_summary.quantized_percent,
                     latency=benchmark_result.average_latency,
                     throughput=benchmark_result.items_per_second,
-                    supported_graph=0.0,  # TODO: fill in correct value
+                    supported_graph=(
+                        benchmark_result.supported_graph_percentage or 0.0
+                    ),
                 )
                 for idx, benchmark_result in enumerate(analysis.benchmark_results)
             ],
         )

+        for idx, benchmark_result in enumerate(analysis.benchmark_results):
+            node_timing_section = Section(
+                section_name=f"Node Timings for Benchmark # {idx+1}",
+                entries=[
+                    NodeTimingEntry(
+                        node_name=node_timing.name,
+                        avg_runtime=node_timing.avg_run_time,
+                    )
+                    for node_timing in benchmark_result.node_timings
+                ],
+            )
+            sections.append(node_timing_section)
+
         sections.extend([param_section, ops_section, overall_section])
         return cls(sections=sections)

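Note: two behaviors worth calling out in the hunk above. supported_graph now falls back to 0.0 whenever the engine reports no percentage, and every benchmark result gets its own node-timing section. A standalone sketch of the section loop, with plain dicts standing in for Section and NodeTimingEntry and made-up timings:

```python
# Standalone sketch of the per-benchmark node-timing sections built above.
# Plain dicts stand in for the Section/NodeTimingEntry models; the node
# names and timing values are made up for illustration.
benchmark_results = [
    {"node_timings": [
        {"name": "Conv_0", "avg_run_time": 0.12},
        {"name": "Gemm_1", "avg_run_time": 0.05},
    ]},
    {"node_timings": [
        {"name": "Conv_0", "avg_run_time": 0.11},
    ]},
]

sections = []
for idx, benchmark_result in enumerate(benchmark_results):
    sections.append({
        "section_name": f"Node Timings for Benchmark # {idx+1}",
        "entries": [
            {"node_name": t["name"], "avg_runtime": t["avg_run_time"]}
            for t in benchmark_result["node_timings"]
        ],
    })

print([s["section_name"] for s in sections])
# ['Node Timings for Benchmark # 1', 'Node Timings for Benchmark # 2']
```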
src/sparsezoo/analyze/utils/models.py: 29 changes (27 additions & 2 deletions)

@@ -369,12 +369,36 @@ class PerformanceEntry(BaseEntry):
     ] + BaseEntry._print_order


+class NodeTimingEntry(Entry):
+    """
+    An Entry with node-level timing info
+    """
+
+    node_name: str
+    avg_runtime: float
+
+    _print_order = [
+        "node_name",
+        "avg_runtime",
+    ] + Entry._print_order
+
+
 class Section(Entry):
     """
     Represents a list of Entries with an optional name
     """

-    entries: List[Union[NamedEntry, TypedEntry, SizedModelEntry, ModelEntry, BaseEntry]]
+    entries: List[
+        Union[
+            NodeTimingEntry,
+            PerformanceEntry,
+            NamedEntry,
+            TypedEntry,
+            SizedModelEntry,
+            ModelEntry,
+            BaseEntry,
+        ]
+    ]

     section_name: str = ""
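Note: the order of the widened Union matters if sections are ever re-parsed from dicts or YAML. Assuming pydantic v1 semantics (union members are tried left to right and the first that validates wins), the specific entry types must come before the loose fallbacks, otherwise a node-timing payload would be silently coerced into a bare BaseEntry. An illustrative sketch with stand-in models, not the real entry classes:

```python
# Why union order matters under pydantic v1 (members are tried left to
# right; the first model that validates wins). Stand-in classes only.
# Note: pydantic v2's "smart" unions behave differently.
from typing import List, Union

from pydantic import BaseModel


class LooseEntry(BaseModel):
    # No fields; extra keys are ignored by default in pydantic v1,
    # so this validates against almost any payload.
    pass


class NodeTimingSketch(BaseModel):
    node_name: str
    avg_runtime: float


class GoodSection(BaseModel):
    # Specific model first, loose fallback last, mirroring the diff above.
    entries: List[Union[NodeTimingSketch, LooseEntry]]


class BadSection(BaseModel):
    # Loose model first: it matches everything, so timing fields are dropped.
    entries: List[Union[LooseEntry, NodeTimingSketch]]


payload = {"entries": [{"node_name": "Conv_0", "avg_runtime": 0.12}]}
assert isinstance(GoodSection(**payload).entries[0], NodeTimingSketch)
assert isinstance(BadSection(**payload).entries[0], LooseEntry)
```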

@@ -425,7 +449,7 @@ def __sub__(self, other: "Section"):
     def get_comparable_entries(self, other: "Section") -> Tuple[List[Entry], ...]:
         """
         Get comparable entries by same name or type if they belong to
-        `NamedEntry` or `TypedEntry`, else return all entries
+        `NamedEntry`, `TypedEntry`, or `NodeTimingEntry`, else return all entries

         :return: A tuple composed of two lists, containing comparable entries
             in correct order from current and other Section objects
@@ -434,6 +458,7 @@ def get_comparable_entries(self, other: "Section") -> Tuple[List[Entry], ...]:
         entry_type_to_extractor = {
             "NamedEntry": lambda entry: entry.name,
             "TypedEntry": lambda entry: entry.type,
+            "NodeTimingEntry": lambda entry: entry.node_name,
         }
         entry_type = self.entries[0].__class__.__name__

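Note: with the extractor added above, get_comparable_entries can pair node timings from two analyses by node_name, just as named and typed entries pair by name and type. A standalone sketch of that key-based matching; tuples stand in for Entry objects and the data is made up:

```python
# Key-based pairing in the spirit of get_comparable_entries: entries from
# two sections are matched on a per-type key (name, type, or node_name).
from typing import Callable, List, Tuple


def comparable_pairs(
    current: List[tuple],
    other: List[tuple],
    key: Callable[[tuple], str],
) -> List[Tuple[tuple, tuple]]:
    # Index the other section by key, then keep entries present in both.
    other_by_key = {key(entry): entry for entry in other}
    return [
        (entry, other_by_key[key(entry)])
        for entry in current
        if key(entry) in other_by_key
    ]


before = [("Conv_0", 0.12), ("Gemm_1", 0.05)]
after = [("Gemm_1", 0.04), ("Conv_0", 0.10)]
pairs = comparable_pairs(before, after, key=lambda entry: entry[0])
# [(('Conv_0', 0.12), ('Conv_0', 0.10)), (('Gemm_1', 0.05), ('Gemm_1', 0.04))]
```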