[fbsync] Adding FLOPs and size to model metadata (#6936)
Summary:
* Adding FLOPs and size to model metadata

* Adding weight size to quantization models

* Small refactor of rich metadata

* Removing unused code

* Fixing wrong entries

* Adding .DS_Store to gitignore

* Renaming _flops to _ops

* Adding number of operations to quantization models

* Reflecting _flops change to _ops

* Renamed ops and weight size in individual model doc pages

* Linter fixes

* Rounding ops to first decimal

* Rounding num ops and sizes to 3 decimals

* Change naming of columns.

* Update tables

Reviewed By: NicolasHug

Differential Revision: D41265180

fbshipit-source-id: e6f8629ba3f2177411716113430b87c1710982c0

Co-authored-by: Toni Blaslov <tblaslov@fb.com>
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
3 people authored and facebook-github-bot committed Nov 14, 2022
1 parent 158b69e commit 47d3ee1
Showing 39 changed files with 353 additions and 10 deletions.
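The new fields surface through each weights enum's meta dictionary. A minimal sketch of reading them, assuming a torchvision build that includes this commit (the printed comments are illustrative):

# Minimal sketch: reading the metadata added by this PR.
# Assumes torchvision >= 0.14 with this change applied.
from torchvision.models import ResNet50_Weights

w = ResNet50_Weights.IMAGENET1K_V2
print(w.meta["num_params"])    # parameter count, present before this PR
print(w.meta["_ops"])          # new: operation count in GFLOPs (GIPS for quantized weights)
print(w.meta["_weight_size"])  # new: checkpoint file size in MB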
33 changes: 26 additions & 7 deletions docs/source/conf.py
@@ -362,6 +362,14 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
             max_visible = 3
             v_sample = ", ".join(v[:max_visible])
             v = f"{v_sample}, ... ({len(v)-max_visible} omitted)" if len(v) > max_visible else v_sample
+        elif k == "_ops":
+            if obj.__name__.endswith("_QuantizedWeights"):
+                v = f"{v} giga instructions per sec"
+            else:
+                v = f"{v} giga floating-point operations per sec"
+        elif k == "_weight_size":
+            v = f"{v} MB (file size)"
 
         table.append((str(k), str(v)))
     table = tabulate(table, tablefmt="rst")
     lines += [".. rst-class:: table-weights"]  # Custom CSS class, see custom_torchvision.css
@@ -385,27 +393,38 @@ def generate_weights_table(module, table_name, metrics, dataset, include_pattern
     if exclude_patterns is not None:
         weights = [w for w in weights if all(p not in str(w) for p in exclude_patterns)]
 
+    ops_name = "GIPS" if "QuantizedWeights" in weights_endswith else "GFLOPS"
+
     metrics_keys, metrics_names = zip(*metrics)
-    column_names = ["Weight"] + list(metrics_names) + ["Params", "Recipe"]
+    column_names = (
+        ["Weight"] + list(metrics_names) + ["Params"] + [ops_name, "Size (MB)", "Recipe"]
+    )  # Final column order
     column_names = [f"**{name}**" for name in column_names]  # Add bold
 
-    content = [
-        (
+    content = []
+    for w in weights:
+        row = [
             f":class:`{w} <{type(w).__name__}>`",
             *(w.meta["_metrics"][dataset][metric] for metric in metrics_keys),
             f"{w.meta['num_params']/1e6:.1f}M",
+            f"{w.meta['_ops']:.3f}",
+            f"{round(w.meta['_weight_size'], 1):.1f}",
             f"`link <{w.meta['recipe']}>`__",
-        )
-        for w in weights
-    ]
+        ]
+
+        content.append(row)
+
+    column_widths = ["110"] + ["18"] * len(metrics_names) + ["18"] * 3 + ["10"]
+    widths_table = " ".join(column_widths)
 
     table = tabulate(content, headers=column_names, tablefmt="rst")
 
     generated_dir = Path("generated")
     generated_dir.mkdir(exist_ok=True)
     with open(generated_dir / f"{table_name}_table.rst", "w+") as table_file:
         table_file.write(".. rst-class:: table-weights\n")  # Custom CSS class, see custom_torchvision.css
         table_file.write(".. table::\n")
-        table_file.write(f"    :widths: 100 {'20 ' * len(metrics_names)} 20 10\n\n")
+        table_file.write(f"    :widths: {widths_table} \n\n")
         table_file.write(f"{textwrap.indent(table, ' ' * 4)}\n\n")
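For illustration, this is roughly what the tabulate call above emits for a single row; the weight name and numbers below are illustrative placeholders, and the real output is written to generated/<table_name>_table.rst:

# Illustrative only: mimic the rst table produced by generate_weights_table.
from tabulate import tabulate

headers = ["**Weight**", "**Acc@1**", "**Params**", "**GFLOPS**", "**Size (MB)**"]
rows = [["ResNet50_Weights.IMAGENET1K_V2", 80.858, "25.6M", "4.089", "97.8"]]
print(tabulate(rows, headers=headers, tablefmt="rst"))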


4 changes: 3 additions & 1 deletion test/test_extended_models.py
@@ -155,11 +155,13 @@ def test_schema_meta_validation(model_fn):
         "recipe",
         "unquantized",
         "_docs",
+        "_ops",
+        "_weight_size",
     }
     # mandatory fields for each computer vision task
     classification_fields = {"categories", ("_metrics", "ImageNet-1K", "acc@1"), ("_metrics", "ImageNet-1K", "acc@5")}
     defaults = {
-        "all": {"_metrics", "min_size", "num_params", "recipe", "_docs"},
+        "all": {"_metrics", "min_size", "num_params", "recipe", "_docs", "_weight_size", "_ops"},
         "models": classification_fields,
         "detection": {"categories", ("_metrics", "COCO-val2017", "box_map")},
         "quantization": classification_fields | {"backend", "unquantized"},
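An illustrative way to exercise the same invariant outside the test suite; this loop is a sketch, not the repository's actual test helper:

# Sketch: every top-level classification weights enum should now carry
# the two new metadata fields enforced by test_schema_meta_validation.
import torchvision.models as M

for name in dir(M):
    if name.endswith("_Weights"):
        for w in getattr(M, name):
            assert "_ops" in w.meta and "_weight_size" in w.meta, name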
2 changes: 2 additions & 0 deletions torchvision/models/alexnet.py
@@ -67,6 +67,8 @@ class AlexNet_Weights(WeightsEnum):
"acc@5": 79.066,
}
},
"_ops": 0.714,
"_weight_size": 233.087,
"_docs": """
These weights reproduce closely the results of the paper using a simplified training recipe.
""",
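The committed numbers are precomputed. One way to reproduce an _ops-style figure locally is fvcore's flop counter, which reports multiply-accumulates; whether this PR used fvcore is an assumption, not something the diff states:

# Hedged sketch: estimate GFLOPs for alexnet with fvcore (pip install fvcore).
import torch
from fvcore.nn import FlopCountAnalysis
from torchvision.models import alexnet

model = alexnet().eval()
flops = FlopCountAnalysis(model, torch.randn(1, 3, 224, 224))
print(f"{flops.total() / 1e9:.3f}")  # roughly 0.714 for alexnet, matching the "_ops" entry above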
8 changes: 8 additions & 0 deletions torchvision/models/convnext.py
@@ -219,6 +219,8 @@ class ConvNeXt_Tiny_Weights(WeightsEnum):
"acc@5": 96.146,
}
},
"_ops": 4.456,
"_weight_size": 109.119,
},
)
DEFAULT = IMAGENET1K_V1
@@ -237,6 +239,8 @@ class ConvNeXt_Small_Weights(WeightsEnum):
"acc@5": 96.650,
}
},
"_ops": 8.684,
"_weight_size": 191.703,
},
)
DEFAULT = IMAGENET1K_V1
@@ -255,6 +259,8 @@ class ConvNeXt_Base_Weights(WeightsEnum):
"acc@5": 96.870,
}
},
"_ops": 15.355,
"_weight_size": 338.064,
},
)
DEFAULT = IMAGENET1K_V1
@@ -273,6 +279,8 @@ class ConvNeXt_Large_Weights(WeightsEnum):
"acc@5": 96.976,
}
},
"_ops": 34.361,
"_weight_size": 754.537,
},
)
DEFAULT = IMAGENET1K_V1
10 changes: 8 additions & 2 deletions torchvision/models/densenet.py
@@ -15,7 +15,6 @@
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface


__all__ = [
"DenseNet",
"DenseNet121_Weights",
@@ -278,6 +277,8 @@ class DenseNet121_Weights(WeightsEnum):
"acc@5": 91.972,
}
},
"_ops": 2.834,
"_weight_size": 30.845,
},
)
DEFAULT = IMAGENET1K_V1
@@ -296,6 +297,8 @@ class DenseNet161_Weights(WeightsEnum):
"acc@5": 93.560,
}
},
"_ops": 7.728,
"_weight_size": 110.369,
},
)
DEFAULT = IMAGENET1K_V1
@@ -314,6 +317,8 @@ class DenseNet169_Weights(WeightsEnum):
"acc@5": 92.806,
}
},
"_ops": 3.36,
"_weight_size": 54.708,
},
)
DEFAULT = IMAGENET1K_V1
@@ -332,6 +337,8 @@ class DenseNet201_Weights(WeightsEnum):
"acc@5": 93.370,
}
},
"_ops": 4.291,
"_weight_size": 77.373,
},
)
DEFAULT = IMAGENET1K_V1
@@ -444,7 +451,6 @@ def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool
# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
{
"densenet121": DenseNet121_Weights.IMAGENET1K_V1.url,
8 changes: 8 additions & 0 deletions torchvision/models/detection/faster_rcnn.py
@@ -388,6 +388,8 @@ class FasterRCNN_ResNet50_FPN_Weights(WeightsEnum):
"box_map": 37.0,
}
},
"_ops": 134.38,
"_weight_size": 159.743,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
@@ -407,6 +409,8 @@ class FasterRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
"box_map": 46.7,
}
},
"_ops": 280.371,
"_weight_size": 167.104,
"_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
},
)
@@ -426,6 +430,8 @@ class FasterRCNN_MobileNet_V3_Large_FPN_Weights(WeightsEnum):
"box_map": 32.8,
}
},
"_ops": 4.494,
"_weight_size": 74.239,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
@@ -445,6 +451,8 @@ class FasterRCNN_MobileNet_V3_Large_320_FPN_Weights(WeightsEnum):
"box_map": 22.8,
}
},
"_ops": 0.719,
"_weight_size": 74.239,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
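The same metadata makes detection checkpoints easy to compare programmatically; a small sketch using the public get_model_weights API available since torchvision 0.14:

# Sketch: list ops and size for the Faster R-CNN weights touched above.
from torchvision.models import get_model_weights

for name in ("fasterrcnn_resnet50_fpn", "fasterrcnn_mobilenet_v3_large_fpn"):
    for w in get_model_weights(name):
        print(f"{w}: {w.meta['_ops']:.3f} GFLOPs, {w.meta['_weight_size']:.3f} MB")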
2 changes: 2 additions & 0 deletions torchvision/models/detection/fcos.py
@@ -662,6 +662,8 @@ class FCOS_ResNet50_FPN_Weights(WeightsEnum):
"box_map": 39.2,
}
},
"_ops": 128.207,
"_weight_size": 123.608,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
4 changes: 4 additions & 0 deletions torchvision/models/detection/keypoint_rcnn.py
@@ -328,6 +328,8 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum):
"kp_map": 61.1,
}
},
"_ops": 133.924,
"_weight_size": 226.054,
"_docs": """
These weights were produced by following a similar training recipe as on the paper but use a checkpoint
from an early epoch.
@@ -347,6 +349,8 @@ class KeypointRCNN_ResNet50_FPN_Weights(WeightsEnum):
"kp_map": 65.0,
}
},
"_ops": 137.42,
"_weight_size": 226.054,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
4 changes: 4 additions & 0 deletions torchvision/models/detection/mask_rcnn.py
@@ -370,6 +370,8 @@ class MaskRCNN_ResNet50_FPN_Weights(WeightsEnum):
"mask_map": 34.6,
}
},
"_ops": 134.38,
"_weight_size": 169.84,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
@@ -390,6 +392,8 @@ class MaskRCNN_ResNet50_FPN_V2_Weights(WeightsEnum):
"mask_map": 41.8,
}
},
"_ops": 333.577,
"_weight_size": 177.219,
"_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
},
)
4 changes: 4 additions & 0 deletions torchvision/models/detection/retinanet.py
@@ -690,6 +690,8 @@ class RetinaNet_ResNet50_FPN_Weights(WeightsEnum):
"box_map": 36.4,
}
},
"_ops": 151.54,
"_weight_size": 130.267,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
@@ -709,6 +711,8 @@ class RetinaNet_ResNet50_FPN_V2_Weights(WeightsEnum):
"box_map": 41.5,
}
},
"_ops": 152.238,
"_weight_size": 146.037,
"_docs": """These weights were produced using an enhanced training recipe to boost the model accuracy.""",
},
)
2 changes: 2 additions & 0 deletions torchvision/models/detection/ssd.py
@@ -39,6 +39,8 @@ class SSD300_VGG16_Weights(WeightsEnum):
"box_map": 25.1,
}
},
"_ops": 34.858,
"_weight_size": 135.988,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
2 changes: 2 additions & 0 deletions torchvision/models/detection/ssdlite.py
@@ -198,6 +198,8 @@ class SSDLite320_MobileNet_V3_Large_Weights(WeightsEnum):
"box_map": 21.3,
}
},
"_ops": 0.583,
"_weight_size": 13.418,
"_docs": """These weights were produced by following a similar training recipe as on the paper.""",
},
)
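A _weight_size-style figure can be reproduced from the checkpoint file itself. The sketch below assumes the file lands in the default torch hub cache and that the committed "MB" means MiB (1024 * 1024 bytes); both are assumptions rather than facts stated by the diff:

# Hedged sketch: measure the on-disk size of a checkpoint.
from pathlib import Path

import torch.hub
from torchvision.models.detection import SSDLite320_MobileNet_V3_Large_Weights

w = SSDLite320_MobileNet_V3_Large_Weights.COCO_V1
w.get_state_dict(progress=False)  # downloads the file if it is not cached yet
ckpt = Path(torch.hub.get_dir()) / "checkpoints" / w.url.split("/")[-1]
print(f"{ckpt.stat().st_size / 1024**2:.3f}")  # roughly 13.418, matching "_weight_size" above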
24 changes: 24 additions & 0 deletions torchvision/models/efficientnet.py
@@ -464,6 +464,8 @@ class EfficientNet_B0_Weights(WeightsEnum):
"acc@5": 93.532,
}
},
"_ops": 0.386,
"_weight_size": 20.451,
"_docs": """These weights are ported from the original paper.""",
},
)
@@ -486,6 +488,8 @@ class EfficientNet_B1_Weights(WeightsEnum):
"acc@5": 94.186,
}
},
"_ops": 0.687,
"_weight_size": 30.134,
"_docs": """These weights are ported from the original paper.""",
},
)
@@ -504,6 +508,8 @@ class EfficientNet_B1_Weights(WeightsEnum):
"acc@5": 94.934,
}
},
"_ops": 0.687,
"_weight_size": 30.136,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
@@ -530,6 +536,8 @@ class EfficientNet_B2_Weights(WeightsEnum):
"acc@5": 95.310,
}
},
"_ops": 1.088,
"_weight_size": 35.174,
"_docs": """These weights are ported from the original paper.""",
},
)
@@ -552,6 +560,8 @@ class EfficientNet_B3_Weights(WeightsEnum):
"acc@5": 96.054,
}
},
"_ops": 1.827,
"_weight_size": 47.184,
"_docs": """These weights are ported from the original paper.""",
},
)
@@ -574,6 +584,8 @@ class EfficientNet_B4_Weights(WeightsEnum):
"acc@5": 96.594,
}
},
"_ops": 4.394,
"_weight_size": 74.489,
"_docs": """These weights are ported from the original paper.""",
},
)
@@ -596,6 +608,8 @@ class EfficientNet_B5_Weights(WeightsEnum):
"acc@5": 96.628,
}
},
"_ops": 10.266,
"_weight_size": 116.864,
"_docs": """These weights are ported from the original paper.""",
},
)
@@ -618,6 +632,8 @@ class EfficientNet_B6_Weights(WeightsEnum):
"acc@5": 96.916,
}
},
"_ops": 19.068,
"_weight_size": 165.362,
"_docs": """These weights are ported from the original paper.""",
},
)
@@ -640,6 +656,8 @@ class EfficientNet_B7_Weights(WeightsEnum):
"acc@5": 96.908,
}
},
"_ops": 37.746,
"_weight_size": 254.675,
"_docs": """These weights are ported from the original paper.""",
},
)
@@ -664,6 +682,8 @@ class EfficientNet_V2_S_Weights(WeightsEnum):
"acc@5": 96.878,
}
},
"_ops": 8.366,
"_weight_size": 82.704,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
@@ -692,6 +712,8 @@ class EfficientNet_V2_M_Weights(WeightsEnum):
"acc@5": 97.156,
}
},
"_ops": 24.582,
"_weight_size": 208.01,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
@@ -723,6 +745,8 @@ class EfficientNet_V2_L_Weights(WeightsEnum):
"acc@5": 97.788,
}
},
"_ops": 56.08,
"_weight_size": 454.573,
"_docs": """These weights are ported from the original paper.""",
},
)
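With every variant annotated, accuracy and compute trade-offs across the EfficientNet family can be read straight off the enums; the three weights below are an arbitrary sample:

# Sketch: compare EfficientNet variants by the new metadata.
from torchvision.models import (
    EfficientNet_B0_Weights,
    EfficientNet_B7_Weights,
    EfficientNet_V2_L_Weights,
)

for w in (
    EfficientNet_B0_Weights.IMAGENET1K_V1,
    EfficientNet_B7_Weights.IMAGENET1K_V1,
    EfficientNet_V2_L_Weights.IMAGENET1K_V1,
):
    print(f"{w}: {w.meta['_ops']:.3f} GFLOPs, {w.meta['_weight_size']:.1f} MB")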