diff --git a/backends/cortex_m/ops/TARGETS b/backends/cortex_m/ops/TARGETS
index 12044266ca1..18b387c8ad2 100644
--- a/backends/cortex_m/ops/TARGETS
+++ b/backends/cortex_m/ops/TARGETS
@@ -17,6 +17,7 @@ runtime.python_library(
     deps = [
         "fbcode//caffe2:torch",
         "//executorch/backends/cortex_m/passes:passes_utils",
+        "//executorch/backends/cortex_m/quantizer:quantization_configs",
     ],
 )
 
diff --git a/backends/cortex_m/quantizer/TARGETS b/backends/cortex_m/quantizer/TARGETS
new file mode 100644
index 00000000000..0af105efef0
--- /dev/null
+++ b/backends/cortex_m/quantizer/TARGETS
@@ -0,0 +1,36 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
+
+oncall("executorch")
+
+python_library(
+    name = "quantizer",
+    srcs = [
+        "__init__.py",
+        "operator_configs.py",
+        "quantization_configs.py",
+        "quantizer.py",
+    ],
+    deps = [
+        "//caffe2:torch",
+        "//executorch/backends/arm/quantizer:quantization_config",
+        "//pytorch/ao:torchao",
+    ],
+)
+
+python_library(
+    name = "quantization_configs",
+    srcs = [
+        "quantization_configs.py",
+    ],
+    deps = [
+        "//caffe2:torch",
+        "//executorch/backends/arm/quantizer:quantization_config",
+        "//pytorch/ao:torchao",
+    ],
+)
diff --git a/backends/cortex_m/quantizer/__init__.py b/backends/cortex_m/quantizer/__init__.py
new file mode 100644
index 00000000000..39a3de431ff
--- /dev/null
+++ b/backends/cortex_m/quantizer/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from .quantization_configs import (  # noqa
+    CMSIS_SOFTMAX_SCALE,
+    CMSIS_SOFTMAX_ZERO_POINT,
+    INT8_ACTIVATION_PER_CHANNEL_QSPEC,
+    INT8_ACTIVATION_PER_TENSOR_QSPEC,
+    INT8_PER_CHANNEL_CONFIG,
+    INT8_PER_TENSOR_CONFIG,
+    INT8_WEIGHT_PER_CHANNEL_QSPEC,
+    INT8_WEIGHT_PER_TENSOR_QSPEC,
+    SOFTMAX_OUTPUT_FIXED_QSPEC,
+    SOFTMAX_PER_TENSOR_CONFIG,
+)
+from .quantizer import CortexMQuantizer, SharedQspecQuantizer  # noqa
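
For context, a minimal sketch of how the `CortexMQuantizer` exported by the new `__init__.py` might be driven through the PT2E flow that torchao (a dep of the new target) provides. The toy model, the example inputs, the exact torchao import path, and the zero-argument `CortexMQuantizer()` constructor are assumptions for illustration, not something this diff establishes:

```python
# Hypothetical usage sketch; the toy model, torchao import path, and
# CortexMQuantizer constructor signature are assumptions, not part of this diff.
import torch

from executorch.backends.cortex_m.quantizer import CortexMQuantizer
from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e

model = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(1, 16),)

# Export to an FX graph, annotate it with the Cortex-M quantizer,
# run a calibration pass, then convert to a quantized graph.
exported = torch.export.export(model, example_inputs).module()
prepared = prepare_pt2e(exported, CortexMQuantizer())
prepared(*example_inputs)  # calibration
quantized = convert_pt2e(prepared)
```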