From 1adcaec3223f58949a4fe00131f129407e57299b Mon Sep 17 00:00:00 2001 From: Nitin Jain Date: Mon, 25 Aug 2025 13:03:46 -0700 Subject: [PATCH 1/2] Add 16A8W quantization configuration utility for ARM backend (#13641) Summary: This diff implements a 16A8W (16-bit activations, 8-bit weights) quantization configuration utility for the ExecutorTorch ARM backend, following the feedback from D79746479. ## Key Changes **1. New Quantization Configuration Function** - Add `get_16a8w_quantization_config()` in `fbcode/executorch/backends/arm/quantizer/arm_quantizer.py` - Provides 16-bit activations with HistogramObserver (better precision than 8A8W) - Maintains 8-bit weights with MinMaxObserver/PerChannelMinMaxObserver (memory efficient) - **Technically supported by TOSA through [EXT-INT16 extension/profile](https://www.mlplatform.org/tosa/tosa_spec.html#_conv2d)** ## Benefits - **Better Precision**: 16-bit activations provide higher precision than 8-bit. Useful for carrying precision for recurring neural nets. ghstack-source-id: 305459598 exported-using-ghexport Reviewed By: 3l1 Differential Revision: D79763381 --- backends/arm/quantizer/arm_quantizer.py | 80 +++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/backends/arm/quantizer/arm_quantizer.py b/backends/arm/quantizer/arm_quantizer.py index 9fa15568cc4..92ef5be5781 100644 --- a/backends/arm/quantizer/arm_quantizer.py +++ b/backends/arm/quantizer/arm_quantizer.py @@ -145,6 +145,86 @@ def get_symmetric_quantization_config( return quantization_config +@functools.lru_cache +def get_symmetric_a16w8_quantization_config( + is_per_channel: bool = True, + is_qat: bool = False, + is_dynamic: bool = False, + weight_qmin: int = -127, + weight_qmax: int = 127, +): + """ + 16A8W quantization config: 16-bit activations, 8-bit weights. + + This configuration provides better accuracy than 8A8W while maintaining + reasonable memory usage through 8-bit weights. + + Args: + is_per_channel: Whether to use per-channel quantization for weights + is_qat: Whether this is for Quantization Aware Training + is_dynamic: Whether to use dynamic quantization + weight_qmin: Minimum quantization value for weights + weight_qmax: Maximum quantization value for weights + + Returns: + QuantizationConfig with 16-bit activations and 8-bit weights + """ + extra_args: Dict[str, Any] = {"eps": 2**-12} + + # Setup observer/fake-quant for 16-bit activations + if is_qat: + if is_dynamic: + act_observer_or_fake_quant_ctr = FakeQuantize + dynamic_quant_observer = MovingAverageMinMaxObserver.with_args( + averaging_constant=1 + ) + extra_args["observer"] = dynamic_quant_observer + else: + act_observer_or_fake_quant_ctr = FusedMovingAvgObsFakeQuantize # type: ignore[assignment] + else: + if is_dynamic: + act_observer_or_fake_quant_ctr = PlaceholderObserver # type: ignore[assignment] + else: + # HistogramObserver works well for 16-bit range + act_observer_or_fake_quant_ctr = HistogramObserver # type: ignore[assignment] + + # 16-bit activation quantization spec + act_quantization_spec = QuantizationSpec( + dtype=torch.int16, + quant_min=torch.iinfo(torch.int16).min, # -32768 + quant_max=torch.iinfo(torch.int16).max, # 32767 + qscheme=torch.per_tensor_symmetric, + is_dynamic=is_dynamic, + observer_or_fake_quant_ctr=act_observer_or_fake_quant_ctr.with_args( + **extra_args, + ), + ) + + # Instead of reconstructing quantization_config, just clone and update as needed + # Clone the quantization_config from get_symmetric_quantization_config and update activation spec + base_config = get_symmetric_quantization_config( + is_per_channel=is_per_channel, + is_qat=is_qat, + is_dynamic=is_dynamic, + ) + # Replace activation quantization spec with 16-bit version + if is_dynamic: + quantization_config = QuantizationConfig( + act_quantization_spec, # 16-bit input activations + None, + base_config.weight, # 8-bit weights from base config + None, + ) + else: + quantization_config = QuantizationConfig( + act_quantization_spec, # 16-bit input activations + act_quantization_spec, # 16-bit output activations + base_config.weight, # 8-bit weights from base config + None, + ) + return quantization_config + + NodeFilterType = Callable[[Node], bool] """Type for a Node Filter used by annotators. A Node filter is a function that takes a Node and returns whether the node should be annotated or not. From acf553fda3ee5b1ca088884ac1c86568feff05d0 Mon Sep 17 00:00:00 2001 From: Nitin Jain Date: Mon, 25 Aug 2025 13:03:46 -0700 Subject: [PATCH 2/2] Add 16A8W linear ops support and test (#13448) Summary: - Adds linear ops test using the 16A8W config in INT16 profile. - Adds support in view ops validation for INT16 Dtype. - Validated with TOSA pipeline test. - Checked earlier marked flaky tests no longer flaky and remove markers. Note: Not verified with tosa reference model run. Reviewed By: digantdesai Differential Revision: D80308822 --- backends/arm/operators/op_transpose.py | 2 +- backends/arm/operators/op_view.py | 8 ++- backends/arm/test/ops/test_linear.py | 71 ++++++++++++++++++++++++-- 3 files changed, 76 insertions(+), 5 deletions(-) diff --git a/backends/arm/operators/op_transpose.py b/backends/arm/operators/op_transpose.py index 91614874d23..8a21e79dd74 100644 --- a/backends/arm/operators/op_transpose.py +++ b/backends/arm/operators/op_transpose.py @@ -47,7 +47,7 @@ def define_node( validate_valid_dtype( self.target, [inputs[0], output], - [ts.DType.INT8, ts.DType.INT32, ts.DType.FP32], + [ts.DType.INT8, ts.DType.INT16, ts.DType.INT32, ts.DType.FP32], output.tosa_spec, ) diff --git a/backends/arm/operators/op_view.py b/backends/arm/operators/op_view.py index 1e8c06b691f..7a81bbcb454 100644 --- a/backends/arm/operators/op_view.py +++ b/backends/arm/operators/op_view.py @@ -44,7 +44,13 @@ def define_node( validate_valid_dtype( self.target, [inputs[0], output], - [ts.DType.INT8, ts.DType.INT32, ts.DType.FP32, ts.DType.BOOL], + [ + ts.DType.INT8, + ts.DType.INT16, + ts.DType.INT32, + ts.DType.FP32, + ts.DType.BOOL, + ], output.tosa_spec, ) diff --git a/backends/arm/test/ops/test_linear.py b/backends/arm/test/ops/test_linear.py index 57ce490dae8..3e6bc460012 100644 --- a/backends/arm/test/ops/test_linear.py +++ b/backends/arm/test/ops/test_linear.py @@ -9,9 +9,12 @@ from typing import Tuple import pytest - import torch -from executorch.backends.arm.test import common +from executorch.backends.arm.quantizer.arm_quantizer import ( + get_symmetric_a16w8_quantization_config, + TOSAQuantizer, +) +from executorch.backends.arm.test import common, conftest from executorch.backends.arm.test.tester.test_pipeline import ( EthosU55PipelineINT, @@ -20,6 +23,8 @@ TosaPipelineINT, VgfPipeline, ) +from executorch.backends.arm.tosa_specification import TosaSpecification +from executorch.backends.xnnpack.test.tester import Quantize aten_op = "torch.ops.aten.linear.default" @@ -143,7 +148,6 @@ def test_linear_tosa_FP(test_data: torch.Tensor): pipeline.run() -@pytest.mark.flaky(reruns=5) # TODO: Investigate flakyness. @common.parametrize("test_data", test_data_rank1_INT | test_data_rank4_INT) def test_linear_tosa_INT(test_data: torch.Tensor): test_data, out_features, has_bias, per_channel_quantization = test_data() @@ -258,3 +262,64 @@ def test_linear_vgf_INT(test_data: torch.Tensor): per_channel_quantization=per_channel_quantization, ) pipeline.run() + + +def get_symmetric_a16w8_linear_quantizer( + u55_config=False, per_channel_quantization=False +): + tosa_version = conftest.get_option("tosa_version") + tosa_profiles = { + "1.0": TosaSpecification.create_from_string("TOSA-1.0+INT+int16"), + } + + quantizer = TOSAQuantizer(tosa_profiles[tosa_version]) + quantizer.set_global( + get_symmetric_a16w8_quantization_config(is_per_channel=per_channel_quantization) + ) + quantizer.set_module_type( + torch.nn.Linear, + get_symmetric_a16w8_quantization_config( + is_per_channel=per_channel_quantization + ), + ) + + return Quantize( + quantizer, + get_symmetric_a16w8_quantization_config( + is_per_channel=per_channel_quantization + ), + ) + + +@common.parametrize("test_data", test_data_rank1_INT | test_data_rank4_INT) +@pytest.mark.xfail( + reason="missing int16 linear ops support; fails at TOSA reference model run with Invalid TOSA graph" +) +def test_linear_16a8w_tosa_INT(test_data: torch.Tensor): + """Test linear operation with 16A8W quantization (16-bit activations, 8-bit weights)""" + test_data, out_features, has_bias, per_channel_quantization = test_data() + in_features = test_data.shape[-1] + + # Create pipeline with custom 16A8W quantization config + pipeline = TosaPipelineINT[input_t1]( + Linear( + in_features=in_features, + out_features=out_features, + bias=has_bias, + ), + (test_data,), + aten_op, + exir_op=[], + per_channel_quantization=per_channel_quantization, + use_to_edge_transform_and_lower=True, + tosa_extensions=["int16"], + ) + + pipeline.change_args( + "quantize", + get_symmetric_a16w8_linear_quantizer( + per_channel_quantization=per_channel_quantization + ), + ) + # Run the pipeline + pipeline.run()