From 0975dbc6dd8270363faad25f2d5b4e0ce8ba5e25 Mon Sep 17 00:00:00 2001
From: "Wang, Chang"
Date: Wed, 10 Jan 2024 17:12:42 +0800
Subject: [PATCH 1/2] Update pytorch.py

---
 neural_compressor/utils/pytorch.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/neural_compressor/utils/pytorch.py b/neural_compressor/utils/pytorch.py
index ad2d090b0a8..cee6af147c1 100644
--- a/neural_compressor/utils/pytorch.py
+++ b/neural_compressor/utils/pytorch.py
@@ -481,8 +481,10 @@ def recover_model_from_json(model, json_file_path, example_inputs):
     ipex = LazyImport("intel_extension_for_pytorch")
     from torch.ao.quantization.observer import MinMaxObserver
-
-    qconfig = ipex.quantization.get_smooth_quant_qconfig_mapping(alpha=0.5, act_observer=MinMaxObserver())
+    if ipex.__version__ >= "2.1.100":
+        qconfig = ipex.quantization.get_smooth_quant_qconfig_mapping(alpha=0.5, act_observer=MinMaxObserver)
+    else:
+        qconfig = ipex.quantization.get_smooth_quant_qconfig_mapping(alpha=0.5, act_observer=MinMaxObserver())
     if isinstance(example_inputs, dict):
         model = ipex.quantization.prepare(model, qconfig, example_kwarg_inputs=example_inputs, inplace=True)
     else:

From 28cfcfe7eb055f46aa16380285581df805d24b31 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 10 Jan 2024 09:15:27 +0000
Subject: [PATCH 2/2] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 neural_compressor/utils/pytorch.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/neural_compressor/utils/pytorch.py b/neural_compressor/utils/pytorch.py
index cee6af147c1..dfa9433fe8d 100644
--- a/neural_compressor/utils/pytorch.py
+++ b/neural_compressor/utils/pytorch.py
@@ -481,6 +481,7 @@ def recover_model_from_json(model, json_file_path, example_inputs):
     ipex = LazyImport("intel_extension_for_pytorch")
     from torch.ao.quantization.observer import MinMaxObserver
+
     if ipex.__version__ >= "2.1.100":
         qconfig = ipex.quantization.get_smooth_quant_qconfig_mapping(alpha=0.5, act_observer=MinMaxObserver)
     else:
         qconfig = ipex.quantization.get_smooth_quant_qconfig_mapping(alpha=0.5, act_observer=MinMaxObserver())
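
For context, a minimal standalone sketch of the version-gated qconfig construction the patch introduces: IPEX 2.1.100 and later expect the observer class for act_observer, while older releases expect an observer instance. This is illustration only, not part of the patch; the helper name build_smooth_quant_qconfig is hypothetical, and packaging.version.Version is used here as an assumption to make the version check numeric rather than the raw string comparison in the patched code.

```python
# Hypothetical sketch; assumes torch, intel_extension_for_pytorch, and packaging are installed.
import intel_extension_for_pytorch as ipex
from packaging.version import Version
from torch.ao.quantization.observer import MinMaxObserver


def build_smooth_quant_qconfig():
    # IPEX >= 2.1.100 takes the observer class; older releases take an instance.
    if Version(ipex.__version__) >= Version("2.1.100"):
        return ipex.quantization.get_smooth_quant_qconfig_mapping(
            alpha=0.5, act_observer=MinMaxObserver
        )
    return ipex.quantization.get_smooth_quant_qconfig_mapping(
        alpha=0.5, act_observer=MinMaxObserver()
    )


# Usage: pass the resulting qconfig to ipex.quantization.prepare(), as the
# patched recover_model_from_json() does.
qconfig = build_smooth_quant_qconfig()
```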