
Commit 2c75f79

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 6c3fed9 commit 2c75f79

File tree

1 file changed (+6, -5 lines changed)
  • neural_compressor/onnxrt/algorithms/layer_wise


neural_compressor/onnxrt/algorithms/layer_wise/core.py

Lines changed: 6 additions & 5 deletions
@@ -19,14 +19,14 @@
 # limitations under the License.
 
 import os
-import transformers
 from copy import deepcopy
 from pathlib import Path
 from typing import Callable, List, Union
-from packaging.version import Version
 
 import onnx
 import onnxruntime as ort
+import transformers
+from packaging.version import Version
 
 from neural_compressor.common import Logger
 from neural_compressor.onnxrt.quantization.calibrate import CalibrationDataReader
@@ -61,9 +61,10 @@ def layer_wise_quant(
     """
     # TODO: remove the limitation for lwq
    if Version(transformers.__version__) > Version("4.37.2"):
-        logger.warning("Model (such as llama-2) exported with transformers {} may fail in layer-wise quant. "
-                       "we recommend downgrading transformers to 4.37.2 and try again.".format(
-                           transformers.__version__))
+        logger.warning(
+            "Model (such as llama-2) exported with transformers {} may fail in layer-wise quant. "
+            "we recommend downgrading transformers to 4.37.2 and try again.".format(transformers.__version__)
+        )
 
     # check whether model shape is inferred
     if not check_model_with_infer_shapes(model):
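For reference, the warning gate above relies on packaging.version.Version, which compares release segments numerically rather than as strings, so any transformers release newer than 4.37.2 takes the warning path while the recommended 4.37.2 itself does not. A small illustrative check (the version strings other than 4.37.2 are examples chosen here, not taken from the commit):

    from packaging.version import Version

    # Newer release than the recommended 4.37.2: the warning path is taken.
    assert Version("4.38.0") > Version("4.37.2")
    # Comparison is numeric per segment, not lexicographic ("37" > "9").
    assert Version("4.37.2") > Version("4.9.0")
    # The recommended version itself does not trigger the warning.
    assert not (Version("4.37.2") > Version("4.37.2"))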
