Skip to content

Commit 6c3fed9

Browse files
committed
fix typo
Signed-off-by: yuwenzho <yuwen.zhou@intel.com>
1 parent c344d19 commit 6c3fed9

File tree

1 file changed

+5
-6
lines changed
  • neural_compressor/onnxrt/algorithms/layer_wise

1 file changed

+5
-6
lines changed

neural_compressor/onnxrt/algorithms/layer_wise/core.py

Lines changed: 5 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -19,14 +19,14 @@
1919
# limitations under the License.
2020

2121
import os
22+
import transformers
2223
from copy import deepcopy
2324
from pathlib import Path
2425
from typing import Callable, List, Union
26+
from packaging.version import Version
2527

2628
import onnx
2729
import onnxruntime as ort
28-
import transformers
29-
from packaging.version import Version
3030

3131
from neural_compressor.common import Logger
3232
from neural_compressor.onnxrt.quantization.calibrate import CalibrationDataReader
@@ -61,10 +61,9 @@ def layer_wise_quant(
6161
"""
6262
# TODO: remove the limitation for lwq
6363
if Version(transformers.__version__) > Version("4.37.2"):
64-
logger.warning(
65-
"Model (such as llama-2) exported with transformers {} may fail in layer-wise quant. "
66-
"we recommand downgrading transformers to 4.37.2 and try again.".format(transformers.__version__)
67-
)
64+
logger.warning("Model (such as llama-2) exported with transformers {} may fail in layer-wise quant. "
65+
"we recommend downgrading transformers to 4.37.2 and try again.".format(
66+
transformers.__version__))
6867

6968
# check whether model shape is inferred
7069
if not check_model_with_infer_shapes(model):

0 commit comments

Comments
 (0)