Skip to content

Commit 0fc89a4

Browse files
committed
Avoid normalization layers in HF's quantization_config
1 parent: 8525185 · commit: 0fc89a4

File tree

1 file changed

+2
-1
lines changed

1 file changed

+2
-1
lines changed

torchao/prototype/parq/quant/config_torchao.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -193,8 +193,9 @@ def _attach_hf_quantization_config(
193193
if not hasattr(module, "weight"):
194194
continue
195195

196+
# Do not quantize pointers to tied weights or normalization layers
196197
data_ptr = module.weight.data_ptr()
197-
if data_ptr in seen_data_ptrs: # do not re-quantize tied weight
198+
if data_ptr in seen_data_ptrs or name.endswith("norm"):
198199
modules_to_not_convert.append(name)
199200
continue
200201
seen_data_ptrs.add(data_ptr)

0 commit comments

Comments (0)