commit 0fc89a4 (1 parent: 8525185)
torchao/prototype/parq/quant/config_torchao.py
@@ -193,8 +193,9 @@ def _attach_hf_quantization_config(
         if not hasattr(module, "weight"):
             continue
 
+        # Do not quantize pointers to tied weights or normalization layers
         data_ptr = module.weight.data_ptr()
-        if data_ptr in seen_data_ptrs:  # do not re-quantize tied weight
+        if data_ptr in seen_data_ptrs or name.endswith("norm"):
             modules_to_not_convert.append(name)
 
         seen_data_ptrs.add(data_ptr)
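
The check above relies on the fact that tied parameters share storage, so their data_ptr() values are equal, and on the module name suffix "norm" to catch normalization layers. Below is a minimal, self-contained sketch (not part of the commit; the module names embed, lm_head, and norm are illustrative) that demonstrates both conditions.

# Sketch only: illustrates why data_ptr() detects tied weights and how the
# "norm" suffix check behaves. Names here are hypothetical examples.
import torch.nn as nn

embed = nn.Embedding(10, 4)
lm_head = nn.Linear(4, 10, bias=False)
lm_head.weight = embed.weight  # weight tying, as in many language models

# Tied parameters share the same underlying storage pointer.
assert embed.weight.data_ptr() == lm_head.weight.data_ptr()

# A module registered under a name ending in "norm" is now also skipped.
norm = nn.LayerNorm(4)
print("model.norm".endswith("norm"))  # True

The first module seen for a given data_ptr still gets quantized; only later modules that alias the same storage, or modules whose names end in "norm", are appended to modules_to_not_convert.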