Merged
examples/models/llama/source_transformation/quantize.py (4 additions, 5 deletions)

@@ -119,11 +119,10 @@ def quantize(  # noqa C901
         # Check for required args
         if group_size is None:
             raise Exception("For 8da4w quantization, group size must be specified.")
-        from torchao.quantization.quant_api import Int8DynActInt4WeightQuantizer
-
-        model = Int8DynActInt4WeightQuantizer(
-            precision=torch_dtype, groupsize=group_size
-        ).quantize(model)
+        from torchao.quantization import int8_dynamic_activation_int4_weight, quantize_
+
+        quantize_(model, int8_dynamic_activation_int4_weight(group_size=group_size))
@jerryzh168 (Contributor) commented on Feb 28, 2025:

Is torch_dtype not applied here? Should it be applied to model?
@jackzhxng (Contributor, Author) replied on Mar 3, 2025:

Yeah, I think it would be good to ensure the dtype here by applying it to the model in general, but the model is already in fp32 both when the test passes before this PR and when it fails after it.


     if verbose:
         print("quantized model:", model)
@@ -663,7 +662,7 @@ def convert_for_runtime(self) -> nn.Module:
     def quantized_model(self) -> nn.Module:
         model_updated_state_dict = self.create_quantized_state_dict(self.packed)
         self.convert_for_runtime()
-        self.mod.load_state_dict(model_updated_state_dict)
+        self.mod.load_state_dict(model_updated_state_dict, assign=True)
         return self.mod
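
For context, a minimal sketch of what assign=True changes in load_state_dict (the toy nn.Linear module here is a hypothetical stand-in, not the model in this PR): with the default copy behavior the existing parameters keep their properties and the incoming values are cast into them, while assign=True rebinds the parameters to the tensors from the state dict, preserving their dtype, which is what a quantized state dict needs.

import torch
import torch.nn as nn

# Hypothetical stand-in for self.mod; the state dict uses a different dtype than the module.
state_dict = {k: v.to(torch.bfloat16) for k, v in nn.Linear(4, 4).state_dict().items()}

copied = nn.Linear(4, 4)
copied.load_state_dict(state_dict)                  # default: values copied into existing fp32 params
print(copied.weight.dtype)                          # torch.float32

assigned = nn.Linear(4, 4)
assigned.load_state_dict(state_dict, assign=True)   # params replaced by the state dict tensors
print(assigned.weight.dtype)                        # torch.bfloat16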

