Add missing None check for hf_quantizer (#28804)
* Add missing None check for hf_quantizer

* Add test, fix logic.

* make style

* Switch test model to Mistral

* Comment

* Update tests/test_modeling_utils.py

---------

Co-authored-by: Younes Belkada <49240599+younesbelkada@users.noreply.github.com>
2 people authored and Ita Zaporozhets committed May 14, 2024
1 parent 0edb950 commit 2680509
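
For context on the fix itself: when a model is loaded without quantization, `hf_quantizer` is `None`, and the old condition called `hf_quantizer.check_quantized_param(...)` before ever checking for `None`, raising `AttributeError: 'NoneType' object has no attribute 'check_quantized_param'`. Below is a minimal, self-contained sketch of the corrected short-circuit logic; `DummyQuantizer` and `should_materialize_on_cpu` are hypothetical names used for illustration only, not transformers APIs.

    # Hypothetical stand-in for a real HfQuantizer, used only to
    # illustrate the short-circuit behavior of the fixed condition.
    class DummyQuantizer:
        requires_parameters_quantization = False

        def check_quantized_param(self, model, param_value, param_name, state_dict):
            return False

    def should_materialize_on_cpu(hf_quantizer, model=None, value=None, key=None):
        # The new `hf_quantizer is None` guard short-circuits before any
        # attribute access, so a missing quantizer no longer raises.
        return (
            hf_quantizer is None
            or getattr(hf_quantizer, "requires_parameters_quantization", False)
            or not hf_quantizer.check_quantized_param(
                model, param_value=value, param_name=key, state_dict={}
            )
        )

    assert should_materialize_on_cpu(None)  # previously: AttributeError on NoneType
    assert should_materialize_on_cpu(DummyQuantizer())
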
Showing 2 changed files with 17 additions and 4 deletions.
10 changes: 6 additions & 4 deletions src/transformers/modeling_utils.py
@@ -3727,10 +3727,12 @@ def _fix_key(key):
 
                 if param.device == torch.device("meta"):
                     value = torch.empty(*param.size(), dtype=target_dtype)
-                    if getattr(
-                        hf_quantizer, "requires_parameters_quantization", False
-                    ) or not hf_quantizer.check_quantized_param(
-                        model, param_value=value, param_name=key, state_dict={}
+                    if (
+                        hf_quantizer is None
+                        or getattr(hf_quantizer, "requires_parameters_quantization", False)
+                        or not hf_quantizer.check_quantized_param(
+                            model, param_value=value, param_name=key, state_dict={}
+                        )
                     ):
                         set_module_tensor_to_device(model, key, "cpu", value)
                     else:
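
Background on the branch being patched: under the low-CPU-memory loading path, missing parameters start on PyTorch's `meta` device, which records shape and dtype but allocates no storage, so the code above first materializes a real tensor with `torch.empty` before `set_module_tensor_to_device` (from accelerate) can place it. A standalone sketch of that meta-to-CPU materialization step:

    import torch

    # Parameters created on the "meta" device carry metadata but no storage.
    param = torch.nn.Parameter(torch.empty(4, 4, device="meta"))
    assert param.device == torch.device("meta")

    # Materialize a real (uninitialized) tensor with the same shape, as the
    # patched code does before handing it to set_module_tensor_to_device.
    value = torch.empty(*param.size(), dtype=torch.float32)
    assert value.device == torch.device("cpu")
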
11 changes: 11 additions & 0 deletions tests/test_modeling_utils.py
@@ -34,6 +34,7 @@
 from transformers import (
     AutoConfig,
     AutoModel,
+    AutoModelForSequenceClassification,
     OwlViTForObjectDetection,
     PretrainedConfig,
     is_torch_available,
@@ -201,6 +202,7 @@ def forward(self, mask, inputs_embeds):
 
 TINY_T5 = "patrickvonplaten/t5-tiny-random"
 TINY_BERT_FOR_TOKEN_CLASSIFICATION = "hf-internal-testing/tiny-bert-for-token-classification"
+TINY_MISTRAL = "hf-internal-testing/tiny-random-MistralForCausalLM"
 
 
 def check_models_equal(model1, model2):
@@ -300,6 +302,15 @@ def test_model_from_pretrained_with_different_pretrained_model_name(self):
             BertModel.from_pretrained(TINY_T5)
         self.assertTrue("You are using a model of type t5 to instantiate a model of type bert" in cl.out)
 
+    @require_accelerate
+    def test_model_from_pretrained_with_none_quantization_config(self):
+        # Needs a device_map to enter the low_cpu_mem branch. We also load AutoModelForSequenceClassification
+        # deliberately to enter the missing keys branch.
+        model = AutoModelForSequenceClassification.from_pretrained(
+            TINY_MISTRAL, device_map="auto", quantization_config=None
+        )
+        self.assertIsNotNone(model)
+
     def test_model_from_config_torch_dtype(self):
         # test that the model can be instantiated with dtype of user's choice - as long as it's a
         # float dtype. To make it happen config.torch_dtype needs to be set before instantiating the
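
To see the scenario the new test exercises end to end, here is a sketch mirroring it outside the test suite (assumes `torch` and `accelerate` are installed; it downloads a tiny checkpoint from the Hub). Before this commit, the same call crashed with an `AttributeError` on `hf_quantizer`.

    from transformers import AutoModelForSequenceClassification

    # device_map="auto" enables the low_cpu_mem_usage path (meta-device init),
    # and the sequence-classification head is a missing key for this checkpoint,
    # so loading walks straight into the patched branch.
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/tiny-random-MistralForCausalLM",
        device_map="auto",
        quantization_config=None,
    )
    print(type(model).__name__)

The regression test can be run on its own with `pytest tests/test_modeling_utils.py -k test_model_from_pretrained_with_none_quantization_config`.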
