diff --git a/tests/models/albert/test_modeling_albert.py b/tests/models/albert/test_modeling_albert.py
index 823315bc6785..684a7d62c8cd 100644
--- a/tests/models/albert/test_modeling_albert.py
+++ b/tests/models/albert/test_modeling_albert.py
@@ -326,6 +326,12 @@ def test_model_from_pretrained(self):
             model = AlbertModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 class AlbertModelIntegrationTest(unittest.TestCase):
diff --git a/tests/models/deberta/test_modeling_deberta.py b/tests/models/deberta/test_modeling_deberta.py
index 52758e2222ae..8a5044719abd 100644
--- a/tests/models/deberta/test_modeling_deberta.py
+++ b/tests/models/deberta/test_modeling_deberta.py
@@ -278,6 +278,12 @@ def test_model_from_pretrained(self):
             model = DebertaModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 @require_sentencepiece
diff --git a/tests/models/deberta_v2/test_modeling_deberta_v2.py b/tests/models/deberta_v2/test_modeling_deberta_v2.py
index abfbe7402c93..151c2690b372 100644
--- a/tests/models/deberta_v2/test_modeling_deberta_v2.py
+++ b/tests/models/deberta_v2/test_modeling_deberta_v2.py
@@ -296,6 +296,12 @@ def test_model_from_pretrained(self):
             model = DebertaV2Model.from_pretrained(model_name)
             self.assertIsNotNone(model)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 @require_sentencepiece
diff --git a/tests/models/encodec/test_modeling_encodec.py b/tests/models/encodec/test_modeling_encodec.py
index 8f1b06da06c8..c8bd25ce28be 100644
--- a/tests/models/encodec/test_modeling_encodec.py
+++ b/tests/models/encodec/test_modeling_encodec.py
@@ -420,6 +420,12 @@ def test_identity_shortcut(self):
         config.use_conv_shortcut = False
         self.model_tester.create_and_check_model_forward(config, inputs_dict)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - TypeError: _weight_norm_interface() missing 1 required positional argument: 'dim'"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 def normalize(arr):
     norm = np.linalg.norm(arr)
diff --git a/tests/models/flava/test_modeling_flava.py b/tests/models/flava/test_modeling_flava.py
index 48a070d9fe31..e0ff3bad3fdc 100644
--- a/tests/models/flava/test_modeling_flava.py
+++ b/tests/models/flava/test_modeling_flava.py
@@ -1256,6 +1256,12 @@ def test_training_gradient_checkpointing_use_reentrant(self):
     def test_training_gradient_checkpointing_use_reentrant_false(self):
         pass
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 # We will verify our results on an image of cute cats
 def prepare_img():
diff --git a/tests/models/fnet/test_modeling_fnet.py b/tests/models/fnet/test_modeling_fnet.py
index 83b84edddccd..948e218b3139 100644
--- a/tests/models/fnet/test_modeling_fnet.py
+++ b/tests/models/fnet/test_modeling_fnet.py
@@ -468,6 +468,12 @@ def test_model_from_pretrained(self):
             model = FNetModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 class FNetModelIntegrationTest(unittest.TestCase):
diff --git a/tests/models/ibert/test_modeling_ibert.py b/tests/models/ibert/test_modeling_ibert.py
index b552cb75a5a6..63847ac14510 100644
--- a/tests/models/ibert/test_modeling_ibert.py
+++ b/tests/models/ibert/test_modeling_ibert.py
@@ -380,8 +380,14 @@ def test_inputs_embeds(self):
             inputs["inputs_embeds"] = wte(encoder_input_ids)
             inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
 
-        with torch.no_grad():
-            model(**inputs)[0]
+        with torch.no_grad():
+            model(**inputs)[0]
+
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
 
 
 @require_torch
diff --git a/tests/models/lxmert/test_modeling_lxmert.py b/tests/models/lxmert/test_modeling_lxmert.py
index 63d83de36b75..9ca12cd5386c 100644
--- a/tests/models/lxmert/test_modeling_lxmert.py
+++ b/tests/models/lxmert/test_modeling_lxmert.py
@@ -767,6 +767,12 @@ def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict):
 
         return tf_inputs_dict
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 class LxmertModelIntegrationTest(unittest.TestCase):
diff --git a/tests/models/mobilebert/test_modeling_mobilebert.py b/tests/models/mobilebert/test_modeling_mobilebert.py
index e4ebca4b6e5b..2885c7cce293 100644
--- a/tests/models/mobilebert/test_modeling_mobilebert.py
+++ b/tests/models/mobilebert/test_modeling_mobilebert.py
@@ -341,6 +341,12 @@ def test_for_token_classification(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 def _long_tensor(tok_lst):
     return torch.tensor(
diff --git a/tests/models/realm/test_modeling_realm.py b/tests/models/realm/test_modeling_realm.py
index 4d6d9fd0ff11..2b3c23c599e7 100644
--- a/tests/models/realm/test_modeling_realm.py
+++ b/tests/models/realm/test_modeling_realm.py
@@ -441,6 +441,12 @@ def test_scorer_from_pretrained(self):
         model = RealmScorer.from_pretrained("google/realm-cc-news-pretrained-scorer")
         self.assertIsNotNone(model)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 class RealmModelIntegrationTest(unittest.TestCase):
diff --git a/tests/models/roformer/test_modeling_roformer.py b/tests/models/roformer/test_modeling_roformer.py
index 6c130ae1746c..6b61cd9280c9 100644
--- a/tests/models/roformer/test_modeling_roformer.py
+++ b/tests/models/roformer/test_modeling_roformer.py
@@ -504,6 +504,12 @@ def test_training_gradient_checkpointing_use_reentrant(self):
     def test_training_gradient_checkpointing_use_reentrant_false(self):
         pass
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 class RoFormerModelIntegrationTest(unittest.TestCase):
diff --git a/tests/models/squeezebert/test_modeling_squeezebert.py b/tests/models/squeezebert/test_modeling_squeezebert.py
index bf86792f57f1..4690af405f8f 100644
--- a/tests/models/squeezebert/test_modeling_squeezebert.py
+++ b/tests/models/squeezebert/test_modeling_squeezebert.py
@@ -281,6 +281,12 @@ def test_model_from_pretrained(self):
             model = SqueezeBertModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_sentencepiece
 @require_tokenizers
diff --git a/tests/models/timm_backbone/test_modeling_timm_backbone.py b/tests/models/timm_backbone/test_modeling_timm_backbone.py
index 60ab9e2a217e..c0e4e723df3c 100644
--- a/tests/models/timm_backbone/test_modeling_timm_backbone.py
+++ b/tests/models/timm_backbone/test_modeling_timm_backbone.py
@@ -273,3 +273,9 @@ def test_create_from_modified_config(self):
         model.to(torch_device)
         model.eval()
         result = model(**inputs_dict)
+
+    @unittest.skip(
+        reason="Does not support low_cpu_mem_usage - weights are loaded from timm and not from transformers checkpoint."
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
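For context, the test being skipped exercises a save/reload round trip with low_cpu_mem_usage=True. The sketch below is illustrative only, not the ModelTesterMixin implementation: the tiny AlbertConfig values are arbitrary, it requires accelerate to be installed, and the try/except simply surfaces the NotImplementedError quoted in the skip reasons on transformers versions where the limitation applies.

# Illustrative sketch only (not ModelTesterMixin.test_save_load_low_cpu_mem_usage).
import tempfile

from transformers import AlbertConfig, AlbertModel

# Arbitrary tiny config so the example runs quickly.
config = AlbertConfig(
    vocab_size=99,
    embedding_size=16,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=64,
)
model = AlbertModel(config)

with tempfile.TemporaryDirectory() as tmp_dir:
    model.save_pretrained(tmp_dir)
    try:
        # low_cpu_mem_usage=True (requires accelerate) builds the model on the "meta"
        # device first and then materializes weights from the checkpoint; models that
        # copy or modify weight data during initialization can fail at this step.
        AlbertModel.from_pretrained(tmp_dir, low_cpu_mem_usage=True)
    except NotImplementedError as err:
        print(err)  # "Cannot copy out of meta tensor; no data!" on affected versions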