Commit 287df6b

Skip instead of quietly succeeding tests that are not applicable
akx committed Feb 15, 2024
1 parent b160162 commit 287df6b
Showing 2 changed files with 20 additions and 18 deletions.
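
For context, the pattern applied throughout this commit: a bare return makes a parametrized case that does not apply count as passed, whereas pytest.skip() raises pytest's internal Skipped exception, so the case is reported as skipped together with its reason. The return written in front of pytest.skip() is cosmetic, since the call never actually returns. Below is a minimal, self-contained sketch of the idea; the test name and parameter values are hypothetical and not taken from the repository.

import pytest

CONFIG_CLASSES = ["LoraConfig", "PromptTuningConfig"]  # hypothetical parametrization

@pytest.mark.parametrize("config_cls", CONFIG_CLASSES)
def test_prompt_tuning_text_init(config_cls):
    if config_cls != "PromptTuningConfig":
        # Before this commit: a bare `return` here would quietly report the case as passed.
        # After: pytest.skip() marks the case as skipped and records the reason in the report.
        return pytest.skip(f"This test does not apply to {config_cls}")
    assert config_cls == "PromptTuningConfig"

Running pytest with -rs then lists the skipped cases and their reasons instead of silently counting them as passed.
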
tests/test_decoder_models.py: 2 changes (1 addition, 1 deletion)
@@ -82,7 +82,7 @@ def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls
def test_prompt_tuning_text_prepare_for_training(self, test_name, model_id, config_cls, config_kwargs):
# Test that prompt tuning works with text init
if config_cls != PromptTuningConfig:
- return
+ return pytest.skip(f"This test does not apply to {config_cls}")

config_kwargs = config_kwargs.copy()
config_kwargs["prompt_tuning_init"] = PromptTuningInit.TEXT
tests/testing_common.py: 36 changes (19 additions, 17 deletions)
@@ -326,7 +326,7 @@ def _test_save_pretrained(self, model_id, config_cls, config_kwargs, safe_serial
def _test_save_pretrained_selected_adapters(self, model_id, config_cls, config_kwargs, safe_serialization=True):
if issubclass(config_cls, AdaLoraConfig):
# AdaLora does not support adding more than 1 adapter
- return
+ return pytest.skip(f"Test not applicable for {config_cls}")

# ensure that the weights are randomly initialized
if issubclass(config_cls, LoraConfig):
@@ -433,7 +433,7 @@ def _test_from_pretrained_config_construction(self, model_id, config_cls, config
def _test_merge_layers_fp16(self, model_id, config_cls, config_kwargs):
if config_cls not in (LoraConfig, IA3Config):
# Merge layers only supported for LoRA and IA³
- return
+ return pytest.skip(f"Test not applicable for {config_cls}")

if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig):
self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)")
@@ -506,7 +506,8 @@ def _test_merge_layers_nan(self, model_id, config_cls, config_kwargs):

def _test_merge_layers(self, model_id, config_cls, config_kwargs):
if issubclass(config_cls, PromptLearningConfig):
- return
+ return pytest.skip(f"Test not applicable for {config_cls}")
+
if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig):
self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)")

@@ -681,7 +682,7 @@ def _test_generate_pos_args(self, model_id, config_cls, config_kwargs, raises_er

def _test_generate_half_prec(self, model_id, config_cls, config_kwargs):
if config_cls not in (IA3Config, LoraConfig, PrefixTuningConfig):
- return
+ return pytest.skip(f"Test not applicable for {config_cls}")

if self.torch_device == "mps": # BFloat16 is not supported on MPS
return pytest.skip("BFloat16 is not supported on MPS")
@@ -702,7 +703,7 @@ def _test_generate_half_prec(self, model_id, config_cls, config_kwargs):

def _test_prefix_tuning_half_prec_conversion(self, model_id, config_cls, config_kwargs):
if config_cls not in (PrefixTuningConfig,):
- return
+ return pytest.skip(f"Test not applicable for {config_cls}")

config = config_cls(
base_model_name_or_path=model_id,
@@ -717,7 +718,7 @@ def _test_prefix_tuning_half_prec_conversion(self, model_id, config_cls, config_

def _test_training(self, model_id, config_cls, config_kwargs):
if issubclass(config_cls, PromptLearningConfig):
- return
+ return pytest.skip(f"Test not applicable for {config_cls}")
if (config_cls == AdaLoraConfig) and ("roberta" in model_id.lower()):
# TODO: no gradients on the "dense" layer, other layers work, not sure why
self.skipTest("AdaLora with RoBERTa does not work correctly")
@@ -783,7 +784,7 @@ def _test_inference_safetensors(self, model_id, config_cls, config_kwargs):

def _test_training_layer_indexing(self, model_id, config_cls, config_kwargs):
if config_cls not in (LoraConfig,):
- return
+ return pytest.skip(f"Test not applicable for {config_cls}")

config = config_cls(
base_model_name_or_path=model_id,
@@ -837,15 +838,16 @@ def _test_training_layer_indexing(self, model_id, config_cls, config_kwargs):

def _test_training_gradient_checkpointing(self, model_id, config_cls, config_kwargs):
if issubclass(config_cls, PromptLearningConfig):
- return
+ return pytest.skip(f"Test not applicable for {config_cls}")
+
if (config_cls == AdaLoraConfig) and ("roberta" in model_id.lower()):
# TODO: no gradients on the "dense" layer, other layers work, not sure why
self.skipTest("AdaLora with RoBERTa does not work correctly")

model = self.transformers_class.from_pretrained(model_id)

if not getattr(model, "supports_gradient_checkpointing", False):
- return
+ return pytest.skip(f"Model {model_id} does not support gradient checkpointing")

model.gradient_checkpointing_enable()

@@ -872,7 +874,7 @@ def _test_training_gradient_checkpointing(self, model_id, config_cls, config_kwa

def _test_peft_model_device_map(self, model_id, config_cls, config_kwargs):
if config_cls not in (LoraConfig,):
- return
+ return pytest.skip(f"Test not applicable for {config_cls}")

config = config_cls(
base_model_name_or_path=model_id,
@@ -894,7 +896,7 @@ def _test_peft_model_device_map(self, model_id, config_cls, config_kwargs):

def _test_training_prompt_learning_tasks(self, model_id, config_cls, config_kwargs):
if not issubclass(config_cls, PromptLearningConfig):
- return
+ return pytest.skip(f"Test not applicable for {config_cls}")

model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
@@ -924,7 +926,7 @@ def _test_delete_adapter(self, model_id, config_cls, config_kwargs):
**config_kwargs,
)
if config.peft_type not in supported_peft_types:
- return
+ return pytest.skip(f"Test not applicable for {config.peft_type}")

model = self.transformers_class.from_pretrained(model_id)
adapter_to_delete = "delete_me"
@@ -962,7 +964,7 @@ def _test_delete_inactive_adapter(self, model_id, config_cls, config_kwargs):
**config_kwargs,
)
if config.peft_type not in supported_peft_types:
- return
+ return pytest.skip(f"Test not applicable for {config.peft_type}")

model = self.transformers_class.from_pretrained(model_id)
adapter_to_delete = "delete_me"
@@ -1019,16 +1021,16 @@ def _test_unload_adapter(self, model_id, config_cls, config_kwargs):
def _test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs):
if issubclass(config_cls, AdaLoraConfig):
# AdaLora does not support adding more than 1 adapter
- return
+ return pytest.skip(f"Test not applicable for {config_cls}")

adapter_list = ["adapter1", "adapter_2", "adapter_3"]
weight_list = [0.5, 1.5, 1.5]
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
- if not isinstance(config, (LoraConfig)):
-     return
+ if not isinstance(config, LoraConfig):
+     return pytest.skip(f"Test not applicable for {config}")

model = self.transformers_class.from_pretrained(model_id)
model = get_peft_model(model, config, adapter_list[0])
@@ -1286,7 +1288,7 @@ def _test_adding_multiple_adapters_with_bias_raises(self, model_id, config_cls,
# When trying to add multiple adapters with bias in Lora or AdaLora, an error should be
# raised. Also, the peft model should not be left in a half-initialized state.
if not issubclass(config_cls, (LoraConfig, AdaLoraConfig)):
- return
+ return pytest.skip(f"Test not applicable for {config_cls}")

config_kwargs = config_kwargs.copy()
config_kwargs["bias"] = "all"