Skip to content

Commit

Permalink
[CI] Add more ci tests (#223)
Browse files Browse the repository at this point in the history
* add more tests

* fix

* add generate tests

* make style

* fix test

* add -n

* skip llama
  • Loading branch information
younesbelkada authored Mar 29, 2023
1 parent d8d1007 commit df71b84
Show file tree
Hide file tree
Showing 3 changed files with 41 additions and 11 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -17,4 +17,4 @@ style:
doc-builder style src tests --max_len 119

test:
pytest tests/
pytest -n 3 tests/
39 changes: 34 additions & 5 deletions tests/test_peft_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,14 @@


# This has to be in the order: model_id, lora_kwargs, prefix_tuning_kwargs, prompt_encoder_kwargs, prompt_tuning_kwargs
PEFT_MODELS_TO_TEST = [
("hf-internal-testing/tiny-random-OPTForCausalLM", {"target_modules": ["q_proj", "v_proj"]}, {}, {}, {}),
# Tuple order per entry (see comment above): model_id, lora_kwargs,
# prefix_tuning_kwargs, prompt_encoder_kwargs, prompt_tuning_kwargs.
# An empty dict leaves the corresponding PEFT method's default config kwargs untouched.
PEFT_DECODER_MODELS_TO_TEST = [
# ("HuggingFaceM4/tiny-random-LlamaForCausalLM", {}, {}, {}, {}), wait until the next `transformers` release
("hf-internal-testing/tiny-random-OPTForCausalLM", {}, {}, {}, {}),
("hf-internal-testing/tiny-random-GPTNeoXForCausalLM", {}, {}, {}, {}),
("hf-internal-testing/tiny-random-GPT2LMHeadModel", {}, {}, {}, {}),
("hf-internal-testing/tiny-random-BloomForCausalLM", {}, {}, {}, {}),
("hf-internal-testing/tiny-random-gpt_neo", {}, {}, {}, {}),
("hf-internal-testing/tiny-random-GPTJForCausalLM", {}, {}, {}, {}),
]


Expand All @@ -48,7 +54,7 @@ class PeftModelTester(unittest.TestCase, PeftTestMixin):
We use parametrized.expand for debugging purposes to test each model individually.
"""

@parameterized.expand(PeftTestConfigManager.get_grid_parameters(PEFT_MODELS_TO_TEST))
# One parametrized case per (decoder model, PEFT method) pair; delegates to the mixin helper.
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(PEFT_DECODER_MODELS_TO_TEST))
def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_model_attr(model_id, config_cls, config_kwargs)

Expand Down Expand Up @@ -105,7 +111,7 @@ def make_inputs_require_grad(module, input, output):

self.assertTrue(dummy_output.requires_grad)

@parameterized.expand(PeftTestConfigManager.get_grid_parameters(PEFT_MODELS_TO_TEST))
# One parametrized case per (decoder model, PEFT method) pair; delegates to the mixin helper.
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(PEFT_DECODER_MODELS_TO_TEST))
def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_prepare_for_training(model_id, config_cls, config_kwargs)

Expand Down Expand Up @@ -151,6 +157,29 @@ def _test_save_pretrained(self, model_id, config_cls, config_kwargs):
# check if `config.json` is not present
self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "config.json")))

@parameterized.expand(PeftTestConfigManager.get_grid_parameters(PEFT_MODELS_TO_TEST))
# One parametrized case per (decoder model, PEFT method) pair; delegates to the mixin helper.
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(PEFT_DECODER_MODELS_TO_TEST))
def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs)

def _test_generate(self, model_id, config_cls, config_kwargs):
    """Smoke-test `generate` on a PEFT-wrapped causal LM.

    Checks that keyword-argument generation runs, and that passing
    `input_ids` positionally raises a TypeError.
    """
    base_model = AutoModelForCausalLM.from_pretrained(model_id)
    peft_config = config_cls(
        base_model_name_or_path=model_id,
        **config_kwargs,
    )
    peft_model = get_peft_model(base_model, peft_config).to(self.torch_device)

    batch = {
        "input_ids": torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device),
        "attention_mask": torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device),
    }

    # The keyword-only call is the supported path and must succeed.
    _ = peft_model.generate(**batch)

    with self.assertRaises(TypeError):
        # Passing `input_ids` as a positional argument is expected to be rejected.
        _ = peft_model.generate(batch["input_ids"], attention_mask=batch["attention_mask"])

# One parametrized case per (decoder model, PEFT method) pair; delegates to the helper above.
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(PEFT_DECODER_MODELS_TO_TEST))
def test_generate(self, test_name, model_id, config_cls, config_kwargs):
self._test_generate(model_id, config_cls, config_kwargs)
11 changes: 6 additions & 5 deletions tests/testing_common.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,23 +79,24 @@ def get_grid_parameters(self, model_list):
for model_tuple in model_list:
model_id, lora_kwargs, prefix_tuning_kwargs, prompt_encoder_kwargs, prompt_tuning_kwargs = model_tuple
for key, value in self.items():
peft_method = value[1].copy()
if key == "lora":
# update value[1] if necessary
if lora_kwargs is not None:
value[1].update(lora_kwargs)
peft_method.update(lora_kwargs)
elif key == "prefix_tuning":
# update value[1] if necessary
if prefix_tuning_kwargs is not None:
value[1].update(prefix_tuning_kwargs)
peft_method.update(prefix_tuning_kwargs)
elif key == "prompt_encoder":
# update value[1] if necessary
if prompt_encoder_kwargs is not None:
value[1].update(prompt_encoder_kwargs)
peft_method.update(prompt_encoder_kwargs)
else:
# update value[1] if necessary
if prompt_tuning_kwargs is not None:
value[1].update(prompt_tuning_kwargs)
grid_parameters.append((f"test_{model_id}_{key}", model_id, value[0], value[1]))
peft_method.update(prompt_tuning_kwargs)
grid_parameters.append((f"test_{model_id}_{key}", model_id, value[0], peft_method))

return grid_parameters

Expand Down

0 comments on commit df71b84

Please sign in to comment.