From eaf9d8d8d38e1301f2f047b13e1c003c0ae79d80 Mon Sep 17 00:00:00 2001
From: Benjamin Bossan
Date: Tue, 12 Aug 2025 11:32:17 +0200
Subject: [PATCH 1/6] FIX DynamicCache max_cache_len attribute error

Resolves current CI errors with prefix tuning.

Due to some recent changes in transformers (surfaced by
https://github.com/huggingface/transformers/pull/39797), checking
hasattr(cache, "max_cache_len") results in an error:

>>> cache = DynamicCache()
>>> hasattr(cache, "max_cache_len")
Traceback (most recent call last):
  File "/home/name/work/forks/transformers/foo.py", line 9, in <module>
    hasattr(cache, "max_cache_len")
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/name/work/forks/transformers/src/transformers/cache_utils.py", line 916, in max_cache_len
    return max(values)
           ^^^^^^^^^^^
ValueError: max() iterable argument is empty

This has been reported and will be fixed in transformers. On the PEFT
side, it is safest to check the cache type and avoid accessing this
attribute in the first place, which is what this PR does.

Moreover, that PR also changed the argument order used to initialize
HybridCache (this will probably also be reverted in transformers), which
is also taken into account in this PR by only using keyword arguments.
---
 src/peft/peft_model.py | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/src/peft/peft_model.py b/src/peft/peft_model.py
index 032465eb81..4603c98cec 100644
--- a/src/peft/peft_model.py
+++ b/src/peft/peft_model.py
@@ -750,16 +750,16 @@ def get_prompt(
                 past_key_values = post_process_fn(past_key_values)
             elif ("gemma2" in model_type) or ("gemma3_text" in model_type):
                 # Gemma2 and Gemma3 only support HybridCache (which does not have the from_legacy_cache method)
-                if max_cache_len is None:
+                if (max_cache_len is None) or (max_cache_len == -1):
                     raise ValueError(
-                        "max_cache_len is None but it should have been passed. Something went wrong, please open an "
+                        "max_cache_len is missing but it should have been passed. Something went wrong, please open an "
                         "issue on GitHub with a reproducer: https://github.com/huggingface/peft/issues"
                     )
                 base_config = base_model.config
                 if hasattr(base_config, "get_text_config"):
                     base_config = base_config.get_text_config()
                 new_cache = HybridCache(
-                    base_config,
+                    config=base_config,
                     max_batch_size=batch_size,
                     max_cache_len=max_cache_len,
                     dtype=past_key_values[0].dtype,
                     device=past_key_values[0].device,
@@ -2068,15 +2068,18 @@ def prepare_inputs_for_generation(self, *args, task_ids: Optional[torch.Tensor]
                 )
                 kwargs["token_type_ids"] = None

+            cache: transformers.Cache | None = model_kwargs.get("past_key_values", None)
             # no past_key_values or past_key_values empty cache
-            requires_prompt_injection = (model_kwargs.get("past_key_values", None) is None) or (
-                isinstance(model_kwargs["past_key_values"], transformers.Cache)
-                and not model_kwargs["past_key_values"].get_seq_length()
+            requires_prompt_injection = (cache is None) or (
+                isinstance(cache, transformers.Cache) and not cache.get_seq_length()
             )

             if requires_prompt_injection and peft_config.peft_type == PeftType.PREFIX_TUNING:
-                # some archs require max_cache_len to re-initialize the cache
-                max_cache_len = getattr(model_kwargs.get("past_key_values", None), "max_cache_len", None)
+                # some archs require max_cache_len to re-initialize the cache, but DynamicCache has no max len
+                if isinstance(cache, transformers.Cache) and not isinstance(cache, transformers.DynamicCache):
+                    max_cache_len = cache.max_cache_len
+                else:
+                    max_cache_len = -1  # -1 means no max length
                 new_past_key_values = self.get_prompt(
                     batch_size=model_kwargs["input_ids"].shape[0],
                     max_cache_len=max_cache_len,

From dc50a16faef4995e6dfa7de87aca795f40e782fa Mon Sep 17 00:00:00 2001
From: Benjamin Bossan
Date: Mon, 18 Aug 2025 17:37:19 +0200
Subject: [PATCH 2/6] Fix for Gemma using DynamicCache now

---
 src/peft/peft_model.py | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/src/peft/peft_model.py b/src/peft/peft_model.py
index 4603c98cec..b92fbf7d52 100644
--- a/src/peft/peft_model.py
+++ b/src/peft/peft_model.py
@@ -749,8 +749,12 @@ def get_prompt(
                 post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type]
                 past_key_values = post_process_fn(past_key_values)
             elif ("gemma2" in model_type) or ("gemma3_text" in model_type):
+                # TODO: remove this logic once transformers < 4.56 is dropped
+                transformers_le_4_55 = (
+                    packaging.version.parse(transformers.__version__) <= packaging.version.parse("4.55.2")
+                )
                 # Gemma2 and Gemma3 only support HybridCache (which does not have the from_legacy_cache method)
-                if (max_cache_len is None) or (max_cache_len == -1):
+                if transformers_le_4_55 and ((max_cache_len is None) or (max_cache_len == -1)):
                     raise ValueError(
                         "max_cache_len is missing but it should have been passed. Something went wrong, please open an "
                         "issue on GitHub with a reproducer: https://github.com/huggingface/peft/issues"
@@ -758,13 +762,17 @@
                 base_config = base_model.config
                 if hasattr(base_config, "get_text_config"):
                     base_config = base_config.get_text_config()
-                new_cache = HybridCache(
-                    config=base_config,
-                    max_batch_size=batch_size,
-                    max_cache_len=max_cache_len,
-                    dtype=past_key_values[0].dtype,
-                    device=past_key_values[0].device,
-                )
+                if transformers_le_4_55:
+                    new_cache = HybridCache(
+                        config=base_config,
+                        max_batch_size=batch_size,
+                        max_cache_len=max_cache_len,
+                        dtype=past_key_values[0].dtype,
+                        device=past_key_values[0].device,
+                    )
+                else:
+                    # transformers 4.56+ uses DynamicCache for gemma
+                    new_cache = DynamicCache(config=base_config)
                 cache_position = torch.arange(peft_config.num_virtual_tokens, device=past_key_values[0].device)
                 for layer_idx in range(peft_config.num_layers):
                     key_states, value_states = past_key_values[0][layer_idx], past_key_values[1][layer_idx]

From 13adedfe59f35dc348f5a1c0c8e273e8af35dfd0 Mon Sep 17 00:00:00 2001
From: Benjamin Bossan
Date: Mon, 18 Aug 2025 17:39:05 +0200
Subject: [PATCH 3/6] format

---
 src/peft/peft_model.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/peft/peft_model.py b/src/peft/peft_model.py
index b92fbf7d52..4f5957152c 100644
--- a/src/peft/peft_model.py
+++ b/src/peft/peft_model.py
@@ -750,8 +750,8 @@ def get_prompt(
                 past_key_values = post_process_fn(past_key_values)
             elif ("gemma2" in model_type) or ("gemma3_text" in model_type):
                 # TODO: remove this logic once transformers < 4.56 is dropped
-                transformers_le_4_55 = (
-                    packaging.version.parse(transformers.__version__) <= packaging.version.parse("4.55.2")
+                transformers_le_4_55 = packaging.version.parse(transformers.__version__) <= packaging.version.parse(
+                    "4.55.2"
                 )
                 # Gemma2 and Gemma3 only support HybridCache (which does not have the from_legacy_cache method)

From 072c4911e09927cefe21e0715e1b41aa25152ac0 Mon Sep 17 00:00:00 2001
From: Benjamin Bossan
Date: Thu, 21 Aug 2025 10:55:21 +0200
Subject: [PATCH 4/6] Change version comparison to < 4.56.0.dev0

---
 src/peft/peft_model.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/peft/peft_model.py b/src/peft/peft_model.py
index 4f5957152c..8e864ee9bc 100644
--- a/src/peft/peft_model.py
+++ b/src/peft/peft_model.py
@@ -750,11 +750,11 @@ def get_prompt(
                 past_key_values = post_process_fn(past_key_values)
             elif ("gemma2" in model_type) or ("gemma3_text" in model_type):
                 # TODO: remove this logic once transformers < 4.56 is dropped
-                transformers_le_4_55 = packaging.version.parse(transformers.__version__) <= packaging.version.parse(
-                    "4.55.2"
+                transformers_lt_4_56 = packaging.version.parse(transformers.__version__) <= packaging.version.parse(
+                    "4.56.0.dev0"
                 )
                 # Gemma2 and Gemma3 only support HybridCache (which does not have the from_legacy_cache method)
-                if transformers_le_4_55 and ((max_cache_len is None) or (max_cache_len == -1)):
+                if transformers_lt_4_56 and ((max_cache_len is None) or (max_cache_len == -1)):
                     raise ValueError(
                         "max_cache_len is missing but it should have been passed. Something went wrong, please open an "
                         "issue on GitHub with a reproducer: https://github.com/huggingface/peft/issues"
@@ -762,7 +762,7 @@
                 base_config = base_model.config
                 if hasattr(base_config, "get_text_config"):
                     base_config = base_config.get_text_config()
-                if transformers_le_4_55:
+                if transformers_lt_4_56:
                     new_cache = HybridCache(
                         config=base_config,
                         max_batch_size=batch_size,

From 5a795d5787c62ebda4f7a72238df5d01d2b28906 Mon Sep 17 00:00:00 2001
From: Benjamin Bossan
Date: Thu, 21 Aug 2025 12:28:57 +0200
Subject: [PATCH 5/6] Make HybridCache import local

It will be deprecated and later removed, so move the import inside a
version guard.
---
 src/peft/peft_model.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/peft/peft_model.py b/src/peft/peft_model.py
index 8e864ee9bc..ef9ddd1655 100644
--- a/src/peft/peft_model.py
+++ b/src/peft/peft_model.py
@@ -34,7 +34,7 @@
 from safetensors import safe_open
 from safetensors.torch import save_file as safe_save_file
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
-from transformers import Cache, DynamicCache, EncoderDecoderCache, HybridCache, PreTrainedModel
+from transformers import Cache, DynamicCache, EncoderDecoderCache, PreTrainedModel
 from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
 from transformers.utils import PushToHubMixin
@@ -763,6 +763,10 @@ def get_prompt(
                 if hasattr(base_config, "get_text_config"):
                     base_config = base_config.get_text_config()
                 if transformers_lt_4_56:
+                    # HybridCache is deprecated, and will be removed in 4.60.0
+                    # see https://github.com/huggingface/transformers/pull/40276
+                    from transformers import HybridCache
+
                     new_cache = HybridCache(
                         config=base_config,
                         max_batch_size=batch_size,

From 15ea916620a07a2a78d54002284caae08e5b7744 Mon Sep 17 00:00:00 2001
From: Benjamin Bossan
Date: Thu, 21 Aug 2025 14:17:09 +0200
Subject: [PATCH 6/6] Fix comparison from <= to <

---
 src/peft/peft_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/peft/peft_model.py b/src/peft/peft_model.py
index ef9ddd1655..075a11fc95 100644
--- a/src/peft/peft_model.py
+++ b/src/peft/peft_model.py
@@ -750,7 +750,7 @@ def get_prompt(
                 past_key_values = post_process_fn(past_key_values)
             elif ("gemma2" in model_type) or ("gemma3_text" in model_type):
                 # TODO: remove this logic once transformers < 4.56 is dropped
-                transformers_lt_4_56 = packaging.version.parse(transformers.__version__) <= packaging.version.parse(
+                transformers_lt_4_56 = packaging.version.parse(transformers.__version__) < packaging.version.parse(
                     "4.56.0.dev0"
                 )
                 # Gemma2 and Gemma3 only support HybridCache (which does not have the from_legacy_cache method)
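
The standalone snippet below illustrates the cache-type check these patches converge on: only read max_cache_len from cache types that actually define a fixed maximum, and fall back to -1 for DynamicCache. This is a minimal sketch for trying the failing case locally, not part of PEFT; the helper name safe_max_cache_len is made up here, and it assumes a transformers version that exports Cache and DynamicCache at the top level.

# Illustrative sketch only (not PEFT code): mirrors the isinstance-based check
# from PATCH 1/6, which avoids touching max_cache_len on a DynamicCache, where
# reading the attribute can raise ValueError on affected transformers versions.
import transformers
from transformers import DynamicCache


def safe_max_cache_len(cache) -> int:
    """Return the cache's fixed maximum length, or -1 if it has none."""
    # Only fixed-size caches (e.g. StaticCache, HybridCache) expose a meaningful
    # max_cache_len; DynamicCache grows on demand, so skip the attribute access.
    if isinstance(cache, transformers.Cache) and not isinstance(cache, DynamicCache):
        return cache.max_cache_len
    return -1  # -1 means "no max length", same convention as in the patch


if __name__ == "__main__":
    cache = DynamicCache()
    # hasattr(cache, "max_cache_len") is what raised ValueError in the traceback
    # from PATCH 1/6; the isinstance check above never touches the attribute.
    print(safe_max_cache_len(cache))  # -> -1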