We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent eb00c25 · commit cb7a379 · Copy full SHA for cb7a379
src/transformers/integrations/executorch.py
@@ -662,7 +662,7 @@ def __init__(
662
raise AssertionError("Model must have caching enabled.")
663
664
# Initialize the cache
665
- self.cache = StaticCache(config=config, max_cache_len=generation_config.cache_config.get("max_cache_len"))
+ self.cache = StaticCache(config=config, max_cache_len=max_cache_length)
666
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
667
num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
668
dtype = self.model.dtype
0 commit comments