Commit cb7a379

Removed redundant .get

1 parent: eb00c25

File tree: 1 file changed (+1, -1)

src/transformers/integrations/executorch.py

Lines changed: 1 addition & 1 deletion
@@ -662,7 +662,7 @@ def __init__(
             raise AssertionError("Model must have caching enabled.")

         # Initialize the cache
-        self.cache = StaticCache(config=config, max_cache_len=generation_config.cache_config.get("max_cache_len"))
+        self.cache = StaticCache(config=config, max_cache_len=max_cache_length)
         head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
         num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
         dtype = self.model.dtype
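For context, the change replaces a second dict lookup with a local variable, presumably resolved earlier in `__init__`. Below is a minimal, self-contained sketch of that pattern; `FakeGenerationConfig` and `resolve_max_cache_len` are hypothetical stand-ins for illustration, not names from the actual file.

```python
# Sketch of the pattern this commit applies: read max_cache_len from the
# cache config once, then reuse the local value instead of repeating the
# .get lookup at the StaticCache call site.
from dataclasses import dataclass, field


@dataclass
class FakeGenerationConfig:
    # Hypothetical stand-in for transformers' GenerationConfig; cache_config
    # is modeled as a plain dict here (an assumption for this sketch).
    cache_config: dict = field(default_factory=lambda: {"max_cache_len": 128})


def resolve_max_cache_len(generation_config: FakeGenerationConfig) -> int:
    # Single lookup; callers reuse the returned value (max_cache_length in
    # the diff above) wherever the cache length is needed.
    max_cache_length = generation_config.cache_config.get("max_cache_len")
    if max_cache_length is None:
        raise AssertionError("Model must have caching enabled.")
    return max_cache_length


print(resolve_max_cache_len(FakeGenerationConfig()))  # -> 128
```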
