
Commit 1e6f647

Removed redundant .get
1 parent eb00c25 commit 1e6f647

1 file changed: +1 -2 lines changed

src/transformers/integrations/executorch.py

Lines changed: 1 addition & 2 deletions
@@ -517,7 +517,6 @@ def __init__(
         )
         head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
         num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
-        device = device
         dtype = self.model.dtype
         # We need this call to initialize all the layers (otherwise it's done lazily, which is not exportable)
         self.static_cache.early_initialization(batch_size, num_heads, head_dim, dtype, device)
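The line removed in this hunk was a self-assignment: device is already bound at this point in __init__ (it is still used two lines below in the call to early_initialization), so `device = device` rebinds the name to the value it already holds and has no effect. A minimal, self-contained sketch of the pattern, not taken from the repository:

    def build(device="cpu"):
        device = device  # no-op: the name is rebound to the value it already holds
        return device

    print(build("cuda"))  # prints "cuda" with or without the self-assignment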
@@ -662,7 +661,7 @@ def __init__(
             raise AssertionError("Model must have caching enabled.")

         # Initialize the cache
-        self.cache = StaticCache(config=config, max_cache_len=generation_config.cache_config.get("max_cache_len"))
+        self.cache = StaticCache(config=config, max_cache_len=max_cache_len)
         head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
         num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
         dtype = self.model.dtype
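This hunk is the change the commit message refers to: a max_cache_len value is evidently already available as a local at this point (the hunk does not show where it is derived), so reading the same key from generation_config.cache_config again while constructing the StaticCache was redundant. A self-contained sketch of the equivalence, assuming the local was obtained from the cache config earlier in __init__; FakeStaticCache and the literal config below are illustrative stand-ins, not transformers code:

    class FakeStaticCache:
        # Stand-in for transformers' StaticCache, reduced to the argument of interest.
        def __init__(self, config=None, max_cache_len=None):
            self.max_cache_len = max_cache_len

    cache_config = {"max_cache_len": 1024}             # stand-in for generation_config.cache_config
    max_cache_len = cache_config.get("max_cache_len")  # resolved once, earlier on

    # Before: the same key is read from the config a second time at construction.
    before = FakeStaticCache(config=None, max_cache_len=cache_config.get("max_cache_len"))
    # After: the already-resolved local is passed directly.
    after = FakeStaticCache(config=None, max_cache_len=max_cache_len)

    assert before.max_cache_len == after.max_cache_len  # identical result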
