diff --git a/src/transformers/models/code_llama/tokenization_code_llama_fast.py b/src/transformers/models/code_llama/tokenization_code_llama_fast.py
index 84fbddeecc2..b815a6d3e92 100644
--- a/src/transformers/models/code_llama/tokenization_code_llama_fast.py
+++ b/src/transformers/models/code_llama/tokenization_code_llama_fast.py
@@ -151,7 +151,6 @@ def __init__(
         self.update_post_processor()
 
         self.vocab_file = vocab_file
-        self.can_save_slow_tokenizer = False if not self.vocab_file else True
 
         self._prefix_token = prefix_token
         self._middle_token = middle_token
@@ -159,6 +158,10 @@ def __init__(
         self._eot_token = eot_token
         self.fill_token = fill_token
 
+    @property
+    def can_save_slow_tokenizer(self) -> bool:
+        return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
     def update_post_processor(self):
         """
         Updates the underlying post processor with the current `bos_token` and `eos_token`.
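
A minimal sketch of the behavior this diff changes (the local file paths below are hypothetical, not part of the diff): the old eager attribute was computed once in `__init__` from the truthiness of `vocab_file`, so any non-empty path reported `True` even if the file was missing; the new property re-checks `os.path.isfile` lazily, which is what `save_pretrained` consults when deciding whether the slow (SentencePiece) files can be written.

```python
# Sketch only, not part of the diff. Assumes a local tokenizer.json exists;
# the tokenizer.model path is hypothetical and may or may not exist on disk.
from transformers import CodeLlamaTokenizerFast

tok = CodeLlamaTokenizerFast(
    tokenizer_file="tokenizer.json",   # fast tokenizer definition
    vocab_file="tokenizer.model",      # SentencePiece model, possibly missing
)

# Old behavior: `can_save_slow_tokenizer` was assigned in __init__ as
#     False if not self.vocab_file else True
# i.e. True whenever a non-empty path was passed, even for a missing file.
# New behavior: the property checks os.path.isfile(self.vocab_file) on access,
# so it only reports True when the SentencePiece file is actually present.
print(tok.can_save_slow_tokenizer)

# save_pretrained() uses this flag to decide whether the slow (SentencePiece)
# files are written alongside the fast tokenizer files.
tok.save_pretrained("saved_tokenizer")
```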