Commit

revert
Ita Zaporozhets authored and Ita Zaporozhets committed May 14, 2024
1 parent 7a095f2 commit 336b263
Showing 1 changed file with 0 additions and 12 deletions.
src/transformers/tokenization_utils_base.py (12 changes: 0 additions & 12 deletions)
@@ -2860,7 +2860,6 @@ def __call__(
"return_special_tokens_mask": return_special_tokens_mask,
"return_offsets_mapping": return_offsets_mapping,
"return_length": return_length,
"split_special_tokens": kwargs.pop("split_special_tokens", self.split_special_tokens),
"verbose": verbose,
}
all_kwargs.update(kwargs)
@@ -2905,7 +2904,6 @@ def _call_one(
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
- split_special_tokens: bool = False,
**kwargs,
) -> BatchEncoding:
# Input type checking for clearer error
@@ -2975,7 +2973,6 @@ def _is_valid_text_input(t):
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
- split_special_tokens=split_special_tokens,
**kwargs,
)
else:
@@ -2997,7 +2994,6 @@ def _is_valid_text_input(t):
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
- split_special_tokens=split_special_tokens,
**kwargs,
)

@@ -3071,7 +3067,6 @@ def encode_plus(
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
- split_special_tokens=kwargs.pop("split_special_tokens", self.split_special_tokens),
**kwargs,
)

@@ -3094,7 +3089,6 @@ def _encode_plus(
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
- split_special_tokens: bool = False,
**kwargs,
) -> BatchEncoding:
raise NotImplementedError
@@ -3125,7 +3119,6 @@ def batch_encode_plus(
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
- split_special_tokens: bool = False,
**kwargs,
) -> BatchEncoding:
"""
@@ -3171,7 +3164,6 @@ def batch_encode_plus(
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
- split_special_tokens=split_special_tokens,
**kwargs,
)

@@ -3200,7 +3192,6 @@ def _batch_encode_plus(
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
- split_special_tokens: bool = False,
**kwargs,
) -> BatchEncoding:
raise NotImplementedError
@@ -3278,9 +3269,6 @@ def pad(
- `'np'`: Return Numpy `np.ndarray` objects.
verbose (`bool`, *optional*, defaults to `True`):
Whether or not to print more information and warnings.
- split_special_tokens ( `bool`, *optional*, default to `False`):
-     Whether or not the special tokens should be encoded. If `True`, they are encoded, and will
-     be split by the tokenizer. This should be activated for safe prompting.
"""
if self.__class__.__name__.endswith("Fast"):
if not self.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False):
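All twelve deletions above (marked with a leading "-") strip the same keyword argument, `split_special_tokens`, out of the base tokenization call path (`__call__`, `_call_one`, `encode_plus`/`_encode_plus`, `batch_encode_plus`/`_batch_encode_plus`) and out of the `pad` docstring. For readers unfamiliar with the flag, the sketch below shows what it controls on a tokenizer that supports it; the checkpoint name, the slow-tokenizer choice, and setting the flag at init time are illustrative assumptions, not part of this commit.

# Minimal, illustrative sketch only; not part of the diff above.
# Assumes a tokenizer that supports split_special_tokens; the checkpoint and
# use_fast=False are assumptions for demonstration.
from transformers import AutoTokenizer

text = "Hello [SEP] world"

# Default behavior: "[SEP]" is matched as a special token and kept whole.
default_tok = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=False)
print(default_tok.tokenize(text))

# With split_special_tokens enabled, special tokens are tokenized like ordinary
# text instead of being matched as single units, which the removed docstring
# describes as the safer option for prompting.
split_tok = AutoTokenizer.from_pretrained(
    "bert-base-uncased", use_fast=False, split_special_tokens=True
)
print(split_tok.tokenize(text))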
