remove defaults to None if optional (#11703)
PhilipMay authored May 12, 2021
1 parent 6797cdc commit 77f4c46
Showing 11 changed files with 18 additions and 18 deletions.
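The change is mechanical: in the Hugging Face docstring convention, an argument marked `optional` is understood to default to :obj:`None` unless a different default is stated, so spelling out "defaults to :obj:`None`" is redundant. A minimal before/after sketch, using a hypothetical helper for illustration:

```python
def build_inputs(token_ids_0, token_ids_1=None):  # hypothetical helper, for illustration only
    """
    Args:
        token_ids_0 (:obj:`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (:obj:`List[int]`, `optional`):
            Optional second list of IDs for sequence pairs.
    """
    # The old style wrote "`optional`, defaults to :obj:`None`" on the second
    # argument; `optional` alone already implies that default.
    return token_ids_0 if token_ids_1 is None else token_ids_0 + token_ids_1
```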
4 changes: 2 additions & 2 deletions examples/research_projects/wav2vec2/run_asr.py
@@ -144,15 +144,15 @@ class Orthography:
Args:
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to accept lowercase input and lowercase the output when decoding.
- vocab_file (:obj:`str`, `optional`, defaults to :obj:`None`):
+ vocab_file (:obj:`str`, `optional`):
File containing the vocabulary.
word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`"|"`):
The token used for delimiting words; it needs to be in the vocabulary.
translation_table (:obj:`Dict[str, str]`, `optional`, defaults to :obj:`{}`):
Table to use with `str.translate()` when preprocessing text (e.g., "-" -> " ").
words_to_remove (:obj:`Set[str]`, `optional`, defaults to :obj:`set()`):
Words to remove when preprocessing text (e.g., "sil").
- untransliterator (:obj:`Callable[[str], str]`, `optional`, defaults to :obj:`None`):
+ untransliterator (:obj:`Callable[[str], str]`, `optional`):
Function that untransliterates text back into native writing system.
"""

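A rough construction sketch for the arguments documented above, assuming `Orthography` accepts them as keyword arguments (the class body is collapsed in this diff); run it from `examples/research_projects/wav2vec2/`:

```python
from run_asr import Orthography  # the module shown above; import path assumed

orthography = Orthography(
    do_lower_case=True,
    vocab_file=None,                 # `optional`: implied default of None
    word_delimiter_token="|",
    translation_table={"-": " "},
    words_to_remove={"sil"},
    untransliterator=None,           # `optional`: implied default of None
)
```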
2 changes: 1 addition & 1 deletion src/transformers/debug_utils.py
@@ -118,7 +118,7 @@ class DebugUnderflowOverflow:
How many frames back to record
trace_batch_nums(:obj:`List[int]`, `optional`, defaults to ``[]``):
Which batch numbers to trace (turns detection off)
- abort_after_batch_num (:obj:`int`, `optional`, defaults to :obj:`None`):
+ abort_after_batch_num (:obj:`int`, `optional`):
Whether to abort after a certain batch number has finished
"""
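For context, a minimal sketch of attaching the detector documented above; `abort_after_batch_num` is simply left at its implied default of `None`, meaning training is never cut short (the tiny checkpoint is only an example):

```python
from transformers import AutoModel
from transformers.debug_utils import DebugUnderflowOverflow

model = AutoModel.from_pretrained("sshleifer/tiny-gpt2")  # any model works
debug_overflow = DebugUnderflowOverflow(
    model,
    max_frames_to_save=21,     # how many frames back to record
    trace_batch_nums=[1, 3],   # trace these batches instead of detecting
)
```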
2 changes: 1 addition & 1 deletion src/transformers/modeling_tf_utils.py
@@ -1128,7 +1128,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
- mirror(:obj:`str`, `optional`, defaults to :obj:`None`):
+ mirror(:obj:`str`, `optional`):
Mirror source to accelerate downloads in China. If you are from China and have an accessibility
problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
Please refer to the mirror site for more information.
2 changes: 1 addition & 1 deletion src/transformers/modeling_utils.py
@@ -975,7 +975,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
- mirror(:obj:`str`, `optional`, defaults to :obj:`None`):
+ mirror(:obj:`str`, `optional`):
Mirror source to accelerate downloads in China. If you are from China and have an accessibility
problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
Please refer to the mirror site for more information.
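The same `mirror` wording appears in both the TF and PyTorch `from_pretrained` docstrings above; a small sketch of the documented call, with `mirror` left at its implied default of `None` (the checkpoint name is just an example):

```python
from transformers import BertModel

# `revision` can be a branch name, a tag name, or a commit id.
model = BertModel.from_pretrained("bert-base-uncased", revision="main")
```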
4 changes: 2 additions & 2 deletions src/transformers/models/albert/tokenization_albert_fast.py
@@ -172,7 +172,7 @@ def build_inputs_with_special_tokens(
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added
- token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
+ token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
@@ -201,7 +201,7 @@ def create_token_type_ids_from_sequences(
Args:
token_ids_0 (:obj:`List[int]`):
List of ids.
- token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
+ token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
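A short sketch of the two methods documented above, with and without the optional second sequence:

```python
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
ids_a = tokenizer.encode("first sentence", add_special_tokens=False)
ids_b = tokenizer.encode("second sentence", add_special_tokens=False)

single = tokenizer.build_inputs_with_special_tokens(ids_a)        # token_ids_1 left as None
pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)   # sequence pair
segments = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
```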
@@ -152,7 +152,7 @@ def build_inputs_with_special_tokens(
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added
- token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
+ token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
@@ -174,7 +174,7 @@ def get_special_tokens_mask(
Args:
token_ids_0 (:obj:`List[int]`):
List of ids.
- token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
+ token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True if the token list is already formatted with special tokens for the model
@@ -212,7 +212,7 @@ def create_token_type_ids_from_sequences(
Args:
token_ids_0 (:obj:`List[int]`):
List of ids.
- token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
+ token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
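The file name for this hunk is not shown above, but `get_special_tokens_mask` is part of the shared tokenizer API; a small self-contained sketch using the ALBERT tokenizer (the version-independent call passes ids that already contain special tokens):

```python
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
ids = tokenizer.encode("a sentence")  # special tokens added by default
mask = tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True)
# 1 marks special tokens ([CLS], [SEP]); 0 marks ordinary sequence tokens.
```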
6 changes: 3 additions & 3 deletions src/transformers/models/ibert/quant_modules.py
@@ -124,7 +124,7 @@ class QuantAct(nn.Module):
Momentum for updating the activation quantization range.
per_channel (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use channel-wise quantization.
- channel_len (:obj:`int`, `optional`, defaults to :obj:`None`):
+ channel_len (:obj:`int`, `optional`):
Specify the channel length when `per_channel` is set to True.
quant_mode (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the layer is quantized.
@@ -755,9 +755,9 @@ class FixedPointMul(Function):
Quantization bitwidth.
z_scaling_factor (:obj:`torch.Tensor`):
Scaling factor of the output tensor.
- identity (:obj:`torch.Tensor`, `optional`, defaults to :obj:`None`):
+ identity (:obj:`torch.Tensor`, `optional`):
Identity tensor, if exists.
- identity_scaling_factor (:obj:`torch.Tensor`, `optional`, defaults to :obj:`None`):
+ identity_scaling_factor (:obj:`torch.Tensor`, `optional`):
Scaling factor of the identity tensor `identity`, if exists.
Returns:
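A construction sketch for `QuantAct`, assuming the constructor takes the documented arguments as keywords (the signature itself is collapsed in this diff); `channel_len` is only needed when `per_channel=True` and otherwise keeps its implied default of `None`:

```python
from transformers.models.ibert.quant_modules import QuantAct

# activation_bit is assumed to be the required first argument (not shown in the hunk above).
act_quant = QuantAct(activation_bit=8, per_channel=False, quant_mode=True)
```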
2 changes: 1 addition & 1 deletion src/transformers/models/mpnet/modeling_mpnet.py
@@ -444,7 +444,7 @@ def forward(self, hidden_states):
details.
`What are input IDs? <../glossary.html#input-ids>`__
- attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
+ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
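A sketch of how the documented `attention_mask` is normally obtained, with the tokenizer building it from padding (the checkpoint name is the public MPNet base model):

```python
import torch
from transformers import MPNetModel, MPNetTokenizerFast

tokenizer = MPNetTokenizerFast.from_pretrained("microsoft/mpnet-base")
model = MPNetModel.from_pretrained("microsoft/mpnet-base")

enc = tokenizer(["short", "a somewhat longer sentence"], padding=True, return_tensors="pt")
with torch.no_grad():
    outputs = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"])
```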
2 changes: 1 addition & 1 deletion src/transformers/models/mpnet/tokenization_mpnet.py
@@ -235,7 +235,7 @@ def build_inputs_with_special_tokens(
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added
- token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
+ token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
@@ -290,7 +290,7 @@ def build_inputs_with_special_tokens(
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added
- token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
+ token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
4 changes: 2 additions & 2 deletions src/transformers/pipelines/text2text_generation.py
@@ -295,10 +295,10 @@ def __call__(
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to clean up the potential extra spaces in the text output.
- src_lang (:obj:`str`, `optional`, defaults to :obj:`None`):
+ src_lang (:obj:`str`, `optional`):
The language of the input. Might be required for multilingual models. Will not have any effect for
single pair translation models
- tgt_lang (:obj:`str`, `optional`, defaults to :obj:`None`):
+ tgt_lang (:obj:`str`, `optional`):
The language of the desired output. Might be required for multilingual models. Will not have any effect
for single pair translation models
generate_kwargs:
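A sketch of the documented `src_lang`/`tgt_lang` arguments, assuming a multilingual checkpoint such as mBART-50 (the task alias and checkpoint are assumptions; single-pair translation models simply ignore these arguments):

```python
from transformers import pipeline

translator = pipeline("translation", model="facebook/mbart-large-50-many-to-many-mmt")
print(translator("The weather is nice today.", src_lang="en_XX", tgt_lang="fr_XX"))
```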
