use logger.warning_once to avoid massive outputs #27428

Merged (2 commits, Dec 11, 2023)

Changes from all commits
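For context, warning_once is a helper that transformers adds to its loggers (via transformers.utils.logging) so that an identical message is emitted only once per process; switching these calls from logger.info to logger.warning_once is what stops the padding notice from being printed on every forward pass. A minimal sketch of the deduplication idea, using plain stdlib logging rather than the library's internal wrapper:

import functools
import logging

@functools.lru_cache(None)
def warning_once(logger: logging.Logger, msg: str) -> None:
    # Repeated (logger, msg) pairs hit the lru_cache, so the record
    # is emitted only the first time a given message is seen.
    logger.warning(msg)

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("sketch")

for _ in range(1000):
    warning_once(logger, "Input ids are automatically padded ...")
# The warning is printed exactly once, not 1000 times.

Note that the messages below interpolate seq_len and padding_len into an f-string, so each distinct sequence length still produces its own one-time warning; only exact repeats are suppressed.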
2 changes: 1 addition & 1 deletion src/transformers/models/big_bird/modeling_big_bird.py

@@ -2227,7 +2227,7 @@ def _pad_to_block_size(
 
         padding_len = (block_size - seq_len % block_size) % block_size
         if padding_len > 0:
-            logger.info(
+            logger.warning_once(
                 f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                 f"`config.block_size`: {block_size}"
             )
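The modulo expression that guards these messages is worth a quick worked example: the outer % folds the already-aligned case down to zero padding, which is why the warning is skipped entirely when no padding is needed. A small standalone check (the helper name pad_len is ours, not the library's):

def pad_len(seq_len: int, block_size: int) -> int:
    # Smallest non-negative padding that makes seq_len a multiple of
    # block_size; the outer % maps the already-aligned case to 0.
    return (block_size - seq_len % block_size) % block_size

assert pad_len(2227, 64) == 13   # 2227 + 13 = 2240 = 35 * 64
assert pad_len(2240, 64) == 0    # already a multiple, no warning is logged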
src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py

@@ -2018,7 +2018,7 @@ def _pad_to_block_size(self, hidden_states: torch.Tensor, attention_mask: torch.
 
         padding_len = (block_size - seq_len % block_size) % block_size
         if padding_len > 0:
-            logger.info(
+            logger.warning_once(
                 f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                 f"`config.block_size`: {block_size}"
             )
2 changes: 1 addition & 1 deletion src/transformers/models/led/modeling_led.py

@@ -1706,7 +1706,7 @@ def _pad_to_window_size(
 
         padding_len = (attention_window - seq_len % attention_window) % attention_window
         if padding_len > 0:
-            logger.info(
+            logger.warning_once(
                 f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                 f"`config.attention_window`: {attention_window}"
             )
2 changes: 1 addition & 1 deletion src/transformers/models/led/modeling_tf_led.py

@@ -1859,7 +1859,7 @@ def _pad_to_window_size(
         padding_len = (attention_window - seq_len % attention_window) % attention_window
 
         if padding_len > 0:
-            logger.info(
+            logger.warning_once(
                 f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                 f"`config.attention_window`: {attention_window}"
             )
6 changes: 3 additions & 3 deletions src/transformers/models/longformer/modeling_longformer.py

@@ -1598,7 +1598,7 @@ def _pad_to_window_size(
 
         # this path should be recorded in the ONNX export, it is fine with padding_len == 0 as well
         if padding_len > 0:
-            logger.info(
+            logger.warning_once(
                 f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                 f"`config.attention_window`: {attention_window}"
             )

@@ -1917,7 +1917,7 @@ def forward(
         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
         if global_attention_mask is None:
-            logger.info("Initializing global attention on CLS token...")
+            logger.warning_once("Initializing global attention on CLS token...")
             global_attention_mask = torch.zeros_like(input_ids)
             # global attention on cls token
             global_attention_mask[:, 0] = 1

@@ -2270,7 +2270,7 @@ def forward(
 
         # set global attention on question tokens
         if global_attention_mask is None and input_ids is not None:
-            logger.info("Initializing global attention on multiple choice...")
+            logger.warning_once("Initializing global attention on multiple choice...")
            # put global attention on all tokens after `config.sep_token_id`
             global_attention_mask = torch.stack(
                 [
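The default masks that these messages announce are easy to picture. A small standalone illustration of the CLS-token case from the first forward hunk above (the token ids are made up for the example):

import torch

input_ids = torch.tensor([[101, 2023, 2003, 102],
                          [101, 2054, 2015, 102]])

# When the caller passes no global_attention_mask, the model builds one
# with global attention only on the CLS token at position 0.
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, 0] = 1
print(global_attention_mask)
# tensor([[1, 0, 0, 0],
#         [1, 0, 0, 0]])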
src/transformers/models/longformer/modeling_tf_longformer.py

@@ -2213,7 +2213,7 @@ def call(
             )
             global_attention_mask = tf.cast(tf.fill(shape_list(input_ids), value=0), tf.int64)
         else:
-            logger.info("Initializing global attention on question tokens...")
+            logger.warning_once("Initializing global attention on question tokens...")
             # put global attention on all tokens until `config.sep_token_id` is reached
             sep_token_indices = tf.where(input_ids == self.config.sep_token_id)
             sep_token_indices = tf.cast(sep_token_indices, dtype=tf.int64)

@@ -2341,7 +2341,7 @@ def call(
         global_attention_mask = tf.cast(global_attention_mask, tf.int64)
 
         if global_attention_mask is None and input_ids is not None:
-            logger.info("Initializing global attention on CLS token...")
+            logger.warning_once("Initializing global attention on CLS token...")
             # global attention on cls token
             global_attention_mask = tf.zeros_like(input_ids)
             updates = tf.ones(shape_list(input_ids)[0], dtype=tf.int64)
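For the question-tokens branch above, the library finds every occurrence of config.sep_token_id with tf.where and builds the mask from those indices. A simplified equivalent, assuming a single [SEP] separates question from context (a sketch of the idea, not the library's exact tf.where-based implementation):

import tensorflow as tf

input_ids = tf.constant([[101, 7592, 102, 2088, 2003, 102]])  # [CLS] question [SEP] context [SEP]
sep_token_id = 102

# Global attention on everything up to and including the first [SEP],
# i.e. on the question tokens.
first_sep = tf.argmax(tf.cast(input_ids == sep_token_id, tf.int32), axis=-1)
positions = tf.cast(tf.range(tf.shape(input_ids)[-1]), tf.int64)[tf.newaxis, :]
global_attention_mask = tf.cast(positions <= first_sep[:, tf.newaxis], tf.int64)
print(global_attention_mask.numpy())  # [[1 1 1 0 0 0]]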
2 changes: 1 addition & 1 deletion src/transformers/models/reformer/modeling_reformer.py

@@ -2139,7 +2139,7 @@ def _pad_to_mult_of_chunk_length(
         padded_seq_length=None,
         device=None,
     ):
-        logger.info(
+        logger.warning_once(
            f"Input ids are automatically padded from {input_shape[-1]} to {input_shape[-1] + padding_length} to be a "
            f"multiple of `config.chunk_length`: {padded_seq_length}"
        )