Commit
use logger.warning_once to avoid massive outputs (huggingface#27428)
* use logger.warning_once to avoid massive outputs when training/finetuning longformer

* update more
ranchlai authored and iantbutler01 committed Dec 16, 2023
1 parent 0af7c48 commit fff5577
Showing 7 changed files with 10 additions and 10 deletions.
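Each change replaces a `logger.info` call that fires on every padded batch with `logger.warning_once`, which `transformers` attaches to the standard `logging.Logger` so that a given message is emitted only the first time it is seen. Below is a minimal sketch of the deduplication idea, assuming an `lru_cache`-based patch similar to the one in `transformers.utils.logging` (simplified, not the exact library source):

```python
import functools
import logging

@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    # Behaves like logger.warning(), but lru_cache keys on (logger, message),
    # so repeated calls with an identical message become no-ops.
    self.warning(*args, **kwargs)

# Attach to the standard Logger class (assumed patching strategy for this sketch).
logging.Logger.warning_once = warning_once

logger = logging.getLogger("transformers.models.longformer")
for _ in range(1000):
    # Printed a single time instead of once per training step.
    logger.warning_once("Initializing global attention on CLS token...")
```

Because the cache keys on the formatted message, padding notices whose interpolated lengths differ from batch to batch are deduplicated per distinct message rather than collapsed to a single line.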
2 changes: 1 addition & 1 deletion src/transformers/models/big_bird/modeling_big_bird.py
@@ -2223,7 +2223,7 @@ def _pad_to_block_size(
 
         padding_len = (block_size - seq_len % block_size) % block_size
         if padding_len > 0:
-            logger.info(
+            logger.warning_once(
                 f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                 f"`config.block_size`: {block_size}"
             )
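For context on why this log line can be so noisy: `(block_size - seq_len % block_size) % block_size` is zero only when the sequence length is already a multiple of the block size, so the padding branch above runs on essentially every forward pass over unaligned batches. A small illustration with made-up values (not from the diff):

```python
block_size = 64  # hypothetical config.block_size

for seq_len in (100, 128, 129):
    # The outer % block_size folds the "already aligned" case from block_size down to 0.
    padding_len = (block_size - seq_len % block_size) % block_size
    print(seq_len, "->", padding_len)  # 100 -> 28, 128 -> 0, 129 -> 63
```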
2 changes: 1 addition & 1 deletion src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
@@ -2014,7 +2014,7 @@ def _pad_to_block_size(self, hidden_states: torch.Tensor, attention_mask: torch.
 
         padding_len = (block_size - seq_len % block_size) % block_size
         if padding_len > 0:
-            logger.info(
+            logger.warning_once(
                 f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                 f"`config.block_size`: {block_size}"
             )
2 changes: 1 addition & 1 deletion src/transformers/models/led/modeling_led.py
@@ -1706,7 +1706,7 @@ def _pad_to_window_size(
 
         padding_len = (attention_window - seq_len % attention_window) % attention_window
         if padding_len > 0:
-            logger.info(
+            logger.warning_once(
                 f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                 f"`config.attention_window`: {attention_window}"
             )
2 changes: 1 addition & 1 deletion src/transformers/models/led/modeling_tf_led.py
@@ -1859,7 +1859,7 @@ def _pad_to_window_size(
         padding_len = (attention_window - seq_len % attention_window) % attention_window
 
         if padding_len > 0:
-            logger.info(
+            logger.warning_once(
                 f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                 f"`config.attention_window`: {attention_window}"
             )
6 changes: 3 additions & 3 deletions src/transformers/models/longformer/modeling_longformer.py
@@ -1598,7 +1598,7 @@ def _pad_to_window_size(
 
         # this path should be recorded in the ONNX export, it is fine with padding_len == 0 as well
         if padding_len > 0:
-            logger.info(
+            logger.warning_once(
                 f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                 f"`config.attention_window`: {attention_window}"
             )
@@ -1917,7 +1917,7 @@ def forward(
         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
         if global_attention_mask is None:
-            logger.info("Initializing global attention on CLS token...")
+            logger.warning_once("Initializing global attention on CLS token...")
             global_attention_mask = torch.zeros_like(input_ids)
             # global attention on cls token
             global_attention_mask[:, 0] = 1
@@ -2270,7 +2270,7 @@ def forward(
 
         # set global attention on question tokens
         if global_attention_mask is None and input_ids is not None:
-            logger.info("Initializing global attention on multiple choice...")
+            logger.warning_once("Initializing global attention on multiple choice...")
             # put global attention on all tokens after `config.sep_token_id`
             global_attention_mask = torch.stack(
                 [
4 changes: 2 additions & 2 deletions src/transformers/models/longformer/modeling_tf_longformer.py
@@ -2213,7 +2213,7 @@ def call(
                 )
                 global_attention_mask = tf.cast(tf.fill(shape_list(input_ids), value=0), tf.int64)
             else:
-                logger.info("Initializing global attention on question tokens...")
+                logger.warning_once("Initializing global attention on question tokens...")
                 # put global attention on all tokens until `config.sep_token_id` is reached
                 sep_token_indices = tf.where(input_ids == self.config.sep_token_id)
                 sep_token_indices = tf.cast(sep_token_indices, dtype=tf.int64)
@@ -2341,7 +2341,7 @@ def call(
             global_attention_mask = tf.cast(global_attention_mask, tf.int64)
 
         if global_attention_mask is None and input_ids is not None:
-            logger.info("Initializing global attention on CLS token...")
+            logger.warning_once("Initializing global attention on CLS token...")
             # global attention on cls token
             global_attention_mask = tf.zeros_like(input_ids)
            updates = tf.ones(shape_list(input_ids)[0], dtype=tf.int64)
2 changes: 1 addition & 1 deletion src/transformers/models/reformer/modeling_reformer.py
@@ -2139,7 +2139,7 @@ def _pad_to_mult_of_chunk_length(
         padded_seq_length=None,
         device=None,
     ):
-        logger.info(
+        logger.warning_once(
             f"Input ids are automatically padded from {input_shape[-1]} to {input_shape[-1] + padding_length} to be a "
             f"multiple of `config.chunk_length`: {padded_seq_length}"
         )
