
Commit 1989e87

Merge branch 'main' into ruff_ci
2 parents 74d06e9 + 895b3eb commit 1989e87

28 files changed: +40 −40 lines changed

src/transformers/generation/flax_utils.py

Lines changed: 1 addition & 1 deletion
@@ -221,7 +221,7 @@ def _expand_to_num_beams(tensor, num_beams):
     def _adapt_logits_for_beam_search(self, logits):
         """
         This function can be overwritten in the specific modeling_flax_<model-name>.py classes to allow for custom beam
-        search behavior. Note that the only model that overwrites this method is [`~transformes.FlaxMarianMTModel`].
+        search behavior. Note that the only model that overwrites this method is [`~transformers.FlaxMarianMTModel`].
         """
         return logits
 
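
For context, the docstring fixed above describes an override hook on `FlaxGenerationMixin`. A minimal sketch of what such an override can look like, assuming `(batch, num_beams, vocab)` logits; the class name and the masking rule are illustrative, not the actual `FlaxMarianMTModel` implementation:

```python
import jax.numpy as jnp

from transformers import FlaxGenerationMixin


class MyFlaxModel(FlaxGenerationMixin):  # hypothetical model class
    def _adapt_logits_for_beam_search(self, logits):
        # Illustrative rule: forbid token id 0 during beam search by
        # setting its score to -inf, so no beam can ever select it.
        return logits.at[:, :, 0].set(-jnp.inf)
```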

src/transformers/kernels/yoso/fast_lsh_cumulation_cuda.cu

Lines changed: 5 additions & 5 deletions
@@ -779,12 +779,12 @@ __global__ void lsh_weighted_cumulation_ver4_step2_cuda_kernel(
 
     __syncthreads();
 
-    int num_distint_query = query_counter[0];
+    int num_distinct_query = query_counter[0];
 
-    if (num_distint_query > 0) {
-      for (int idx_base = 0; idx_base < num_distint_query; idx_base = idx_base + num_warps) {
+    if (num_distinct_query > 0) {
+      for (int idx_base = 0; idx_base < num_distinct_query; idx_base = idx_base + num_warps) {
         int idx = idx_base + warp_idx;
-        if (idx < num_distint_query) {
+        if (idx < num_distinct_query) {
          int query_idx = inserted_query[idx];
          int batch_idx__query_idx = batch_idx * num_query + query_idx;
 
@@ -813,7 +813,7 @@ __global__ void lsh_weighted_cumulation_ver4_step2_cuda_kernel(
       }
     } else {
 
-      // all computation is completed if num_distint_query == 0
+      // all computation is completed if num_distinct_query == 0
       break;
 
     }
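
The renamed loop above distributes `num_distinct_query` work items across `num_warps` warps in a strided pattern. A small Python sketch of the same indexing arithmetic (the values are illustrative, not taken from the kernel's actual launch configuration):

```python
num_warps = 4            # illustrative
num_distinct_query = 10  # illustrative

for warp_idx in range(num_warps):  # on the GPU these run concurrently
    handled = [
        idx_base + warp_idx
        for idx_base in range(0, num_distinct_query, num_warps)
        if idx_base + warp_idx < num_distinct_query  # tail guard, as in the kernel
    ]
    print(f"warp {warp_idx} handles queries {handled}")
# warp 0 -> [0, 4, 8]; warp 1 -> [1, 5, 9]; warp 2 -> [2, 6]; warp 3 -> [3, 7]
```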

src/transformers/models/clap/modeling_clap.py

Lines changed: 1 addition & 1 deletion
@@ -1717,7 +1717,7 @@ def forward(
         >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
         >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-unfused")
 
-        >>> input_text = ["Sound of a dog", "Sound of vaccum cleaner"]
+        >>> input_text = ["Sound of a dog", "Sound of vacuum cleaner"]
 
         >>> inputs = processor(text=input_text, audios=audio_sample, return_tensors="pt", padding=True)
 
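
For readers following the doctest, a hedged sketch of how this kind of snippet is typically completed into similarity scores, assuming the model returns CLIP-style `logits_per_audio` (which `ClapModel` does):

```python
>>> outputs = model(**inputs)
>>> logits_per_audio = outputs.logits_per_audio  # audio-text similarity scores
>>> probs = logits_per_audio.softmax(dim=-1)     # one probability per input_text label
```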

src/transformers/models/clipseg/modeling_clipseg.py

Lines changed: 1 addition & 1 deletion
@@ -625,7 +625,7 @@ def forward(
                 input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
             ]
         else:
-            # The config gets updated `eos_token_id` from PR #24773 (so the use of exta new tokens is possible)
+            # The config gets updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
             pooled_output = last_hidden_state[
                 torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                 # We need to get the first position of `eos_token_id` value (`pad_token_ids` might equal to `eos_token_id`)
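
The surrounding code gathers one hidden state per sequence at the first `eos_token_id` position. A self-contained sketch of the same `argmax`-based gather trick on toy tensors (shapes are illustrative, not CLIPSeg's):

```python
import torch

eos_token_id = 2
input_ids = torch.tensor([[5, 7, 2, 0], [9, 2, 2, 0]])  # (batch=2, seq_len=4)
last_hidden_state = torch.randn(2, 4, 8)                 # (batch, seq_len, hidden)

# (input_ids == eos) yields 0/1 ints; argmax returns the index of the FIRST
# maximal value, i.e. the first eos position in each sequence.
eos_positions = (input_ids == eos_token_id).int().argmax(dim=-1)

pooled_output = last_hidden_state[torch.arange(2), eos_positions]
print(eos_positions)        # tensor([2, 1])
print(pooled_output.shape)  # torch.Size([2, 8])
```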

src/transformers/models/deepseek_vl/image_processing_deepseek_vl.py

Lines changed: 1 addition & 1 deletion
@@ -355,7 +355,7 @@ def pad_to_square(
         background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
             The color to use for the padding. Can be an integer for single channel or a
             tuple of integers representing for multi-channel images. If passed as integer
-            in mutli-channel mode, it will default to `0` in subsequent channels.
+            in multi-channel mode, it will default to `0` in subsequent channels.
         data_format (`str` or `ChannelDimension`, *optional*):
             The channel dimension format for the output image. Can be one of:
             - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
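
The `background_color` rule documented above is easier to see in code. A hypothetical re-implementation of the behavior the docstring describes, not the library's own `pad_to_square` (placement is centered here purely for illustration):

```python
import numpy as np

def pad_to_square_sketch(image: np.ndarray, background_color=0) -> np.ndarray:
    """Pad an (H, W, C) channels-last image to a square canvas."""
    h, w, c = image.shape
    side = max(h, w)
    # An int fills the first channel; the remaining channels default to 0,
    # matching the "defaults to 0 in subsequent channels" rule above.
    if isinstance(background_color, int):
        background_color = (background_color,) + (0,) * (c - 1)
    canvas = np.empty((side, side, c), dtype=image.dtype)
    canvas[...] = np.asarray(background_color, dtype=image.dtype)
    top, left = (side - h) // 2, (side - w) // 2
    canvas[top : top + h, left : left + w] = image
    return canvas
```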

src/transformers/models/deepseek_vl/image_processing_deepseek_vl_fast.py

Lines changed: 1 addition & 1 deletion
@@ -113,7 +113,7 @@ def pad_to_square(
         background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
             The color to use for the padding. Can be an integer for single channel or a
             tuple of integers representing for multi-channel images. If passed as integer
-            in mutli-channel mode, it will default to `0` in subsequent channels.
+            in multi-channel mode, it will default to `0` in subsequent channels.
 
     Returns:
         `torch.Tensor`: The padded images.

src/transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py

Lines changed: 1 addition & 1 deletion
@@ -428,7 +428,7 @@ def pad_to_square(
         background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
             The color to use for the padding. Can be an integer for single channel or a
             tuple of integers representing for multi-channel images. If passed as integer
-            in mutli-channel mode, it will default to `0` in subsequent channels.
+            in multi-channel mode, it will default to `0` in subsequent channels.
         data_format (`str` or `ChannelDimension`, *optional*):
             The channel dimension format for the output image. Can be one of:
             - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.

src/transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid_fast.py

Lines changed: 1 addition & 1 deletion
@@ -147,7 +147,7 @@ def pad_to_square(
         background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
             The color to use for the padding. Can be an integer for single channel or a
             tuple of integers representing for multi-channel images. If passed as integer
-            in mutli-channel mode, it will default to `0` in subsequent channels.
+            in multi-channel mode, it will default to `0` in subsequent channels.
 
     Returns:
         `torch.Tensor`: The padded images.

src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py

Lines changed: 2 additions & 2 deletions
@@ -75,9 +75,9 @@ def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.T
 
     Args:
         router_probs (`torch.Tensor`):
-            Probability assigned to each expert per token. Shape: [batch_size, seqeunce_length, num_experts].
+            Probability assigned to each expert per token. Shape: [batch_size, sequence_length, num_experts].
         expert_indices (`torch.Tensor`):
-            Indices tensor of shape [batch_size, seqeunce_length] identifying the selected expert for a given token.
+            Indices tensor of shape [batch_size, sequence_length] identifying the selected expert for a given token.
 
     Returns:
         The auxiliary loss.
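
The two tensors documented above are the usual inputs to a Switch-Transformer-style auxiliary loss. A hedged sketch of how such a loss is commonly computed from them, as an illustration of the technique rather than GPTSAN's exact implementation:

```python
import torch

def load_balancing_loss_sketch(router_probs, expert_indices):
    # router_probs: float [batch_size, sequence_length, num_experts]
    # expert_indices: long [batch_size, sequence_length]
    num_experts = router_probs.shape[-1]
    # Fraction of tokens hard-routed to each expert.
    expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts).float()
    tokens_per_expert = expert_mask.mean(dim=(0, 1))        # [num_experts]
    # Average router probability mass given to each expert.
    router_prob_per_expert = router_probs.mean(dim=(0, 1))  # [num_experts]
    # Minimized (value 1.0) when both distributions are uniform.
    return num_experts * torch.sum(tokens_per_expert * router_prob_per_expert)
```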

src/transformers/models/dpt/image_processing_dpt_fast.py

Lines changed: 2 additions & 2 deletions
@@ -62,7 +62,7 @@
 class DPTFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
     """
     ensure_multiple_of (`int`, *optional*, defaults to 1):
-        If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Can be overidden
+        If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Can be overridden
         by `ensure_multiple_of` in `preprocess`.
     do_pad (`bool`, *optional*, defaults to `False`):
         Whether to apply center padding. This was introduced in the DINOv2 paper, which uses the model in
@@ -72,7 +72,7 @@ class DPTFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
         DINOv2 paper, which uses the model in combination with DPT.
     keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
         If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. Can
-        be overidden by `keep_aspect_ratio` in `preprocess`.
+        be overridden by `keep_aspect_ratio` in `preprocess`.
     do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
         Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
         is used for background, and background itself is not included in all classes of a dataset (e.g.
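
The `ensure_multiple_of` constraint documented here is plain rounding arithmetic. A minimal sketch of what such a resizer typically applies to each target dimension (an illustration, not DPT's exact implementation):

```python
def constrain_to_multiple_of(value: float, multiple: int = 1) -> int:
    """Round a target dimension to the nearest multiple of `multiple`."""
    return max(multiple, round(value / multiple) * multiple)

# With ensure_multiple_of=14 (a common ViT patch size), a 520-pixel target
# is snapped to 518, since 518 = 37 * 14.
print(constrain_to_multiple_of(520, 14))  # 518
```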
