Commit

Improved type hinting for all attention parameters (huggingface#28479)
* Changed the type hint for all attention fields to 'Optional[Tuple[torch.FloatTensor, ...]] = None' (see the sketch below the changed-files summary)

* Fixed the ruff formatting issue

* Fixed the type hint for all hidden_states fields to 'Optional[Tuple[torch.FloatTensor, ...]] = None'

* Changed type hints in these 12 scripts: modeling_dpr.py, modeling_nat.py, idefics/vision.py, modeling_tf_dpr.py, modeling_luke.py, modeling_swin.py, modeling_tf_swin.py, modeling_blip.py, modeling_tf_blip.py, modeling_donut_swin.py, modeling_dinat.py, modeling_swinv2.py

* Follow-up fixes for test failures

* Fixed type hints in these 15 scripts: modeling_xlnet.py, modeling_tf_xlnet.py, modeling_led.py, modeling_tf_led.py, modeling_rwkv.py, modeling_dpt.py, modeling_tf_cvt.py, modeling_clip.py, modeling_flax_clip.py, modeling_tf_clip.py, modeling_longformer.py, modeling_tf_longformer.py, modeling_siglip.py, modeling_clap.py, modeling_git.py

* Removed the myvenv file

* Fixed type hints in these 8 scripts: modeling_tvlt.py, modeling_sam.py, modeling_tf_sam.py, modeling_tvp.py, modeling_rag.py, modeling_tf_rag.py, modeling_tf_xlm.py, modeling_xlm.py
nakranivaibhav authored and DavidAfonsoValente committed Jan 25, 2024
1 parent a7ede1b commit e4e821c
Showing 36 changed files with 408 additions and 408 deletions.
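In short: 'Tuple[torch.FloatTensor]' annotates a tuple of exactly one tensor, while the hidden_states and attentions fields hold one tensor per layer, so their length varies with model depth; 'Tuple[torch.FloatTensor, ...]' is the variadic spelling that matches. The snippet below is a minimal sketch contrasting the two annotations; it is not code from this commit, and the class names are illustrative.

# Minimal sketch (illustrative names, not code from this commit).
from dataclasses import dataclass
from typing import Optional, Tuple

import torch


@dataclass
class OldStyleOutput:
    # Tuple[torch.FloatTensor] means "a tuple of exactly one tensor".
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class NewStyleOutput:
    # Tuple[torch.FloatTensor, ...] means "a tuple of any length": one
    # attention map per layer, however many layers the model has.
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


# One attention map per layer; the tuple length depends on the model depth.
per_layer = tuple(torch.randn(1, 8, 16, 16) for _ in range(12))

# Runtime behaviour is identical (annotations are not enforced), but a static
# checker reads the old hint as a tuple of length one and the new hint as a
# tuple of any length, which is what models actually return.
NewStyleOutput(attentions=per_layer)
OldStyleOutput(attentions=per_layer)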
226 changes: 113 additions & 113 deletions src/transformers/modeling_outputs.py

Large diffs are not rendered by default.

12 changes: 6 additions & 6 deletions src/transformers/models/blip/modeling_blip.py
@@ -98,8 +98,8 @@ class BlipForConditionalGenerationModelOutput(ModelOutput):
logits: Optional[Tuple[torch.FloatTensor]] = None
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None

@property
def decoder_logits(self):
@@ -140,8 +140,8 @@ class BlipTextVisionModelOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
@@ -181,9 +181,9 @@ class BlipImageTextMatchingModelOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
vision_pooler_output: Optional[torch.FloatTensor] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
question_embeds: Optional[Tuple[torch.FloatTensor]] = None


12 changes: 6 additions & 6 deletions src/transformers/models/blip/modeling_tf_blip.py
@@ -108,8 +108,8 @@ class TFBlipForConditionalGenerationModelOutput(ModelOutput):
logits: Tuple[tf.Tensor] | None = None
image_embeds: tf.Tensor | None = None
last_hidden_state: tf.Tensor = None
- hidden_states: Tuple[tf.Tensor] | None = None
- attentions: Tuple[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None

@property
def decoder_logits(self):
@@ -150,8 +150,8 @@ class TFBlipTextVisionModelOutput(ModelOutput):
loss: tf.Tensor | None = None
image_embeds: tf.Tensor | None = None
last_hidden_state: tf.Tensor = None
- hidden_states: Tuple[tf.Tensor] | None = None
- attentions: Tuple[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None


@dataclass
@@ -191,9 +191,9 @@ class TFBlipImageTextMatchingModelOutput(ModelOutput):
loss: tf.Tensor | None = None
image_embeds: tf.Tensor | None = None
last_hidden_state: tf.Tensor = None
- hidden_states: Tuple[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
vision_pooler_output: tf.Tensor | None = None
- attentions: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
question_embeds: Tuple[tf.Tensor] | None = None


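The TensorFlow counterparts above spell the same idea as 'Tuple[tf.Tensor, ...] | None' rather than 'Optional[...]'. Below is a minimal sketch of that style, not code from this commit; it assumes 'from __future__ import annotations', which the TF modeling files generally import so that the 'X | None' syntax also parses on Python versions before 3.10.

# Minimal sketch of the TF-style annotations (illustrative class, not from this commit).
from __future__ import annotations

from dataclasses import dataclass
from typing import Tuple

import tensorflow as tf


@dataclass
class ToyTFOutput:
    last_hidden_state: tf.Tensor | None = None
    hidden_states: Tuple[tf.Tensor, ...] | None = None  # one entry per layer
    attentions: Tuple[tf.Tensor, ...] | None = None


# The tuple length again depends on the model depth.
out = ToyTFOutput(hidden_states=tuple(tf.zeros((1, 4, 8)) for _ in range(3)))
print(len(out.hidden_states))  # 3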
8 changes: 4 additions & 4 deletions src/transformers/models/clap/modeling_clap.py
@@ -159,8 +159,8 @@ class ClapTextModelOutput(ModelOutput):

text_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
@@ -188,8 +188,8 @@ class ClapAudioModelOutput(ModelOutput):

audio_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
8 changes: 4 additions & 4 deletions src/transformers/models/clip/modeling_clip.py
@@ -83,8 +83,8 @@ class CLIPVisionModelOutput(ModelOutput):

image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
@@ -112,8 +112,8 @@ class CLIPTextModelOutput(ModelOutput):

text_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
4 changes: 2 additions & 2 deletions src/transformers/models/clip/modeling_flax_clip.py
@@ -182,8 +182,8 @@ class FlaxCLIPTextModelOutput(ModelOutput):

text_embeds: jnp.ndarray = None
last_hidden_state: jnp.ndarray = None
- hidden_states: Optional[Tuple[jnp.ndarray]] = None
- attentions: Optional[Tuple[jnp.ndarray]] = None
+ hidden_states: Optional[Tuple[jnp.ndarray, ...]] = None
+ attentions: Optional[Tuple[jnp.ndarray, ...]] = None


@flax.struct.dataclass
2 changes: 1 addition & 1 deletion src/transformers/models/cvt/modeling_cvt.py
@@ -74,7 +74,7 @@ class BaseModelOutputWithCLSToken(ModelOutput):

last_hidden_state: torch.FloatTensor = None
cls_token_value: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None


# Copied from transformers.models.beit.modeling_beit.drop_path
2 changes: 1 addition & 1 deletion src/transformers/models/cvt/modeling_tf_cvt.py
@@ -77,7 +77,7 @@ class TFBaseModelOutputWithCLSToken(ModelOutput):

last_hidden_state: tf.Tensor = None
cls_token_value: tf.Tensor = None
- hidden_states: Tuple[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None


class TFCvtDropPath(tf.keras.layers.Layer):
18 changes: 9 additions & 9 deletions src/transformers/models/dinat/modeling_dinat.py
@@ -105,9 +105,9 @@ class DinatEncoderOutput(ModelOutput):
"""

last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
- reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
@@ -142,9 +142,9 @@ class DinatModelOutput(ModelOutput):

last_hidden_state: torch.FloatTensor = None
pooler_output: Optional[torch.FloatTensor] = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
- reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
@@ -179,9 +179,9 @@ class DinatImageClassifierOutput(ModelOutput):

loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
- reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None


# Copied from transformers.models.nat.modeling_nat.NatEmbeddings with Nat->Dinat
12 changes: 6 additions & 6 deletions src/transformers/models/donut/modeling_donut_swin.py
@@ -83,9 +83,9 @@ class DonutSwinEncoderOutput(ModelOutput):
"""

last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
- reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
@@ -120,9 +120,9 @@ class DonutSwinModelOutput(ModelOutput):

last_hidden_state: torch.FloatTensor = None
pooler_output: Optional[torch.FloatTensor] = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
- reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None


# Copied from transformers.models.swin.modeling_swin.window_partition
12 changes: 6 additions & 6 deletions src/transformers/models/dpr/modeling_dpr.py
@@ -82,8 +82,8 @@ class DPRContextEncoderOutput(ModelOutput):
"""

pooler_output: torch.FloatTensor
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
@@ -110,8 +110,8 @@ class DPRQuestionEncoderOutput(ModelOutput):
"""

pooler_output: torch.FloatTensor
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
@@ -143,8 +143,8 @@ class DPRReaderOutput(ModelOutput):
start_logits: torch.FloatTensor
end_logits: torch.FloatTensor = None
relevance_logits: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


class DPRPreTrainedModel(PreTrainedModel):
12 changes: 6 additions & 6 deletions src/transformers/models/dpr/modeling_tf_dpr.py
@@ -82,8 +82,8 @@ class TFDPRContextEncoderOutput(ModelOutput):
"""

pooler_output: tf.Tensor = None
- hidden_states: Tuple[tf.Tensor] | None = None
- attentions: Tuple[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None


@dataclass
@@ -110,8 +110,8 @@ class TFDPRQuestionEncoderOutput(ModelOutput):
"""

pooler_output: tf.Tensor = None
- hidden_states: Tuple[tf.Tensor] | None = None
- attentions: Tuple[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None


@dataclass
@@ -143,8 +143,8 @@ class TFDPRReaderOutput(ModelOutput):
start_logits: tf.Tensor = None
end_logits: tf.Tensor = None
relevance_logits: tf.Tensor = None
- hidden_states: Tuple[tf.Tensor] | None = None
- attentions: Tuple[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None


class TFDPREncoderLayer(tf.keras.layers.Layer):
8 changes: 4 additions & 4 deletions src/transformers/models/dpt/modeling_dpt.py
@@ -76,7 +76,7 @@ class BaseModelOutputWithIntermediateActivations(ModelOutput):
"""

last_hidden_states: torch.FloatTensor = None
- intermediate_activations: Optional[Tuple[torch.FloatTensor]] = None
+ intermediate_activations: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
@@ -110,9 +110,9 @@ class BaseModelOutputWithPoolingAndIntermediateActivations(ModelOutput):

last_hidden_state: torch.FloatTensor = None
pooler_output: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
- intermediate_activations: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ intermediate_activations: Optional[Tuple[torch.FloatTensor, ...]] = None


class DPTViTHybridEmbeddings(nn.Module):
4 changes: 2 additions & 2 deletions src/transformers/models/git/modeling_git.py
@@ -77,8 +77,8 @@ class GitVisionModelOutput(ModelOutput):

image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


class GitEmbeddings(nn.Module):
4 changes: 2 additions & 2 deletions src/transformers/models/idefics/vision.py
@@ -57,8 +57,8 @@ class IdeficsVisionModelOutput(ModelOutput):

image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


# Adapted from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings
Diffs for the remaining changed files are not rendered here.
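As a sanity check, the variadic hint matches what models return at runtime: with output_hidden_states=True and output_attentions=True, the corresponding output fields are plain tuples whose length tracks the model depth. A rough sketch follows; it assumes the transformers library is installed and uses bert-base-uncased purely as an illustrative checkpoint (its output class lives in modeling_outputs.py, the first file in this diff).

# Rough sketch: inspect the runtime types behind the annotations changed above.
# Assumes network access to download the (illustrative) checkpoint.
from transformers import AutoModel, AutoTokenizer

model = AutoModel.from_pretrained("bert-base-uncased")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

inputs = tokenizer("hello world", return_tensors="pt")
outputs = model(**inputs, output_hidden_states=True, output_attentions=True)

# Both fields are variable-length tuples of tensors: embeddings plus one entry
# per layer for hidden_states, and one entry per layer for attentions.
print(type(outputs.hidden_states), len(outputs.hidden_states))  # <class 'tuple'> 13
print(type(outputs.attentions), len(outputs.attentions))        # <class 'tuple'> 12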

0 comments on commit e4e821c
