Skip to content

Commit

Permalink
Fixed type hinting for these 8 scripts: modeling_tvlt.py, modeling_sam.…
Browse files Browse the repository at this point in the history
…py, modeling_tf_sam.py, modeling_tvp.py, modeling_rag.py, modeling_tf_rag.py, modeling_tf_xlm.py, modeling_xlm.py
  • Loading branch information
nakranivaibhav committed Jan 23, 2024
1 parent da3fc78 commit 1348952
Show file tree
Hide file tree
Showing 9 changed files with 48 additions and 49 deletions.
1 change: 0 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,6 @@ venv/
ENV/
env.bak/
venv.bak/
myvenv/

# Spyder project settings
.spyderproject
Expand Down
28 changes: 14 additions & 14 deletions src/transformers/models/rag/modeling_rag.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,14 +120,14 @@ class RetrievAugLMMarginOutput(ModelOutput):
context_input_ids: Optional[torch.LongTensor] = None
context_attention_mask: Optional[torch.LongTensor] = None
question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
question_enc_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
question_enc_attentions: Optional[Tuple[torch.FloatTensor]] = None
question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
generator_enc_attentions: Optional[Tuple[torch.FloatTensor]] = None
generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
generator_dec_attentions: Optional[Tuple[torch.FloatTensor]] = None
generator_cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
Expand Down Expand Up @@ -210,14 +210,14 @@ class RetrievAugLMOutput(ModelOutput):
context_input_ids: Optional[torch.LongTensor] = None
context_attention_mask: Optional[torch.LongTensor] = None
question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
question_enc_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
question_enc_attentions: Optional[Tuple[torch.FloatTensor]] = None
question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
generator_enc_attentions: Optional[Tuple[torch.FloatTensor]] = None
generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
generator_dec_attentions: Optional[Tuple[torch.FloatTensor]] = None
generator_cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


class RagPreTrainedModel(PreTrainedModel):
Expand Down
24 changes: 12 additions & 12 deletions src/transformers/models/rag/modeling_tf_rag.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,13 +123,13 @@ class TFRetrievAugLMMarginOutput(ModelOutput):
context_input_ids: tf.Tensor | None = None
context_attention_mask: tf.Tensor | None = None
question_encoder_last_hidden_state: tf.Tensor | None = None
question_enc_hidden_states: Tuple[tf.Tensor] | None = None
question_enc_attentions: Tuple[tf.Tensor] | None = None
question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
question_enc_attentions: Tuple[tf.Tensor, ...] | None = None
generator_enc_last_hidden_state: tf.Tensor | None = None
generator_enc_hidden_states: Tuple[tf.Tensor] | None = None
generator_enc_attentions: Tuple[tf.Tensor] | None = None
generator_dec_hidden_states: Tuple[tf.Tensor] | None = None
generator_dec_attentions: Tuple[tf.Tensor] | None = None
generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None
generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None
generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None


@dataclass
Expand Down Expand Up @@ -206,13 +206,13 @@ class TFRetrievAugLMOutput(ModelOutput):
context_input_ids: tf.Tensor | None = None
context_attention_mask: tf.Tensor | None = None
question_encoder_last_hidden_state: tf.Tensor | None = None
question_enc_hidden_states: Tuple[tf.Tensor] | None = None
question_enc_attentions: Tuple[tf.Tensor] | None = None
question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
question_enc_attentions: Tuple[tf.Tensor, ...] | None = None
generator_enc_last_hidden_state: tf.Tensor | None = None
generator_enc_hidden_states: Tuple[tf.Tensor] | None = None
generator_enc_attentions: Tuple[tf.Tensor] | None = None
generator_dec_hidden_states: Tuple[tf.Tensor] | None = None
generator_dec_attentions: Tuple[tf.Tensor] | None = None
generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None
generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None
generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None


class TFRagPreTrainedModel(TFPreTrainedModel):
Expand Down
10 changes: 5 additions & 5 deletions src/transformers/models/sam/modeling_sam.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,8 @@ class SamVisionEncoderOutput(ModelOutput):

image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
Expand Down Expand Up @@ -106,9 +106,9 @@ class SamImageSegmentationOutput(ModelOutput):

iou_scores: torch.FloatTensor = None
pred_masks: torch.FloatTensor = None
vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
mask_decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
vision_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
vision_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
mask_decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


class SamPatchEmbeddings(nn.Module):
Expand Down
10 changes: 5 additions & 5 deletions src/transformers/models/sam/modeling_tf_sam.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,8 +74,8 @@ class TFSamVisionEncoderOutput(ModelOutput):

image_embeds: tf.Tensor | None = None
last_hidden_state: tf.Tensor = None
hidden_states: Tuple[tf.Tensor] | None = None
attentions: Tuple[tf.Tensor] | None = None
hidden_states: Tuple[tf.Tensor, ...] | None = None
attentions: Tuple[tf.Tensor, ...] | None = None


@dataclass
Expand Down Expand Up @@ -109,9 +109,9 @@ class TFSamImageSegmentationOutput(ModelOutput):

iou_scores: tf.Tensor = None
pred_masks: tf.Tensor = None
vision_hidden_states: Tuple[tf.Tensor] | None = None
vision_attentions: Tuple[tf.Tensor] | None = None
mask_decoder_attentions: Tuple[tf.Tensor] | None = None
vision_hidden_states: Tuple[tf.Tensor, ...] | None = None
vision_attentions: Tuple[tf.Tensor, ...] | None = None
mask_decoder_attentions: Tuple[tf.Tensor, ...] | None = None


class TFSamPatchEmbeddings(tf.keras.layers.Layer):
Expand Down
12 changes: 6 additions & 6 deletions src/transformers/models/tvlt/modeling_tvlt.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,8 +88,8 @@ class TvltModelOutput(ModelOutput):
audio_label_masks: torch.LongTensor = None
pixel_ids_restore: torch.LongTensor = None
audio_ids_restore: torch.LongTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
Expand All @@ -111,8 +111,8 @@ class TvltDecoderOutput(ModelOutput):
"""

logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
Expand Down Expand Up @@ -145,8 +145,8 @@ class TvltForPreTrainingOutput(ModelOutput):
matching_logits: torch.FloatTensor = None
pixel_logits: torch.FloatTensor = None
audio_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


def generate_pixel_mask_noise(pixel_values, pixel_mask=None, mask_ratio=0.75):
Expand Down
4 changes: 2 additions & 2 deletions src/transformers/models/tvp/modeling_tvp.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,8 @@ class TvpVideoGroundingOutput(ModelOutput):

loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


class TvpLoss(nn.Module):
Expand Down
4 changes: 2 additions & 2 deletions src/transformers/models/xlm/modeling_tf_xlm.py
Original file line number Diff line number Diff line change
Expand Up @@ -614,8 +614,8 @@ class TFXLMWithLMHeadModelOutput(ModelOutput):
"""

logits: tf.Tensor = None
hidden_states: Tuple[tf.Tensor] | None = None
attentions: Tuple[tf.Tensor] | None = None
hidden_states: Tuple[tf.Tensor, ...] | None = None
attentions: Tuple[tf.Tensor, ...] | None = None


XLM_START_DOCSTRING = r"""
Expand Down
4 changes: 2 additions & 2 deletions src/transformers/models/xlm/modeling_xlm.py
Original file line number Diff line number Diff line change
Expand Up @@ -297,8 +297,8 @@ class XLMForQuestionAnsweringOutput(ModelOutput):
end_top_log_probs: Optional[torch.FloatTensor] = None
end_top_index: Optional[torch.LongTensor] = None
cls_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


XLM_START_DOCSTRING = r"""
Expand Down

0 comments on commit 1348952

Please sign in to comment.