Register -> Export. Export all in __all__. Sensible defaults according to filename.
LysandreJik committed Jul 25, 2024
1 parent 74f0f5a commit 908dceb
Showing 17 changed files with 197 additions and 178 deletions.
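The pattern across the diffs below, in one place: the `register` decorator becomes `export`, each module lists its public objects in `__all__`, and backend requirements are inferred from the filename where possible, which is why the explicit `@register(backends=("torch",))` decorators disappear from `modeling_*.py` files while `tokenization_albert.py` keeps an explicit `sentencepiece` backend. The sketch below is a toy illustration of that idea, not the actual `transformers.utils.import_utils` implementation; the prefix table and the `_backends` attribute are assumptions made for this example.

```python
# Toy sketch of the commit's idea, NOT the actual implementation in
# transformers.utils.import_utils: an `export` decorator that tags a
# public object with the backends it needs, falling back to a default
# inferred from the defining file's name ("sensible defaults according
# to filename"). The prefix table and `_backends` attribute are
# assumptions made for this example.
from typing import Optional, Tuple

_PREFIX_TO_BACKENDS = {
    # Checked in insertion order, so more specific prefixes come first.
    "modeling_tf_": ("tf",),
    "modeling_flax_": ("flax",),
    "modeling_": ("torch",),
}


def export(backends: Optional[Tuple[str, ...]] = None):
    def decorator(obj):
        inferred = backends
        if inferred is None:
            # obj.__module__ ends in the filename, e.g.
            # "transformers.models.albert.modeling_albert".
            filename = obj.__module__.rsplit(".", 1)[-1]
            for prefix, default in _PREFIX_TO_BACKENDS.items():
                if filename.startswith(prefix):
                    inferred = default
                    break
        obj._backends = inferred or ()
        return obj
    return decorator


# Explicit backend, as in tokenization_albert.py below: "sentencepiece"
# cannot be guessed from the filename.
@export(backends=("sentencepiece",))
class AlbertTokenizer:
    pass


# "Export all in __all__": the module declares its public surface.
__all__ = ["AlbertTokenizer"]
```

Under a scheme like that, the `@register(backends=("torch",))` lines in `modeling_albert.py` and friends carry no information the filename doesn't already, which is why the diffs below simply delete them.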
3 changes: 0 additions & 3 deletions src/transformers/models/albert/configuration_albert.py

@@ -20,10 +20,8 @@

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
-from ...utils.import_utils import register


-@register()
class AlbertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`AlbertModel`] or a [`TFAlbertModel`]. It is used
@@ -153,7 +151,6 @@ def __init__(


# Copied from transformers.models.bert.configuration_bert.BertOnnxConfig with Roberta->Albert
-@register()
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
10 changes: 0 additions & 10 deletions src/transformers/models/albert/modeling_albert.py

@@ -43,7 +43,6 @@
    logging,
    replace_return_docstrings,
)
-from ...utils.import_utils import register
from .configuration_albert import AlbertConfig


@@ -53,7 +52,6 @@
_CONFIG_FOR_DOC = "AlbertConfig"


-@register()
def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    try:
@@ -489,7 +487,6 @@ def forward(
)


-@register(backends=("torch",))
class AlbertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
@@ -621,7 +618,6 @@ class AlbertForPreTrainingOutput(ModelOutput):
    "The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.",
    ALBERT_START_DOCSTRING,
)
-@register(backends=("torch",))
class AlbertModel(AlbertPreTrainedModel):
    config_class = AlbertConfig
    base_model_prefix = "albert"
@@ -751,7 +747,6 @@ def forward(
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("torch",))
class AlbertForPreTraining(AlbertPreTrainedModel):
    _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"]

@@ -904,7 +899,6 @@ def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
    "Albert Model with a `language modeling` head on top.",
    ALBERT_START_DOCSTRING,
)
-@register(backends=("torch",))
class AlbertForMaskedLM(AlbertPreTrainedModel):
    _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"]

@@ -1020,7 +1014,6 @@ def forward(
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("torch",))
class AlbertForSequenceClassification(AlbertPreTrainedModel):
    def __init__(self, config: AlbertConfig):
        super().__init__(config)
@@ -1122,7 +1115,6 @@ def forward(
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("torch",))
class AlbertForTokenClassification(AlbertPreTrainedModel):
    def __init__(self, config: AlbertConfig):
        super().__init__(config)
@@ -1206,7 +1198,6 @@ def forward(
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("torch",))
class AlbertForQuestionAnswering(AlbertPreTrainedModel):
    def __init__(self, config: AlbertConfig):
        super().__init__(config)
@@ -1310,7 +1301,6 @@ def forward(
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("torch",))
class AlbertForMultipleChoice(AlbertPreTrainedModel):
    def __init__(self, config: AlbertConfig):
        super().__init__(config)
9 changes: 0 additions & 9 deletions src/transformers/models/albert/modeling_flax_albert.py

@@ -42,7 +42,6 @@
    overwrite_call_docstring,
)
from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
-from ...utils.import_utils import register
from .configuration_albert import AlbertConfig


@@ -506,7 +505,6 @@ def __call__(self, pooled_output, deterministic=True):
        return logits


-@register(backends=("flax",))
class FlaxAlbertPreTrainedModel(FlaxPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
@@ -676,7 +674,6 @@ def __call__(
    "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.",
    ALBERT_START_DOCSTRING,
)
-@register(backends=("flax",))
class FlaxAlbertModel(FlaxAlbertPreTrainedModel):
    module_class = FlaxAlbertModule

@@ -745,7 +742,6 @@ def __call__(
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("flax",))
class FlaxAlbertForPreTraining(FlaxAlbertPreTrainedModel):
    module_class = FlaxAlbertForPreTrainingModule

@@ -829,7 +825,6 @@ def __call__(


@add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING)
-@register(backends=("flax",))
class FlaxAlbertForMaskedLM(FlaxAlbertPreTrainedModel):
    module_class = FlaxAlbertForMaskedLMModule

@@ -900,7 +895,6 @@ def __call__(
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("flax",))
class FlaxAlbertForSequenceClassification(FlaxAlbertPreTrainedModel):
    module_class = FlaxAlbertForSequenceClassificationModule

@@ -974,7 +968,6 @@ def __call__(
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("flax",))
class FlaxAlbertForMultipleChoice(FlaxAlbertPreTrainedModel):
    module_class = FlaxAlbertForMultipleChoiceModule

@@ -1048,7 +1041,6 @@ def __call__(
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("flax",))
class FlaxAlbertForTokenClassification(FlaxAlbertPreTrainedModel):
    module_class = FlaxAlbertForTokenClassificationModule

@@ -1117,7 +1109,6 @@ def __call__(
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("flax",))
class FlaxAlbertForQuestionAnswering(FlaxAlbertPreTrainedModel):
    module_class = FlaxAlbertForQuestionAnsweringModule
10 changes: 0 additions & 10 deletions src/transformers/models/albert/modeling_tf_albert.py

@@ -56,7 +56,6 @@
    logging,
    replace_return_docstrings,
)
-from ...utils.import_utils import register
from .configuration_albert import AlbertConfig


@@ -511,7 +510,6 @@ def build(self, input_shape=None):
                    layer.build(None)


-@register(backends=("tf",))
class TFAlbertPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
@@ -587,7 +585,6 @@ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:


@keras_serializable
-@register(backends=("tf",))
class TFAlbertMainLayer(keras.layers.Layer):
    config_class = AlbertConfig

@@ -861,7 +858,6 @@ class TFAlbertForPreTrainingOutput(ModelOutput):
    "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.",
    ALBERT_START_DOCSTRING,
)
-@register(backends=("tf",))
class TFAlbertModel(TFAlbertPreTrainedModel):
    def __init__(self, config: AlbertConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
@@ -919,7 +915,6 @@ def build(self, input_shape=None):
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("tf",))
class TFAlbertForPreTraining(TFAlbertPreTrainedModel, TFAlbertPreTrainingLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"predictions.decoder.weight"]
@@ -1051,7 +1046,6 @@ def build(self, input_shape=None):


@add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING)
-@register(backends=("tf",))
class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions.decoder.weight"]
@@ -1165,7 +1159,6 @@ def build(self, input_shape=None):
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("tf",))
class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"predictions"]
@@ -1260,7 +1253,6 @@ def build(self, input_shape=None):
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("tf",))
class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
@@ -1356,7 +1348,6 @@ def build(self, input_shape=None):
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("tf",))
class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
@@ -1464,7 +1455,6 @@ def build(self, input_shape=None):
    """,
    ALBERT_START_DOCSTRING,
)
-@register(backends=("tf",))
class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
4 changes: 2 additions & 2 deletions src/transformers/models/albert/tokenization_albert.py

@@ -23,7 +23,7 @@

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
-from ...utils.import_utils import register
+from ...utils.import_utils import export


logger = logging.get_logger(__name__)
@@ -33,7 +33,7 @@
SPIECE_UNDERLINE = "▁"


-@register(backends=("sentencepiece",))
+@export(backends=("sentencepiece",))
class AlbertTokenizer(PreTrainedTokenizer):
    """
    Construct an ALBERT tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
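Worth noting: the tokenization file above is the only spot in this section where the decorator survives, renamed from `register` to `export`, because a `sentencepiece` requirement cannot be read off the filename. For context, a minimal sketch of how such backend tags are typically enforced at call sites, in the spirit of the `requires_backends` helper transformers already has (assumed for illustration, not part of this commit's diff):

```python
import importlib.util


def require_backends(obj, backends):
    # Simplified stand-in for transformers' requires_backends helper:
    # raise a helpful ImportError if any declared backend is missing.
    # Caveat: real backend names do not always match module names
    # ("tf" -> tensorflow); the real helper keeps a mapping for that.
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(
            f"{obj.__name__} requires the following backends: {', '.join(missing)}"
        )


# Usage, assuming the export decorator stored the tags on the class:
# require_backends(AlbertTokenizer, ("sentencepiece",))
```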
2 changes: 0 additions & 2 deletions src/transformers/models/albert/tokenization_albert_fast.py

@@ -21,7 +21,6 @@
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
-from ...utils.import_utils import register


if is_sentencepiece_available():
@@ -36,7 +35,6 @@
SPIECE_UNDERLINE = "▁"


-@register(backends=("tokenizers",))
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" ALBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on
5 changes: 0 additions & 5 deletions src/transformers/models/align/configuration_align.py

@@ -17,8 +17,6 @@
import os
from typing import TYPE_CHECKING, List, Union

-from ...utils.import_utils import register
-

if TYPE_CHECKING:
    pass
@@ -30,7 +28,6 @@
logger = logging.get_logger(__name__)


-@register()
class AlignTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`AlignTextModel`]. It is used to instantiate a
@@ -155,7 +152,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike],
        return cls.from_dict(config_dict, **kwargs)


-@register()
class AlignVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`AlignVisionModel`]. It is used to instantiate a
@@ -295,7 +291,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike],
        return cls.from_dict(config_dict, **kwargs)


-@register()
class AlignConfig(PretrainedConfig):
    r"""
    [`AlignConfig`] is the configuration class to store the configuration of a [`AlignModel`]. It is used to
6 changes: 1 addition & 5 deletions src/transformers/models/align/modeling_align.py

@@ -20,6 +20,7 @@

import torch
import torch.utils.checkpoint
+from IPython.terminal.pt_inputhooks import backends
from torch import nn

from ...activations import ACT2FN
@@ -38,7 +39,6 @@
    logging,
    replace_return_docstrings,
)
-from ...utils.import_utils import register
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig


@@ -1166,7 +1166,6 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return pooled_output


-@register(backends=("torch",))
class AlignPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
@@ -1200,7 +1199,6 @@ def _init_weights(self, module):
    """The text model from ALIGN without any head or projection on top.""",
    ALIGN_START_DOCSTRING,
)
-@register(backends=("torch",))
class AlignTextModel(AlignPreTrainedModel):
    config_class = AlignTextConfig
    _no_split_modules = ["AlignTextEmbeddings"]
@@ -1328,7 +1326,6 @@ def forward(
    """The vision model from ALIGN without any head or projection on top.""",
    ALIGN_START_DOCSTRING,
)
-@register(backends=("torch",))
class AlignVisionModel(AlignPreTrainedModel):
    config_class = AlignVisionConfig
    main_input_name = "pixel_values"
@@ -1415,7 +1412,6 @@ def forward(


@add_start_docstrings(ALIGN_START_DOCSTRING)
-@register(backends=("torch",))
class AlignModel(AlignPreTrainedModel):
    config_class = AlignConfig
3 changes: 0 additions & 3 deletions src/transformers/models/align/processing_align.py

@@ -16,8 +16,6 @@
Image/Text processor class for ALIGN
"""

-from ...utils.import_utils import register
-

try:
    from typing import Unpack
@@ -42,7 +40,6 @@ class AlignProcessorKwargs(ProcessingKwargs, total=False):
    }


-@register()
class AlignProcessor(ProcessorMixin):
    r"""
    Constructs an ALIGN processor which wraps [`EfficientNetImageProcessor`] and
(8 more changed files not shown.)