Skip to content

Commit

Permalink
Make properties settable for modeling classes
Browse files Browse the repository at this point in the history
This improves the readability of our subclasses: instead of writing
to a hidden attribute `self._backbone` and reading from the public
property `self.backbone`, we can use a consistent style everywhere.
  • Loading branch information
mattdangerw committed Jan 28, 2023
1 parent 5737da9 commit eaed20e
Show file tree
Hide file tree
Showing 17 changed files with 34 additions and 22 deletions.
2 changes: 1 addition & 1 deletion keras_nlp/models/albert/albert_preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ def __init__(
**kwargs,
):
super().__init__(**kwargs)
self._tokenizer = tokenizer
self.tokenizer = tokenizer
self.packer = MultiSegmentPacker(
start_value=self.tokenizer.cls_token_id,
end_value=self.tokenizer.sep_token_id,
Expand Down
4 changes: 2 additions & 2 deletions keras_nlp/models/bert/bert_classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,8 +185,8 @@ def __init__(
**kwargs,
)
# All references to `self` below this line
self._backbone = backbone
self._preprocessor = preprocessor
self.backbone = backbone
self.preprocessor = preprocessor
self.num_classes = num_classes
self.dropout = dropout

Expand Down
2 changes: 1 addition & 1 deletion keras_nlp/models/bert/bert_preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ def __init__(
**kwargs,
):
super().__init__(**kwargs)
self._tokenizer = tokenizer
self.tokenizer = tokenizer
self.packer = MultiSegmentPacker(
start_value=self.tokenizer.cls_token_id,
end_value=self.tokenizer.sep_token_id,
Expand Down
4 changes: 2 additions & 2 deletions keras_nlp/models/deberta_v3/deberta_v3_classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,8 +193,8 @@ def __init__(
**kwargs,
)
# All references to `self` below this line
self._backbone = backbone
self._preprocessor = preprocessor
self.backbone = backbone
self.preprocessor = preprocessor
self.num_classes = num_classes
self.hidden_dim = hidden_dim
self.dropout = dropout
Expand Down
2 changes: 1 addition & 1 deletion keras_nlp/models/deberta_v3/deberta_v3_preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ def __init__(
**kwargs,
):
super().__init__(**kwargs)
self._tokenizer = tokenizer
self.tokenizer = tokenizer
self.packer = MultiSegmentPacker(
start_value=self.tokenizer.cls_token_id,
end_value=self.tokenizer.sep_token_id,
Expand Down
4 changes: 2 additions & 2 deletions keras_nlp/models/distil_bert/distil_bert_classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -196,8 +196,8 @@ def __init__(
**kwargs,
)
# All references to `self` below this line
self._backbone = backbone
self._preprocessor = preprocessor
self.backbone = backbone
self.preprocessor = preprocessor
self.num_classes = num_classes
self.hidden_dim = hidden_dim
self.dropout = dropout
Expand Down
2 changes: 1 addition & 1 deletion keras_nlp/models/distil_bert/distil_bert_preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ def __init__(
**kwargs,
):
super().__init__(**kwargs)
self._tokenizer = tokenizer
self.tokenizer = tokenizer
self.packer = MultiSegmentPacker(
start_value=self.tokenizer.cls_token_id,
end_value=self.tokenizer.sep_token_id,
Expand Down
4 changes: 2 additions & 2 deletions keras_nlp/models/f_net/f_net_classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,8 @@ def __init__(
**kwargs,
)
# All references to `self` below this line
self._backbone = backbone
self._preprocessor = preprocessor
self.backbone = backbone
self.preprocessor = preprocessor
self.num_classes = num_classes
self.dropout = dropout

Expand Down
2 changes: 1 addition & 1 deletion keras_nlp/models/f_net/f_net_preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ def __init__(
**kwargs,
):
super().__init__(**kwargs)
self._tokenizer = tokenizer
self.tokenizer = tokenizer
self.packer = MultiSegmentPacker(
start_value=self.tokenizer.cls_token_id,
end_value=self.tokenizer.sep_token_id,
Expand Down
4 changes: 2 additions & 2 deletions keras_nlp/models/gpt2/gpt2_causal_lm.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,8 +180,8 @@ def __init__(self, backbone, preprocessor=None, **kwargs):
**kwargs,
)

self._backbone = backbone
self._preprocessor = preprocessor
self.backbone = backbone
self.preprocessor = preprocessor

@classproperty
def presets(cls):
Expand Down
2 changes: 1 addition & 1 deletion keras_nlp/models/gpt2/gpt2_preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ def __init__(

super().__init__(**kwargs)

self._tokenizer = tokenizer
self.tokenizer = tokenizer
self.sequence_length = sequence_length

def get_config(self):
Expand Down
4 changes: 4 additions & 0 deletions keras_nlp/models/preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,10 @@ def tokenizer(self):
"""The tokenizer used to tokenize strings."""
return self._tokenizer

@tokenizer.setter
def tokenizer(self, value):
self._tokenizer = value

def get_config(self):
config = super().get_config()
config["tokenizer"] = keras.layers.serialize(self.tokenizer)
Expand Down
4 changes: 2 additions & 2 deletions keras_nlp/models/roberta/roberta_classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -188,8 +188,8 @@ def __init__(
**kwargs,
)
# All references to `self` below this line
self._backbone = backbone
self._preprocessor = preprocessor
self.backbone = backbone
self.preprocessor = preprocessor
self.num_classes = num_classes
self.hidden_dim = hidden_dim
self.dropout = dropout
Expand Down
2 changes: 1 addition & 1 deletion keras_nlp/models/roberta/roberta_preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ def __init__(
):
super().__init__(**kwargs)

self._tokenizer = tokenizer
self.tokenizer = tokenizer
self.packer = RobertaMultiSegmentPacker(
start_value=self.tokenizer.start_token_id,
end_value=self.tokenizer.end_token_id,
Expand Down
8 changes: 8 additions & 0 deletions keras_nlp/models/task.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,11 +34,19 @@ def backbone(self):
"""A `keras.Model` instance providing the backbone submodel."""
return self._backbone

@backbone.setter
def backbone(self, value):
self._backbone = value

@property
def preprocessor(self):
"""A `keras.layers.Layer` instance used to preprocess inputs."""
return self._preprocessor

@preprocessor.setter
def preprocessor(self, value):
self._preprocessor = value

def get_config(self):
# Don't chain to super here. The default `get_config()` for functional
# models is nested and cannot be passed to our Task constructors.
Expand Down
4 changes: 2 additions & 2 deletions keras_nlp/models/xlm_roberta/xlm_roberta_classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -190,8 +190,8 @@ def __init__(
**kwargs,
)
# All references to `self` below this line
self._backbone = backbone
self._preprocessor = preprocessor
self.backbone = backbone
self.preprocessor = preprocessor
self.num_classes = num_classes
self.hidden_dim = hidden_dim
self.dropout = dropout
Expand Down
2 changes: 1 addition & 1 deletion keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ def __init__(
):
super().__init__(**kwargs)

self._tokenizer = tokenizer
self.tokenizer = tokenizer

self.packer = RobertaMultiSegmentPacker(
start_value=self.tokenizer.start_token_id,
Expand Down

0 comments on commit eaed20e

Please sign in to comment.