diff --git a/keras_nlp/layers/cached_multi_head_attention_test.py b/keras_nlp/layers/cached_multi_head_attention_test.py
index 80672f4e94..fd146a24ae 100644
--- a/keras_nlp/layers/cached_multi_head_attention_test.py
+++ b/keras_nlp/layers/cached_multi_head_attention_test.py
@@ -20,9 +20,10 @@
 from keras_nlp.layers.cached_multi_head_attention import (
     CachedMultiHeadAttention,
 )
+from keras_nlp.tests.test_case import TestCase
 
 
-class CachedMultiHeadAttentionTest(tf.test.TestCase, parameterized.TestCase):
+class CachedMultiHeadAttentionTest(TestCase):
     def test_valid_call(self):
         layer = CachedMultiHeadAttention(num_heads=2, key_dim=4)
         x = tf.random.uniform(shape=[2, 2, 8])
diff --git a/keras_nlp/layers/f_net_encoder_test.py b/keras_nlp/layers/f_net_encoder_test.py
index b53021f665..6e74454a8d 100644
--- a/keras_nlp/layers/f_net_encoder_test.py
+++ b/keras_nlp/layers/f_net_encoder_test.py
@@ -20,9 +20,10 @@
 from tensorflow import keras
 
 from keras_nlp.layers import f_net_encoder
+from keras_nlp.tests.test_case import TestCase
 
 
-class FNetEncoderTest(tf.test.TestCase, parameterized.TestCase):
+class FNetEncoderTest(TestCase):
     def test_valid_call(self):
         encoder = f_net_encoder.FNetEncoder(intermediate_dim=4)
         model = keras.Sequential(
diff --git a/keras_nlp/layers/masked_lm_head_test.py b/keras_nlp/layers/masked_lm_head_test.py
index ff266c75f5..6b21fbf9b8 100644
--- a/keras_nlp/layers/masked_lm_head_test.py
+++ b/keras_nlp/layers/masked_lm_head_test.py
@@ -20,9 +20,10 @@
 from tensorflow import keras
 
 from keras_nlp.layers import masked_lm_head
+from keras_nlp.tests.test_case import TestCase
 
 
-class MaskedLMHeadTest(tf.test.TestCase, parameterized.TestCase):
+class MaskedLMHeadTest(TestCase):
     def test_valid_call(self):
         head = masked_lm_head.MaskedLMHead(
             vocabulary_size=100,
diff --git a/keras_nlp/layers/masked_lm_mask_generator_test.py b/keras_nlp/layers/masked_lm_mask_generator_test.py
index a246c2ea6f..4056760859 100644
--- a/keras_nlp/layers/masked_lm_mask_generator_test.py
+++ b/keras_nlp/layers/masked_lm_mask_generator_test.py
@@ -15,9 +15,10 @@
 import tensorflow as tf
 
 from keras_nlp.layers.masked_lm_mask_generator import MaskedLMMaskGenerator
+from keras_nlp.tests.test_case import TestCase
 
 
-class MaskedLMMaskGeneratorTest(tf.test.TestCase):
+class MaskedLMMaskGeneratorTest(TestCase):
     def setUp(self):
         super().setUp()
         self.VOCAB = [
diff --git a/keras_nlp/layers/multi_segment_packer_test.py b/keras_nlp/layers/multi_segment_packer_test.py
index 1984ae06c4..b18b88a9ff 100644
--- a/keras_nlp/layers/multi_segment_packer_test.py
+++ b/keras_nlp/layers/multi_segment_packer_test.py
@@ -20,9 +20,10 @@
 from tensorflow import keras
 
 from keras_nlp.layers.multi_segment_packer import MultiSegmentPacker
+from keras_nlp.tests.test_case import TestCase
 
 
-class MultiSegmentPackerTest(tf.test.TestCase, parameterized.TestCase):
+class MultiSegmentPackerTest(TestCase):
     def test_trim_single_input_ints(self):
         input_data = tf.range(3, 10)
         packer = MultiSegmentPacker(8, start_value=1, end_value=2)
diff --git a/keras_nlp/layers/position_embedding_test.py b/keras_nlp/layers/position_embedding_test.py
index 03d0776543..c05c0f4aa3 100644
--- a/keras_nlp/layers/position_embedding_test.py
+++ b/keras_nlp/layers/position_embedding_test.py
@@ -20,6 +20,7 @@
 from tensorflow import keras
 
 from keras_nlp.layers import position_embedding
+from keras_nlp.tests.test_case import TestCase
 
 
 def custom_init(shape, dtype=None):
@@ -29,7 +30,7 @@ def custom_init(shape, dtype=None):
     return tf.reshape(tf.range(count, dtype=dtype), shape)
 
 
-class PositionEmbeddingLayerTest(tf.test.TestCase, parameterized.TestCase):
+class PositionEmbeddingLayerTest(TestCase):
     def test_static_layer_output_shape(self):
         # Create a 3-dimensional input (the first dimension is implicit).
         sequence_length = 21
diff --git a/keras_nlp/layers/random_deletion_test.py b/keras_nlp/layers/random_deletion_test.py
index 9f028b5700..b980d90607 100644
--- a/keras_nlp/layers/random_deletion_test.py
+++ b/keras_nlp/layers/random_deletion_test.py
@@ -17,9 +17,10 @@
 from tensorflow import keras
 
 from keras_nlp.layers.random_deletion import RandomDeletion
+from keras_nlp.tests.test_case import TestCase
 
 
-class RandomDeletionTest(tf.test.TestCase):
+class RandomDeletionTest(TestCase):
     def test_shape_and_output_from_word_deletion(self):
         keras.utils.set_random_seed(1337)
         inputs = ["Hey I like", "Keras and Tensorflow"]
diff --git a/keras_nlp/layers/random_swap_test.py b/keras_nlp/layers/random_swap_test.py
index d03772e814..1e50e6f47d 100644
--- a/keras_nlp/layers/random_swap_test.py
+++ b/keras_nlp/layers/random_swap_test.py
@@ -17,9 +17,10 @@
 from tensorflow import keras
 
 from keras_nlp.layers.random_swap import RandomSwap
+from keras_nlp.tests.test_case import TestCase
 
 
-class RandomSwapTest(tf.test.TestCase):
+class RandomSwapTest(TestCase):
     def test_shape_and_output_from_word_swap(self):
         keras.utils.set_random_seed(1337)
         inputs = ["Hey I like", "Keras and Tensorflow"]
diff --git a/keras_nlp/layers/sine_position_encoding_test.py b/keras_nlp/layers/sine_position_encoding_test.py
index 60ca26b6f7..0538787ba1 100644
--- a/keras_nlp/layers/sine_position_encoding_test.py
+++ b/keras_nlp/layers/sine_position_encoding_test.py
@@ -17,9 +17,10 @@
 from tensorflow import keras
 
 from keras_nlp.layers import sine_position_encoding
+from keras_nlp.tests.test_case import TestCase
 
 
-class SinePositionEncodingTest(tf.test.TestCase):
+class SinePositionEncodingTest(TestCase):
     def test_valid_call(self):
         pos_encoding = sine_position_encoding.SinePositionEncoding()
         model = keras.Sequential(
diff --git a/keras_nlp/layers/start_end_packer_test.py b/keras_nlp/layers/start_end_packer_test.py
index c894fb767d..d971b06573 100644
--- a/keras_nlp/layers/start_end_packer_test.py
+++ b/keras_nlp/layers/start_end_packer_test.py
@@ -13,15 +13,14 @@
 # limitations under the License.
"""Tests for Start End Packer layer.""" - - import tensorflow as tf from tensorflow import keras from keras_nlp.layers.start_end_packer import StartEndPacker +from keras_nlp.tests.test_case import TestCase -class StartEndPackerTest(tf.test.TestCase): +class StartEndPackerTest(TestCase): def test_dense_input(self): input_data = tf.constant([5, 6, 7]) start_end_packer = StartEndPacker(sequence_length=5) diff --git a/keras_nlp/layers/token_and_position_embedding_test.py b/keras_nlp/layers/token_and_position_embedding_test.py index 4b6272a015..1446b9e259 100644 --- a/keras_nlp/layers/token_and_position_embedding_test.py +++ b/keras_nlp/layers/token_and_position_embedding_test.py @@ -22,9 +22,10 @@ from keras_nlp.layers.token_and_position_embedding import ( TokenAndPositionEmbedding, ) +from keras_nlp.tests.test_case import TestCase -class TokenAndPositionEmbeddingTest(tf.test.TestCase, parameterized.TestCase): +class TokenAndPositionEmbeddingTest(TestCase): def test_get_config_and_from_config(self): token_and_position_embed = TokenAndPositionEmbedding( vocabulary_size=5, diff --git a/keras_nlp/layers/transformer_decoder_test.py b/keras_nlp/layers/transformer_decoder_test.py index 3e2f688c9f..a3bd714c38 100644 --- a/keras_nlp/layers/transformer_decoder_test.py +++ b/keras_nlp/layers/transformer_decoder_test.py @@ -21,9 +21,10 @@ from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice from keras_nlp.layers import transformer_decoder +from keras_nlp.tests.test_case import TestCase -class TransformerDecoderTest(tf.test.TestCase, parameterized.TestCase): +class TransformerDecoderTest(TestCase): @parameterized.named_parameters( ("without_norm_first", False), ("with_norm_first", True), diff --git a/keras_nlp/layers/transformer_encoder_test.py b/keras_nlp/layers/transformer_encoder_test.py index e7e40b854e..70f0008796 100644 --- a/keras_nlp/layers/transformer_encoder_test.py +++ b/keras_nlp/layers/transformer_encoder_test.py @@ -20,9 +20,10 @@ from tensorflow import keras from keras_nlp.layers import transformer_encoder +from keras_nlp.tests.test_case import TestCase -class TransformerEncoderTest(tf.test.TestCase, parameterized.TestCase): +class TransformerEncoderTest(TestCase): @parameterized.named_parameters( ("without_norm_first", False), ("with_norm_first", True), diff --git a/keras_nlp/layers/transformer_layer_utils_test.py b/keras_nlp/layers/transformer_layer_utils_test.py index 462aaab176..45e6d0c2c8 100644 --- a/keras_nlp/layers/transformer_layer_utils_test.py +++ b/keras_nlp/layers/transformer_layer_utils_test.py @@ -15,9 +15,10 @@ import tensorflow as tf import keras_nlp.layers.transformer_layer_utils as utils +from keras_nlp.tests.test_case import TestCase -class TransformerEncoderTest(tf.test.TestCase): +class TransformerEncoderTest(TestCase): def test_compute_causal_mask(self): mask = utils.compute_causal_mask(1, 2, 2) self.assertTrue((mask.numpy() == [[1, 0], [1, 1]]).all()) diff --git a/keras_nlp/metrics/bleu_test.py b/keras_nlp/metrics/bleu_test.py index e5f21adc51..84d3caae5f 100644 --- a/keras_nlp/metrics/bleu_test.py +++ b/keras_nlp/metrics/bleu_test.py @@ -13,15 +13,15 @@ # limitations under the License. 
"""Tests for Bleu.""" - import tensorflow as tf from tensorflow import keras from keras_nlp.metrics.bleu import Bleu +from keras_nlp.tests.test_case import TestCase from keras_nlp.tokenizers.byte_tokenizer import ByteTokenizer -class BleuTest(tf.test.TestCase): +class BleuTest(TestCase): def test_initialization(self): bleu = Bleu() result = bleu.result() diff --git a/keras_nlp/metrics/edit_distance_test.py b/keras_nlp/metrics/edit_distance_test.py index f9e6d66245..ace28245df 100644 --- a/keras_nlp/metrics/edit_distance_test.py +++ b/keras_nlp/metrics/edit_distance_test.py @@ -13,14 +13,14 @@ # limitations under the License. """Tests for EditDistance.""" - import tensorflow as tf from tensorflow import keras from keras_nlp.metrics.edit_distance import EditDistance +from keras_nlp.tests.test_case import TestCase -class EditDistanceTest(tf.test.TestCase): +class EditDistanceTest(TestCase): def test_initialization(self): edit_distance = EditDistance() result = edit_distance.result() diff --git a/keras_nlp/metrics/perplexity_test.py b/keras_nlp/metrics/perplexity_test.py index 8ef61cbc14..2f5bdf12ab 100644 --- a/keras_nlp/metrics/perplexity_test.py +++ b/keras_nlp/metrics/perplexity_test.py @@ -13,13 +13,13 @@ # limitations under the License. """Tests for Perplexity.""" - import tensorflow as tf from keras_nlp.metrics.perplexity import Perplexity +from keras_nlp.tests.test_case import TestCase -class PerplexityTest(tf.test.TestCase): +class PerplexityTest(TestCase): def test_vars_after_initializing_class(self): perplexity = Perplexity() self.assertEqual(perplexity.result(), 0.0) diff --git a/keras_nlp/metrics/rouge_l_test.py b/keras_nlp/metrics/rouge_l_test.py index cc8ebfcb2a..725aff6f3c 100644 --- a/keras_nlp/metrics/rouge_l_test.py +++ b/keras_nlp/metrics/rouge_l_test.py @@ -13,13 +13,13 @@ # limitations under the License. """Tests for RougeL.""" - import tensorflow as tf from keras_nlp.metrics.rouge_l import RougeL +from keras_nlp.tests.test_case import TestCase -class RougeLTest(tf.test.TestCase): +class RougeLTest(TestCase): def test_initialization(self): rouge = RougeL() result = rouge.result() diff --git a/keras_nlp/metrics/rouge_n_test.py b/keras_nlp/metrics/rouge_n_test.py index ce9935b2ea..203c1bb8a1 100644 --- a/keras_nlp/metrics/rouge_n_test.py +++ b/keras_nlp/metrics/rouge_n_test.py @@ -13,14 +13,14 @@ # limitations under the License. 
"""Tests for RougeN.""" - import tensorflow as tf from tensorflow import keras from keras_nlp.metrics.rouge_n import RougeN +from keras_nlp.tests.test_case import TestCase -class RougeNTest(tf.test.TestCase): +class RougeNTest(TestCase): def test_initialization(self): rouge = RougeN() result = rouge.result() diff --git a/keras_nlp/models/albert/albert_backbone_test.py b/keras_nlp/models/albert/albert_backbone_test.py index da29944f51..f1b301d49a 100644 --- a/keras_nlp/models/albert/albert_backbone_test.py +++ b/keras_nlp/models/albert/albert_backbone_test.py @@ -21,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.albert.albert_backbone import AlbertBackbone +from keras_nlp.tests.test_case import TestCase -class AlbertBackboneTest(tf.test.TestCase, parameterized.TestCase): +class AlbertBackboneTest(TestCase): def setUp(self): self.backbone = AlbertBackbone( vocabulary_size=10, @@ -111,7 +112,7 @@ def test_saved_model(self, save_format, filename): @pytest.mark.tpu @pytest.mark.usefixtures("tpu_test_class") -class AlbertBackboneTPUTest(tf.test.TestCase, parameterized.TestCase): +class AlbertBackboneTPUTest(TestCase): def setUp(self): with self.tpu_strategy.scope(): self.backbone = AlbertBackbone( diff --git a/keras_nlp/models/albert/albert_classifier_test.py b/keras_nlp/models/albert/albert_classifier_test.py index 8ede5d8dd1..769e3293b6 100644 --- a/keras_nlp/models/albert/albert_classifier_test.py +++ b/keras_nlp/models/albert/albert_classifier_test.py @@ -26,9 +26,10 @@ from keras_nlp.models.albert.albert_classifier import AlbertClassifier from keras_nlp.models.albert.albert_preprocessor import AlbertPreprocessor from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer +from keras_nlp.tests.test_case import TestCase -class AlbertClassifierTest(tf.test.TestCase, parameterized.TestCase): +class AlbertClassifierTest(TestCase): def setUp(self): # Setup model diff --git a/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py b/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py index 01c37843b0..fb9773f275 100644 --- a/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py @@ -26,9 +26,10 @@ AlbertMaskedLMPreprocessor, ) from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer +from keras_nlp.tests.test_case import TestCase -class AlbertMaskedLMPreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class AlbertMaskedLMPreprocessorTest(TestCase): def setUp(self): vocab_data = tf.data.Dataset.from_tensor_slices( ["the quick brown fox", "the earth is round"] diff --git a/keras_nlp/models/albert/albert_masked_lm_test.py b/keras_nlp/models/albert/albert_masked_lm_test.py index 33236d73c3..9ea1fb5d4f 100644 --- a/keras_nlp/models/albert/albert_masked_lm_test.py +++ b/keras_nlp/models/albert/albert_masked_lm_test.py @@ -28,9 +28,10 @@ AlbertMaskedLMPreprocessor, ) from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer +from keras_nlp.tests.test_case import TestCase -class AlbertMaskedLMTest(tf.test.TestCase, parameterized.TestCase): +class AlbertMaskedLMTest(TestCase): def setUp(self): # Setup model. 
         vocab_data = tf.data.Dataset.from_tensor_slices(
diff --git a/keras_nlp/models/albert/albert_preprocessor_test.py b/keras_nlp/models/albert/albert_preprocessor_test.py
index 4367db794e..4ea5888da0 100644
--- a/keras_nlp/models/albert/albert_preprocessor_test.py
+++ b/keras_nlp/models/albert/albert_preprocessor_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 """Tests for ALBERT preprocessor layer."""
-
 
 import io
 import os
@@ -25,9 +24,10 @@
 
 from keras_nlp.models.albert.albert_preprocessor import AlbertPreprocessor
 from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
-class AlbertPreprocessorTest(tf.test.TestCase, parameterized.TestCase):
+class AlbertPreprocessorTest(TestCase):
     def setUp(self):
         bytes_io = io.BytesIO()
         vocab_data = tf.data.Dataset.from_tensor_slices(
diff --git a/keras_nlp/models/albert/albert_presets_test.py b/keras_nlp/models/albert/albert_presets_test.py
index bf75505a1d..535091f0c6 100644
--- a/keras_nlp/models/albert/albert_presets_test.py
+++ b/keras_nlp/models/albert/albert_presets_test.py
@@ -21,10 +21,11 @@
 from keras_nlp.models.albert.albert_classifier import AlbertClassifier
 from keras_nlp.models.albert.albert_preprocessor import AlbertPreprocessor
 from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
 @pytest.mark.large
-class AlbertPresetSmokeTest(tf.test.TestCase, parameterized.TestCase):
+class AlbertPresetSmokeTest(TestCase):
     """
     A smoke test for ALBERT presets we run continuously.
     This only tests the smallest weights we have available. Run with:
@@ -121,7 +122,7 @@ def test_unknown_preset_error(self, cls, kwargs):
 
 
 @pytest.mark.extra_large
-class AlbertPresetFullTest(tf.test.TestCase, parameterized.TestCase):
+class AlbertPresetFullTest(TestCase):
     """
     Test the full enumeration of our preset.
     This tests every ALBERT preset and is only run manually.
diff --git a/keras_nlp/models/albert/albert_tokenizer_test.py b/keras_nlp/models/albert/albert_tokenizer_test.py
index a8965198de..efa83a96ef 100644
--- a/keras_nlp/models/albert/albert_tokenizer_test.py
+++ b/keras_nlp/models/albert/albert_tokenizer_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
"""Tests for ALBERT tokenizer.""" - import io import os @@ -24,9 +23,10 @@ from tensorflow import keras from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer +from keras_nlp.tests.test_case import TestCase -class AlbertTokenizerTest(tf.test.TestCase, parameterized.TestCase): +class AlbertTokenizerTest(TestCase): def setUp(self): bytes_io = io.BytesIO() vocab_data = tf.data.Dataset.from_tensor_slices( diff --git a/keras_nlp/models/bart/bart_backbone_test.py b/keras_nlp/models/bart/bart_backbone_test.py index 1d56a10565..3681f8c2f2 100644 --- a/keras_nlp/models/bart/bart_backbone_test.py +++ b/keras_nlp/models/bart/bart_backbone_test.py @@ -21,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.bart.bart_backbone import BartBackbone +from keras_nlp.tests.test_case import TestCase -class BartBackboneTest(tf.test.TestCase, parameterized.TestCase): +class BartBackboneTest(TestCase): def setUp(self): self.model = BartBackbone( vocabulary_size=1000, @@ -120,7 +121,7 @@ def test_saved_model(self, save_format, filename): @pytest.mark.tpu @pytest.mark.usefixtures("tpu_test_class") -class BartBackboneTPUTest(tf.test.TestCase, parameterized.TestCase): +class BartBackboneTPUTest(TestCase): def setUp(self): with self.tpu_strategy.scope(): self.model = BartBackbone( diff --git a/keras_nlp/models/bart/bart_preprocessor_test.py b/keras_nlp/models/bart/bart_preprocessor_test.py index 41981b66f0..abaf3e9681 100644 --- a/keras_nlp/models/bart/bart_preprocessor_test.py +++ b/keras_nlp/models/bart/bart_preprocessor_test.py @@ -13,7 +13,6 @@ # limitations under the License. """Tests for BART preprocessor layer.""" - import os import pytest @@ -23,9 +22,10 @@ from keras_nlp.models.bart.bart_preprocessor import BartPreprocessor from keras_nlp.models.bart.bart_tokenizer import BartTokenizer +from keras_nlp.tests.test_case import TestCase -class BartPreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class BartPreprocessorTest(TestCase): def setUp(self): vocab = { "": 0, diff --git a/keras_nlp/models/bart/bart_presets_test.py b/keras_nlp/models/bart/bart_presets_test.py index a20241d485..8c8b837289 100644 --- a/keras_nlp/models/bart/bart_presets_test.py +++ b/keras_nlp/models/bart/bart_presets_test.py @@ -13,6 +13,8 @@ # limitations under the License. # Copyright 2023 The KerasNLP Authors # +from keras_nlp.tests.test_case import TestCase + # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -35,7 +37,7 @@ @pytest.mark.large -class BartPresetSmokeTest(tf.test.TestCase, parameterized.TestCase): +class BartPresetSmokeTest(TestCase): """ A smoke test for BART presets we run continuously. @@ -98,7 +100,7 @@ def test_unknown_preset_error(self, cls): @pytest.mark.extra_large -class BartPresetFullTest(tf.test.TestCase, parameterized.TestCase): +class BartPresetFullTest(TestCase): """ Test the full enumeration of our preset. diff --git a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py index 6a7fbc287c..3e9bca7d47 100644 --- a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py +++ b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py @@ -13,7 +13,6 @@ # limitations under the License. 
"""Tests for BART preprocessor layer.""" - import os import pytest @@ -25,9 +24,10 @@ BartSeq2SeqLMPreprocessor, ) from keras_nlp.models.bart.bart_tokenizer import BartTokenizer +from keras_nlp.tests.test_case import TestCase -class BartSeq2SeqLMPreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class BartSeq2SeqLMPreprocessorTest(TestCase): def setUp(self): vocab = { "": 0, diff --git a/keras_nlp/models/bart/bart_seq_2_seq_lm_test.py b/keras_nlp/models/bart/bart_seq_2_seq_lm_test.py index 6838a932d0..daf8f320c6 100644 --- a/keras_nlp/models/bart/bart_seq_2_seq_lm_test.py +++ b/keras_nlp/models/bart/bart_seq_2_seq_lm_test.py @@ -28,9 +28,10 @@ BartSeq2SeqLMPreprocessor, ) from keras_nlp.models.bart.bart_tokenizer import BartTokenizer +from keras_nlp.tests.test_case import TestCase -class BartSeq2SeqLMTest(tf.test.TestCase, parameterized.TestCase): +class BartSeq2SeqLMTest(TestCase): def setUp(self): # For DTensor. keras.backend.experimental.enable_tf_random_generator() diff --git a/keras_nlp/models/bart/bart_tokenizer_test.py b/keras_nlp/models/bart/bart_tokenizer_test.py index 9be5688899..154917b979 100644 --- a/keras_nlp/models/bart/bart_tokenizer_test.py +++ b/keras_nlp/models/bart/bart_tokenizer_test.py @@ -13,7 +13,6 @@ # limitations under the License. """Tests for BART tokenizer.""" - import os import tensorflow as tf @@ -21,9 +20,10 @@ from tensorflow import keras from keras_nlp.models.bart.bart_tokenizer import BartTokenizer +from keras_nlp.tests.test_case import TestCase -class BartTokenizerTest(tf.test.TestCase, parameterized.TestCase): +class BartTokenizerTest(TestCase): def setUp(self): vocab = { "": 0, diff --git a/keras_nlp/models/bert/bert_backbone_test.py b/keras_nlp/models/bert/bert_backbone_test.py index c0cae4645f..b264400338 100644 --- a/keras_nlp/models/bert/bert_backbone_test.py +++ b/keras_nlp/models/bert/bert_backbone_test.py @@ -21,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.bert.bert_backbone import BertBackbone +from keras_nlp.tests.test_case import TestCase -class BertBackboneTest(tf.test.TestCase, parameterized.TestCase): +class BertBackboneTest(TestCase): def setUp(self): self.backbone = BertBackbone( vocabulary_size=10, @@ -97,7 +98,7 @@ def test_saved_model(self, save_format, filename): @pytest.mark.tpu @pytest.mark.usefixtures("tpu_test_class") -class BertBackboneTPUTest(tf.test.TestCase, parameterized.TestCase): +class BertBackboneTPUTest(TestCase): def setUp(self): with self.tpu_strategy.scope(): self.backbone = BertBackbone( diff --git a/keras_nlp/models/bert/bert_classifier_test.py b/keras_nlp/models/bert/bert_classifier_test.py index df6d6093d0..0315a27b46 100644 --- a/keras_nlp/models/bert/bert_classifier_test.py +++ b/keras_nlp/models/bert/bert_classifier_test.py @@ -24,9 +24,10 @@ from keras_nlp.models.bert.bert_classifier import BertClassifier from keras_nlp.models.bert.bert_preprocessor import BertPreprocessor from keras_nlp.models.bert.bert_tokenizer import BertTokenizer +from keras_nlp.tests.test_case import TestCase -class BertClassifierTest(tf.test.TestCase, parameterized.TestCase): +class BertClassifierTest(TestCase): def setUp(self): # Setup model. 
self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] diff --git a/keras_nlp/models/bert/bert_masked_lm_preprocessor_test.py b/keras_nlp/models/bert/bert_masked_lm_preprocessor_test.py index 29938d3b34..55860a3215 100644 --- a/keras_nlp/models/bert/bert_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/bert/bert_masked_lm_preprocessor_test.py @@ -24,9 +24,10 @@ BertMaskedLMPreprocessor, ) from keras_nlp.models.bert.bert_tokenizer import BertTokenizer +from keras_nlp.tests.test_case import TestCase -class BertMaskedLMPreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class BertMaskedLMPreprocessorTest(TestCase): def setUp(self): self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] self.vocab += ["THE", "QUICK", "BROWN", "FOX"] diff --git a/keras_nlp/models/bert/bert_masked_lm_test.py b/keras_nlp/models/bert/bert_masked_lm_test.py index 2efa268e0d..62376b3622 100644 --- a/keras_nlp/models/bert/bert_masked_lm_test.py +++ b/keras_nlp/models/bert/bert_masked_lm_test.py @@ -26,9 +26,10 @@ BertMaskedLMPreprocessor, ) from keras_nlp.models.bert.bert_tokenizer import BertTokenizer +from keras_nlp.tests.test_case import TestCase -class BertMaskedLMTest(tf.test.TestCase, parameterized.TestCase): +class BertMaskedLMTest(TestCase): def setUp(self): # Setup model. self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] diff --git a/keras_nlp/models/bert/bert_preprocessor_test.py b/keras_nlp/models/bert/bert_preprocessor_test.py index ebd35b937c..b245287255 100644 --- a/keras_nlp/models/bert/bert_preprocessor_test.py +++ b/keras_nlp/models/bert/bert_preprocessor_test.py @@ -22,9 +22,10 @@ from keras_nlp.models.bert.bert_preprocessor import BertPreprocessor from keras_nlp.models.bert.bert_tokenizer import BertTokenizer +from keras_nlp.tests.test_case import TestCase -class BertPreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class BertPreprocessorTest(TestCase): def setUp(self): self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] self.vocab += ["THE", "QUICK", "BROWN", "FOX"] diff --git a/keras_nlp/models/bert/bert_presets_test.py b/keras_nlp/models/bert/bert_presets_test.py index 57e1812c01..e9e7f36cac 100644 --- a/keras_nlp/models/bert/bert_presets_test.py +++ b/keras_nlp/models/bert/bert_presets_test.py @@ -21,10 +21,11 @@ from keras_nlp.models.bert.bert_classifier import BertClassifier from keras_nlp.models.bert.bert_preprocessor import BertPreprocessor from keras_nlp.models.bert.bert_tokenizer import BertTokenizer +from keras_nlp.tests.test_case import TestCase @pytest.mark.large -class BertPresetSmokeTest(tf.test.TestCase, parameterized.TestCase): +class BertPresetSmokeTest(TestCase): """ A smoke test for BERT presets we run continuously. @@ -170,7 +171,7 @@ def test_override_preprocessor_sequence_length_gt_max(self): @pytest.mark.extra_large -class BertPresetFullTest(tf.test.TestCase, parameterized.TestCase): +class BertPresetFullTest(TestCase): """ Test the full enumeration of our preset. 
diff --git a/keras_nlp/models/bert/bert_tokenizer_test.py b/keras_nlp/models/bert/bert_tokenizer_test.py
index c36646909c..394e96c883 100644
--- a/keras_nlp/models/bert/bert_tokenizer_test.py
+++ b/keras_nlp/models/bert/bert_tokenizer_test.py
@@ -21,9 +21,10 @@
 from tensorflow import keras
 
 from keras_nlp.models.bert.bert_tokenizer import BertTokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
-class BertTokenizerTest(tf.test.TestCase, parameterized.TestCase):
+class BertTokenizerTest(TestCase):
     def setUp(self):
         self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
         self.vocab += ["THE", "QUICK", "BROWN", "FOX"]
diff --git a/keras_nlp/models/deberta_v3/deberta_v3_backbone_test.py b/keras_nlp/models/deberta_v3/deberta_v3_backbone_test.py
index cfa89fbe22..0e964f5435 100644
--- a/keras_nlp/models/deberta_v3/deberta_v3_backbone_test.py
+++ b/keras_nlp/models/deberta_v3/deberta_v3_backbone_test.py
@@ -21,9 +21,10 @@
 from tensorflow import keras
 
 from keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone
+from keras_nlp.tests.test_case import TestCase
 
 
-class DebertaV3BackboneTest(tf.test.TestCase, parameterized.TestCase):
+class DebertaV3BackboneTest(TestCase):
     def setUp(self):
         self.backbone = DebertaV3Backbone(
             vocabulary_size=10,
@@ -99,7 +100,7 @@ def test_saved_model(self, save_format, filename):
 
 @pytest.mark.tpu
 @pytest.mark.usefixtures("tpu_test_class")
-class DebertaV3BackboneTPUTest(tf.test.TestCase, parameterized.TestCase):
+class DebertaV3BackboneTPUTest(TestCase):
     def setUp(self):
         with self.tpu_strategy.scope():
             self.backbone = DebertaV3Backbone(
diff --git a/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py b/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py
index a84e3074ad..594a74b36d 100644
--- a/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py
+++ b/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py
@@ -30,9 +30,10 @@
     DebertaV3Preprocessor,
 )
 from keras_nlp.models.deberta_v3.deberta_v3_tokenizer import DebertaV3Tokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
-class DebertaV3ClassifierTest(tf.test.TestCase, parameterized.TestCase):
+class DebertaV3ClassifierTest(TestCase):
     def setUp(self):
         bytes_io = io.BytesIO()
         vocab_data = tf.data.Dataset.from_tensor_slices(
diff --git a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor_test.py b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor_test.py
index 38f4847e3f..3f1a93fa79 100644
--- a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor_test.py
+++ b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
"""Tests for DeBERTa preprocessor layer.""" - import io import os @@ -27,9 +26,10 @@ DebertaV3MaskedLMPreprocessor, ) from keras_nlp.models.deberta_v3.deberta_v3_tokenizer import DebertaV3Tokenizer +from keras_nlp.tests.test_case import TestCase -class DebertaV3PreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class DebertaV3PreprocessorTest(TestCase): def setUp(self): bytes_io = io.BytesIO() vocab_data = tf.data.Dataset.from_tensor_slices( diff --git a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py index 37e54e7209..e4a17de0b3 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py @@ -28,9 +28,10 @@ DebertaV3MaskedLMPreprocessor, ) from keras_nlp.models.deberta_v3.deberta_v3_tokenizer import DebertaV3Tokenizer +from keras_nlp.tests.test_case import TestCase -class DebertaV3MaskedLMTest(tf.test.TestCase, parameterized.TestCase): +class DebertaV3MaskedLMTest(TestCase): def setUp(self): bytes_io = io.BytesIO() vocab_data = tf.data.Dataset.from_tensor_slices( diff --git a/keras_nlp/models/deberta_v3/deberta_v3_preprocessor_test.py b/keras_nlp/models/deberta_v3/deberta_v3_preprocessor_test.py index 1bec64da06..f3aa332a19 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_preprocessor_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_preprocessor_test.py @@ -13,7 +13,6 @@ # limitations under the License. """Tests for DeBERTa preprocessor layer.""" - import io import os @@ -27,9 +26,10 @@ DebertaV3Preprocessor, ) from keras_nlp.models.deberta_v3.deberta_v3_tokenizer import DebertaV3Tokenizer +from keras_nlp.tests.test_case import TestCase -class DebertaV3PreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class DebertaV3PreprocessorTest(TestCase): def setUp(self): bytes_io = io.BytesIO() vocab_data = tf.data.Dataset.from_tensor_slices( diff --git a/keras_nlp/models/deberta_v3/deberta_v3_presets_test.py b/keras_nlp/models/deberta_v3/deberta_v3_presets_test.py index b2db760d7f..0df222fd0b 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_presets_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_presets_test.py @@ -25,10 +25,11 @@ DebertaV3Preprocessor, ) from keras_nlp.models.deberta_v3.deberta_v3_tokenizer import DebertaV3Tokenizer +from keras_nlp.tests.test_case import TestCase @pytest.mark.large -class DebertaV3PresetSmokeTest(tf.test.TestCase, parameterized.TestCase): +class DebertaV3PresetSmokeTest(TestCase): """ A smoke test for DeBERTa presets we run continuously. @@ -132,7 +133,7 @@ def test_unknown_preset_error(self, cls, kwargs): @pytest.mark.extra_large -class DebertaV3PresetFullTest(tf.test.TestCase, parameterized.TestCase): +class DebertaV3PresetFullTest(TestCase): """ Test the full enumeration of our preset. diff --git a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py index 3f0884c7e2..21d6350ba5 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py @@ -13,7 +13,6 @@ # limitations under the License. 
"""Tests for DeBERTa tokenizer.""" - import io import os @@ -24,9 +23,10 @@ from tensorflow import keras from keras_nlp.models.deberta_v3.deberta_v3_tokenizer import DebertaV3Tokenizer +from keras_nlp.tests.test_case import TestCase -class DebertaV3TokenizerTest(tf.test.TestCase, parameterized.TestCase): +class DebertaV3TokenizerTest(TestCase): def setUp(self): bytes_io = io.BytesIO() vocab_data = tf.data.Dataset.from_tensor_slices( diff --git a/keras_nlp/models/distil_bert/distil_bert_backbone_test.py b/keras_nlp/models/distil_bert/distil_bert_backbone_test.py index a186978465..a0c42f3948 100644 --- a/keras_nlp/models/distil_bert/distil_bert_backbone_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_backbone_test.py @@ -21,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.distil_bert.distil_bert_backbone import DistilBertBackbone +from keras_nlp.tests.test_case import TestCase -class DistilBertTest(tf.test.TestCase, parameterized.TestCase): +class DistilBertTest(TestCase): def setUp(self): self.backbone = DistilBertBackbone( vocabulary_size=10, @@ -93,7 +94,7 @@ def test_saved_model(self, save_format, filename): @pytest.mark.tpu @pytest.mark.usefixtures("tpu_test_class") -class DistilBertTPUTest(tf.test.TestCase, parameterized.TestCase): +class DistilBertTPUTest(TestCase): def setUp(self): with self.tpu_strategy.scope(): self.backbone = DistilBertBackbone( diff --git a/keras_nlp/models/distil_bert/distil_bert_classifier_test.py b/keras_nlp/models/distil_bert/distil_bert_classifier_test.py index d8c09cce8a..41d176bd8a 100644 --- a/keras_nlp/models/distil_bert/distil_bert_classifier_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_classifier_test.py @@ -30,9 +30,10 @@ from keras_nlp.models.distil_bert.distil_bert_tokenizer import ( DistilBertTokenizer, ) +from keras_nlp.tests.test_case import TestCase -class DistilBertClassifierTest(tf.test.TestCase, parameterized.TestCase): +class DistilBertClassifierTest(TestCase): def setUp(self): # Setup model diff --git a/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor_test.py b/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor_test.py index 7ed23c0b49..0cff4899f5 100644 --- a/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor_test.py @@ -13,7 +13,6 @@ # limitations under the License. """Tests for DistilBERT masked language model preprocessor layer.""" - import os import pytest @@ -27,11 +26,10 @@ from keras_nlp.models.distil_bert.distil_bert_tokenizer import ( DistilBertTokenizer, ) +from keras_nlp.tests.test_case import TestCase -class DistilBertMaskedLMPreprocessorTest( - tf.test.TestCase, parameterized.TestCase -): +class DistilBertMaskedLMPreprocessorTest(TestCase): def setUp(self): self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] self.vocab += ["THE", "QUICK", "BROWN", "FOX"] @@ -127,8 +125,8 @@ def test_saved_model(self, save_format, filename): outputs = self.preprocessor(inputs) model = keras.Model(inputs, outputs) - path = os.path.join(self.get_temp_dir(), filename) - # Don't save traces in the tf format, we check compilation elsewhere. + path = os.path.join(self.get_temp_dir(), filename) + # Don't save traces in the tf format, we check compilation elsewhere. 
kwargs = {"save_traces": False} if save_format == "tf" else {} model.save(path, save_format=save_format, **kwargs) diff --git a/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py b/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py index 714e5442a2..ce4b7cf8d2 100644 --- a/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py @@ -30,9 +30,10 @@ from keras_nlp.models.distil_bert.distil_bert_tokenizer import ( DistilBertTokenizer, ) +from keras_nlp.tests.test_case import TestCase -class DistilBertMaskedLMTest(tf.test.TestCase, parameterized.TestCase): +class DistilBertMaskedLMTest(TestCase): def setUp(self): # Setup model. self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] diff --git a/keras_nlp/models/distil_bert/distil_bert_preprocessor_test.py b/keras_nlp/models/distil_bert/distil_bert_preprocessor_test.py index 1cede3a850..ed10622c75 100644 --- a/keras_nlp/models/distil_bert/distil_bert_preprocessor_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_preprocessor_test.py @@ -26,9 +26,10 @@ from keras_nlp.models.distil_bert.distil_bert_tokenizer import ( DistilBertTokenizer, ) +from keras_nlp.tests.test_case import TestCase -class DistilBertPreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class DistilBertPreprocessorTest(TestCase): def setUp(self): self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] self.vocab += ["THE", "QUICK", "BROWN", "FOX"] diff --git a/keras_nlp/models/distil_bert/distil_bert_presets_test.py b/keras_nlp/models/distil_bert/distil_bert_presets_test.py index 6e27cf7d76..71e56911c0 100644 --- a/keras_nlp/models/distil_bert/distil_bert_presets_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_presets_test.py @@ -27,10 +27,11 @@ from keras_nlp.models.distil_bert.distil_bert_tokenizer import ( DistilBertTokenizer, ) +from keras_nlp.tests.test_case import TestCase @pytest.mark.large -class DistilBertPresetSmokeTest(tf.test.TestCase, parameterized.TestCase): +class DistilBertPresetSmokeTest(TestCase): """ A smoke test for DistilBERT presets we run continuously. @@ -123,7 +124,7 @@ def test_unknown_preset_error(self, cls, kwargs): @pytest.mark.extra_large -class DistilBertPresetFullTest(tf.test.TestCase, parameterized.TestCase): +class DistilBertPresetFullTest(TestCase): """ Tests the full enumeration of our preset. 
diff --git a/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py b/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py
index 293baaa325..5712df3c24 100644
--- a/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py
+++ b/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py
@@ -23,9 +23,10 @@
 from keras_nlp.models.distil_bert.distil_bert_tokenizer import (
     DistilBertTokenizer,
 )
+from keras_nlp.tests.test_case import TestCase
 
 
-class DistilBertTokenizerTest(tf.test.TestCase, parameterized.TestCase):
+class DistilBertTokenizerTest(TestCase):
     def setUp(self):
         self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
         self.vocab += ["THE", "QUICK", "BROWN", "FOX"]
diff --git a/keras_nlp/models/f_net/f_net_backbone_test.py b/keras_nlp/models/f_net/f_net_backbone_test.py
index 9e33d56377..b87598f51a 100644
--- a/keras_nlp/models/f_net/f_net_backbone_test.py
+++ b/keras_nlp/models/f_net/f_net_backbone_test.py
@@ -21,9 +21,10 @@
 from tensorflow import keras
 
 from keras_nlp.models.f_net.f_net_backbone import FNetBackbone
+from keras_nlp.tests.test_case import TestCase
 
 
-class FNetBackboneTest(tf.test.TestCase, parameterized.TestCase):
+class FNetBackboneTest(TestCase):
     def setUp(self):
         self.backbone = FNetBackbone(
             vocabulary_size=10,
@@ -91,7 +92,7 @@ def test_saved_model(self, save_format, filename):
 
 @pytest.mark.tpu
 @pytest.mark.usefixtures("tpu_test_class")
-class FNetBackboneTPUTest(tf.test.TestCase, parameterized.TestCase):
+class FNetBackboneTPUTest(TestCase):
     def setUp(self):
         with self.tpu_strategy.scope():
             self.backbone = FNetBackbone(
diff --git a/keras_nlp/models/f_net/f_net_classifier_test.py b/keras_nlp/models/f_net/f_net_classifier_test.py
index b67b1506cb..f6a78a80aa 100644
--- a/keras_nlp/models/f_net/f_net_classifier_test.py
+++ b/keras_nlp/models/f_net/f_net_classifier_test.py
@@ -26,9 +26,10 @@
 from keras_nlp.models.f_net.f_net_classifier import FNetClassifier
 from keras_nlp.models.f_net.f_net_preprocessor import FNetPreprocessor
 from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
-class FNetClassifierTest(tf.test.TestCase, parameterized.TestCase):
+class FNetClassifierTest(TestCase):
     def setUp(self):
         # Setup Model
         bytes_io = io.BytesIO()
diff --git a/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py b/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py
index 4830c6c81a..c41730cdaa 100644
--- a/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py
+++ b/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py
@@ -24,9 +24,10 @@
     FNetMaskedLMPreprocessor,
 )
 from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
-class FNetMaskedLMPreprocessorTest(tf.test.TestCase, parameterized.TestCase):
+class FNetMaskedLMPreprocessorTest(TestCase):
     def setUp(self):
         bytes_io = io.BytesIO()
         vocab_data = tf.data.Dataset.from_tensor_slices(
diff --git a/keras_nlp/models/f_net/f_net_masked_lm_test.py b/keras_nlp/models/f_net/f_net_masked_lm_test.py
index 999262040c..5f61909b03 100644
--- a/keras_nlp/models/f_net/f_net_masked_lm_test.py
+++ b/keras_nlp/models/f_net/f_net_masked_lm_test.py
@@ -26,9 +26,10 @@
     FNetMaskedLMPreprocessor,
 )
 from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
-class FNetMaskedLMTest(tf.test.TestCase, parameterized.TestCase):
+class FNetMaskedLMTest(TestCase):
     def setUp(self):
         # Setup Model.
         bytes_io = io.BytesIO()
diff --git a/keras_nlp/models/f_net/f_net_preprocessor_test.py b/keras_nlp/models/f_net/f_net_preprocessor_test.py
index 10c0a3aac6..719f178f5c 100644
--- a/keras_nlp/models/f_net/f_net_preprocessor_test.py
+++ b/keras_nlp/models/f_net/f_net_preprocessor_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 """Tests for FNet preprocessor layer."""
-
 
 import io
 import os
@@ -25,9 +24,10 @@
 
 from keras_nlp.models.f_net.f_net_preprocessor import FNetPreprocessor
 from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
-class FNetPreprocessorTest(tf.test.TestCase, parameterized.TestCase):
+class FNetPreprocessorTest(TestCase):
     def setUp(self):
         bytes_io = io.BytesIO()
         vocab_data = tf.data.Dataset.from_tensor_slices(
diff --git a/keras_nlp/models/f_net/f_net_presets_test.py b/keras_nlp/models/f_net/f_net_presets_test.py
index abd8240f99..4c30e0b0b2 100644
--- a/keras_nlp/models/f_net/f_net_presets_test.py
+++ b/keras_nlp/models/f_net/f_net_presets_test.py
@@ -21,10 +21,11 @@
 from keras_nlp.models.f_net.f_net_classifier import FNetClassifier
 from keras_nlp.models.f_net.f_net_preprocessor import FNetPreprocessor
 from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
 @pytest.mark.large
-class FNetPresetSmokeTest(tf.test.TestCase, parameterized.TestCase):
+class FNetPresetSmokeTest(TestCase):
     """
     A smoke test for FNet presets we run continuously.
 
@@ -110,7 +111,7 @@ def test_unknown_preset_error(self, cls, kwargs):
 
 
 @pytest.mark.extra_large
-class FNetPresetFullTest(tf.test.TestCase, parameterized.TestCase):
+class FNetPresetFullTest(TestCase):
     """
     Test the full enumeration of our preset.
 
diff --git a/keras_nlp/models/f_net/f_net_tokenizer_test.py b/keras_nlp/models/f_net/f_net_tokenizer_test.py
index 33e26cc8cd..1d7a1f3850 100644
--- a/keras_nlp/models/f_net/f_net_tokenizer_test.py
+++ b/keras_nlp/models/f_net/f_net_tokenizer_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 """Tests for FNet tokenizer."""
-
 
 import io
 import os
@@ -24,9 +23,10 @@
 from tensorflow import keras
 
 from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
-class FNetTokenizerTest(tf.test.TestCase, parameterized.TestCase):
+class FNetTokenizerTest(TestCase):
     def setUp(self):
         bytes_io = io.BytesIO()
         vocab_data = tf.data.Dataset.from_tensor_slices(
diff --git a/keras_nlp/models/gpt2/gpt2_backbone_test.py b/keras_nlp/models/gpt2/gpt2_backbone_test.py
index 02cb6dcfdd..155f120d85 100644
--- a/keras_nlp/models/gpt2/gpt2_backbone_test.py
+++ b/keras_nlp/models/gpt2/gpt2_backbone_test.py
@@ -21,9 +21,10 @@
 from tensorflow import keras
 
 from keras_nlp.models.gpt2.gpt2_backbone import GPT2Backbone
+from keras_nlp.tests.test_case import TestCase
 
 
-class GPT2Test(tf.test.TestCase, parameterized.TestCase):
+class GPT2Test(TestCase):
     def setUp(self):
         # For DTensor.
         keras.backend.experimental.enable_tf_random_generator()
@@ -115,7 +116,7 @@ def test_create_layout_map(self):
 
 @pytest.mark.tpu
 @pytest.mark.usefixtures("tpu_test_class")
-class GPT2BackboneTPUTest(tf.test.TestCase, parameterized.TestCase):
+class GPT2BackboneTPUTest(TestCase):
     def setUp(self):
         with self.tpu_strategy.scope():
             self.model = GPT2Backbone(
diff --git a/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py b/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py
index 5d5912d9a8..97242d481b 100644
--- a/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py
+++ b/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 """Tests for GPT2 causal LM preprocessor layer."""
-
 
 import os
 
 import pytest
@@ -25,9 +24,10 @@
     GPT2CausalLMPreprocessor,
 )
 from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
-class GPT2CausalLMPreprocessorTest(tf.test.TestCase, parameterized.TestCase):
+class GPT2CausalLMPreprocessorTest(TestCase):
     def setUp(self):
         self.vocab = {
             "!": 0,
diff --git a/keras_nlp/models/gpt2/gpt2_causal_lm_test.py b/keras_nlp/models/gpt2/gpt2_causal_lm_test.py
index 6fbbdbc201..5ffb00e7e8 100644
--- a/keras_nlp/models/gpt2/gpt2_causal_lm_test.py
+++ b/keras_nlp/models/gpt2/gpt2_causal_lm_test.py
@@ -28,9 +28,10 @@
     GPT2CausalLMPreprocessor,
 )
 from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
-class GPT2CausalLMTest(tf.test.TestCase, parameterized.TestCase):
+class GPT2CausalLMTest(TestCase):
     def setUp(self):
         # For DTensor.
         keras.backend.experimental.enable_tf_random_generator()
diff --git a/keras_nlp/models/gpt2/gpt2_preprocessor_test.py b/keras_nlp/models/gpt2/gpt2_preprocessor_test.py
index 6f8f7d8629..f7bd9bc7cb 100644
--- a/keras_nlp/models/gpt2/gpt2_preprocessor_test.py
+++ b/keras_nlp/models/gpt2/gpt2_preprocessor_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 """Tests for GPT2 preprocessor layer."""
-
 
 import os
 
 import pytest
@@ -23,9 +22,10 @@
 
 from keras_nlp.models.gpt2.gpt2_preprocessor import GPT2Preprocessor
 from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
-class GPT2PreprocessorTest(tf.test.TestCase, parameterized.TestCase):
+class GPT2PreprocessorTest(TestCase):
     def setUp(self):
         self.vocab = {
             "!": 0,
diff --git a/keras_nlp/models/gpt2/gpt2_presets_test.py b/keras_nlp/models/gpt2/gpt2_presets_test.py
index 0254008a40..396d5b995c 100644
--- a/keras_nlp/models/gpt2/gpt2_presets_test.py
+++ b/keras_nlp/models/gpt2/gpt2_presets_test.py
@@ -19,10 +19,11 @@
 
 from keras_nlp.models.gpt2.gpt2_backbone import GPT2Backbone
 from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer
+from keras_nlp.tests.test_case import TestCase
 
 
 @pytest.mark.large
-class GPT2PresetSmokeTest(tf.test.TestCase, parameterized.TestCase):
+class GPT2PresetSmokeTest(TestCase):
     """
     A smoke test for GPT-2 presets we run continuously.
 
@@ -78,7 +79,7 @@ def test_unknown_preset_error(self, cls):
 
 
 @pytest.mark.extra_large
-class GPT2PresetFullTest(tf.test.TestCase, parameterized.TestCase):
+class GPT2PresetFullTest(TestCase):
     """
     Test the full enumeration of our preset.
 
diff --git a/keras_nlp/models/gpt2/gpt2_tokenizer_test.py b/keras_nlp/models/gpt2/gpt2_tokenizer_test.py
index f620e01315..6ecae6bf7f 100644
--- a/keras_nlp/models/gpt2/gpt2_tokenizer_test.py
+++ b/keras_nlp/models/gpt2/gpt2_tokenizer_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
"""Tests for GPT-2 preprocessing layers.""" - import os import pytest @@ -22,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer +from keras_nlp.tests.test_case import TestCase -class GPT2TokenizerTest(tf.test.TestCase, parameterized.TestCase): +class GPT2TokenizerTest(TestCase): def setUp(self): self.vocab = { "<|endoftext|>": 0, diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone_test.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone_test.py index dd80dbe2bd..a8ef804417 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone_test.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone_test.py @@ -21,9 +21,10 @@ from tensorflow import keras from keras_nlp.models import GPTNeoXBackbone +from keras_nlp.tests.test_case import TestCase -class GPTNeoXTest(tf.test.TestCase, parameterized.TestCase): +class GPTNeoXTest(TestCase): def setUp(self): self.backbone = GPTNeoXBackbone( vocabulary_size=10, @@ -93,7 +94,7 @@ def test_saved_model(self, save_format, filename): @pytest.mark.tpu @pytest.mark.usefixtures("tpu_test_class") -class GPTNeoXBackboneTPUTest(tf.test.TestCase, parameterized.TestCase): +class GPTNeoXBackboneTPUTest(TestCase): def setUp(self): with self.tpu_strategy.scope(): GPTNeoXBackbone( diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer_test.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer_test.py index 7f54ccab34..458b63dc71 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer_test.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer_test.py @@ -13,7 +13,6 @@ # limitations under the License. """Tests for GPT-2 preprocessing layers.""" - import os import pytest @@ -22,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.gpt_neo_x.gpt_neo_x_tokenizer import GPTNeoXTokenizer +from keras_nlp.tests.test_case import TestCase -class GPTNeoXTokenizerTest(tf.test.TestCase, parameterized.TestCase): +class GPTNeoXTokenizerTest(TestCase): def setUp(self): self.vocab = { "<|endoftext|>": 0, diff --git a/keras_nlp/models/opt/opt_backbone_test.py b/keras_nlp/models/opt/opt_backbone_test.py index a26e8d6a0a..2bd154aadd 100644 --- a/keras_nlp/models/opt/opt_backbone_test.py +++ b/keras_nlp/models/opt/opt_backbone_test.py @@ -21,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.opt.opt_backbone import OPTBackbone +from keras_nlp.tests.test_case import TestCase -class OPTTest(tf.test.TestCase, parameterized.TestCase): +class OPTTest(TestCase): def setUp(self): # For DTensor. keras.backend.experimental.enable_tf_random_generator() @@ -115,7 +116,7 @@ def test_create_layout_map(self): @pytest.mark.tpu @pytest.mark.usefixtures("tpu_test_class") -class OPTBackboneTPUTest(tf.test.TestCase, parameterized.TestCase): +class OPTBackboneTPUTest(TestCase): def setUp(self): with self.tpu_strategy.scope(): self.backbone = OPTBackbone( diff --git a/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py b/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py index 305c76b0d1..c3743c9d23 100644 --- a/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py +++ b/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py @@ -13,7 +13,6 @@ # limitations under the License. 
"""Tests for OPT causal LM preprocessor layer.""" - import os import pytest @@ -25,9 +24,10 @@ OPTCausalLMPreprocessor, ) from keras_nlp.models.opt.opt_tokenizer import OPTTokenizer +from keras_nlp.tests.test_case import TestCase -class OPTCausalLMPreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class OPTCausalLMPreprocessorTest(TestCase): def setUp(self): self.vocab = { "": 0, diff --git a/keras_nlp/models/opt/opt_causal_lm_test.py b/keras_nlp/models/opt/opt_causal_lm_test.py index df3e1db7fa..fe2b6b7744 100644 --- a/keras_nlp/models/opt/opt_causal_lm_test.py +++ b/keras_nlp/models/opt/opt_causal_lm_test.py @@ -28,9 +28,10 @@ OPTCausalLMPreprocessor, ) from keras_nlp.models.opt.opt_tokenizer import OPTTokenizer +from keras_nlp.tests.test_case import TestCase -class OPTCausalLMTest(tf.test.TestCase, parameterized.TestCase): +class OPTCausalLMTest(TestCase): def setUp(self): # For DTensor. keras.backend.experimental.enable_tf_random_generator() diff --git a/keras_nlp/models/opt/opt_preprocessor_test.py b/keras_nlp/models/opt/opt_preprocessor_test.py index 51050ae865..497dc1e049 100644 --- a/keras_nlp/models/opt/opt_preprocessor_test.py +++ b/keras_nlp/models/opt/opt_preprocessor_test.py @@ -13,7 +13,6 @@ # limitations under the License. """Tests for OPT preprocessor layer.""" - import os import pytest @@ -23,9 +22,10 @@ from keras_nlp.models.opt.opt_preprocessor import OPTPreprocessor from keras_nlp.models.opt.opt_tokenizer import OPTTokenizer +from keras_nlp.tests.test_case import TestCase -class OPTPreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class OPTPreprocessorTest(TestCase): def setUp(self): self.vocab = { "": 0, diff --git a/keras_nlp/models/opt/opt_presets_test.py b/keras_nlp/models/opt/opt_presets_test.py index 2ab762e22f..ec95e5ecc6 100644 --- a/keras_nlp/models/opt/opt_presets_test.py +++ b/keras_nlp/models/opt/opt_presets_test.py @@ -19,10 +19,11 @@ from keras_nlp.models.opt.opt_backbone import OPTBackbone from keras_nlp.models.opt.opt_tokenizer import OPTTokenizer +from keras_nlp.tests.test_case import TestCase @pytest.mark.large -class OPTPresetSmokeTest(tf.test.TestCase, parameterized.TestCase): +class OPTPresetSmokeTest(TestCase): """ A smoke test for GPT-2 presets we run continuously. @@ -78,7 +79,7 @@ def test_unknown_preset_error(self, cls): @pytest.mark.extra_large -class OPTPresetFullTest(tf.test.TestCase, parameterized.TestCase): +class OPTPresetFullTest(TestCase): """ Test the full enumeration of our preset. diff --git a/keras_nlp/models/opt/opt_tokenizer_test.py b/keras_nlp/models/opt/opt_tokenizer_test.py index 1f91a3f04b..1de3869e37 100644 --- a/keras_nlp/models/opt/opt_tokenizer_test.py +++ b/keras_nlp/models/opt/opt_tokenizer_test.py @@ -13,7 +13,6 @@ # limitations under the License. """Tests for OPT tokenizer layer.""" - import os import pytest @@ -22,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.opt.opt_tokenizer import OPTTokenizer +from keras_nlp.tests.test_case import TestCase -class OPTTokenizerTest(tf.test.TestCase, parameterized.TestCase): +class OPTTokenizerTest(TestCase): def setUp(self): self.vocab = { "": 0, diff --git a/keras_nlp/models/roberta/roberta_backbone_test.py b/keras_nlp/models/roberta/roberta_backbone_test.py index ac0777a8d4..2983fd7617 100644 --- a/keras_nlp/models/roberta/roberta_backbone_test.py +++ b/keras_nlp/models/roberta/roberta_backbone_test.py @@ -13,7 +13,6 @@ # limitations under the License. 
"""Test for RoBERTa backbone models.""" - import os import pytest @@ -22,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.roberta.roberta_backbone import RobertaBackbone +from keras_nlp.tests.test_case import TestCase -class RobertaBackboneTest(tf.test.TestCase, parameterized.TestCase): +class RobertaBackboneTest(TestCase): def setUp(self): self.backbone = RobertaBackbone( vocabulary_size=10, @@ -99,7 +99,7 @@ def test_saved_model(self, save_format, filename): @pytest.mark.tpu @pytest.mark.usefixtures("tpu_test_class") -class RobertaBackboneTPUTest(tf.test.TestCase, parameterized.TestCase): +class RobertaBackboneTPUTest(TestCase): def setUp(self): with self.tpu_strategy.scope(): self.backbone = RobertaBackbone( diff --git a/keras_nlp/models/roberta/roberta_classifier_test.py b/keras_nlp/models/roberta/roberta_classifier_test.py index b4d2efe33d..7008664c09 100644 --- a/keras_nlp/models/roberta/roberta_classifier_test.py +++ b/keras_nlp/models/roberta/roberta_classifier_test.py @@ -24,9 +24,10 @@ from keras_nlp.models.roberta.roberta_classifier import RobertaClassifier from keras_nlp.models.roberta.roberta_preprocessor import RobertaPreprocessor from keras_nlp.models.roberta.roberta_tokenizer import RobertaTokenizer +from keras_nlp.tests.test_case import TestCase -class RobertaClassifierTest(tf.test.TestCase, parameterized.TestCase): +class RobertaClassifierTest(TestCase): def setUp(self): self.vocab = { "": 0, diff --git a/keras_nlp/models/roberta/roberta_masked_lm_preprocessor_test.py b/keras_nlp/models/roberta/roberta_masked_lm_preprocessor_test.py index f34f4b22e6..bd2e775d58 100644 --- a/keras_nlp/models/roberta/roberta_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/roberta/roberta_masked_lm_preprocessor_test.py @@ -13,7 +13,6 @@ # limitations under the License. """Tests for RoBERTa masked language model preprocessor layer.""" - import os import pytest @@ -25,9 +24,10 @@ RobertaMaskedLMPreprocessor, ) from keras_nlp.models.roberta.roberta_tokenizer import RobertaTokenizer +from keras_nlp.tests.test_case import TestCase -class RobertaMaskedLMPreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class RobertaMaskedLMPreprocessorTest(TestCase): def setUp(self): vocab = { "": 0, diff --git a/keras_nlp/models/roberta/roberta_masked_lm_test.py b/keras_nlp/models/roberta/roberta_masked_lm_test.py index 8334ad42af..b33640821c 100644 --- a/keras_nlp/models/roberta/roberta_masked_lm_test.py +++ b/keras_nlp/models/roberta/roberta_masked_lm_test.py @@ -26,9 +26,10 @@ RobertaMaskedLMPreprocessor, ) from keras_nlp.models.roberta.roberta_tokenizer import RobertaTokenizer +from keras_nlp.tests.test_case import TestCase -class RobertaMaskedLMTest(tf.test.TestCase, parameterized.TestCase): +class RobertaMaskedLMTest(TestCase): def setUp(self): self.vocab = { "": 0, diff --git a/keras_nlp/models/roberta/roberta_preprocessor_test.py b/keras_nlp/models/roberta/roberta_preprocessor_test.py index d98644f2f6..78eef4c8db 100644 --- a/keras_nlp/models/roberta/roberta_preprocessor_test.py +++ b/keras_nlp/models/roberta/roberta_preprocessor_test.py @@ -13,7 +13,6 @@ # limitations under the License. 
"""Tests for RoBERTa preprocessor layer.""" - import os import pytest @@ -23,9 +22,10 @@ from keras_nlp.models.roberta.roberta_preprocessor import RobertaPreprocessor from keras_nlp.models.roberta.roberta_tokenizer import RobertaTokenizer +from keras_nlp.tests.test_case import TestCase -class RobertaPreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class RobertaPreprocessorTest(TestCase): def setUp(self): vocab = { "": 0, diff --git a/keras_nlp/models/roberta/roberta_presets_test.py b/keras_nlp/models/roberta/roberta_presets_test.py index c96d60e7cc..c5cb97d255 100644 --- a/keras_nlp/models/roberta/roberta_presets_test.py +++ b/keras_nlp/models/roberta/roberta_presets_test.py @@ -22,10 +22,11 @@ from keras_nlp.models.roberta.roberta_masked_lm import RobertaMaskedLM from keras_nlp.models.roberta.roberta_preprocessor import RobertaPreprocessor from keras_nlp.models.roberta.roberta_tokenizer import RobertaTokenizer +from keras_nlp.tests.test_case import TestCase @pytest.mark.large -class RobertaPresetSmokeTest(tf.test.TestCase, parameterized.TestCase): +class RobertaPresetSmokeTest(TestCase): """ A smoke test for RoBERTa presets we run continuously. @@ -149,7 +150,7 @@ def test_unknown_preset_error(self, cls, kwargs): @pytest.mark.extra_large -class RobertaPresetFullTest(tf.test.TestCase, parameterized.TestCase): +class RobertaPresetFullTest(TestCase): """ Test the full enumeration of our preset. diff --git a/keras_nlp/models/roberta/roberta_tokenizer_test.py b/keras_nlp/models/roberta/roberta_tokenizer_test.py index e99ad0684f..3bd9fadd68 100644 --- a/keras_nlp/models/roberta/roberta_tokenizer_test.py +++ b/keras_nlp/models/roberta/roberta_tokenizer_test.py @@ -13,7 +13,6 @@ # limitations under the License. """Tests for RoBERTa tokenizer.""" - import os import pytest @@ -22,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.roberta.roberta_tokenizer import RobertaTokenizer +from keras_nlp.tests.test_case import TestCase -class RobertaTokenizerTest(tf.test.TestCase, parameterized.TestCase): +class RobertaTokenizerTest(TestCase): def setUp(self): vocab = { "": 0, diff --git a/keras_nlp/models/t5/t5_backbone_test.py b/keras_nlp/models/t5/t5_backbone_test.py index 4d89336dff..51ca0a4854 100644 --- a/keras_nlp/models/t5/t5_backbone_test.py +++ b/keras_nlp/models/t5/t5_backbone_test.py @@ -21,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.t5.t5_backbone import T5Backbone +from keras_nlp.tests.test_case import TestCase -class T5Test(tf.test.TestCase, parameterized.TestCase): +class T5Test(TestCase): def setUp(self): self.backbone = T5Backbone( vocabulary_size=4, @@ -119,7 +120,7 @@ def test_saved_model(self, save_format, filename): @pytest.mark.tpu @pytest.mark.usefixtures("tpu_test_class") -class T5BackboneTPUTest(tf.test.TestCase, parameterized.TestCase): +class T5BackboneTPUTest(TestCase): def setUp(self): with self.tpu_strategy.scope(): self.backbone = T5Backbone( diff --git a/keras_nlp/models/t5/t5_tokenizer_test.py b/keras_nlp/models/t5/t5_tokenizer_test.py index 7386872dcf..446203d9c7 100644 --- a/keras_nlp/models/t5/t5_tokenizer_test.py +++ b/keras_nlp/models/t5/t5_tokenizer_test.py @@ -13,7 +13,6 @@ # limitations under the License. 
"""Tests for T5 tokenizer.""" - import io import os @@ -24,9 +23,10 @@ from tensorflow import keras from keras_nlp.models.t5.t5_tokenizer import T5Tokenizer +from keras_nlp.tests.test_case import TestCase -class T5TokenizerTest(tf.test.TestCase, parameterized.TestCase): +class T5TokenizerTest(TestCase): def setUp(self): bytes_io = io.BytesIO() vocab_data = tf.data.Dataset.from_tensor_slices( diff --git a/keras_nlp/models/task_test.py b/keras_nlp/models/task_test.py index 81d55bd090..c3066bc1d6 100644 --- a/keras_nlp/models/task_test.py +++ b/keras_nlp/models/task_test.py @@ -11,12 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import tensorflow as tf from tensorflow import keras from tensorflow.keras.losses import SparseCategoricalCrossentropy from keras_nlp.models.preprocessor import Preprocessor from keras_nlp.models.task import Task +from keras_nlp.tests.test_case import TestCase from keras_nlp.tokenizers.tokenizer import Tokenizer @@ -40,7 +40,7 @@ def __init__(self, preprocessor=None, activation=None, **kwargs): self.activation = keras.activations.get(activation) -class TestTask(tf.test.TestCase): +class TestTask(TestCase): def test_summary_with_preprocessor(self): preprocessor = SimplePreprocessor() model = SimpleTask(preprocessor) diff --git a/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py b/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py index 0b35c99db8..2581f4681c 100644 --- a/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py +++ b/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py @@ -23,11 +23,10 @@ from keras_nlp.models.whisper.whisper_audio_feature_extractor import ( WhisperAudioFeatureExtractor, ) +from keras_nlp.tests.test_case import TestCase -class WhisperAudioFeatureExtractorTest( - tf.test.TestCase, parameterized.TestCase -): +class WhisperAudioFeatureExtractorTest(TestCase): def setUp(self): self.num_mels = 80 self.num_fft_bins = 400 diff --git a/keras_nlp/models/whisper/whisper_backbone_test.py b/keras_nlp/models/whisper/whisper_backbone_test.py index 2644f7a606..d162f2ef63 100644 --- a/keras_nlp/models/whisper/whisper_backbone_test.py +++ b/keras_nlp/models/whisper/whisper_backbone_test.py @@ -21,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.whisper.whisper_backbone import WhisperBackbone +from keras_nlp.tests.test_case import TestCase -class WhisperBackboneTest(tf.test.TestCase, parameterized.TestCase): +class WhisperBackboneTest(TestCase): def setUp(self): self.backbone = WhisperBackbone( vocabulary_size=10, @@ -126,7 +127,7 @@ def test_saved_model(self, save_format, filename): @pytest.mark.tpu @pytest.mark.usefixtures("tpu_test_class") -class WhisperBackboneTPUTest(tf.test.TestCase, parameterized.TestCase): +class WhisperBackboneTPUTest(TestCase): def setUp(self): with self.tpu_strategy.scope(): self.backbone = WhisperBackbone( diff --git a/keras_nlp/models/whisper/whisper_tokenizer_test.py b/keras_nlp/models/whisper/whisper_tokenizer_test.py index de14daff17..8fb6c11eb6 100644 --- a/keras_nlp/models/whisper/whisper_tokenizer_test.py +++ b/keras_nlp/models/whisper/whisper_tokenizer_test.py @@ -13,7 +13,6 @@ # limitations under the License. 
"""Tests for Whisper preprocessing layers.""" - import os import tensorflow as tf @@ -21,9 +20,10 @@ from tensorflow import keras from keras_nlp.models.whisper.whisper_tokenizer import WhisperTokenizer +from keras_nlp.tests.test_case import TestCase -class WhisperTokenizerTest(tf.test.TestCase, parameterized.TestCase): +class WhisperTokenizerTest(TestCase): def setUp(self): self.vocab = { "Ġair": 0, diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_backbone_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_backbone_test.py index 6112c37984..1b211dad6c 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_backbone_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_backbone_test.py @@ -13,7 +13,6 @@ # limitations under the License. """Tests for XLM-RoBERTa backbone models.""" - import os import pytest @@ -22,9 +21,10 @@ from tensorflow import keras from keras_nlp.models.xlm_roberta.xlm_roberta_backbone import XLMRobertaBackbone +from keras_nlp.tests.test_case import TestCase -class XLMRobertaBackboneTest(tf.test.TestCase, parameterized.TestCase): +class XLMRobertaBackboneTest(TestCase): def setUp(self): self.backbone = XLMRobertaBackbone( vocabulary_size=10, @@ -97,7 +97,7 @@ def test_saved_model(self, save_format, filename): @pytest.mark.tpu @pytest.mark.usefixtures("tpu_test_class") -class XLMRobertaBackboneTPUTest(tf.test.TestCase, parameterized.TestCase): +class XLMRobertaBackboneTPUTest(TestCase): def setUp(self): with self.tpu_strategy.scope(): self.backbone = XLMRobertaBackbone( diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py index 326d19dfbb..c228916730 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py @@ -32,9 +32,10 @@ from keras_nlp.models.xlm_roberta.xlm_roberta_tokenizer import ( XLMRobertaTokenizer, ) +from keras_nlp.tests.test_case import TestCase -class XLMRobertaClassifierTest(tf.test.TestCase, parameterized.TestCase): +class XLMRobertaClassifierTest(TestCase): def setUp(self): bytes_io = io.BytesIO() vocab_data = tf.data.Dataset.from_tensor_slices( diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor_test.py index 2b573204db..74e6445f30 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor_test.py @@ -13,7 +13,6 @@ # limitations under the License. 
"""Tests for XLM-RoBERTa masked language model preprocessor layer.""" - import io import os @@ -29,11 +28,10 @@ from keras_nlp.models.xlm_roberta.xlm_roberta_tokenizer import ( XLMRobertaTokenizer, ) +from keras_nlp.tests.test_case import TestCase -class XLMRobertaMaskedLMPreprocessorTest( - tf.test.TestCase, parameterized.TestCase -): +class XLMRobertaMaskedLMPreprocessorTest(TestCase): def setUp(self): bytes_io = io.BytesIO() vocab_data = tf.data.Dataset.from_tensor_slices( diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py index 168a219da3..bc4e48b9ac 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py @@ -32,9 +32,10 @@ from keras_nlp.models.xlm_roberta.xlm_roberta_tokenizer import ( XLMRobertaTokenizer, ) +from keras_nlp.tests.test_case import TestCase -class XLMRobertaMaskedLMTest(tf.test.TestCase, parameterized.TestCase): +class XLMRobertaMaskedLMTest(TestCase): def setUp(self): bytes_io = io.BytesIO() vocab_data = tf.data.Dataset.from_tensor_slices( diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py index a06490c298..1034505eb2 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py @@ -13,7 +13,6 @@ # limitations under the License. """Tests for XLM-RoBERTa preprocessor layer.""" - import io import os @@ -29,9 +28,10 @@ from keras_nlp.models.xlm_roberta.xlm_roberta_tokenizer import ( XLMRobertaTokenizer, ) +from keras_nlp.tests.test_case import TestCase -class XLMRobertaPreprocessorTest(tf.test.TestCase, parameterized.TestCase): +class XLMRobertaPreprocessorTest(TestCase): def setUp(self): bytes_io = io.BytesIO() vocab_data = tf.data.Dataset.from_tensor_slices( diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py index 2bb6d5cef3..e1d74fbaea 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py @@ -27,10 +27,11 @@ from keras_nlp.models.xlm_roberta.xlm_roberta_tokenizer import ( XLMRobertaTokenizer, ) +from keras_nlp.tests.test_case import TestCase @pytest.mark.large -class XLMRobertaPresetSmokeTest(tf.test.TestCase, parameterized.TestCase): +class XLMRobertaPresetSmokeTest(TestCase): """ A smoke test for XLM-RoBERTa presets we run continuously. @@ -124,7 +125,7 @@ def test_unknown_preset_error(self, cls, kwargs): @pytest.mark.extra_large -class XLMRobertaPresetFullTest(tf.test.TestCase, parameterized.TestCase): +class XLMRobertaPresetFullTest(TestCase): """ Test the full enumeration of our preset. diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py index 25ecdb6fc4..0e08978c76 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py @@ -13,7 +13,6 @@ # limitations under the License. 
"""Tests for XLM-RoBERTa tokenizer.""" - import io import os @@ -26,9 +25,10 @@ from keras_nlp.models.xlm_roberta.xlm_roberta_tokenizer import ( XLMRobertaTokenizer, ) +from keras_nlp.tests.test_case import TestCase -class XLMRobertaTokenizerTest(tf.test.TestCase, parameterized.TestCase): +class XLMRobertaTokenizerTest(TestCase): def setUp(self): bytes_io = io.BytesIO() vocab_data = tf.data.Dataset.from_tensor_slices( diff --git a/keras_nlp/samplers/beam_sampler_test.py b/keras_nlp/samplers/beam_sampler_test.py index 93a78c5254..d7cd3d3795 100644 --- a/keras_nlp/samplers/beam_sampler_test.py +++ b/keras_nlp/samplers/beam_sampler_test.py @@ -17,9 +17,10 @@ from absl.testing import parameterized from keras_nlp.samplers.beam_sampler import BeamSampler +from keras_nlp.tests.test_case import TestCase -class BeamSamplerTest(tf.test.TestCase, parameterized.TestCase): +class BeamSamplerTest(TestCase): def setUp(self): super().setUp() # Use a simple alphabet of lowercase characters to [0, 26). diff --git a/keras_nlp/samplers/contrastive_sampler_test.py b/keras_nlp/samplers/contrastive_sampler_test.py index 89374d4358..6d4bbcf175 100644 --- a/keras_nlp/samplers/contrastive_sampler_test.py +++ b/keras_nlp/samplers/contrastive_sampler_test.py @@ -17,9 +17,10 @@ from absl.testing import parameterized from keras_nlp.samplers.contrastive_sampler import ContrastiveSampler +from keras_nlp.tests.test_case import TestCase -class ContrastiveSamplerTest(tf.test.TestCase, parameterized.TestCase): +class ContrastiveSamplerTest(TestCase): def setUp(self): super().setUp() # Use a simple alphabet of lowercase characters to [0, 26). diff --git a/keras_nlp/samplers/greedy_sampler_test.py b/keras_nlp/samplers/greedy_sampler_test.py index 33f67a68dc..9219b5000b 100644 --- a/keras_nlp/samplers/greedy_sampler_test.py +++ b/keras_nlp/samplers/greedy_sampler_test.py @@ -17,9 +17,10 @@ from absl.testing import parameterized from keras_nlp.samplers.greedy_sampler import GreedySampler +from keras_nlp.tests.test_case import TestCase -class GreedySamplerTest(tf.test.TestCase, parameterized.TestCase): +class GreedySamplerTest(TestCase): def setUp(self): super().setUp() # Use a simple alphabet of lowercase characters to [0, 26). diff --git a/keras_nlp/samplers/random_sampler_test.py b/keras_nlp/samplers/random_sampler_test.py index 3142c6d6f2..6a271d6f42 100644 --- a/keras_nlp/samplers/random_sampler_test.py +++ b/keras_nlp/samplers/random_sampler_test.py @@ -18,9 +18,10 @@ from absl.testing import parameterized from keras_nlp.samplers.random_sampler import RandomSampler +from keras_nlp.tests.test_case import TestCase -class RandomSamplerTest(tf.test.TestCase, parameterized.TestCase): +class RandomSamplerTest(TestCase): def setUp(self): super().setUp() # Use a simple alphabet of lowercase characters to [0, 25]. diff --git a/keras_nlp/samplers/serialization_test.py b/keras_nlp/samplers/serialization_test.py index b9fcff7e91..a0c5491f53 100644 --- a/keras_nlp/samplers/serialization_test.py +++ b/keras_nlp/samplers/serialization_test.py @@ -13,15 +13,14 @@ # limitations under the License. 
"""Tests for Sampler classes.""" -import tensorflow as tf - from keras_nlp.samplers.serialization import deserialize from keras_nlp.samplers.serialization import get from keras_nlp.samplers.serialization import serialize from keras_nlp.samplers.top_k_sampler import TopKSampler +from keras_nlp.tests.test_case import TestCase -class SerializationTest(tf.test.TestCase): +class SerializationTest(TestCase): def test_serialization(self): sampler = TopKSampler(k=5) restored = deserialize(serialize(sampler)) diff --git a/keras_nlp/samplers/top_k_sampler_test.py b/keras_nlp/samplers/top_k_sampler_test.py index 46f46709ae..26fb1c11d4 100644 --- a/keras_nlp/samplers/top_k_sampler_test.py +++ b/keras_nlp/samplers/top_k_sampler_test.py @@ -17,9 +17,10 @@ from absl.testing import parameterized from keras_nlp.samplers.top_k_sampler import TopKSampler +from keras_nlp.tests.test_case import TestCase -class TopKSamplerTest(tf.test.TestCase, parameterized.TestCase): +class TopKSamplerTest(TestCase): def setUp(self): super().setUp() # Use a simple alphabet of lowercase characters to [0, 26). diff --git a/keras_nlp/samplers/top_p_sampler_test.py b/keras_nlp/samplers/top_p_sampler_test.py index 3c98337c76..1243cd8393 100644 --- a/keras_nlp/samplers/top_p_sampler_test.py +++ b/keras_nlp/samplers/top_p_sampler_test.py @@ -18,9 +18,10 @@ from absl.testing import parameterized from keras_nlp.samplers.top_p_sampler import TopPSampler +from keras_nlp.tests.test_case import TestCase -class TopPSamplerTest(tf.test.TestCase, parameterized.TestCase): +class TopPSamplerTest(TestCase): def setUp(self): super().setUp() # Use a simple alphabet of lowercase characters to [0, 26). diff --git a/keras_nlp/tests/test_case.py b/keras_nlp/tests/test_case.py new file mode 100644 index 0000000000..f50e59602c --- /dev/null +++ b/keras_nlp/tests/test_case.py @@ -0,0 +1,26 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import tensorflow as tf +from absl.testing import parameterized + + +class TestCase(tf.test.TestCase, parameterized.TestCase): + """Base test case class for KerasNLP. + + For now we just extend tf.TestCase and parameterized.TestCase, but this + indirection will allow us to add more functionality in the future if we + want. 
+ """ + + pass diff --git a/keras_nlp/tokenizers/byte_pair_tokenizer_test.py b/keras_nlp/tokenizers/byte_pair_tokenizer_test.py index 581a7827c8..2e48bd23a8 100644 --- a/keras_nlp/tokenizers/byte_pair_tokenizer_test.py +++ b/keras_nlp/tokenizers/byte_pair_tokenizer_test.py @@ -19,6 +19,7 @@ from absl.testing import parameterized from tensorflow import keras +from keras_nlp.tests.test_case import TestCase from keras_nlp.tokenizers.byte_pair_tokenizer import BytePairTokenizer VOCAB_PATH = keras.utils.get_file( @@ -32,7 +33,7 @@ @pytest.mark.large -class BytePairTokenizerTest(tf.test.TestCase, parameterized.TestCase): +class BytePairTokenizerTest(TestCase): def setUp(self): super().setUp() self.tokenizer = BytePairTokenizer( diff --git a/keras_nlp/tokenizers/byte_tokenizer_test.py b/keras_nlp/tokenizers/byte_tokenizer_test.py index 500de32666..af0f6fcd61 100644 --- a/keras_nlp/tokenizers/byte_tokenizer_test.py +++ b/keras_nlp/tokenizers/byte_tokenizer_test.py @@ -18,10 +18,11 @@ from absl.testing import parameterized from tensorflow import keras +from keras_nlp.tests.test_case import TestCase from keras_nlp.tokenizers.byte_tokenizer import ByteTokenizer -class ByteTokenizerTest(tf.test.TestCase, parameterized.TestCase): +class ByteTokenizerTest(TestCase): def test_tokenize(self): input_data = tf.constant(["hello", "fun", "▀▁▂▃"]) tokenizer = ByteTokenizer() diff --git a/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py b/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py index 4b7b592706..5558a45103 100644 --- a/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py +++ b/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py @@ -20,10 +20,11 @@ from absl.testing import parameterized from tensorflow import keras +from keras_nlp.tests.test_case import TestCase from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer -class SentencePieceTokenizerTest(tf.test.TestCase, parameterized.TestCase): +class SentencePieceTokenizerTest(TestCase): def setUp(self): super().setUp() bytes_io = io.BytesIO() diff --git a/keras_nlp/tokenizers/sentence_piece_tokenizer_trainer_test.py b/keras_nlp/tokenizers/sentence_piece_tokenizer_trainer_test.py index d57d27384c..872bff2d9c 100644 --- a/keras_nlp/tokenizers/sentence_piece_tokenizer_trainer_test.py +++ b/keras_nlp/tokenizers/sentence_piece_tokenizer_trainer_test.py @@ -18,13 +18,14 @@ import tensorflow as tf +from keras_nlp.tests.test_case import TestCase from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer from keras_nlp.tokenizers.sentence_piece_tokenizer_trainer import ( compute_sentence_piece_proto, ) -class SentencePieceTokenizerTrainerTest(tf.test.TestCase): +class SentencePieceTokenizerTrainerTest(TestCase): def test_dataset_input(self): test_text = ["Ninjas and Samurais"] expected_output = [ diff --git a/keras_nlp/tokenizers/tokenizer_test.py b/keras_nlp/tokenizers/tokenizer_test.py index 434a0bfa00..711c4fd98d 100644 --- a/keras_nlp/tokenizers/tokenizer_test.py +++ b/keras_nlp/tokenizers/tokenizer_test.py @@ -15,6 +15,7 @@ import tensorflow as tf from tensorflow import keras +from keras_nlp.tests.test_case import TestCase from keras_nlp.tokenizers.tokenizer import Tokenizer @@ -28,7 +29,7 @@ def detokenize(self, inputs): return tf.strings.reduce_join([inputs], separator=" ", axis=-1) -class TokenizerTest(tf.test.TestCase): +class TokenizerTest(TestCase): def test_tokenize(self): input_data = ["the quick brown fox"] tokenizer = SimpleTokenizer() diff --git 
diff --git a/keras_nlp/tokenizers/unicode_codepoint_tokenizer_test.py b/keras_nlp/tokenizers/unicode_codepoint_tokenizer_test.py
index c4e6317cee..3d5b1a9243 100644
--- a/keras_nlp/tokenizers/unicode_codepoint_tokenizer_test.py
+++ b/keras_nlp/tokenizers/unicode_codepoint_tokenizer_test.py
@@ -18,12 +18,13 @@
 from absl.testing import parameterized
 from tensorflow import keras
 
+from keras_nlp.tests.test_case import TestCase
 from keras_nlp.tokenizers.unicode_codepoint_tokenizer import (
     UnicodeCodepointTokenizer,
 )
 
 
-class UnicodeCodepointTokenizerTest(tf.test.TestCase, parameterized.TestCase):
+class UnicodeCodepointTokenizerTest(TestCase):
     def test_tokenize(self):
         input_data = tf.constant(["ninja", "samurai", "▀▁▂▃"])
         tokenizer = UnicodeCodepointTokenizer()
diff --git a/keras_nlp/tokenizers/word_piece_tokenizer_test.py b/keras_nlp/tokenizers/word_piece_tokenizer_test.py
index 10b9e9affa..d51beccc00 100644
--- a/keras_nlp/tokenizers/word_piece_tokenizer_test.py
+++ b/keras_nlp/tokenizers/word_piece_tokenizer_test.py
@@ -18,10 +18,11 @@
 from absl.testing import parameterized
 from tensorflow import keras
 
+from keras_nlp.tests.test_case import TestCase
 from keras_nlp.tokenizers.word_piece_tokenizer import WordPieceTokenizer
 
 
-class WordPieceTokenizerTest(tf.test.TestCase, parameterized.TestCase):
+class WordPieceTokenizerTest(TestCase):
     def test_tokenize(self):
         input_data = ["the quick brown fox."]
         vocab_data = ["[UNK]", "the", "qu", "##ick", "br", "##own", "fox", "."]
diff --git a/keras_nlp/tokenizers/word_piece_tokenizer_trainer_test.py b/keras_nlp/tokenizers/word_piece_tokenizer_trainer_test.py
index da04de7cf7..bcdf6c9a2b 100644
--- a/keras_nlp/tokenizers/word_piece_tokenizer_trainer_test.py
+++ b/keras_nlp/tokenizers/word_piece_tokenizer_trainer_test.py
@@ -17,12 +17,13 @@
 import tensorflow as tf
 
+from keras_nlp.tests.test_case import TestCase
 from keras_nlp.tokenizers.word_piece_tokenizer_trainer import (
     compute_word_piece_vocabulary,
 )
 
 
-class WordPieceTokenizerTrainerTest(tf.test.TestCase):
+class WordPieceTokenizerTrainerTest(TestCase):
     def test_dataset_input(self):
         test_text = ["baa maa caa saa aaa"]
         test_output = ["a", "b", "c", "m", "s", "##aa", "##a", "##b"]
diff --git a/keras_nlp/utils/keras_utils_test.py b/keras_nlp/utils/keras_utils_test.py
index 7b5c8884cb..e2ad7ed979 100644
--- a/keras_nlp/utils/keras_utils_test.py
+++ b/keras_nlp/utils/keras_utils_test.py
@@ -15,11 +15,12 @@
 import tensorflow as tf
 from tensorflow import keras
 
+from keras_nlp.tests.test_case import TestCase
 from keras_nlp.utils.keras_utils import clone_initializer
 from keras_nlp.utils.keras_utils import pack_x_y_sample_weight
 
 
-class CloneInitializerTest(tf.test.TestCase):
+class CloneInitializerTest(TestCase):
    def test_config_equality(self):
        initializer = keras.initializers.VarianceScaling(
            scale=2.0,
@@ -42,7 +43,7 @@ def test_strings(self):
         self.assertAllEqual(initializer, clone)
 
 
-class PackTest(tf.test.TestCase):
+class PackTest(TestCase):
     def test_pack_dict(self):
         tensor_dict = {"foo": tf.constant([1, 2])}
         data = pack_x_y_sample_weight(tensor_dict)
diff --git a/keras_nlp/utils/pipeline_model_test.py b/keras_nlp/utils/pipeline_model_test.py
index 3a433a38ad..c4b0edb31d 100644
--- a/keras_nlp/utils/pipeline_model_test.py
+++ b/keras_nlp/utils/pipeline_model_test.py
@@ -17,6 +17,7 @@
 from absl.testing import parameterized
 from tensorflow import keras
 
+from keras_nlp.tests.test_case import TestCase
 from keras_nlp.utils.pipeline_model import PipelineModel
 
 
@@ -84,7 +85,7 @@ def from_config(cls, config):
         return cls(**config)
 
 
-class TestNoopPipelineModel(tf.test.TestCase, parameterized.TestCase):
+class TestNoopPipelineModel(TestCase):
     def test_fit(self):
         x = tf.random.uniform((8, 5))
         y = tf.random.uniform((8, 1))
@@ -155,7 +156,7 @@ def test_saved_model(self, save_format, filename):
         self.assertAllClose(model_output, restored_output)
 
 
-class TestFeaturePreprocessingModel(tf.test.TestCase, parameterized.TestCase):
+class TestFeaturePreprocessingModel(TestCase):
     def test_fit_with_preprocessing(self):
         x = tf.strings.as_string(tf.random.uniform((100, 5)))
         y = tf.random.uniform((100, 1))
@@ -273,7 +274,7 @@ def test_saved_model(self, save_format, filename):
         self.assertAllClose(model_output, restored_output)
 
 
-class TestLabelPreprocessingModel(tf.test.TestCase, parameterized.TestCase):
+class TestLabelPreprocessingModel(TestCase):
     def test_fit_with_preprocessing(self):
         x = tf.random.uniform((100, 5))
         y = tf.strings.as_string(tf.random.uniform((100, 1)))
@@ -384,7 +385,7 @@ def test_saved_model(self, save_format, filename):
         self.assertAllClose(model_output, restored_output)
 
 
-class TestDataPreprocessingModel(tf.test.TestCase, parameterized.TestCase):
+class TestDataPreprocessingModel(TestCase):
     def test_fit_with_preprocessing(self):
         data = tf.strings.as_string(tf.random.uniform((100, 1)))
         model = DataPipeline()
@@ -478,7 +479,7 @@ def test_saved_model(self, save_format, filename):
         self.assertAllClose(model_output, restored_output)
 
 
-class TestFunctional(tf.test.TestCase, parameterized.TestCase):
+class TestFunctional(TestCase):
     def test_fit(self):
         x = tf.strings.as_string(tf.random.uniform((100, 5)))
         y = tf.random.uniform((100, 1))
@@ -529,7 +530,7 @@ def test_saved_model(self, save_format, filename):
         self.assertAllClose(model_output, restored_output)
 
 
-class TestFitArguments(tf.test.TestCase):
+class TestFitArguments(TestCase):
     def test_validation_data(self):
         x = tf.strings.as_string(tf.random.uniform((80, 5)))
         y = tf.random.uniform((80, 1))
@@ -574,7 +575,7 @@ def test_error_dataset_and_invalid_arguments(self):
             model.fit(ds, sample_weight=sw)
 
 
-class TestInputErrors(tf.test.TestCase):
+class TestInputErrors(TestCase):
     def test_unbatched_input_raises(self):
         model = FeaturePipeline()
         with self.assertRaisesRegex(ValueError, "must have a batch dimension"):
diff --git a/keras_nlp/utils/python_utils_test.py b/keras_nlp/utils/python_utils_test.py
index efe9a17906..60590dd47b 100644
--- a/keras_nlp/utils/python_utils_test.py
+++ b/keras_nlp/utils/python_utils_test.py
@@ -12,13 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import tensorflow as tf
-
+from keras_nlp.tests.test_case import TestCase
 from keras_nlp.utils.python_utils import classproperty
 from keras_nlp.utils.python_utils import format_docstring
 
 
-class ClassPropertyTest(tf.test.TestCase):
+class ClassPropertyTest(TestCase):
     def test_class_property(self):
         class Foo:
             @classproperty
@@ -28,7 +27,7 @@ def bar(cls):
         self.assertAllEqual(Foo.bar, "class property")
 
 
-class FormatDocstringTest(tf.test.TestCase):
+class FormatDocstringTest(TestCase):
     def test_function(self):
         @format_docstring(adjective="salubrious")
         def foo():
diff --git a/keras_nlp/utils/tensor_utils_test.py b/keras_nlp/utils/tensor_utils_test.py
index 14620b0fb4..e74892c41c 100644
--- a/keras_nlp/utils/tensor_utils_test.py
+++ b/keras_nlp/utils/tensor_utils_test.py
@@ -14,11 +14,12 @@
 import tensorflow as tf
 
+from keras_nlp.tests.test_case import TestCase
 from keras_nlp.utils.tensor_utils import tensor_to_list
 from keras_nlp.utils.tensor_utils import tensor_to_string_list
 
 
-class TensorToListTest(tf.test.TestCase):
+class TensorToListTest(TestCase):
     def test_ragged_input(self):
         input_data = tf.ragged.constant([[1, 2], [4, 5, 6]])
         list_output = tensor_to_list(input_data)
@@ -35,7 +36,7 @@ def test_scalar_input(self):
         self.assertEqual(list_output, 1)
 
 
-class TensorToStringListTest(tf.test.TestCase):
+class TensorToStringListTest(TestCase):
     def test_detokenize_to_strings_for_ragged(self):
         input_data = tf.ragged.constant([["▀▁▂▃", "samurai"]])
         detokenize_output = tensor_to_string_list(input_data)
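The body of the new `TestCase` is just `pass` today; as its docstring notes, the value of the indirection is that package-wide test helpers can later be added in exactly one place. A hypothetical sketch of such a future helper, not part of this diff; the `assert_output_shape` name and its behavior are invented for illustration:

import tensorflow as tf
from absl.testing import parameterized


class TestCase(tf.test.TestCase, parameterized.TestCase):
    """Base test case class for KerasNLP."""

    def assert_output_shape(self, layer, input_shape, expected_shape):
        # Hypothetical shared helper: run `layer` on random input once and
        # check the output shape, rather than repeating this boilerplate in
        # every individual test file.
        x = tf.random.uniform(input_shape)
        y = layer(x)
        self.assertEqual(tuple(y.shape), tuple(expected_shape))

Because every test class in this patch now routes through the shared base, a helper defined this way would become available to all of them without touching the individual files again.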