From 85a8956c7ab4f6e456b669098675735270acc89e Mon Sep 17 00:00:00 2001
From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com>
Date: Tue, 30 Jan 2024 13:03:02 -0600
Subject: [PATCH] Fix format and Update Vectorized Base (#2323)

---
 benchmarks/vectorized_randomly_zoomed_crop.py |  8 ++++----
 .../base_image_augmentation_layer.py          | 12 +++++------
 .../preprocessing/random_crop_and_resize.py   |  8 ++++----
 ...ectorized_base_image_augmentation_layer.py | 20 +++++++++++--------
 ...ized_base_image_augmentation_layer_test.py | 12 +++++++++++
 .../layers/regularization/squeeze_excite.py   |  8 ++++----
 keras_cv/layers/vit_det_layers.py             |  6 +++---
 .../object_detection/box_coco_metrics.py      |  6 +++---
 .../backbones/densenet/densenet_backbone.py   |  6 +++---
 .../backbones/resnet_v1/resnet_v1_backbone.py |  6 +++---
 .../backbones/resnet_v2/resnet_v2_backbone.py |  6 +++---
 .../backbones/vit_det/vit_det_backbone.py     |  6 +++---
 keras_cv/models/legacy/darknet.py             |  1 -
 keras_cv/models/legacy/mlp_mixer.py           |  1 -
 .../yolo_v8/yolo_v8_backbone.py               |  6 +++---
 .../yolo_v8/yolo_v8_detector.py               |  6 +++---
 .../stable_diffusion/noise_scheduler.py       |  4 +---
 17 files changed, 67 insertions(+), 55 deletions(-)

diff --git a/benchmarks/vectorized_randomly_zoomed_crop.py b/benchmarks/vectorized_randomly_zoomed_crop.py
index 4e807fd1ab..3a207ed2e3 100644
--- a/benchmarks/vectorized_randomly_zoomed_crop.py
+++ b/benchmarks/vectorized_randomly_zoomed_crop.py
@@ -249,10 +249,10 @@ def from_config(cls, config):
                 config["zoom_factor"]
             )
         if isinstance(config["aspect_ratio_factor"], dict):
-            config[
-                "aspect_ratio_factor"
-            ] = keras.utils.deserialize_keras_object(
-                config["aspect_ratio_factor"]
+            config["aspect_ratio_factor"] = (
+                keras.utils.deserialize_keras_object(
+                    config["aspect_ratio_factor"]
+                )
             )
         return cls(**config)
 
diff --git a/keras_cv/layers/preprocessing/base_image_augmentation_layer.py b/keras_cv/layers/preprocessing/base_image_augmentation_layer.py
index ef2e9cefe7..167da7ad0b 100644
--- a/keras_cv/layers/preprocessing/base_image_augmentation_layer.py
+++ b/keras_cv/layers/preprocessing/base_image_augmentation_layer.py
@@ -236,15 +236,15 @@ def _compute_output_signature(self, inputs):
 
         bounding_boxes = inputs.get(BOUNDING_BOXES, None)
         if bounding_boxes is not None:
-            fn_output_signature[
-                BOUNDING_BOXES
-            ] = self._compute_bounding_box_signature(bounding_boxes)
+            fn_output_signature[BOUNDING_BOXES] = (
+                self._compute_bounding_box_signature(bounding_boxes)
+            )
 
         segmentation_masks = inputs.get(SEGMENTATION_MASKS, None)
         if segmentation_masks is not None:
-            fn_output_signature[
-                SEGMENTATION_MASKS
-            ] = self.compute_image_signature(segmentation_masks)
+            fn_output_signature[SEGMENTATION_MASKS] = (
+                self.compute_image_signature(segmentation_masks)
+            )
 
         keypoints = inputs.get(KEYPOINTS, None)
         if keypoints is not None:
diff --git a/keras_cv/layers/preprocessing/random_crop_and_resize.py b/keras_cv/layers/preprocessing/random_crop_and_resize.py
index 593515ad09..cd947d5835 100644
--- a/keras_cv/layers/preprocessing/random_crop_and_resize.py
+++ b/keras_cv/layers/preprocessing/random_crop_and_resize.py
@@ -272,10 +272,10 @@ def from_config(cls, config):
                 config["crop_area_factor"]
             )
         if isinstance(config["aspect_ratio_factor"], dict):
-            config[
-                "aspect_ratio_factor"
-            ] = keras.utils.deserialize_keras_object(
-                config["aspect_ratio_factor"]
+            config["aspect_ratio_factor"] = (
+                keras.utils.deserialize_keras_object(
+                    config["aspect_ratio_factor"]
+                )
             )
         return cls(**config)
 
diff --git a/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer.py b/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer.py
index 3d9fc8e52a..fd36e22065 100644
--- a/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer.py
+++ b/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer.py
@@ -17,6 +17,7 @@
 
 from keras_cv import bounding_box
 from keras_cv.api_export import keras_cv_export
+from keras_cv.backend import config
 from keras_cv.backend import keras
 from keras_cv.backend import ops
 from keras_cv.backend import scope
@@ -412,6 +413,8 @@ def _batch_augment(self, inputs):
     def call(self, inputs):
         # try to convert a given backend native tensor to TensorFlow tensor
         # before passing it over to TFDataScope
+        is_tf_backend = config.backend() == "tensorflow"
+        is_in_tf_graph = not tf.executing_eagerly()
         contains_ragged = lambda y: any(
             tree.map_structure(
                 lambda x: isinstance(x, (tf.RaggedTensor, tf.SparseTensor)),
@@ -419,7 +422,7 @@ def call(self, inputs):
             )
         )
         inputs_contain_ragged = contains_ragged(inputs)
-        if not inputs_contain_ragged:
+        if not is_tf_backend and not inputs_contain_ragged:
             inputs = tree.map_structure(
                 lambda x: tf.convert_to_tensor(x), inputs
             )
@@ -443,13 +446,14 @@ def call(self, inputs):
         # backend native tensors. This is to avoid breaking TF data
         # pipelines that can't easily be ported to become backend
         # agnostic.
-        if not inputs_contain_ragged and not contains_ragged(outputs):
-            outputs = tree.map_structure(
-                # some layers return None, handle that case when
-                # converting to tensors
-                lambda x: ops.convert_to_tensor(x) if x is not None else x,
-                outputs,
-            )
+        if not is_tf_backend and not is_in_tf_graph:
+            if not inputs_contain_ragged and not contains_ragged(outputs):
+                outputs = tree.map_structure(
+                    # some layers return None, handle that case when
+                    # converting to tensors
+                    lambda x: ops.convert_to_tensor(x) if x is not None else x,
+                    outputs,
+                )
         return outputs
 
     def _format_inputs(self, inputs):
diff --git a/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer_test.py b/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer_test.py
index 3ebdfdb820..c2d0daa840 100644
--- a/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer_test.py
+++ b/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer_test.py
@@ -549,3 +549,15 @@ def test_converts_ragged_to_dense_segmentation_masks(self):
             {"images": images, "segmentation_masks": segmentation_masks}
         )
         self.assertTrue(isinstance(result["segmentation_masks"], tf.Tensor))
+
+    def test_in_tf_data_pipeline(self):
+        images = np.random.randn(4, 100, 100, 3).astype("float32")
+        train_ds = tf.data.Dataset.from_tensor_slices(images)
+        train_ds = train_ds.map(lambda x: {"images": x})
+        train_ds = train_ds.map(
+            VectorizedRandomAddLayer(fixed_value=2.0)
+        ).batch(4)
+        for output in train_ds.take(1):
+            pass
+        self.assertTrue(isinstance(output["images"], tf.Tensor))
+        self.assertAllClose(output["images"], images + 2.0)
diff --git a/keras_cv/layers/regularization/squeeze_excite.py b/keras_cv/layers/regularization/squeeze_excite.py
index cb03cc6942..8cbcc5bd94 100644
--- a/keras_cv/layers/regularization/squeeze_excite.py
+++ b/keras_cv/layers/regularization/squeeze_excite.py
@@ -118,10 +118,10 @@ def get_config(self):
     @classmethod
     def from_config(cls, config):
         if isinstance(config["squeeze_activation"], dict):
-            config[
-                "squeeze_activation"
-            ] = keras.saving.deserialize_keras_object(
-                config["squeeze_activation"]
+            config["squeeze_activation"] = (
+                keras.saving.deserialize_keras_object(
+                    config["squeeze_activation"]
+                )
             )
         if isinstance(config["excite_activation"], dict):
             config["excite_activation"] = keras.saving.deserialize_keras_object(
diff --git a/keras_cv/layers/vit_det_layers.py b/keras_cv/layers/vit_det_layers.py
index 9311a957f5..2e053db4cb 100644
--- a/keras_cv/layers/vit_det_layers.py
+++ b/keras_cv/layers/vit_det_layers.py
@@ -430,9 +430,9 @@ def __init__(
             key_dim=self.project_dim // self.num_heads,
             use_bias=use_bias,
             use_rel_pos=use_rel_pos,
-            input_size=input_size
-            if window_size == 0
-            else (window_size, window_size),
+            input_size=(
+                input_size if window_size == 0 else (window_size, window_size)
+            ),
         )
         self.mlp_block = MLP(
             mlp_dim,
diff --git a/keras_cv/metrics/object_detection/box_coco_metrics.py b/keras_cv/metrics/object_detection/box_coco_metrics.py
index a59af8c767..47d86ba1c2 100644
--- a/keras_cv/metrics/object_detection/box_coco_metrics.py
+++ b/keras_cv/metrics/object_detection/box_coco_metrics.py
@@ -212,9 +212,9 @@ def result_fn(self, force=False):
             )
             result = {}
             for i, key in enumerate(METRIC_NAMES):
-                result[
-                    self.name_prefix() + METRIC_MAPPING[key]
-                ] = py_func_result[i]
+                result[self.name_prefix() + METRIC_MAPPING[key]] = (
+                    py_func_result[i]
+                )
             return result
 
         obj.result = types.MethodType(result_fn, obj)
diff --git a/keras_cv/models/backbones/densenet/densenet_backbone.py b/keras_cv/models/backbones/densenet/densenet_backbone.py
index 28109b64fa..251f3601ec 100644
--- a/keras_cv/models/backbones/densenet/densenet_backbone.py
+++ b/keras_cv/models/backbones/densenet/densenet_backbone.py
@@ -119,9 +119,9 @@ def __init__(
             name=f"conv{len(stackwise_num_repeats) + 1}",
         )
 
-        pyramid_level_inputs[
-            f"P{len(stackwise_num_repeats) + 1}"
-        ] = utils.get_tensor_input_name(x)
+        pyramid_level_inputs[f"P{len(stackwise_num_repeats) + 1}"] = (
+            utils.get_tensor_input_name(x)
+        )
         x = keras.layers.BatchNormalization(
             axis=BN_AXIS, epsilon=BN_EPSILON, name="bn"
         )(x)
diff --git a/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone.py b/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone.py
index 61046234d3..07c896613c 100644
--- a/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone.py
+++ b/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone.py
@@ -130,9 +130,9 @@ def __init__(
                 first_shortcut=(block_type == "block" or stack_index > 0),
                 name=f"v2_stack_{stack_index}",
             )
-            pyramid_level_inputs[
-                f"P{stack_index + 2}"
-            ] = utils.get_tensor_input_name(x)
+            pyramid_level_inputs[f"P{stack_index + 2}"] = (
+                utils.get_tensor_input_name(x)
+            )
 
         # Create model.
         super().__init__(inputs=inputs, outputs=x, **kwargs)
diff --git a/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone.py b/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone.py
index a31841f7fc..6a0cc74740 100644
--- a/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone.py
+++ b/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone.py
@@ -136,9 +136,9 @@ def __init__(
                 first_shortcut=(block_type == "block" or stack_index > 0),
                 name=f"v2_stack_{stack_index}",
            )
-            pyramid_level_inputs[
-                f"P{stack_index + 2}"
-            ] = utils.get_tensor_input_name(x)
+            pyramid_level_inputs[f"P{stack_index + 2}"] = (
+                utils.get_tensor_input_name(x)
+            )
 
         x = keras.layers.BatchNormalization(
             axis=BN_AXIS, epsilon=BN_EPSILON, name="post_bn"
diff --git a/keras_cv/models/backbones/vit_det/vit_det_backbone.py b/keras_cv/models/backbones/vit_det/vit_det_backbone.py
index c2c21ab98e..beb730f4df 100644
--- a/keras_cv/models/backbones/vit_det/vit_det_backbone.py
+++ b/keras_cv/models/backbones/vit_det/vit_det_backbone.py
@@ -144,9 +144,9 @@ def __init__(
                 num_heads=num_heads,
                 use_bias=use_bias,
                 use_rel_pos=use_rel_pos,
-                window_size=window_size
-                if i not in global_attention_indices
-                else 0,
+                window_size=(
+                    window_size if i not in global_attention_indices else 0
+                ),
                 input_size=(img_size // patch_size, img_size // patch_size),
             )(x)
         x = keras.models.Sequential(
diff --git a/keras_cv/models/legacy/darknet.py b/keras_cv/models/legacy/darknet.py
index ea7fd429f2..2dc14d499d 100644
--- a/keras_cv/models/legacy/darknet.py
+++ b/keras_cv/models/legacy/darknet.py
@@ -76,7 +76,6 @@
 
 @keras.utils.register_keras_serializable(package="keras_cv.models")
 class DarkNet(keras.Model):
-
     """Represents the DarkNet architecture.
 
     The DarkNet architecture is commonly used for detection tasks. It is
diff --git a/keras_cv/models/legacy/mlp_mixer.py b/keras_cv/models/legacy/mlp_mixer.py
index a48544f905..170d0a4c6f 100644
--- a/keras_cv/models/legacy/mlp_mixer.py
+++ b/keras_cv/models/legacy/mlp_mixer.py
@@ -143,7 +143,6 @@ def apply_mixer_block(x, tokens_mlp_dim, channels_mlp_dim, name=None):
 
 @keras.utils.register_keras_serializable(package="keras_cv.models")
 class MLPMixer(keras.Model):
-
     """Instantiates the MLP Mixer architecture.
 
     Args:
diff --git a/keras_cv/models/object_detection/yolo_v8/yolo_v8_backbone.py b/keras_cv/models/object_detection/yolo_v8/yolo_v8_backbone.py
index f4bd99fafa..a2bf4bdd3b 100644
--- a/keras_cv/models/object_detection/yolo_v8/yolo_v8_backbone.py
+++ b/keras_cv/models/object_detection/yolo_v8/yolo_v8_backbone.py
@@ -178,9 +178,9 @@ def __init__(
                 activation=activation,
                 name=f"{stack_name}_spp_fast",
             )
-            pyramid_level_inputs[
-                f"P{stack_id + 2}"
-            ] = utils.get_tensor_input_name(x)
+            pyramid_level_inputs[f"P{stack_id + 2}"] = (
+                utils.get_tensor_input_name(x)
+            )
 
         super().__init__(inputs=inputs, outputs=x, **kwargs)
         self.pyramid_level_inputs = pyramid_level_inputs
diff --git a/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector.py b/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector.py
index bfba44945c..6c17c71a72 100644
--- a/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector.py
+++ b/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector.py
@@ -663,9 +663,9 @@ def from_config(cls, config):
         if prediction_decoder is not None and isinstance(
             prediction_decoder, dict
         ):
-            config[
-                "prediction_decoder"
-            ] = keras.saving.deserialize_keras_object(prediction_decoder)
+            config["prediction_decoder"] = (
+                keras.saving.deserialize_keras_object(prediction_decoder)
+            )
         return cls(**config)
 
     @classproperty
diff --git a/keras_cv/models/stable_diffusion/noise_scheduler.py b/keras_cv/models/stable_diffusion/noise_scheduler.py
index a69bdf5b51..17bad6468f 100644
--- a/keras_cv/models/stable_diffusion/noise_scheduler.py
+++ b/keras_cv/models/stable_diffusion/noise_scheduler.py
@@ -54,9 +54,7 @@ def __init__(
         elif beta_schedule == "scaled_linear":
             # this schedule is very specific to the latent diffusion model.
             self.betas = (
-                ops.linspace(
-                    beta_start**0.5, beta_end**0.5, train_timesteps
-                )
+                ops.linspace(beta_start**0.5, beta_end**0.5, train_timesteps)
                 ** 2
             )
         else:
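
Note on the functional change: after this patch, `VectorizedBaseImageAugmentationLayer.call()` skips the backend-tensor round trip whenever KerasCV runs on the TensorFlow backend or inside a `tf.data` graph, so vectorized augmentation layers keep emitting `tf.Tensor`s in input pipelines regardless of the active Keras backend. Below is a minimal usage sketch of the pattern the new `test_in_tf_data_pipeline` exercises (`keras_cv.layers.RandomFlip` is an illustrative stand-in for any vectorized augmentation layer; it is not something this patch touches):

# Sketch only, not part of the patch. Any subclass of
# VectorizedBaseImageAugmentationLayer behaves the same way after this change.
import numpy as np
import tensorflow as tf

import keras_cv

images = np.random.uniform(size=(4, 100, 100, 3)).astype("float32")
augmenter = keras_cv.layers.RandomFlip()

ds = tf.data.Dataset.from_tensor_slices(images)
ds = ds.map(lambda x: {"images": x}).batch(4)
# The layer runs inside the tf.data graph; with the new is_tf_backend /
# is_in_tf_graph checks it no longer converts its outputs back to
# backend-native tensors, so the pipeline keeps producing tf.Tensors.
ds = ds.map(augmenter)

for batch in ds.take(1):
    assert isinstance(batch["images"], tf.Tensor)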