From 4c67dcf3755ebd33c472199089255701b6f699e1 Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Tue, 16 Apr 2024 01:06:21 +0800 Subject: [PATCH 001/101] Introduce float8 training (#19488) * Add float8 training support * Add tests for fp8 training * Add `quantize_and_dequantize` test * Fix bugs and add float8 correctness tests * Cleanup * Address comments and cleanup * Add docstrings and some minor refactoring * Add `QuantizedFloat8DTypePolicy` * Add dtype policy setter * Fix torch dynamo issue by using `self._dtype_policy` * Improve test coverage --- keras/backend/common/variables.py | 26 ++ keras/backend/common/variables_test.py | 12 + keras/dtype_policies/__init__.py | 9 +- keras/dtype_policies/dtype_policy.py | 100 ++++++- keras/dtype_policies/dtype_policy_test.py | 205 +++++++++---- keras/layers/core/dense.py | 288 +++++++++++++++--- keras/layers/core/dense_test.py | 267 +++++++++++++++-- keras/layers/core/einsum_dense.py | 340 +++++++++++++++++----- keras/layers/core/einsum_dense_test.py | 280 ++++++++++++++++-- keras/layers/core/embedding.py | 122 +++++--- keras/layers/core/embedding_test.py | 14 +- keras/layers/layer.py | 17 +- keras/layers/layer_test.py | 14 + keras/models/model.py | 8 +- keras/models/model_test.py | 56 ++-- keras/ops/operation.py | 6 +- keras/optimizers/base_optimizer.py | 38 +++ keras/optimizers/optimizer_test.py | 18 +- keras/quantizers/__init__.py | 3 + keras/quantizers/quantizers.py | 46 +++ keras/quantizers/quantizers_test.py | 40 +++ 21 files changed, 1605 insertions(+), 304 deletions(-) diff --git a/keras/backend/common/variables.py b/keras/backend/common/variables.py index 5321fa8a75d7..3d9c80989687 100644 --- a/keras/backend/common/variables.py +++ b/keras/backend/common/variables.py @@ -115,6 +115,10 @@ def __init__( self._trainable = trainable self._autocast = autocast self._aggregation = aggregation + # `self._overwrite_with_gradient` is an internal property to determine + # whether this variable should be overwritten by the computed gradient. + # Ref: https://github.com/google/flax/blob/main/flax/linen/fp8_ops.py + self._overwrite_with_gradient = False if isinstance(initializer, str): from keras import initializers @@ -266,6 +270,28 @@ def trainable(self): def trainable(self, value): self._trainable = value + @property + def overwrite_with_gradient(self): + """Whether this variable should be overwritten by the gradient. + + This property is designed for a special case where we want to overwrite + the variable directly with its computed gradient. For example, in float8 + training, new `scale` and `amax_history` are computed as gradients, and + we want to overwrite them directly instead of following the typical + procedure such as gradient descent with a learning rate, gradient + clipping and weight decaying. + """ + return self._overwrite_with_gradient + + @overwrite_with_gradient.setter + def overwrite_with_gradient(self, value): + if not isinstance(value, bool): + raise TypeError( + "`overwrite_with_gradient` must be a boolean. 
" + f"Received: {value}" + ) + self._overwrite_with_gradient = value + @property def regularizer(self): return self._regularizer diff --git a/keras/backend/common/variables_test.py b/keras/backend/common/variables_test.py index 6bd21d37fbc2..1062aa4ac76c 100644 --- a/keras/backend/common/variables_test.py +++ b/keras/backend/common/variables_test.py @@ -239,6 +239,18 @@ def test_variable_path_creation(self): v = backend.Variable(initializer=np.ones((2, 2)), name="test_var") self.assertEqual(v.path, "test_var") + def test_overwrite_with_gradient_setter(self): + v = backend.Variable( + initializer=initializers.RandomNormal(), + shape=(2, 2), + ) + self.assertFalse(v.overwrite_with_gradient) + v.overwrite_with_gradient = True + self.assertTrue(v.overwrite_with_gradient) + + with self.assertRaisesRegex(TypeError, "must be a boolean."): + v.overwrite_with_gradient = "true" + class VariableNumpyValueAndAssignmentTest(test_case.TestCase): """tests for KerasVariable.numpy(), KerasVariable.value() diff --git a/keras/dtype_policies/__init__.py b/keras/dtype_policies/__init__.py index f6b041b43455..39ccb8497ed5 100644 --- a/keras/dtype_policies/__init__.py +++ b/keras/dtype_policies/__init__.py @@ -1,10 +1,15 @@ from keras import backend from keras.dtype_policies import dtype_policy +from keras.dtype_policies.dtype_policy import QUANTIZATION_MODES from keras.dtype_policies.dtype_policy import FloatDTypePolicy from keras.dtype_policies.dtype_policy import QuantizedDTypePolicy +from keras.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy def get(identifier): + from keras.dtype_policies.dtype_policy import ( + _get_quantized_dtype_policy_by_str, + ) from keras.saving import serialization_lib if identifier is None: @@ -14,8 +19,8 @@ def get(identifier): if isinstance(identifier, dict): return serialization_lib.deserialize_keras_object(identifier) if isinstance(identifier, str): - if identifier.startswith("int8"): - return QuantizedDTypePolicy(identifier) + if identifier.startswith(QUANTIZATION_MODES): + return _get_quantized_dtype_policy_by_str(identifier) else: return FloatDTypePolicy(identifier) try: diff --git a/keras/dtype_policies/dtype_policy.py b/keras/dtype_policies/dtype_policy.py index 8734037f7d45..0499a17bd3e7 100644 --- a/keras/dtype_policies/dtype_policy.py +++ b/keras/dtype_policies/dtype_policy.py @@ -3,6 +3,8 @@ from keras.api_export import keras_export from keras.backend.common import global_state +QUANTIZATION_MODES = ("int8", "float8") + @keras_export( [ @@ -55,7 +57,7 @@ class DTypePolicy: to explicitly construct a `DTypePolicy` object. """ - def __new__(cls, name): + def __new__(cls, name, *args, **kwargs): if not isinstance(name, str): raise TypeError( "'name' must be a string, such as 'mixed_float16'. " @@ -64,8 +66,8 @@ def __new__(cls, name): # For backwards compatibility # TODO: We should consider deprecating this behavior if cls is __class__: - if name.startswith("int8"): - return QuantizedDTypePolicy(name) + if name.startswith(QUANTIZATION_MODES): + return _get_quantized_dtype_policy_by_str(name) return FloatDTypePolicy(name) return super().__new__(cls) @@ -204,23 +206,23 @@ def __init__(self, name): def _parse_name(self, name): error_msg = ( - f"Cannot convert '{name}' to a QuantizedDTypePolicy. " - "Valid policies include " - "'int8_from_float32', 'int8_from_float16', 'int8_from_bfloat16', " - "'int8_from_mixed_float16', 'int8_from_mixed_bfloat16'." + f"Cannot convert '{name}' to a {self.__class__.__name__}. 
" + f"Valid policies are: {self._get_all_valid_policies()}." ) split_name = name.split("_from_") if len(split_name) != 2: raise ValueError(error_msg) mode, from_name = split_name - if mode not in ("int8",): + if mode not in QUANTIZATION_MODES: raise ValueError(error_msg) - if from_name == "mixed_float16": + if from_name == "mixed_float16" and mode != "int8": return mode, "float16", "float32" elif from_name == "mixed_bfloat16": return mode, "bfloat16", "float32" try: dtype = backend.standardize_dtype(from_name) + if dtype == "float16" and mode == "int8": + raise ValueError return mode, dtype, dtype except ValueError: raise ValueError(error_msg) @@ -237,6 +239,67 @@ def quantization_mode(self): def __repr__(self): return f'' + def _get_all_valid_policies(self): + valid_float_policies = [ + "float32", + "float16", + "bfloat16", + "mixed_float16", + "mixed_bfloat16", + ] + valid_policies = [ + f"{mode}_from_{policy}" + for mode in ("int8",) + for policy in valid_float_policies + ] + # Remove invalid policies + valid_policies.remove("int8_from_float16") + valid_policies.remove("int8_from_mixed_float16") + return valid_policies + + +@keras_export( + [ + "keras.QuantizedFloat8DTypePolicy", + "keras.dtype_policies.QuantizedFloat8DTypePolicy", + ] +) +class QuantizedFloat8DTypePolicy(QuantizedDTypePolicy): + def __init__(self, name, amax_history_length=1024): + super().__init__(name) + if not isinstance(amax_history_length, int): + raise TypeError( + "`amax_history_length` must be an integer. " + f"Received: amax_history_length={amax_history_length}" + ) + self._amax_history_length = amax_history_length + + @property + def amax_history_length(self): + """The length of the amax history window. + + This property is used for scaling factor computation in float8 training. + """ + return self._amax_history_length + + def __repr__(self): + return f'' + + def _get_all_valid_policies(self): + valid_float_policies = [ + "float32", + "float16", + "bfloat16", + "mixed_float16", + "mixed_bfloat16", + ] + valid_policies = [ + f"{mode}_from_{policy}" + for mode in ("float8") + for policy in valid_float_policies + ] + return valid_policies + @keras_export( [ @@ -254,8 +317,8 @@ def set_dtype_policy(policy): """ if not isinstance(policy, DTypePolicy): if isinstance(policy, str): - if policy.startswith("int8"): - policy = QuantizedDTypePolicy(policy) + if policy.startswith(QUANTIZATION_MODES): + policy = _get_quantized_dtype_policy_by_str(policy) else: policy = FloatDTypePolicy(policy) else: @@ -283,3 +346,18 @@ def dtype_policy(): policy = FloatDTypePolicy(backend.floatx()) set_dtype_policy(policy) return policy + + +def _get_quantized_dtype_policy_by_str(policy): + if not isinstance(policy, str): + raise TypeError(f"`policy` must be a string. Received: policy={policy}") + if not policy.startswith(QUANTIZATION_MODES): + raise ValueError( + "`policy` is incompatible with the current supported quantization." 
+ ) + if policy.startswith("int8"): + return QuantizedDTypePolicy(policy) + elif policy.startswith("float8"): + return QuantizedFloat8DTypePolicy(policy) + else: + raise NotImplementedError diff --git a/keras/dtype_policies/dtype_policy_test.py b/keras/dtype_policies/dtype_policy_test.py index 7b3535e66a7a..3e4fb8f75f5b 100644 --- a/keras/dtype_policies/dtype_policy_test.py +++ b/keras/dtype_policies/dtype_policy_test.py @@ -1,6 +1,9 @@ +from absl.testing import parameterized + from keras.dtype_policies.dtype_policy import DTypePolicy from keras.dtype_policies.dtype_policy import FloatDTypePolicy from keras.dtype_policies.dtype_policy import QuantizedDTypePolicy +from keras.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy from keras.dtype_policies.dtype_policy import dtype_policy from keras.dtype_policies.dtype_policy import set_dtype_policy from keras.testing import test_case @@ -61,13 +64,14 @@ def test_get_config_from_config(self): new_policy = DTypePolicy.from_config(config) self.assertEqual(new_policy.name, "mixed_float16") - def test_deepcopy(self): + def test_serialization(self): """Test builtin serialization methods.""" import copy import pickle - # copy.deepcopy policy = DTypePolicy("mixed_float16") + + # copy.deepcopy copied_policy = copy.deepcopy(policy) self.assertEqual( repr(copied_policy), '' @@ -151,62 +155,163 @@ def test_get_config_from_config(self): self.assertEqual(new_policy.name, "mixed_float16") -class QuantizedDTypePolicyTest(test_case.TestCase): - def test_initialization_valid_name(self): - """Test initialization with a valid name.""" - policy = QuantizedDTypePolicy("int8_from_mixed_float16") - self.assertEqual(policy.compute_dtype, "float16") - self.assertEqual(policy.variable_dtype, "float32") - - def test_initialization_invalid_name(self): - """Test initialization with an invalid name.""" +class QuantizedDTypePolicyTest(test_case.TestCase, parameterized.TestCase): + @parameterized.named_parameters( + ("float32", "float32", "float32", "float32"), + ("bfloat16", "bfloat16", "bfloat16", "bfloat16"), + ("mixed_bfloat16", "mixed_bfloat16", "bfloat16", "float32"), + ) + def test_initialization_for_int8( + self, from_name, expected_compute_dtype, expected_variable_dtype + ): + name = f"int8_from_{from_name}" + policy = QuantizedDTypePolicy(name) + self.assertEqual(policy.name, name) + self.assertEqual(policy.compute_dtype, expected_compute_dtype) + self.assertEqual(policy.variable_dtype, expected_variable_dtype) + self.assertEqual(repr(policy), f'') + + @parameterized.named_parameters( + ("float32", "float32", "float32", "float32"), + ("float16", "float16", "float16", "float16"), + ("bfloat16", "bfloat16", "bfloat16", "bfloat16"), + ("mixed_float16", "mixed_float16", "float16", "float32"), + ("mixed_bfloat16", "mixed_bfloat16", "bfloat16", "float32"), + ) + def test_initialization_for_float8( + self, from_name, expected_compute_dtype, expected_variable_dtype + ): + name = f"float8_from_{from_name}" + policy = QuantizedFloat8DTypePolicy(name) + self.assertEqual(policy.name, name) + self.assertEqual(policy.compute_dtype, expected_compute_dtype) + self.assertEqual(policy.variable_dtype, expected_variable_dtype) + self.assertEqual(repr(policy), f'') + + @parameterized.named_parameters( + ("abc", "abc"), + ("abc_from_def", "abc_from_def"), + ("int8_from_float16", "int8_from_float16"), + ("int8_from_mixed_float16", "int8_from_mixed_float16"), + ) + def test_initialization_with_invalid_name(self, invalid_name): with self.assertRaisesRegex(ValueError, "Cannot 
convert"): - QuantizedDTypePolicy("invalid_name") + QuantizedDTypePolicy(invalid_name) def test_initialization_non_string_name(self): """Test initialization with a non-string name.""" with self.assertRaisesRegex(TypeError, "'name' must be a string"): QuantizedDTypePolicy(123) - def test_properties_mixed_float16(self): - """Test properties for 'mixed_float16'.""" - policy = QuantizedDTypePolicy("int8_from_mixed_float16") - self.assertEqual(policy.compute_dtype, "float16") - self.assertEqual(policy.variable_dtype, "float32") - - def test_properties_mixed_bfloat16(self): - """Test properties for 'mixed_bfloat16'.""" + def test_get_config_from_config(self): + """Test get_config and from_config methods.""" policy = QuantizedDTypePolicy("int8_from_mixed_bfloat16") - self.assertEqual(policy.compute_dtype, "bfloat16") - self.assertEqual(policy.variable_dtype, "float32") + config = policy.get_config() + self.assertEqual(config, {"name": "int8_from_mixed_bfloat16"}) - def test_initialization_with_invalid_name_behaviour(self): - """Test initialization behavior with an invalid name.""" - with self.assertRaisesRegex(ValueError, "Cannot convert"): - QuantizedDTypePolicy("invalid_name") + new_policy = QuantizedDTypePolicy.from_config(config) + self.assertEqual(new_policy.name, "int8_from_mixed_bfloat16") + + @parameterized.named_parameters( + ( + "int8_from_mixed_bfloat16", + "int8_from_mixed_bfloat16", + '', + ), + ( + "float8_from_mixed_bfloat16", + "float8_from_mixed_bfloat16", + '', + ), + ) + def test_serialization(self, name, repr_str): + import copy + import pickle - def test_properties(self): - """Test variable_dtype, compute_dtype, and name properties.""" - policy = QuantizedDTypePolicy("int8_from_mixed_float16") - self.assertEqual(policy.variable_dtype, "float32") - self.assertEqual(policy.compute_dtype, "float16") - self.assertEqual(policy.name, "int8_from_mixed_float16") + policy = DTypePolicy(name) - def test_repr(self): - """Test __repr__ method.""" - policy = QuantizedDTypePolicy("int8_from_mixed_float16") + # copy.deepcopy + copied_policy = copy.deepcopy(policy) + self.assertEqual(repr(copied_policy), repr_str) + # copy.copy + copied_policy = copy.copy(policy) + self.assertEqual(repr(copied_policy), repr_str) + # pickle + temp_dir = self.get_temp_dir() + with open(f"{temp_dir}/policy.pickle", "wb") as f: + pickle.dump(policy, f) + with open(f"{temp_dir}/policy.pickle", "rb") as f: + copied_policy = pickle.load(f) + self.assertEqual(repr(copied_policy), repr_str) + + def test_properties_for_float8(self): + policy = QuantizedFloat8DTypePolicy("float8_from_mixed_bfloat16") + self.assertEqual(policy.amax_history_length, 1024) + policy = QuantizedFloat8DTypePolicy("float8_from_mixed_bfloat16", 512) + self.assertEqual(policy.amax_history_length, 512) + + def test_invalid_properties_for_float8(self): + with self.assertRaisesRegex(TypeError, "must be an integer."): + QuantizedFloat8DTypePolicy("float8_from_float32", "512") + with self.assertRaisesRegex(TypeError, "must be an integer."): + QuantizedFloat8DTypePolicy("float8_from_float32", 512.0) + + def test_serialization_for_float8(self): + import copy + import pickle + + policy = QuantizedFloat8DTypePolicy("float8_from_mixed_bfloat16", 123) + + # copy.deepcopy + copied_policy = copy.deepcopy(policy) + self.assertEqual( + repr(copied_policy), + '', + ) + self.assertEqual(copied_policy.amax_history_length, 123) + # copy.copy + copied_policy = copy.copy(policy) + self.assertEqual( + repr(copied_policy), + '', + ) + 
self.assertEqual(copied_policy.amax_history_length, 123) + # pickle + temp_dir = self.get_temp_dir() + with open(f"{temp_dir}/policy.pickle", "wb") as f: + pickle.dump(policy, f) + with open(f"{temp_dir}/policy.pickle", "rb") as f: + copied_policy = pickle.load(f) self.assertEqual( - repr(policy), '' + repr(copied_policy), + '', + ) + self.assertEqual(copied_policy.amax_history_length, 123) + + @parameterized.named_parameters( + ("int8_from_mixed_bfloat16", "int8_from_mixed_bfloat16"), + ("float8_from_mixed_bfloat16", "float8_from_mixed_bfloat16"), + ) + def test_get_quantized_dtype_policy_by_str(self, name): + from keras.dtype_policies.dtype_policy import ( + _get_quantized_dtype_policy_by_str, ) - def test_get_config_from_config(self): - """Test get_config and from_config methods.""" - policy = QuantizedDTypePolicy("int8_from_mixed_float16") - config = policy.get_config() - self.assertEqual(config, {"name": "int8_from_mixed_float16"}) + policy = _get_quantized_dtype_policy_by_str(name) + self.assertEqual(policy.name, name) - new_policy = QuantizedDTypePolicy.from_config(config) - self.assertEqual(new_policy.name, "int8_from_mixed_float16") + def test_invalid_get_quantized_dtype_policy_by_str(self): + from keras.dtype_policies.dtype_policy import ( + _get_quantized_dtype_policy_by_str, + ) + + with self.assertRaisesRegex(TypeError, "must be a string."): + _get_quantized_dtype_policy_by_str(123) + with self.assertRaisesRegex( + ValueError, + "is incompatible with the current supported quantization.", + ): + _get_quantized_dtype_policy_by_str("float7") class DTypePolicyGlobalFunctionsTest(test_case.TestCase): @@ -222,9 +327,9 @@ def test_set_dtype_policy_valid_string(self): def test_set_dtype_policy_valid_string_quantized(self): """Test set_dtype_policy with a valid string.""" - set_dtype_policy("int8_from_mixed_float16") + set_dtype_policy("int8_from_mixed_bfloat16") policy = dtype_policy() - self.assertEqual(policy.name, "int8_from_mixed_float16") + self.assertEqual(policy.name, "int8_from_mixed_bfloat16") def test_set_dtype_policy_valid_policy(self): """Test set_dtype_policy with a valid FloatDTypePolicy object.""" @@ -235,10 +340,10 @@ def test_set_dtype_policy_valid_policy(self): def test_set_dtype_policy_valid_policy_quantized(self): """Test set_dtype_policy with a valid FloatDTypePolicy object.""" - policy_obj = QuantizedDTypePolicy("int8_from_mixed_float16") + policy_obj = QuantizedDTypePolicy("int8_from_mixed_bfloat16") set_dtype_policy(policy_obj) policy = dtype_policy() - self.assertEqual(policy.name, "int8_from_mixed_float16") + self.assertEqual(policy.name, "int8_from_mixed_bfloat16") def test_set_dtype_policy_invalid(self): """Test set_dtype_policy with an invalid input.""" @@ -282,17 +387,17 @@ def test_empty_name(self): def test_special_character_name(self): """Test initialization with special characters in the name.""" with self.assertRaisesRegex(ValueError, "Cannot convert"): - QuantizedDTypePolicy("@int8_from_mixed_float16!") + QuantizedDTypePolicy("@int8_from_mixed_bfloat16!") def test_very_long_name(self): """Test initialization with a very long name.""" with self.assertRaisesRegex(ValueError, "Cannot convert"): - QuantizedDTypePolicy("int8_from_mixed_float16" * 100) + QuantizedDTypePolicy("int8_from_mixed_bfloat16" * 100) def test_almost_valid_name(self): """Test initialization with a name close to a valid one.""" with self.assertRaisesRegex(ValueError, "Cannot convert"): - QuantizedDTypePolicy("int7_from_mixed_float16") + 
QuantizedDTypePolicy("int7_from_mixed_bfloat16") class DTypePolicyGlobalFunctionsEdgeCasesTest(test_case.TestCase): diff --git a/keras/layers/core/dense.py b/keras/layers/core/dense.py index cb0c61743c8f..d7dffadc0548 100644 --- a/keras/layers/core/dense.py +++ b/keras/layers/core/dense.py @@ -1,3 +1,5 @@ +import ml_dtypes + from keras import activations from keras import backend from keras import constraints @@ -100,11 +102,17 @@ def __init__( def build(self, input_shape): input_dim = input_shape[-1] - if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): + # We use `self._dtype_policy` to check to avoid issues in torch dynamo + is_quantized = isinstance( + self._dtype_policy, dtype_policies.QuantizedDTypePolicy + ) + if is_quantized: self.quantized_build( input_shape, mode=self.dtype_policy.quantization_mode ) - else: + if not is_quantized or self.dtype_policy.quantization_mode != "int8": + # If the layer is quantized to int8, `self._kernel` will be added + # in `self._int8_build`. Therefore, we skip it here. self._kernel = self.add_weight( name="kernel", shape=(input_dim, self.units), @@ -199,7 +207,20 @@ def save_own_variables(self, store): if self.use_bias: store["1"] = self.bias if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): - store["2"] = kernel_scale + mode = self.dtype_policy.quantization_mode + if mode == "int8": + store["2"] = kernel_scale + elif mode == "float8": + store["2"] = self.inputs_scale + store["3"] = self.inputs_amax_history + store["4"] = self.kernel_scale + store["5"] = self.kernel_amax_history + store["6"] = self.outputs_grad_scale + store["7"] = self.outputs_grad_amax_history + else: + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) + ) def load_own_variables(self, store): if not self.lora_enabled: @@ -213,7 +234,20 @@ def load_own_variables(self, store): if self.use_bias: self.bias.assign(store["1"]) if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): - self.kernel_scale.assign(store["2"]) + mode = self.dtype_policy.quantization_mode + if mode == "int8": + self.kernel_scale.assign(store["2"]) + elif mode == "float8": + self.inputs_scale.assign(store["2"]) + self.inputs_amax_history.assign(store["3"]) + self.kernel_scale.assign(store["4"]) + self.kernel_amax_history.assign(store["5"]) + self.outputs_grad_scale.assign(store["6"]) + self.outputs_grad_amax_history.assign(store["7"]) + else: + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) + ) if self.lora_enabled: self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape)) self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape)) @@ -273,27 +307,108 @@ def _check_load_own_variables(self, store): f"Expected: {[v.name for v in all_vars]}" ) - """Quantization-related methods""" + """Quantization-related (int8 and float8) methods""" + + QUANTIZATION_MODE_ERROR_TEMPLATE = ( + f"Invalid quantization mode. Expected one of " + f"{dtype_policies.QUANTIZATION_MODES}. 
" + "Received: quantization_mode={mode}" + ) def quantized_build(self, input_shape, mode): - input_dim = input_shape[-1] if mode == "int8": - self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) - self._kernel = self.add_weight( - name="kernel", - shape=(input_dim, self.units), - initializer="zeros", - dtype="int8", - trainable=False, + input_dim = input_shape[-1] + kernel_shape = (input_dim, self.units) + self._int8_build(kernel_shape) + elif mode == "float8": + self._float8_build() + else: + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) ) - self.kernel_scale = self.add_weight( - name="kernel_scale", - shape=(self.units,), - initializer="ones", - trainable=False, + + def _int8_build( + self, + kernel_shape, + kernel_initializer="zeros", + kernel_scale_initializer="ones", + ): + self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) + self._kernel = self.add_weight( + name="kernel", + shape=kernel_shape, + initializer=kernel_initializer, + dtype="int8", + trainable=False, + ) + self.kernel_scale = self.add_weight( + name="kernel_scale", + shape=(self.units,), + initializer=kernel_scale_initializer, + trainable=False, + ) + + def _float8_build(self): + if not isinstance( + self.dtype_policy, dtype_policies.QuantizedFloat8DTypePolicy + ): + raise TypeError( + "`self.dtype_policy` must be the type of " + f"QuantizedFloat8DTypePolicy. Received {self.dtype_policy}" ) + amax_history_length = self.dtype_policy.amax_history_length + # We set `trainable=True` because we will use the gradients to overwrite + # these variables + scale_kwargs = { + "shape": (), + "initializer": "ones", + "dtype": "float32", # Always be float32 + "trainable": True, + "autocast": False, + } + amax_history_kwargs = { + "shape": (amax_history_length,), + "initializer": "zeros", + "dtype": "float32", # Always be float32 + "trainable": True, + "autocast": False, + } + self.inputs_scale = self.add_weight(name="inputs_scale", **scale_kwargs) + self.inputs_amax_history = self.add_weight( + name="inputs_amax_history", **amax_history_kwargs + ) + self.kernel_scale = self.add_weight(name="kernel_scale", **scale_kwargs) + self.kernel_amax_history = self.add_weight( + name="kernel_amax_history", **amax_history_kwargs + ) + self.outputs_grad_scale = self.add_weight( + name="outputs_grad_scale", **scale_kwargs + ) + self.outputs_grad_amax_history = self.add_weight( + name="outputs_grad_amax_history", **amax_history_kwargs + ) + # We need to set `overwrite_with_gradient=True` to instruct the + # optimizer to directly overwrite these variables with their computed + # gradients during training + self.inputs_scale.overwrite_with_gradient = True + self.inputs_amax_history.overwrite_with_gradient = True + self.kernel_scale.overwrite_with_gradient = True + self.kernel_amax_history.overwrite_with_gradient = True + self.outputs_grad_scale.overwrite_with_gradient = True + self.outputs_grad_amax_history.overwrite_with_gradient = True def quantized_call(self, inputs): + if self.dtype_policy.quantization_mode == "int8": + return self._int8_call(inputs) + elif self.dtype_policy.quantization_mode == "float8": + return self._float8_call(inputs) + else: + mode = self.dtype_policy.quantization_mode + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) + ) + + def _int8_call(self, inputs): @ops.custom_gradient def matmul_with_inputs_gradient(inputs, kernel, kernel_scale): def grad_fn(*args, upstream=None): @@ -328,6 +443,93 @@ def grad_fn(*args, upstream=None): x = 
self.activation(x) return x + def _float8_call(self, inputs): + if self.lora_enabled: + raise NotImplementedError( + "Currently, `_float8_call` doesn't support LoRA" + ) + + @ops.custom_gradient + def quantized_dequantize_inputs(inputs, scale, amax_history): + new_scale = quantizers.compute_float8_scale( + ops.max(amax_history, axis=0), + scale, + ops.cast( + float(ml_dtypes.finfo("float8_e4m3fn").max), "float32" + ), + ) + qdq_inputs = quantizers.quantize_and_dequantize( + inputs, scale, "float8_e4m3fn", self.compute_dtype + ) + new_amax_history = quantizers.compute_float8_amax_history( + inputs, amax_history + ) + + def grad(*args, upstream=None, variables=None): + if upstream is None: + (upstream,) = args + return upstream, new_scale, new_amax_history + + return qdq_inputs, grad + + @ops.custom_gradient + def quantized_dequantize_outputs(outputs, scale, amax_history): + """Quantize-dequantize the output gradient but not the output.""" + + def grad(*args, upstream=None, variables=None): + if upstream is None: + (upstream,) = args + new_scale = quantizers.compute_float8_scale( + ops.max(amax_history, axis=0), + scale, + ops.cast( + float(ml_dtypes.finfo("float8_e5m2").max), "float32" + ), + ) + qdq_upstream = quantizers.quantize_and_dequantize( + upstream, scale, "float8_e5m2", self.compute_dtype + ) + new_amax_history = quantizers.compute_float8_amax_history( + upstream, amax_history + ) + return qdq_upstream, new_scale, new_amax_history + + return outputs, grad + + x = ops.matmul( + quantized_dequantize_inputs( + inputs, + ops.convert_to_tensor(self.inputs_scale), + ops.convert_to_tensor(self.inputs_amax_history), + ), + quantized_dequantize_inputs( + ops.convert_to_tensor(self._kernel), + ops.convert_to_tensor(self.kernel_scale), + ops.convert_to_tensor(self.kernel_amax_history), + ), + ) + # `quantized_dequantize_outputs` is placed immediately after + # `ops.matmul` for the sake of pattern matching in gemm_rewrite. That + # way, the qdq will be adjacent to the corresponding matmul_bprop in the + # bprop. + x = quantized_dequantize_outputs( + x, + ops.convert_to_tensor(self.outputs_grad_scale), + ops.convert_to_tensor(self.outputs_grad_amax_history), + ) + if self.bias is not None: + # Under non-mixed precision cases, F32 bias has to be converted to + # BF16 first to get the biasAdd fusion support. ref. PR + # https://github.com/tensorflow/tensorflow/pull/60306 + bias = self.bias + if self.dtype_policy.compute_dtype == "float32": + bias_bf16 = ops.cast(bias, "bfloat16") + bias = ops.cast(bias_bf16, bias.dtype) + x = ops.add(x, bias) + if self.activation is not None: + x = self.activation(x) + return x + def quantize(self, mode): import gc @@ -338,6 +540,17 @@ def quantize(self, mode): "method implemented." 
) self._check_quantize_args(mode, self.compute_dtype) + + # Set new dtype policy + if not isinstance( + self.dtype_policy, dtype_policies.QuantizedDTypePolicy + ): + quantized_dtype = f"{mode}_from_{self.dtype_policy.name}" + # We set the internal `self._dtype_policy` instead of using the + # setter to avoid double `quantize` call + self._dtype_policy = dtype_policies.get(quantized_dtype) + + self._tracker.unlock() if mode == "int8": if backend.standardize_dtype(self._kernel.dtype) == "int8": raise ValueError("`quantize` can only be done once per layer.") @@ -348,38 +561,25 @@ def quantize(self, mode): self._kernel, axis=0 ) kernel_scale = ops.squeeze(kernel_scale, axis=0) - self._tracker.unlock() self._untrack_variable(self._kernel) kernel_shape = self._kernel.shape del self._kernel - self._kernel = self.add_weight( - name="kernel", - shape=kernel_shape, - # Prevent adding a large constant to the computation graph - initializer=lambda shape, dtype: kernel_value, - dtype="int8", - trainable=False, + # Utilize a lambda expression as an initializer to prevent adding a + # large constant to the computation graph. + self._int8_build( + kernel_shape, + lambda shape, dtype: kernel_value, + lambda shape, dtype: kernel_scale, ) - self.kernel_scale = self.add_weight( - name="kernel_scale", - shape=(self.units,), - # Prevent adding a large constant to the computation graph - initializer=lambda shape, dtype: kernel_scale, - trainable=False, - ) - self._tracker.lock() + elif mode == "float8": + if hasattr(self, "inputs_amax_history"): + raise ValueError("`quantize` can only be done once per layer.") + self._float8_build() else: - NotImplementedError( - "Invalid quantization mode. Expected 'int8'. " - f"Received: mode={mode}" + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) ) - - # Set new dtype policy - if not isinstance( - self.dtype_policy, dtype_policies.QuantizedDTypePolicy - ): - quantized_dtype = f"{mode}_from_{self.dtype_policy.name}" - self.dtype_policy = dtype_policies.get(quantized_dtype) + self._tracker.lock() # Release memory manually because sometimes the backend doesn't gc.collect() diff --git a/keras/layers/core/dense_test.py b/keras/layers/core/dense_test.py index 02db7cc7feba..e7161f7a3921 100644 --- a/keras/layers/core/dense_test.py +++ b/keras/layers/core/dense_test.py @@ -2,19 +2,22 @@ import numpy as np import pytest +from absl.testing import parameterized from keras import backend from keras import constraints from keras import layers from keras import models from keras import ops +from keras import optimizers +from keras import random from keras import saving from keras import testing from keras.backend.common import keras_tensor from keras.export import export_lib -class DenseTest(testing.TestCase): +class DenseTest(testing.TestCase, parameterized.TestCase): @pytest.mark.requires_trainable_backend def test_dense_basics(self): # 2D case, no bias. 
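# A minimal sketch of the float8 flow exercised by the tests below,
# assuming only the APIs added in this patch (`quantize`, `_float8_call`):
#
#     layer = layers.Dense(units=16)
#     layer.build((None, 8))
#     layer.quantize("float8")     # installs a QuantizedFloat8DTypePolicy
#     y = layer(x, training=True)  # quantize-dequantizes inputs and kernel
#                                  # to float8_e4m3fn via `_float8_call`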
@@ -329,6 +332,8 @@ def test_enable_lora_when_already_enabled(self): with self.assertRaisesRegex(ValueError, "lora is already enabled"): layer.enable_lora(rank=2) + """Test quantization-related (int8 and float8) methods""" + @pytest.mark.skipif( backend.backend() == "numpy", reason=f"{backend.backend()} does not support ops.custom_gradient.", @@ -383,50 +388,74 @@ def test_quantize_int8(self): backend.standardize_dtype(layer.kernel_scale.dtype), "float32" ) - @pytest.mark.requires_trainable_backend - def test_quantize_dtype_argument(self): - self.run_layer_test( - layers.Dense, - init_kwargs={ - "units": 5, - "dtype": "int8_from_mixed_bfloat16", - }, - input_shape=(2, 3, 4), - expected_output_shape=(2, 3, 5), - expected_num_trainable_weights=1, - expected_num_non_trainable_weights=2, - expected_num_seed_generators=0, - expected_num_losses=0, - supports_masking=True, - ) - - def test_quantize_on_unbuilt_layer(self): + @parameterized.named_parameters( + ("int8", "int8"), + ("float8", "float8"), + ) + def test_quantize_on_unbuilt_layer(self, mode): layer = layers.Dense(units=2) with self.assertRaisesRegex( ValueError, "Cannot quantize a layer that isn't yet built." ): - layer.quantize("int8") + layer.quantize(mode) - def test_quantize_on_subclass(self): + @parameterized.named_parameters( + ("int8", "int8"), + ("float8", "float8"), + ) + def test_quantize_on_subclass(self, mode): class MyDense(layers.Dense): pass layer = MyDense(units=16) layer.build((None, 8)) with self.assertRaises(NotImplementedError): - layer.quantize("int8") + layer.quantize(mode) - def test_quantize_when_already_quantized(self): + @parameterized.named_parameters( + ("int8", "int8"), + ("float8", "float8"), + ) + def test_quantize_when_already_quantized(self, mode): layer = layers.Dense(units=2) layer.build((None, 2)) - layer.quantize("int8") + layer.quantize(mode) with self.assertRaisesRegex( ValueError, "`quantize` can only be done once per layer." ): - layer.quantize("int8") + layer.quantize(mode) + + @parameterized.named_parameters( + ("int8", "int8_from_float32", 3), + ("float8", "float8_from_float32", 8), + ) + def test_quantize_by_setting_dtype_policy( + self, policy, expected_num_variables + ): + layer = layers.Dense(units=2) + layer.build((None, 2)) + layer.dtype_policy = policy + self.assertLen(layer.variables, expected_num_variables) @pytest.mark.requires_trainable_backend - def test_quantize_when_lora_enabled(self): + def test_quantize_int8_dtype_argument(self): + self.run_layer_test( + layers.Dense, + init_kwargs={ + "units": 5, + "dtype": "int8_from_mixed_bfloat16", + }, + input_shape=(2, 3, 4), + expected_output_shape=(2, 3, 5), + expected_num_trainable_weights=1, + expected_num_non_trainable_weights=2, + expected_num_seed_generators=0, + expected_num_losses=0, + supports_masking=True, + ) + + @pytest.mark.requires_trainable_backend + def test_quantize_int8_when_lora_enabled(self): # Note that saving and loading with lora_enabled and quantized are # lossy, so we use a weak correctness test for model outputs (atol=0.5). 
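        # (The loss presumably comes from re-quantizing the int8 kernel
        # after the LoRA delta is merged into it.)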
config = dict(units=16) @@ -490,7 +519,7 @@ def test_quantize_when_lora_enabled(self): import tensorflow as tf temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") - ref_input = tf.random.normal((32, 8)) + ref_input = tf.random.normal((2, 8)) ref_output = model(ref_input) export_lib.export_model(model, temp_filepath) reloaded_layer = export_lib.TFSMLayer(temp_filepath) @@ -505,3 +534,187 @@ def test_quantize_when_lora_enabled(self): reloaded_layer.non_trainable_weights, len(model.non_trainable_weights), ) + + @pytest.mark.requires_trainable_backend + def test_quantize_float8_dtype_argument(self): + self.run_layer_test( + layers.Dense, + init_kwargs={ + "units": 5, + "dtype": "float8_from_mixed_bfloat16", + }, + input_shape=(2, 3, 4), + expected_output_shape=(2, 3, 5), + expected_num_trainable_weights=8, + expected_num_non_trainable_weights=0, + expected_num_seed_generators=0, + expected_num_losses=0, + supports_masking=True, + ) + + @pytest.mark.requires_trainable_backend + def test_quantize_float8(self): + import ml_dtypes + + from keras import quantizers + + layer = layers.Dense(units=32) + layer.build((None, 16)) + layer.quantize("float8") + optimizer = optimizers.AdamW(learning_rate=0.1) + optimizer.build(layer.trainable_variables) + + def loss_fn(x, dy): + y = layer(x, training=True) + loss = y * ops.cast(dy, y.dtype) + return ops.sum(loss) + + if backend.backend() == "tensorflow": + import tensorflow as tf + + @tf.function(jit_compile=True) + def train_one_step(x, dy): + with tf.GradientTape() as tape: + loss = loss_fn(x, dy) + grads = tape.gradient(loss, layer.trainable_variables) + optimizer.apply(grads, layer.trainable_variables) + + elif backend.backend() == "jax": + import jax + + def stateless_loss_fn(trainable_variables, x, dy): + y = layer.stateless_call(trainable_variables, [], x)[0] + loss = y * ops.cast(dy, y.dtype) + return ops.sum(loss) + + grad_fn = jax.jit(jax.grad(stateless_loss_fn)) + + def train_one_step(x, dy): + trainable_variables = [ + v.value for v in layer.trainable_variables + ] + optimizer_variables = [v.value for v in optimizer.variables] + grads = grad_fn(trainable_variables, x, dy) + trainable_variables, optimizer_variables = ( + optimizer.stateless_apply( + optimizer_variables, grads, trainable_variables + ) + ) + for variable, value in zip( + layer.trainable_variables, trainable_variables + ): + variable.assign(value) + for variable, value in zip( + optimizer.variables, optimizer_variables + ): + variable.assign(value) + + elif backend.backend() == "torch": + + def train_one_step(x, dy): + layer.zero_grad() + loss = loss_fn(x, dy) + loss.backward() + grads = [v.value.grad for v in layer.trainable_variables] + optimizer.apply(grads, layer.trainable_variables) + + scale_x, amax_history_x = ops.ones(()), ops.zeros((1024,)) + scale_k, amax_history_k = ops.ones(()), ops.zeros((1024,)) + scale_g, amax_history_g = ops.ones(()), ops.zeros((1024,)) + e4m3_max = ops.cast( + float(ml_dtypes.finfo("float8_e4m3fn").max), "float32" + ) + e5m2_max = ops.cast( + float(ml_dtypes.finfo("float8_e5m2").max), "float32" + ) + + for _ in range(3): + x = random.normal((16, 16), dtype="float32") + g = random.normal((16, 32), dtype="float32") + k = ops.convert_to_tensor(layer._kernel) + + # Manually compute the expected amax history and scaling factors. 
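+            # Delayed scaling: the new scale for each tensor is derived from
+            # the max of its amax history and the dtype's largest finite
+            # value; the current step's amax is then rolled into the history.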
+ amax_from_history_x = ops.max(amax_history_x) + amax_from_history_k = ops.max(amax_history_k) + amax_from_history_g = ops.max(amax_history_g) + scale_x = quantizers.compute_float8_scale( + amax_from_history_x, scale_x, e4m3_max + ) + scale_k = quantizers.compute_float8_scale( + amax_from_history_k, scale_k, e4m3_max + ) + scale_g = quantizers.compute_float8_scale( + amax_from_history_g, scale_g, e5m2_max + ) + amax_history_x = quantizers.compute_float8_amax_history( + x, amax_history_x + ) + amax_history_k = quantizers.compute_float8_amax_history( + k, amax_history_k + ) + amax_history_g = quantizers.compute_float8_amax_history( + g, amax_history_g + ) + + train_one_step(x, g) + + self.assertAllClose(layer.inputs_amax_history, amax_history_x) + self.assertAllClose(layer.kernel_amax_history, amax_history_k) + self.assertAllClose(layer.outputs_grad_amax_history, amax_history_g) + self.assertAllClose(layer.inputs_scale, scale_x) + self.assertAllClose(layer.kernel_scale, scale_k) + self.assertAllClose(layer.outputs_grad_scale, scale_g) + + @pytest.mark.requires_trainable_backend + def test_quantize_float8_fitting(self): + config = dict(units=16) + layer = layers.Dense(**config) + layer.build((None, 8)) + layer.quantize("float8") + self.assertLen(layer.trainable_weights, 8) + self.assertLen(layer.non_trainable_weights, 0) + + # Try calling fit() + x = np.random.random((64, 8)) + y = np.random.random((64, 16)) + model = models.Sequential([layer]) + model.compile(optimizer="sgd", loss="mse") + model.fit(x, y, epochs=2) + + # Try saving and reloading the model + temp_filepath = os.path.join( + self.get_temp_dir(), "quantized_float8_model.keras" + ) + model.save(temp_filepath) + new_model = saving.load_model(temp_filepath) + self.assertAllClose(model.predict(x), new_model.predict(x)) + + # Try saving and reloading the model's weights only + temp_filepath = os.path.join( + self.get_temp_dir(), "quantized_float8_model.weights.h5" + ) + model.save_weights(temp_filepath) + new_model = models.Sequential([layers.Dense(**config)]) + new_model.build((None, 8)) + new_model.quantize("float8") + new_model.load_weights(temp_filepath) + self.assertAllClose(model.predict(x), new_model.predict(x)) + + # Test export and TFSMLayer reloading when using tensorflow backend + if backend.backend() == "tensorflow": + import tensorflow as tf + + temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + ref_input = tf.random.normal((2, 8)) + ref_output = model(ref_input) + export_lib.export_model(model, temp_filepath) + reloaded_layer = export_lib.TFSMLayer(temp_filepath) + self.assertAllClose(reloaded_layer(ref_input), ref_output) + self.assertLen(reloaded_layer.weights, len(model.weights)) + self.assertLen( + reloaded_layer.trainable_weights, len(model.trainable_weights) + ) + self.assertLen( + reloaded_layer.non_trainable_weights, + len(model.non_trainable_weights), + ) diff --git a/keras/layers/core/einsum_dense.py b/keras/layers/core/einsum_dense.py index e8022d915b5f..95f7b7274215 100644 --- a/keras/layers/core/einsum_dense.py +++ b/keras/layers/core/einsum_dense.py @@ -1,6 +1,7 @@ import re import string +import ml_dtypes import numpy as np from keras import activations @@ -153,13 +154,19 @@ def build(self, input_shape): ) kernel_shape, bias_shape, full_output_shape = shape_data self.full_output_shape = tuple(full_output_shape) - # `quantized_build` needs `self.input_spec` + # `self._int8_build` needs `self.input_spec` self.input_spec = InputSpec(ndim=len(input_shape)) - if 
isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): + # We use `self._dtype_policy` to check to avoid issues in torch dynamo + is_quantized = isinstance( + self._dtype_policy, dtype_policies.QuantizedDTypePolicy + ) + if is_quantized: self.quantized_build( input_shape, mode=self.dtype_policy.quantization_mode ) - else: + if not is_quantized or self.dtype_policy.quantization_mode != "int8": + # If the layer is quantized to int8, `self._kernel` will be added + # in `self._int8_build`. Therefore, we skip it here. self._kernel = self.add_weight( name="kernel", shape=tuple(kernel_shape), @@ -255,7 +262,20 @@ def save_own_variables(self, store): if self.bias is not None: store["1"] = self.bias if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): - store["2"] = kernel_scale + mode = self.dtype_policy.quantization_mode + if mode == "int8": + store["2"] = kernel_scale + elif mode == "float8": + store["2"] = self.inputs_scale + store["3"] = self.inputs_amax_history + store["4"] = self.kernel_scale + store["5"] = self.kernel_amax_history + store["6"] = self.outputs_grad_scale + store["7"] = self.outputs_grad_amax_history + else: + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) + ) def load_own_variables(self, store): if not self.lora_enabled: @@ -269,7 +289,20 @@ def load_own_variables(self, store): if self.bias is not None: self.bias.assign(store["1"]) if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): - self.kernel_scale.assign(store["2"]) + mode = self.dtype_policy.quantization_mode + if mode == "int8": + self.kernel_scale.assign(store["2"]) + elif mode == "float8": + self.inputs_scale.assign(store["2"]) + self.inputs_amax_history.assign(store["3"]) + self.kernel_scale.assign(store["4"]) + self.kernel_amax_history.assign(store["5"]) + self.outputs_grad_scale.assign(store["6"]) + self.outputs_grad_amax_history.assign(store["7"]) + else: + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) + ) if self.lora_enabled: self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape)) self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape)) @@ -333,53 +366,133 @@ def _check_load_own_variables(self, store): f"Expected: {[v.name for v in all_vars]}" ) - """Quantization-related methods""" + """Quantization-related (int8 and float8) methods""" + + QUANTIZATION_MODE_ERROR_TEMPLATE = ( + f"Invalid quantization mode. Expected one of " + f"{dtype_policies.QUANTIZATION_MODES}. 
" + "Received: quantization_mode={mode}" + ) def quantized_build(self, input_shape, mode): - shape_data = _analyze_einsum_string( - self.equation, - self.bias_axes, - input_shape, - self.partial_output_shape, - ) - kernel_shape, _, _ = shape_data if mode == "int8": - ( - self._input_reduced_axes, - self._kernel_reduced_axes, - self._input_transpose_axes, - self._kernel_transpose_axes, - self._input_expand_axes, - self._kernel_expand_axes, - self._input_squeeze_axes, - self._kernel_squeeze_axes, - self._custom_gradient_equation, - self._kernel_reverse_transpose_axes, - ) = _analyze_quantization_info(self.equation, self.input_spec.ndim) - self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) - self._kernel = self.add_weight( - name="kernel", - shape=kernel_shape, - initializer="zeros", - dtype="int8", - trainable=False, + shape_data = _analyze_einsum_string( + self.equation, + self.bias_axes, + input_shape, + self.partial_output_shape, ) - kernel_scale_shape = np.array(kernel_shape) - kernel_scale_shape[self._kernel_reduced_axes] = 1 - kernel_scale_shape = kernel_scale_shape[self._kernel_transpose_axes] - kernel_scale_shape = kernel_scale_shape.tolist() - for a in sorted(self._kernel_expand_axes): - kernel_scale_shape.insert(a, 1) - for a in sorted(self._kernel_squeeze_axes, reverse=True): - kernel_scale_shape.pop(a) - self.kernel_scale = self.add_weight( - name="kernel_scale", - shape=kernel_scale_shape, - initializer="ones", - trainable=False, + kernel_shape, _, _ = shape_data + self._int8_build(kernel_shape) + elif mode == "float8": + self._float8_build() + else: + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) ) + def _int8_build( + self, + kernel_shape, + kernel_initializer="zeros", + kernel_scale_initializer="ones", + ): + ( + self._input_reduced_axes, + self._kernel_reduced_axes, + self._input_transpose_axes, + self._kernel_transpose_axes, + self._input_expand_axes, + self._kernel_expand_axes, + self._input_squeeze_axes, + self._kernel_squeeze_axes, + self._custom_gradient_equation, + self._kernel_reverse_transpose_axes, + ) = _analyze_quantization_info(self.equation, self.input_spec.ndim) + self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) + self._kernel = self.add_weight( + name="kernel", + shape=kernel_shape, + initializer=kernel_initializer, + dtype="int8", + trainable=False, + ) + kernel_scale_shape = np.array(kernel_shape) + kernel_scale_shape[self._kernel_reduced_axes] = 1 + kernel_scale_shape = kernel_scale_shape[self._kernel_transpose_axes] + kernel_scale_shape = kernel_scale_shape.tolist() + for a in sorted(self._kernel_expand_axes): + kernel_scale_shape.insert(a, 1) + for a in sorted(self._kernel_squeeze_axes, reverse=True): + kernel_scale_shape.pop(a) + self.kernel_scale = self.add_weight( + name="kernel_scale", + shape=kernel_scale_shape, + initializer=kernel_scale_initializer, + trainable=False, + ) + + def _float8_build(self): + if not isinstance( + self.dtype_policy, dtype_policies.QuantizedFloat8DTypePolicy + ): + raise TypeError( + "`self.dtype_policy` must be the type of " + f"QuantizedFloat8DTypePolicy. 
Received {self.dtype_policy}" + ) + amax_history_length = self.dtype_policy.amax_history_length + # We set `trainable=True` because we will use the gradients to overwrite + # these variables + scale_kwargs = { + "shape": (), + "initializer": "ones", + "dtype": "float32", # Always be float32 + "trainable": True, + "autocast": False, + } + amax_history_kwargs = { + "shape": (amax_history_length,), + "initializer": "zeros", + "dtype": "float32", # Always be float32 + "trainable": True, + "autocast": False, + } + self.inputs_scale = self.add_weight(name="inputs_scale", **scale_kwargs) + self.inputs_amax_history = self.add_weight( + name="inputs_amax_history", **amax_history_kwargs + ) + self.kernel_scale = self.add_weight(name="kernel_scale", **scale_kwargs) + self.kernel_amax_history = self.add_weight( + name="kernel_amax_history", **amax_history_kwargs + ) + self.outputs_grad_scale = self.add_weight( + name="outputs_grad_scale", **scale_kwargs + ) + self.outputs_grad_amax_history = self.add_weight( + name="outputs_grad_amax_history", **amax_history_kwargs + ) + # We need to set `overwrite_with_gradient=True` to instruct the + # optimizer to directly overwrite these variables with their computed + # gradients during training + self.inputs_scale.overwrite_with_gradient = True + self.inputs_amax_history.overwrite_with_gradient = True + self.kernel_scale.overwrite_with_gradient = True + self.kernel_amax_history.overwrite_with_gradient = True + self.outputs_grad_scale.overwrite_with_gradient = True + self.outputs_grad_amax_history.overwrite_with_gradient = True + def quantized_call(self, inputs): + if self.dtype_policy.quantization_mode == "int8": + return self._int8_call(inputs) + elif self.dtype_policy.quantization_mode == "float8": + return self._float8_call(inputs) + else: + mode = self.dtype_policy.quantization_mode + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) + ) + + def _int8_call(self, inputs): @ops.custom_gradient def einsum_with_inputs_gradient(inputs, kernel, kernel_scale): def grad_fn(*args, upstream=None): @@ -442,6 +555,94 @@ def grad_fn(*args, upstream=None): x = self.activation(x) return x + def _float8_call(self, inputs): + if self.lora_enabled: + raise NotImplementedError( + "Currently, `_float8_call` doesn't support LoRA" + ) + + @ops.custom_gradient + def quantized_dequantize_inputs(inputs, scale, amax_history): + new_scale = quantizers.compute_float8_scale( + ops.max(amax_history, axis=0), + scale, + ops.cast( + float(ml_dtypes.finfo("float8_e4m3fn").max), "float32" + ), + ) + qdq_inputs = quantizers.quantize_and_dequantize( + inputs, scale, "float8_e4m3fn", self.compute_dtype + ) + new_amax_history = quantizers.compute_float8_amax_history( + inputs, amax_history + ) + + def grad(*args, upstream=None, variables=None): + if upstream is None: + (upstream,) = args + return upstream, new_scale, new_amax_history + + return qdq_inputs, grad + + @ops.custom_gradient + def quantized_dequantize_outputs(outputs, scale, amax_history): + """Quantize-dequantize the output gradient but not the output.""" + + def grad(*args, upstream=None, variables=None): + if upstream is None: + (upstream,) = args + new_scale = quantizers.compute_float8_scale( + ops.max(amax_history, axis=0), + scale, + ops.cast( + float(ml_dtypes.finfo("float8_e5m2").max), "float32" + ), + ) + qdq_upstream = quantizers.quantize_and_dequantize( + upstream, scale, "float8_e5m2", self.compute_dtype + ) + new_amax_history = quantizers.compute_float8_amax_history( + upstream, amax_history 
+ ) + return qdq_upstream, new_scale, new_amax_history + + return outputs, grad + + x = ops.einsum( + self.equation, + quantized_dequantize_inputs( + inputs, + ops.convert_to_tensor(self.inputs_scale), + ops.convert_to_tensor(self.inputs_amax_history), + ), + quantized_dequantize_inputs( + ops.convert_to_tensor(self._kernel), + ops.convert_to_tensor(self.kernel_scale), + ops.convert_to_tensor(self.kernel_amax_history), + ), + ) + # `quantized_dequantize_outputs` is placed immediately after + # `ops.einsum` for the sake of pattern matching in gemm_rewrite. That + # way, the qdq will be adjacent to the corresponding einsum_bprop in the + # bprop. + x = quantized_dequantize_outputs( + x, + ops.convert_to_tensor(self.outputs_grad_scale), + ops.convert_to_tensor(self.outputs_grad_amax_history), + ) + if self.bias is not None: + # Under non-mixed precision cases, F32 bias has to be converted to + # BF16 first to get the biasAdd fusion support. ref. PR + # https://github.com/tensorflow/tensorflow/pull/60306 + bias = self.bias + if self.dtype_policy.compute_dtype == "float32": + bias_bf16 = ops.cast(bias, "bfloat16") + bias = ops.cast(bias_bf16, bias.dtype) + x = ops.add(x, bias) + if self.activation is not None: + x = self.activation(x) + return x + def quantize(self, mode): import gc @@ -452,6 +653,17 @@ def quantize(self, mode): "method implemented." ) self._check_quantize_args(mode, self.compute_dtype) + + # Set new dtype policy + if not isinstance( + self.dtype_policy, dtype_policies.QuantizedDTypePolicy + ): + quantized_dtype = f"{mode}_from_{self.dtype_policy.name}" + # We set the internal `self._dtype_policy` instead of using the + # setter to avoid double `quantize` call + self._dtype_policy = dtype_policies.get(quantized_dtype) + + self._tracker.unlock() if mode == "int8": if backend.standardize_dtype(self._kernel.dtype) == "int8": raise ValueError("`quantize` can only be done once per layer.") @@ -486,35 +698,25 @@ def quantize(self, mode): kernel_scale = ops.squeeze( kernel_scale, axis=self._kernel_squeeze_axes ) - self._tracker.unlock() self._untrack_variable(self._kernel) kernel_shape = self._kernel.shape del self._kernel - self._kernel = self.add_weight( - name="kernel", - shape=kernel_shape, - # Prevent adding a large constant to the computation graph - initializer=lambda shape, dtype: kernel_value, - dtype="int8", - trainable=False, - ) - self.kernel_scale = self.add_weight( - name="kernel_scale", - shape=kernel_scale.shape, - # Prevent adding a large constant to the computation graph - initializer=lambda shape, dtype: kernel_scale, - trainable=False, + # Utilize a lambda expression as an initializer to prevent adding a + # large constant to the computation graph. 
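+        # `_int8_build` re-registers `kernel` (int8) and `kernel_scale`
+        # (float32) using the pre-computed values as their initializers.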
+ self._int8_build( + kernel_shape, + lambda shape, dtype: kernel_value, + lambda shape, dtype: kernel_scale, ) - self._tracker.lock() + elif mode == "float8": + if hasattr(self, "inputs_amax_history"): + raise ValueError("`quantize` can only be done once per layer.") + self._float8_build() else: - NotImplementedError() - - # Set new dtype policy - if not isinstance( - self.dtype_policy, dtype_policies.QuantizedDTypePolicy - ): - quantized_dtype = f"{mode}_from_{self.dtype_policy.name}" - self.dtype_policy = dtype_policies.get(quantized_dtype) + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) + ) + self._tracker.lock() # Release memory manually because sometimes the backend doesn't gc.collect() diff --git a/keras/layers/core/einsum_dense_test.py b/keras/layers/core/einsum_dense_test.py index ce810b3affc7..dbf782e1e607 100644 --- a/keras/layers/core/einsum_dense_test.py +++ b/keras/layers/core/einsum_dense_test.py @@ -9,6 +9,8 @@ from keras import layers from keras import models from keras import ops +from keras import optimizers +from keras import random from keras import saving from keras import testing from keras.export import export_lib @@ -378,6 +380,8 @@ def test_lora_rank_argument(self): supports_masking=False, ) + """Test quantization-related (int8 and float8) methods""" + @pytest.mark.skipif( backend.backend() == "numpy", reason=f"{backend.backend()} does not support ops.custom_gradient.", @@ -465,26 +469,11 @@ def test_quantize_int8(self): backend.standardize_dtype(layer.kernel_scale.dtype), "float32" ) - @pytest.mark.requires_trainable_backend - def test_quantize_dtype_argument(self): - self.run_layer_test( - layers.EinsumDense, - init_kwargs={ - "equation": "ab,bcd->acd", - "output_shape": (8, 32), - "bias_axes": "d", - "dtype": "int8_from_mixed_bfloat16", - }, - input_shape=(2, 3), - expected_output_shape=(2, 8, 32), - expected_num_trainable_weights=1, - expected_num_non_trainable_weights=2, - expected_num_seed_generators=0, - expected_num_losses=0, - supports_masking=False, - ) - - def test_quantize_on_unbuilt_layer(self): + @parameterized.named_parameters( + ("int8", "int8"), + ("float8", "float8"), + ) + def test_quantize_on_unbuilt_layer(self, mode): layer = layers.EinsumDense( equation="ab,bcd->acd", output_shape=(8, 32), @@ -493,9 +482,13 @@ def test_quantize_on_unbuilt_layer(self): with self.assertRaisesRegex( ValueError, "Cannot quantize a layer that isn't yet built." ): - layer.quantize("int8") + layer.quantize(mode) - def test_quantize_on_subclass(self): + @parameterized.named_parameters( + ("int8", "int8"), + ("float8", "float8"), + ) + def test_quantize_on_subclass(self, mode): class MyEinsumDense(layers.EinsumDense): pass @@ -506,23 +499,62 @@ class MyEinsumDense(layers.EinsumDense): ) layer.build((None, 3)) with self.assertRaises(NotImplementedError): - layer.quantize("int8") + layer.quantize(mode) - def test_quantize_when_already_quantized(self): + @parameterized.named_parameters( + ("int8", "int8"), + ("float8", "float8"), + ) + def test_quantize_when_already_quantized(self, mode): layer = layers.EinsumDense( equation="ab,bcd->acd", output_shape=(8, 32), bias_axes="d", ) layer.build((None, 3)) - layer.quantize("int8") + layer.quantize(mode) with self.assertRaisesRegex( ValueError, "`quantize` can only be done once per layer." 
): - layer.quantize("int8") + layer.quantize(mode) + + @parameterized.named_parameters( + ("int8", "int8_from_float32", 3), + ("float8", "float8_from_float32", 8), + ) + def test_quantize_by_setting_dtype_policy( + self, policy, expected_num_variables + ): + layer = layers.EinsumDense( + equation="ab,bcd->acd", + output_shape=(8, 32), + bias_axes="d", + ) + layer.build((None, 3)) + layer.dtype_policy = policy + self.assertLen(layer.variables, expected_num_variables) @pytest.mark.requires_trainable_backend - def test_quantize_when_lora_enabled(self): + def test_quantize_int8_dtype_argument(self): + self.run_layer_test( + layers.EinsumDense, + init_kwargs={ + "equation": "ab,bcd->acd", + "output_shape": (8, 32), + "bias_axes": "d", + "dtype": "int8_from_mixed_bfloat16", + }, + input_shape=(2, 3), + expected_output_shape=(2, 8, 32), + expected_num_trainable_weights=1, + expected_num_non_trainable_weights=2, + expected_num_seed_generators=0, + expected_num_losses=0, + supports_masking=False, + ) + + @pytest.mark.requires_trainable_backend + def test_quantize_int8_when_lora_enabled(self): config = dict( equation="ab,bcd->acd", output_shape=(8, 32), @@ -603,3 +635,197 @@ def test_quantize_when_lora_enabled(self): reloaded_layer.non_trainable_weights, len(model.non_trainable_weights), ) + + @pytest.mark.requires_trainable_backend + def test_quantize_float8_dtype_argument(self): + self.run_layer_test( + layers.EinsumDense, + init_kwargs={ + "equation": "ab,bcd->acd", + "output_shape": (8, 32), + "bias_axes": "d", + "dtype": "float8_from_mixed_bfloat16", + }, + input_shape=(2, 3), + expected_output_shape=(2, 8, 32), + expected_num_trainable_weights=8, + expected_num_non_trainable_weights=0, + expected_num_seed_generators=0, + expected_num_losses=0, + supports_masking=False, + ) + + @pytest.mark.requires_trainable_backend + def test_quantize_float8(self): + import ml_dtypes + + from keras import quantizers + + layer = layers.EinsumDense( + "ab,bc->ac", + output_shape=[32], + bias_axes="c", + ) + layer.build((None, 16)) + layer.quantize("float8") + optimizer = optimizers.AdamW(learning_rate=0.1) + optimizer.build(layer.trainable_variables) + + def loss_fn(x, dy): + y = layer(x, training=True) + loss = y * ops.cast(dy, y.dtype) + return ops.sum(loss) + + if backend.backend() == "tensorflow": + import tensorflow as tf + + @tf.function(jit_compile=True) + def train_one_step(x, dy): + with tf.GradientTape() as tape: + loss = loss_fn(x, dy) + grads = tape.gradient(loss, layer.trainable_variables) + optimizer.apply(grads, layer.trainable_variables) + + elif backend.backend() == "jax": + import jax + + def stateless_loss_fn(trainable_variables, x, dy): + y = layer.stateless_call(trainable_variables, [], x)[0] + loss = y * ops.cast(dy, y.dtype) + return ops.sum(loss) + + grad_fn = jax.jit(jax.grad(stateless_loss_fn)) + + def train_one_step(x, dy): + trainable_variables = [ + v.value for v in layer.trainable_variables + ] + optimizer_variables = [v.value for v in optimizer.variables] + grads = grad_fn(trainable_variables, x, dy) + trainable_variables, optimizer_variables = ( + optimizer.stateless_apply( + optimizer_variables, grads, trainable_variables + ) + ) + for variable, value in zip( + layer.trainable_variables, trainable_variables + ): + variable.assign(value) + for variable, value in zip( + optimizer.variables, optimizer_variables + ): + variable.assign(value) + + elif backend.backend() == "torch": + + def train_one_step(x, dy): + layer.zero_grad() + loss = loss_fn(x, dy) + loss.backward() + grads = 
[v.value.grad for v in layer.trainable_variables] + optimizer.apply(grads, layer.trainable_variables) + + scale_x, amax_history_x = ops.ones(()), ops.zeros((1024,)) + scale_k, amax_history_k = ops.ones(()), ops.zeros((1024,)) + scale_g, amax_history_g = ops.ones(()), ops.zeros((1024,)) + e4m3_max = ops.cast( + float(ml_dtypes.finfo("float8_e4m3fn").max), "float32" + ) + e5m2_max = ops.cast( + float(ml_dtypes.finfo("float8_e5m2").max), "float32" + ) + + for _ in range(3): + x = random.normal((16, 16), dtype="float32") + g = random.normal((16, 32), dtype="float32") + k = ops.convert_to_tensor(layer._kernel) + + # Manually compute the expected amax history and scaling factors. + amax_from_history_x = ops.max(amax_history_x) + amax_from_history_k = ops.max(amax_history_k) + amax_from_history_g = ops.max(amax_history_g) + scale_x = quantizers.compute_float8_scale( + amax_from_history_x, scale_x, e4m3_max + ) + scale_k = quantizers.compute_float8_scale( + amax_from_history_k, scale_k, e4m3_max + ) + scale_g = quantizers.compute_float8_scale( + amax_from_history_g, scale_g, e5m2_max + ) + amax_history_x = quantizers.compute_float8_amax_history( + x, amax_history_x + ) + amax_history_k = quantizers.compute_float8_amax_history( + k, amax_history_k + ) + amax_history_g = quantizers.compute_float8_amax_history( + g, amax_history_g + ) + + train_one_step(x, g) + + self.assertAllClose(layer.inputs_amax_history, amax_history_x) + self.assertAllClose(layer.kernel_amax_history, amax_history_k) + self.assertAllClose(layer.outputs_grad_amax_history, amax_history_g) + self.assertAllClose(layer.inputs_scale, scale_x) + self.assertAllClose(layer.kernel_scale, scale_k) + self.assertAllClose(layer.outputs_grad_scale, scale_g) + + @pytest.mark.requires_trainable_backend + def test_quantize_float8_fitting(self): + config = dict( + equation="ab,bcd->acd", + output_shape=(8, 32), + bias_axes="d", + ) + layer = layers.EinsumDense(**config) + layer.build((None, 3)) + layer.quantize("float8") + self.assertLen(layer.trainable_weights, 8) + self.assertLen(layer.non_trainable_weights, 0) + + # Try calling fit() + x = np.random.random((64, 3)) + y = np.random.random((64, 8, 32)) + model = models.Sequential([layer]) + model.compile(optimizer="sgd", loss="mse") + model.fit(x, y, epochs=2) + + # Try saving and reloading the model + temp_filepath = os.path.join( + self.get_temp_dir(), "quantized_lora_model.keras" + ) + model.save(temp_filepath) + new_model = saving.load_model(temp_filepath) + self.assertAllClose(model.predict(x), new_model.predict(x)) + + # Try saving and reloading the model's weights only + temp_filepath = os.path.join( + self.get_temp_dir(), "quantized_lora_model.weights.h5" + ) + model.save_weights(temp_filepath) + new_model = models.Sequential([layers.EinsumDense(**config)]) + new_model.build((None, 3)) + new_model.quantize("float8") + new_model.load_weights(temp_filepath) + self.assertAllClose(model.predict(x), new_model.predict(x)) + + # Test export and TFSMLayer reloading when using tensorflow backend + if backend.backend() == "tensorflow": + import tensorflow as tf + + temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + ref_input = tf.random.normal((2, 3)) + ref_output = model(ref_input) + export_lib.export_model(model, temp_filepath) + reloaded_layer = export_lib.TFSMLayer(temp_filepath) + self.assertAllClose(reloaded_layer(ref_input), ref_output) + self.assertLen(reloaded_layer.weights, len(model.weights)) + self.assertLen( + reloaded_layer.trainable_weights, 
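+                # A float8 layer exposes all 8 variables (kernel, bias,
+                # 3 scales, 3 amax histories) as trainable, so the reloaded
+                # count should match the original model exactly.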
len(model.trainable_weights) + ) + self.assertLen( + reloaded_layer.non_trainable_weights, + len(model.non_trainable_weights), + ) diff --git a/keras/layers/core/embedding.py b/keras/layers/core/embedding.py index 13c252a9d367..d7afb529bb9e 100644 --- a/keras/layers/core/embedding.py +++ b/keras/layers/core/embedding.py @@ -111,11 +111,15 @@ def __init__( def build(self, input_shape=None): if self.built: return - if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): + # We use `self._dtype_policy` to check to avoid issues in torch dynamo + is_quantized = isinstance( + self._dtype_policy, dtype_policies.QuantizedDTypePolicy + ) + if is_quantized: self.quantized_build( input_shape, mode=self.dtype_policy.quantization_mode ) - else: + if not is_quantized or self.dtype_policy.quantization_mode != "int8": self._embeddings = self.add_weight( shape=(self.input_dim, self.output_dim), initializer=self.embeddings_initializer, @@ -197,7 +201,13 @@ def save_own_variables(self, store): ) store["0"] = embeddings_value if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): - store["1"] = embeddings_scale + mode = self.dtype_policy.quantization_mode + if mode == "int8": + store["1"] = embeddings_scale + else: + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) + ) def load_own_variables(self, store): if not self.lora_enabled: @@ -209,7 +219,13 @@ def load_own_variables(self, store): # default ordering will change after quantization self._embeddings.assign(store["0"]) if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): - self.embeddings_scale.assign(store["1"]) + mode = self.dtype_policy.quantization_mode + if mode == "int8": + self.embeddings_scale.assign(store["1"]) + else: + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) + ) if self.lora_enabled: self.lora_embeddings_a.assign( ops.zeros(self.lora_embeddings_a.shape) @@ -275,26 +291,51 @@ def _check_load_own_variables(self, store): f"Expected: {[v.name for v in all_vars]}" ) - """Quantization-related methods""" + """Quantization-related (int8) methods""" + + QUANTIZATION_MODE_ERROR_TEMPLATE = ( + "Invalid quantization mode. Expected 'int8'. 
" + "Received: quantization_mode={mode}" + ) def quantized_build(self, input_shape, mode): if mode == "int8": - self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) - self._embeddings = self.add_weight( - name="embeddings", - shape=(self.input_dim, self.output_dim), - initializer="zeros", - dtype="int8", - trainable=False, - ) - self.embeddings_scale = self.add_weight( - name="embeddings_scale", - shape=(self.output_dim,), - initializer="ones", - trainable=False, + self._int8_build() + else: + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) ) + def _int8_build( + self, + embeddings_initializer="zeros", + embeddings_scale_initializer="ones", + ): + self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) + self._embeddings = self.add_weight( + name="embeddings", + shape=(self.input_dim, self.output_dim), + initializer=embeddings_initializer, + dtype="int8", + trainable=False, + ) + self.embeddings_scale = self.add_weight( + name="embeddings_scale", + shape=(self.output_dim,), + initializer=embeddings_scale_initializer, + trainable=False, + ) + def quantized_call(self, inputs): + if self.dtype_policy.quantization_mode == "int8": + return self._int8_call(inputs) + else: + mode = self.dtype_policy.quantization_mode + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) + ) + + def _int8_call(self, inputs): # We cannot update quantized self._embeddings, so the custom gradient is # not needed if backend.standardize_dtype(inputs.dtype) not in ("int32", "int64"): @@ -321,6 +362,17 @@ def quantize(self, mode): "method implemented." ) self._check_quantize_args(mode, self.compute_dtype) + + # Set new dtype policy + if not isinstance( + self.dtype_policy, dtype_policies.QuantizedDTypePolicy + ): + quantized_dtype = f"{mode}_from_{self.dtype_policy.name}" + # We set the internal `self._dtype_policy` instead of using the + # setter to avoid double `quantize` call + self._dtype_policy = dtype_policies.get(quantized_dtype) + + self._tracker.unlock() if mode == "int8": if backend.standardize_dtype(self._embeddings.dtype) == "int8": raise ValueError("`quantize` can only be done once per layer.") @@ -332,37 +384,19 @@ def quantize(self, mode): self._embeddings, axis=0 ) embeddings_scale = ops.squeeze(embeddings_scale, axis=0) - self._tracker.unlock() self._untrack_variable(self._embeddings) del self._embeddings - self._embeddings = self.add_weight( - name="embeddings", - shape=(self.input_dim, self.output_dim), - # Prevent adding a large constant to the computation graph - initializer=lambda shape, dtype: embeddings_value, - dtype="int8", - trainable=False, - ) - self.embeddings_scale = self.add_weight( - name="embeddings_scale", - shape=(self.output_dim,), - # Prevent adding a large constant to the computation graph - initializer=lambda shape, dtype: embeddings_scale, - trainable=False, + # Utilize a lambda expression as an initializer to prevent adding a + # large constant to the computation graph. + self._int8_build( + lambda shape, dtype: embeddings_value, + lambda shape, dtype: embeddings_scale, ) - self._tracker.lock() else: - NotImplementedError( - "Invalid quantization mode. Expected 'int8'. 
" - f"Received: mode={mode}" + raise NotImplementedError( + self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) ) - - # Set new dtype policy - if not isinstance( - self.dtype_policy, dtype_policies.QuantizedDTypePolicy - ): - quantized_dtype = f"{mode}_from_{self.dtype_policy.name}" - self.dtype_policy = dtype_policies.get(quantized_dtype) + self._tracker.lock() # Release memory manually because sometimes the backend doesn't gc.collect() diff --git a/keras/layers/core/embedding_test.py b/keras/layers/core/embedding_test.py index 328cd26bb699..2663ee1fce13 100644 --- a/keras/layers/core/embedding_test.py +++ b/keras/layers/core/embedding_test.py @@ -2,6 +2,7 @@ import numpy as np import pytest +from absl.testing import parameterized from keras import backend from keras import constraints @@ -13,7 +14,7 @@ from keras.testing import test_case -class EmbeddingTest(test_case.TestCase): +class EmbeddingTest(test_case.TestCase, parameterized.TestCase): @pytest.mark.requires_trainable_backend def test_embedding_basics(self): self.run_layer_test( @@ -317,6 +318,17 @@ def test_quantize_when_already_quantized(self): ): layer.quantize("int8") + @parameterized.named_parameters( + ("int8", "int8_from_float32", 2), + ) + def test_quantize_by_setting_dtype_policy( + self, policy, expected_num_variables + ): + layer = layers.Embedding(10, 16) + layer.build() + layer.dtype_policy = policy + self.assertLen(layer.variables, expected_num_variables) + @pytest.mark.requires_trainable_backend def test_quantize_when_lora_enabled(self): layer = layers.Embedding(10, 16) diff --git a/keras/layers/layer.py b/keras/layers/layer.py index ec38d006e00c..aaa7b1e8ef6f 100644 --- a/keras/layers/layer.py +++ b/keras/layers/layer.py @@ -668,6 +668,17 @@ def set_weights(self, weights): ) variable.assign(value) + @property + def dtype_policy(self): + return self._dtype_policy + + @dtype_policy.setter + def dtype_policy(self, value): + self._dtype_policy = dtype_policies.get(value) + if isinstance(self._dtype_policy, dtype_policies.QuantizedDTypePolicy): + if self.built: + self.quantize(self._dtype_policy.quantization_mode) + @property def dtype(self): """Alias of `layer.variable_dtype`.""" @@ -1127,9 +1138,11 @@ def _check_quantize_args(self, mode, compute_dtype): f"Layer '{self.name}' (of type '{self.__class__.__name__}') " "is not built yet." ) - if mode not in ("int8",): + if mode not in dtype_policies.QUANTIZATION_MODES: raise ValueError( - f"`quantize` must be one of ('int8'). Received: mode={mode}" + "Invalid quantization mode. " + f"Expected one of {dtype_policies.QUANTIZATION_MODES}. 
" + f"Received: mode={mode}" ) if mode == "int8" and compute_dtype == "float16": raise ValueError( diff --git a/keras/layers/layer_test.py b/keras/layers/layer_test.py index 79c695749a6f..ab93d808c728 100644 --- a/keras/layers/layer_test.py +++ b/keras/layers/layer_test.py @@ -2,6 +2,7 @@ import pytest from keras import backend +from keras import dtype_policies from keras import layers from keras import metrics from keras import models @@ -925,3 +926,16 @@ def test_trainable_init_arg(self): self.assertLen(layer.trainable_weights, 2) self.assertLen(model.trainable_weights, 2) self.assertLen(model.non_trainable_weights, 0) + + def test_dtype_policy_setter(self): + layer = layers.Dense(2) + # Set by string + layer.dtype_policy = "mixed_bfloat16" + self.assertEqual(layer.dtype_policy.name, "mixed_bfloat16") + self.assertEqual(layer.dtype_policy.compute_dtype, "bfloat16") + self.assertEqual(layer.dtype_policy.variable_dtype, "float32") + # Set by FloatDTypePolicy + layer.dtype_policy = dtype_policies.FloatDTypePolicy("mixed_float16") + self.assertEqual(layer.dtype_policy.name, "mixed_float16") + self.assertEqual(layer.dtype_policy.compute_dtype, "float16") + self.assertEqual(layer.dtype_policy.variable_dtype, "float32") diff --git a/keras/models/model.py b/keras/models/model.py index 30d33a398cc9..65d915cba7bd 100644 --- a/keras/models/model.py +++ b/keras/models/model.py @@ -357,14 +357,16 @@ def quantize(self, mode): mode: The mode of the quantization. Only 'int8' is supported at this time. """ + from keras.dtype_policies import QUANTIZATION_MODES + if not self.built: raise ValueError( "The model must be built first before calling `quantize()`." ) - if mode not in ("int8",): + if mode not in QUANTIZATION_MODES: raise ValueError( - "Invalid quantization mode. Expected 'int8'. " - f"Received: mode={mode}" + "Invalid quantization mode. " + f"Expected one of {QUANTIZATION_MODES}. 
Received: mode={mode}" ) mode_changed = False for layer in self._flatten_layers(): diff --git a/keras/models/model_test.py b/keras/models/model_test.py index 263a200ba28a..23f647b7d067 100644 --- a/keras/models/model_test.py +++ b/keras/models/model_test.py @@ -563,19 +563,29 @@ def test_functional_list_outputs_invalid_nested_list_losses(self): ): model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0) - def test_quantize(self): + @parameterized.named_parameters( + ("int8", "int8"), + ("float8", "float8"), + ) + def test_quantize(self, mode): model = _get_model() x1 = np.random.rand(2, 3) x2 = np.random.rand(2, 3) - model.quantize("int8") + model.quantize(mode) _ = model((x1, x2)) for layer in model._flatten_layers(): if isinstance(layer, (layers.Dense, layers.EinsumDense)): - self.assertEqual(layer.dtype_policy.name, "int8_from_float32") - self.assertEqual(layer.dtype_policy.quantization_mode, "int8") + self.assertEqual( + layer.dtype_policy.name, f"{mode}_from_float32" + ) + self.assertEqual(layer.dtype_policy.quantization_mode, mode) - def test_quantize_unbuilt(self): + @parameterized.named_parameters( + ("int8", "int8"), + ("float8", "float8"), + ) + def test_quantize_unbuilt(self, mode): class MyModel(Model): def __init__(self): super().__init__() @@ -592,20 +602,24 @@ def call(self, inputs, training=False): with self.assertRaisesRegex( ValueError, "The model must be built first before calling" ): - model.quantize("int8") + model.quantize(mode) x = np.random.rand(2, 3) _ = model(x) - model.quantize("int8") + model.quantize(mode) def test_quantize_invalid_args(self): model = _get_model() with self.assertRaisesRegex( - ValueError, "Invalid quantization mode. Expected 'int8'." + ValueError, "Invalid quantization mode. Expected one of" ): model.quantize("abc") - def test_quantize_nested_model(self): + @parameterized.named_parameters( + ("int8", "int8"), + ("float8", "float8"), + ) + def test_quantize_nested_model(self, mode): class NestedLayer(layers.Layer): def __init__(self, units): super().__init__() @@ -631,13 +645,17 @@ def call(self, x): inputs = layers.Input([3]) outputs = DoubleNestedLayer(8)(inputs) model = Model(inputs, outputs) - model.quantize("int8") - - kernel_count = 0 - for weight in model.weights: - if weight.name == "kernel": - kernel_count += 1 - self.assertEqual( - backend.standardize_dtype(weight.dtype), "int8" - ) - self.assertEqual(kernel_count, 3) + model.quantize(mode) + + if mode == "int8": + kernel_count = 0 + for weight in model.weights: + if weight.name == "kernel": + kernel_count += 1 + self.assertEqual( + backend.standardize_dtype(weight.dtype), "int8" + ) + self.assertEqual(kernel_count, 3) + if mode == "float8": + # kernel + bias + scale * 3 + amax_history * 3 == 8 + self.assertEqual(len(model.weights), 3 * 8) diff --git a/keras/ops/operation.py b/keras/ops/operation.py index 88b327c1ae4c..900833f1a2d9 100644 --- a/keras/ops/operation.py +++ b/keras/ops/operation.py @@ -23,7 +23,7 @@ def __init__(self, dtype=None, name=None): "cannot contain character `/`. 
" f"Received: name={name} (of type {type(name)})" ) - self.dtype_policy = dtype_policies.get(dtype) + self._dtype_policy = dtype_policies.get(dtype) self.name = name self._inbound_nodes = [] self._outbound_nodes = [] @@ -36,7 +36,7 @@ def __call__(self, *args, **kwargs): call_fn = self.symbolic_call else: if isinstance( - self.dtype_policy, dtype_policies.QuantizedDTypePolicy + self._dtype_policy, dtype_policies.QuantizedDTypePolicy ): call_fn = self.quantized_call else: @@ -50,7 +50,7 @@ def __call__(self, *args, **kwargs): # Plain flow. if any_symbolic_tensors(args, kwargs): return self.symbolic_call(*args, **kwargs) - if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): + if isinstance(self._dtype_policy, dtype_policies.QuantizedDTypePolicy): return self.quantized_call(*args, **kwargs) else: return self.call(*args, **kwargs) diff --git a/keras/optimizers/base_optimizer.py b/keras/optimizers/base_optimizer.py index 4dc39d30ad36..8da8ffaf52a3 100644 --- a/keras/optimizers/base_optimizer.py +++ b/keras/optimizers/base_optimizer.py @@ -324,6 +324,14 @@ def apply(self, grads, trainable_variables=None): self._check_variables_are_known(trainable_variables) with backend.name_scope(self.name, caller=self): + # Overwrite targeted variables directly with their gradients if + # their `overwrite_with_gradient` is set. + grads, trainable_variables = ( + self._overwrite_variables_directly_with_gradients( + grads, trainable_variables + ) + ) + # Filter empty gradients. grads, trainable_variables = self._filter_empty_gradients( grads, trainable_variables @@ -583,6 +591,36 @@ def _get_current_learning_rate(self): return self._learning_rate() return self._learning_rate + def _overwrite_variables_directly_with_gradients(self, grads, vars): + """Overwrite the variables directly by their gradients. + + This method is designed for a special case where we want to overwrite + the variable directly with its computed gradient. For example, in float8 + training, new `scale` and `amax_history` are computed as gradients, and + we want to overwrite them directly instead of following the typical + procedure such as gradient descent with a learning rate, gradient + clipping and weight decaying. + + After the update, the processed pairs will be filtered out. 
+ """ + # Shortcut for `tf.Variable` because it doesn't have a + # `overwrite_with_gradient` attr + if not hasattr(vars[0], "overwrite_with_gradient"): + return grads, vars + + # Shallow copies + filtered_grads = list(grads) + filtered_vars = list(vars) + + # Iterate from right to left for safe popping + for i in range(len(filtered_grads) - 1, -1, -1): + g, v = filtered_grads[i], filtered_vars[i] + if v.overwrite_with_gradient: + v.assign(g) + filtered_grads.pop(i) + filtered_vars.pop(i) + return filtered_grads, filtered_vars + def _filter_empty_gradients(self, grads, vars): filtered_grads = list(grads) filtered_vars = list(vars) diff --git a/keras/optimizers/optimizer_test.py b/keras/optimizers/optimizer_test.py index ec1b410fbaa2..2f8ef0c85151 100644 --- a/keras/optimizers/optimizer_test.py +++ b/keras/optimizers/optimizer_test.py @@ -247,12 +247,26 @@ def test_tf_checkpointing(self): def test_callable_learning_rate(self): v = backend.Variable([[1.0, 2.0], [3.0, 4.0]]) grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]]) - optimizer = optimizers.AdamW(learning_rate=lambda: 0.0001) + optimizer = optimizers.SGD(learning_rate=lambda: 0.1) self.assertAllClose(optimizer.iterations, 0) optimizer.apply_gradients([(grads, v)]) - self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]], atol=1e-4) + self.assertAllClose(v, [[0.9, 1.9], [2.9, 3.9]]) self.assertAllClose(optimizer.iterations, 1) + def test_overwrite_with_gradient(self): + v = backend.Variable([[1.0, 2.0], [3.0, 4.0]]) + v.overwrite_with_gradient = True + v2 = backend.Variable([[1.0, 2.0], [3.0, 4.0]]) + grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]]) + grads2 = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]]) + + optimizer = optimizers.SGD(learning_rate=1.0) + optimizer.apply_gradients([(grads, v), (grads2, v2)]) + + # `v` is overwritten by its gradient but `v2` is updated normally + self.assertAllClose(v, [[1.0, 1.0], [1.0, 1.0]]) + self.assertAllClose(v2, [[0.0, 1.0], [2.0, 3.0]]) + def test_setting_lr_to_callable_untracks_lr_var(self): adam = optimizers.Adam(learning_rate=0.001) self.assertLen(adam.variables, 2) diff --git a/keras/quantizers/__init__.py b/keras/quantizers/__init__.py index 6139476a1fcb..1cecf1902d15 100644 --- a/keras/quantizers/__init__.py +++ b/keras/quantizers/__init__.py @@ -4,6 +4,9 @@ from keras.quantizers.quantizers import AbsMaxQuantizer from keras.quantizers.quantizers import Quantizer from keras.quantizers.quantizers import abs_max_quantize +from keras.quantizers.quantizers import compute_float8_amax_history +from keras.quantizers.quantizers import compute_float8_scale +from keras.quantizers.quantizers import quantize_and_dequantize from keras.saving import serialization_lib from keras.utils.naming import to_snake_case diff --git a/keras/quantizers/quantizers.py b/keras/quantizers/quantizers.py index e21f10bcc38b..5c8a4425cbad 100644 --- a/keras/quantizers/quantizers.py +++ b/keras/quantizers/quantizers.py @@ -1,7 +1,11 @@ +import ml_dtypes + from keras import backend from keras import ops from keras.api_export import keras_export +"""Int8-related classes and methods""" + @keras_export(["keras.Quantizer", "keras.quantizers.Quantizer"]) class Quantizer: @@ -100,3 +104,45 @@ def get_config(self): "epsilon": self.epsilon, "output_dtype": self.output_dtype, } + + +"""Float8-related methods""" + + +@keras_export(["keras.quantizers.compute_float8_scale"]) +def compute_float8_scale(amax, scale, dtype_max, margin=0): + # The algorithm for computing the new scale is sourced from + # 
https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/jax.html#transformer_engine.jax.update_fp8_metas + # wherein the `original_scale` corresponds to the reciprocal of the + # `scale` passed in this function. + scale = ops.reciprocal(scale) + sf = ops.divide(ops.divide(dtype_max, amax), 2**margin) + sf = ops.where(amax > 0.0, sf, scale) + sf = ops.where(ops.isfinite(amax), sf, scale) + return ops.reciprocal(sf) + + +@keras_export(["keras.quantizers.compute_float8_amax_history"]) +def compute_float8_amax_history(x, amax_history): + amax_update = ops.cast(ops.max(ops.abs(x)), amax_history.dtype) + new_amax_history = ops.scatter_update( + ops.roll(amax_history, shift=-1), + [[0]], + ops.reshape(amax_update, [1]), + ) + return new_amax_history + + +@keras_export(["keras.quantizers.quantize_and_dequantize"]) +def quantize_and_dequantize(inputs, scale, quantized_dtype, compute_dtype): + # Quantize + quantized_dtype_max = ops.cast( + float(ml_dtypes.finfo(quantized_dtype).max), compute_dtype + ) + x = ops.divide(inputs, ops.cast(scale, compute_dtype)) + x = ops.clip(x, -quantized_dtype_max, quantized_dtype_max) + x = ops.cast(x, quantized_dtype) + + # Dequantize + x = ops.multiply(ops.cast(x, compute_dtype), ops.cast(scale, compute_dtype)) + return x diff --git a/keras/quantizers/quantizers_test.py b/keras/quantizers/quantizers_test.py index 1483fb7cc9c5..bf68fd36fbd1 100644 --- a/keras/quantizers/quantizers_test.py +++ b/keras/quantizers/quantizers_test.py @@ -35,3 +35,43 @@ def test_abs_max_quantizer(self): # Test serialization self.run_class_serialization_test(quantizer) + + def test_compute_float8_scale(self): + amax = 3.0 + scale = 4.0 + dtype_max = 448.0 # float8_e4m3fn + # The algorithm for computing the new scale is sourced from + # https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/jax.html#transformer_engine.jax.update_fp8_metas + expected_scale = 1.0 / (dtype_max / amax) / (2**0) + + computed_scale = quantizers.compute_float8_scale(amax, scale, dtype_max) + self.assertAllClose(computed_scale, expected_scale) + + def test_compute_float8_amax_history(self): + values = random.uniform([3, 4, 5], minval=-1, maxval=1) + amax_history = random.uniform([123]) + amax_from_values = ops.max(ops.abs(values)) + + computed_amax_history = quantizers.compute_float8_amax_history( + values, amax_history + ) + self.assertAllClose(computed_amax_history[0], amax_from_values) + # Shift to left with 1 step + self.assertAllClose( + computed_amax_history[1:], ops.roll(amax_history, -1)[1:] + ) + + def test_quantize_and_dequantize(self): + scale = 1.0 / 100.0 + values = random.uniform([3, 4, 5], minval=-1, maxval=1) + qdq_values = quantizers.quantize_and_dequantize( + values, scale, "float8_e4m3fn", "float32" + ) + # A loose assertion due to an expected quantization error + self.assertAllClose(qdq_values, values, atol=1e-1) + + qdq_values = quantizers.quantize_and_dequantize( + values, scale, "float8_e5m2", "float32" + ) + # A loose assertion due to an expected quantization error + self.assertAllClose(qdq_values, values, atol=5e-1) From 38a0caa8ae647ad18e3da8a16681df0ad9867582 Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Tue, 16 Apr 2024 01:35:31 +0800 Subject: [PATCH 002/101] Add LoRA to ConvND layers (#19516) * Add LoRA to `BaseConv` * Add tests * Fix typo * Fix tests * Fix tests --- keras/layers/convolutional/base_conv.py | 130 +++++++++++++- keras/layers/convolutional/conv_test.py | 219 ++++++++++++++++++++++++ 2 
files changed, 340 insertions(+), 9 deletions(-) diff --git a/keras/layers/convolutional/base_conv.py b/keras/layers/convolutional/base_conv.py index eebbd53fc1c0..689083d94003 100644 --- a/keras/layers/convolutional/base_conv.py +++ b/keras/layers/convolutional/base_conv.py @@ -71,6 +71,15 @@ class BaseConv(Layer): are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. + lora_rank: Optional integer. If set, the layer's forward pass + will implement LoRA (Low-Rank Adaptation) + with the provided rank. LoRA sets the layer's kernel + to non-trainable and replaces it with a delta over the + original kernel, obtained via multiplying two lower-rank + trainable matrices. This can be useful to reduce the + computation cost of fine-tuning large dense layers. + You can also enable LoRA on an existing layer by calling + `layer.enable_lora(rank)`. """ def __init__( @@ -92,16 +101,10 @@ def __init__( activity_regularizer=None, kernel_constraint=None, bias_constraint=None, - trainable=True, - name=None, + lora_rank=None, **kwargs, ): - super().__init__( - trainable=trainable, - name=name, - activity_regularizer=activity_regularizer, - **kwargs, - ) + super().__init__(activity_regularizer=activity_regularizer, **kwargs) self.rank = rank self.filters = filters self.groups = groups @@ -120,6 +123,8 @@ def __init__( self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) + self.lora_rank = lora_rank + self.lora_enabled = False self.input_spec = InputSpec(min_ndim=self.rank + 2) self.data_format = self.data_format @@ -187,7 +192,7 @@ def build(self, input_shape): # shape, and make sure the output shape has all positive dimensions. self.compute_output_shape(input_shape) - self.kernel = self.add_weight( + self._kernel = self.add_weight( name="kernel", shape=kernel_shape, initializer=self.kernel_initializer, @@ -209,6 +214,20 @@ def build(self, input_shape): else: self.bias = None self.built = True + if self.lora_rank: + self.enable_lora(self.lora_rank) + + @property + def kernel(self): + if not self.built: + raise AttributeError( + "You must build the layer before accessing `kernel`." + ) + if self.lora_enabled: + return self._kernel + ops.matmul( + self.lora_kernel_a, self.lora_kernel_b + ) + return self._kernel def convolution_op(self, inputs, kernel): return ops.conv( @@ -248,6 +267,63 @@ def compute_output_shape(self, input_shape): dilation_rate=self.dilation_rate, ) + def enable_lora( + self, rank, a_initializer="he_uniform", b_initializer="zeros" + ): + if self.kernel_constraint: + raise ValueError( + "Lora is incompatible with kernel constraints. " + "In order to enable lora on this layer, remove the " + "`kernel_constraint` argument." + ) + if not self.built: + raise ValueError( + "Cannot enable lora on a layer that isn't yet built." + ) + if self.lora_enabled: + raise ValueError( + "lora is already enabled. " + "This can only be done once per layer." 
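+                # (The effective kernel is
+                # `_kernel + lora_kernel_a @ lora_kernel_b`; enabling LoRA a
+                # second time would stack another delta on top of the first.)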
+ ) + self._tracker.unlock() + self.lora_kernel_a = self.add_weight( + name="lora_kernel_a", + shape=self._kernel.shape[:-1] + (rank,), + initializer=initializers.get(a_initializer), + regularizer=self.kernel_regularizer, + ) + self.lora_kernel_b = self.add_weight( + name="lora_kernel_b", + shape=(rank, self.filters), + initializer=initializers.get(b_initializer), + regularizer=self.kernel_regularizer, + ) + self._kernel.trainable = False + self._tracker.lock() + self.lora_enabled = True + self.lora_rank = rank + + def save_own_variables(self, store): + # Do nothing if the layer isn't yet built + if not self.built: + return + store["0"] = self.kernel + if self.use_bias: + store["1"] = self.bias + + def load_own_variables(self, store): + if not self.lora_enabled: + self._check_load_own_variables(store) + # Do nothing if the layer isn't yet built + if not self.built: + return + self._kernel.assign(store["0"]) + if self.use_bias: + self.bias.assign(store["1"]) + if self.lora_enabled: + self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape)) + self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape)) + def get_config(self): config = super().get_config() config.update( @@ -282,4 +358,40 @@ def get_config(self): "bias_constraint": constraints.serialize(self.bias_constraint), } ) + if self.lora_rank: + config["lora_rank"] = self.lora_rank return config + + def _check_load_own_variables(self, store): + all_vars = self._trainable_variables + self._non_trainable_variables + if len(store.keys()) != len(all_vars): + if len(all_vars) == 0 and not self.built: + raise ValueError( + f"Layer '{self.name}' was never built " + "and thus it doesn't have any variables. " + f"However the weights file lists {len(store.keys())} " + "variables for this layer.\n" + "In most cases, this error indicates that either:\n\n" + "1. The layer is owned by a parent layer that " + "implements a `build()` method, but calling the " + "parent's `build()` method did NOT create the state of " + f"the child layer '{self.name}'. A `build()` method " + "must create ALL state for the layer, including " + "the state of any children layers.\n\n" + "2. You need to implement " + "the `def build_from_config(self, config)` method " + f"on layer '{self.name}', to specify how to rebuild " + "it during loading. " + "In this case, you might also want to implement the " + "method that generates the build config at saving time, " + "`def get_build_config(self)`. " + "The method `build_from_config()` is meant " + "to create the state " + "of the layer (i.e. its variables) upon deserialization.", + ) + raise ValueError( + f"Layer '{self.name}' expected {len(all_vars)} variables, " + "but received " + f"{len(store.keys())} variables during loading. 
" + f"Expected: {[v.name for v in all_vars]}" + ) diff --git a/keras/layers/convolutional/conv_test.py b/keras/layers/convolutional/conv_test.py index 62ec5de8b0ea..281e35a960e5 100644 --- a/keras/layers/convolutional/conv_test.py +++ b/keras/layers/convolutional/conv_test.py @@ -1,10 +1,15 @@ +import os + import numpy as np import pytest from absl.testing import parameterized from numpy.lib.stride_tricks import as_strided +from keras import backend from keras import constraints from keras import layers +from keras import models +from keras import saving from keras import testing @@ -538,6 +543,220 @@ def test_bad_init_args(self): ): layers.Conv2D(filters=5, kernel_size=(2, 2), groups=2) + @parameterized.named_parameters( + { + "testcase_name": "conv1d_kernel_size3_strides1", + "conv_cls": layers.Conv1D, + "filters": 6, + "kernel_size": 3, + "strides": 1, + "padding": "valid", + "data_format": "channels_last", + "dilation_rate": 1, + "groups": 1, + "input_shape": (None, 5, 4), + "output_shape": (None, 3, 6), + }, + { + "testcase_name": "conv1d_kernel_size2_strides2", + "conv_cls": layers.Conv1D, + "filters": 6, + "kernel_size": 2, + "strides": 2, + "padding": "valid", + "data_format": "channels_last", + "dilation_rate": 1, + "groups": 2, + "input_shape": (None, 5, 4), + "output_shape": (None, 2, 6), + }, + { + "testcase_name": "conv2d_kernel_size3_strides1", + "conv_cls": layers.Conv2D, + "filters": 6, + "kernel_size": 3, + "strides": 1, + "padding": "valid", + "data_format": "channels_last", + "dilation_rate": 1, + "groups": 1, + "input_shape": (None, 5, 5, 4), + "output_shape": (None, 3, 3, 6), + }, + { + "testcase_name": "conv2d_kernel_size2_strides2", + "conv_cls": layers.Conv2D, + "filters": 6, + "kernel_size": 2, + "strides": 2, + "padding": "valid", + "data_format": "channels_last", + "dilation_rate": 1, + "groups": 2, + "input_shape": (None, 5, 5, 4), + "output_shape": (None, 2, 2, 6), + }, + { + "testcase_name": "conv3d_kernel_size3_strides1", + "conv_cls": layers.Conv3D, + "filters": 6, + "kernel_size": 3, + "strides": 1, + "padding": "valid", + "data_format": "channels_last", + "dilation_rate": 1, + "groups": 1, + "input_shape": (None, 5, 5, 5, 4), + "output_shape": (None, 3, 3, 3, 6), + }, + { + "testcase_name": "conv3d_kernel_size2_strides2", + "conv_cls": layers.Conv3D, + "filters": 6, + "kernel_size": 2, + "strides": 2, + "padding": "valid", + "data_format": "channels_last", + "dilation_rate": 1, + "groups": 2, + "input_shape": (None, 5, 5, 5, 4), + "output_shape": (None, 2, 2, 2, 6), + }, + ) + @pytest.mark.requires_trainable_backend + def test_enable_lora( + self, + conv_cls, + filters, + kernel_size, + strides, + padding, + data_format, + dilation_rate, + groups, + input_shape, + output_shape, + ): + if conv_cls not in (layers.Conv1D, layers.Conv2D, layers.Conv3D): + raise TypeError + layer = conv_cls( + filters=filters, + kernel_size=kernel_size, + strides=strides, + padding=padding, + data_format=data_format, + dilation_rate=dilation_rate, + groups=groups, + ) + layer.build(input_shape) + layer.enable_lora(2) + self.assertLen(layer.trainable_weights, 3) + self.assertLen(layer.non_trainable_weights, 1) + if backend.backend() == "torch": + self.assertLen(layer.torch_params, 4) + # Try eager call + x = np.random.random((64,) + input_shape[1:]) + y = np.random.random((64,) + output_shape[1:]) + _ = layer(x[:2]) + + init_lora_a_kernel_value = layer.lora_kernel_a.numpy() + init_lora_b_kernel_value = layer.lora_kernel_b.numpy() + + # Try calling fit() + model = 
models.Sequential([layer]) + model.compile(optimizer="sgd", loss="mse") + model.fit(x, y) + + final_lora_a_kernel_value = layer.lora_kernel_a.numpy() + final_lora_b_kernel_value = layer.lora_kernel_b.numpy() + diff_a = np.max( + np.abs(init_lora_a_kernel_value - final_lora_a_kernel_value) + ) + diff_b = np.max( + np.abs(init_lora_b_kernel_value - final_lora_b_kernel_value) + ) + self.assertGreater(diff_a, 0.0) + self.assertGreater(diff_b, 0.0) + + # Try saving and reloading the model + temp_filepath = os.path.join(self.get_temp_dir(), "lora_model.keras") + model.save(temp_filepath) + + new_model = saving.load_model(temp_filepath) + self.assertTrue(new_model.layers[0].lora_enabled) + self.assertAllClose(model.predict(x), new_model.predict(x)) + + # Try saving and reloading the model's weights only + temp_filepath = os.path.join( + self.get_temp_dir(), "lora_model.weights.h5" + ) + model.save_weights(temp_filepath) + + # Load the file into a fresh, non-lora model + new_model = models.Sequential( + [ + conv_cls( + filters=filters, + kernel_size=kernel_size, + strides=strides, + padding=padding, + data_format=data_format, + dilation_rate=dilation_rate, + groups=groups, + ) + ] + ) + new_model.build(input_shape) + new_model.load_weights(temp_filepath) + self.assertAllClose(model.predict(x), new_model.predict(x)) + + # Try loading a normal checkpoint into a lora model + new_model.save_weights(temp_filepath) + model.load_weights(temp_filepath) + self.assertAllClose(model.predict(x), new_model.predict(x)) + + @pytest.mark.requires_trainable_backend + def test_lora_weight_name(self): + + class MyModel(models.Model): + def __init__(self): + super().__init__(name="mymodel") + self.conv2d = layers.Conv2D(4, 3, name="conv2d") + + def build(self, input_shape): + self.conv2d.build(input_shape) + + def call(self, x): + return self.conv2d(x) + + model = MyModel() + model.build((None, 5, 5, 4)) + model.conv2d.enable_lora(2) + self.assertEqual( + model.conv2d.lora_kernel_a.path, "mymodel/conv2d/lora_kernel_a" + ) + + @pytest.mark.requires_trainable_backend + def test_lora_rank_argument(self): + self.run_layer_test( + layers.Conv2D, + init_kwargs={ + "filters": 5, + "kernel_size": 3, + "activation": "sigmoid", + "data_format": "channels_last", + "kernel_regularizer": "l2", + "lora_rank": 2, + }, + input_shape=(2, 5, 5, 4), + expected_output_shape=(2, 3, 3, 5), + expected_num_trainable_weights=3, + expected_num_non_trainable_weights=1, + expected_num_seed_generators=0, + expected_num_losses=2, # we have 2 regularizers. + supports_masking=False, + ) + class ConvCorrectnessTest(testing.TestCase, parameterized.TestCase): @parameterized.parameters( From c458201d9a0e5b50f3f7b3e638cc20867222b8a7 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Mon, 15 Apr 2024 12:04:14 -0700 Subject: [PATCH 003/101] Add path to run keras on dm-tree when optree is not available. 
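
The refactor keeps a single public surface in `keras.tree` and picks a
backend implementation when the package is imported: `optree` where it is
installed, `dm-tree` otherwise (the diffstat below shows the split into
`optree_impl.py`, `dmtree_impl.py`, and `tree_api.py`). The following is a
minimal sketch of that selection. It assumes only that importing the
optree-backed module raises `ImportError` when `optree` is absent (the
actual patch routes the availability check through
`keras.utils.module_utils`); any helper names beyond the public
`keras.tree` functions are illustrative:

    # tree_api.py (sketch): choose an implementation once and delegate.
    try:
        from keras.tree import optree_impl as _impl  # fast compiled path
    except ImportError:
        from keras.tree import dmtree_impl as _impl  # dm-tree fallback

    def flatten(structure):
        # Identical contract under either backend: leaves are returned in
        # a deterministic (sorted dict key) order.
        return _impl.flatten(structure)

    # e.g. flatten({"b": (2, 3), "a": 1}) -> [1, 2, 3]

Call sites then move from `from keras.utils import tree` to
`from keras import tree`, which accounts for the bulk of the mechanical
changes below.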
--- keras/backend/common/keras_tensor.py | 2 +- keras/backend/jax/core.py | 2 +- keras/backend/jax/rnn.py | 2 +- keras/backend/jax/trainer.py | 2 +- keras/backend/numpy/core.py | 2 +- keras/backend/numpy/numpy.py | 2 +- keras/backend/numpy/rnn.py | 2 +- keras/backend/numpy/trainer.py | 2 +- keras/backend/tensorflow/core.py | 2 +- keras/backend/tensorflow/layer.py | 2 +- keras/backend/tensorflow/numpy.py | 2 +- keras/backend/tensorflow/rnn.py | 2 +- keras/backend/tensorflow/trainer.py | 2 +- keras/backend/torch/core.py | 2 +- keras/backend/torch/nn.py | 2 +- keras/backend/torch/rnn.py | 2 +- keras/backend/torch/trainer.py | 2 +- keras/callbacks/callback_list.py | 2 +- keras/callbacks/tensorboard.py | 2 +- keras/export/export_lib.py | 2 +- keras/export/export_lib_test.py | 2 +- keras/layers/core/identity.py | 2 +- keras/layers/core/lambda_layer.py | 2 +- keras/layers/input_spec.py | 2 +- keras/layers/layer.py | 2 +- keras/layers/preprocessing/feature_space.py | 2 +- keras/layers/preprocessing/tf_data_layer.py | 2 +- keras/layers/rnn/conv_lstm.py | 2 +- keras/layers/rnn/gru.py | 2 +- keras/layers/rnn/lstm.py | 2 +- keras/layers/rnn/rnn.py | 2 +- keras/layers/rnn/stacked_rnn_cells.py | 2 +- keras/legacy/saving/saving_utils.py | 2 +- keras/losses/loss.py | 2 +- keras/models/cloning.py | 2 +- keras/models/cloning_test.py | 2 +- keras/models/functional.py | 2 +- keras/models/sequential.py | 2 +- keras/ops/core.py | 2 +- keras/ops/core_test.py | 2 +- keras/ops/function.py | 2 +- keras/ops/node.py | 2 +- keras/ops/operation.py | 2 +- keras/ops/operation_utils.py | 2 +- keras/ops/symbolic_arguments.py | 2 +- keras/ops/symbolic_arguments_test.py | 2 +- keras/testing/test_case.py | 2 +- keras/trainers/compile_utils.py | 2 +- .../data_adapters/array_data_adapter.py | 2 +- keras/trainers/data_adapters/array_slicing.py | 2 +- .../data_adapters/data_adapter_utils.py | 2 +- .../data_adapters/generator_data_adapter.py | 2 +- .../data_adapters/tf_dataset_adapter.py | 2 +- .../torch_data_loader_adapter.py | 2 +- keras/trainers/trainer.py | 2 +- keras/tree/__init__.py | 10 + keras/tree/dmtree_impl.py | 153 ++++++++++ keras/{utils/tree.py => tree/optree_impl.py} | 277 +----------------- keras/tree/tree_api.py | 275 +++++++++++++++++ keras/{utils => tree}/tree_test.py | 45 +-- keras/utils/jax_layer.py | 2 +- keras/utils/jax_layer_test.py | 2 +- keras/utils/model_visualization.py | 2 +- keras/utils/module_utils.py | 2 + keras/utils/summary_utils.py | 2 +- keras/utils/traceback_utils.py | 2 +- keras/utils/tracking.py | 14 +- 67 files changed, 516 insertions(+), 380 deletions(-) create mode 100644 keras/tree/__init__.py create mode 100644 keras/tree/dmtree_impl.py rename keras/{utils/tree.py => tree/optree_impl.py} (53%) create mode 100644 keras/tree/tree_api.py rename keras/{utils => tree}/tree_test.py (84%) diff --git a/keras/backend/common/keras_tensor.py b/keras/backend/common/keras_tensor.py index c741d5ce8210..af030407f231 100644 --- a/keras/backend/common/keras_tensor.py +++ b/keras/backend/common/keras_tensor.py @@ -1,5 +1,5 @@ +from keras import tree from keras.api_export import keras_export -from keras.utils import tree from keras.utils.naming import auto_name diff --git a/keras/backend/jax/core.py b/keras/backend/jax/core.py index 5d678dafec49..7b37807239f4 100644 --- a/keras/backend/jax/core.py +++ b/keras/backend/jax/core.py @@ -4,13 +4,13 @@ import ml_dtypes import numpy as np +from keras import tree from keras.backend.common import KerasVariable from keras.backend.common import global_state from 
keras.backend.common import standardize_dtype from keras.backend.common.keras_tensor import KerasTensor from keras.backend.common.stateless_scope import StatelessScope from keras.backend.jax import distribution_lib -from keras.utils import tree SUPPORTS_SPARSE_TENSORS = True diff --git a/keras/backend/jax/rnn.py b/keras/backend/jax/rnn.py index 3e80cd1d20e4..b6676fc8d3a9 100644 --- a/keras/backend/jax/rnn.py +++ b/keras/backend/jax/rnn.py @@ -3,8 +3,8 @@ from jax import lax from jax import numpy as jnp +from keras import tree from keras.backend.common import stateless_scope -from keras.utils import tree def rnn( diff --git a/keras/backend/jax/trainer.py b/keras/backend/jax/trainer.py index 1f867cbcab1b..85308f34175a 100644 --- a/keras/backend/jax/trainer.py +++ b/keras/backend/jax/trainer.py @@ -8,6 +8,7 @@ from keras import backend from keras import callbacks as callbacks_module from keras import optimizers as optimizers_module +from keras import tree from keras.backend import distribution_lib as jax_distribution_lib from keras.distribution import distribution_lib from keras.trainers import trainer as base_trainer @@ -15,7 +16,6 @@ from keras.trainers.data_adapters import data_adapter_utils from keras.trainers.epoch_iterator import EpochIterator from keras.utils import traceback_utils -from keras.utils import tree class JAXTrainer(base_trainer.Trainer): diff --git a/keras/backend/numpy/core.py b/keras/backend/numpy/core.py index f3a30c8d6691..dcd01554f2cb 100644 --- a/keras/backend/numpy/core.py +++ b/keras/backend/numpy/core.py @@ -1,11 +1,11 @@ import numpy as np +from keras import tree from keras.backend.common import KerasVariable from keras.backend.common import standardize_dtype from keras.backend.common.dtypes import result_type from keras.backend.common.keras_tensor import KerasTensor from keras.backend.common.stateless_scope import StatelessScope -from keras.utils import tree SUPPORTS_SPARSE_TENSORS = False diff --git a/keras/backend/numpy/numpy.py b/keras/backend/numpy/numpy.py index c213606d393e..5965ce380218 100644 --- a/keras/backend/numpy/numpy.py +++ b/keras/backend/numpy/numpy.py @@ -1,11 +1,11 @@ import numpy as np +from keras import tree from keras.backend import config from keras.backend import standardize_dtype from keras.backend.common import dtypes from keras.backend.common.backend_utils import standardize_axis_for_numpy from keras.backend.numpy.core import convert_to_tensor -from keras.utils import tree def add(x1, x2): diff --git a/keras/backend/numpy/rnn.py b/keras/backend/numpy/rnn.py index 706667c392ab..6d760f0f6ebe 100644 --- a/keras/backend/numpy/rnn.py +++ b/keras/backend/numpy/rnn.py @@ -1,6 +1,6 @@ import numpy as np -from keras.utils import tree +from keras import tree def rnn( diff --git a/keras/backend/numpy/trainer.py b/keras/backend/numpy/trainer.py index 7b56ee29c34c..700e4b856dce 100644 --- a/keras/backend/numpy/trainer.py +++ b/keras/backend/numpy/trainer.py @@ -2,6 +2,7 @@ from keras import backend from keras import callbacks as callbacks_module +from keras import tree from keras.backend.common import standardize_dtype from keras.backend.common.keras_tensor import KerasTensor from keras.backend.numpy.core import is_tensor @@ -9,7 +10,6 @@ from keras.trainers.data_adapters import data_adapter_utils from keras.trainers.epoch_iterator import EpochIterator from keras.utils import traceback_utils -from keras.utils import tree class NumpyTrainer(base_trainer.Trainer): diff --git a/keras/backend/tensorflow/core.py b/keras/backend/tensorflow/core.py index 
a3a077c4398a..6e72539d11be 100644 --- a/keras/backend/tensorflow/core.py +++ b/keras/backend/tensorflow/core.py @@ -2,6 +2,7 @@ import tensorflow as tf from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice +from keras import tree from keras.backend.common import KerasVariable from keras.backend.common import global_state from keras.backend.common import standardize_dtype @@ -10,7 +11,6 @@ from keras.backend.common.stateless_scope import StatelessScope from keras.backend.common.stateless_scope import in_stateless_scope from keras.backend.tensorflow.sparse import sparse_to_dense -from keras.utils import tree from keras.utils.naming import auto_name SUPPORTS_SPARSE_TENSORS = True diff --git a/keras/backend/tensorflow/layer.py b/keras/backend/tensorflow/layer.py index 9b586bc9ec95..7c871ba67cf8 100644 --- a/keras/backend/tensorflow/layer.py +++ b/keras/backend/tensorflow/layer.py @@ -1,9 +1,9 @@ import tensorflow as tf +from keras import tree from keras.backend.tensorflow.trackable import KerasAutoTrackable from keras.utils import tf_utils from keras.utils import tracking -from keras.utils import tree class TFLayer(KerasAutoTrackable): diff --git a/keras/backend/tensorflow/numpy.py b/keras/backend/tensorflow/numpy.py index e479897dd90a..1b6b754c9d71 100644 --- a/keras/backend/tensorflow/numpy.py +++ b/keras/backend/tensorflow/numpy.py @@ -10,6 +10,7 @@ from tensorflow.experimental import numpy as tfnp from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops +from keras import tree from keras.backend import config from keras.backend import standardize_dtype from keras.backend.common import dtypes @@ -18,7 +19,6 @@ from keras.backend.tensorflow import sparse from keras.backend.tensorflow.core import cast from keras.backend.tensorflow.core import convert_to_tensor -from keras.utils import tree @sparse.elementwise_binary_union(tf.sparse.add) diff --git a/keras/backend/tensorflow/rnn.py b/keras/backend/tensorflow/rnn.py index dd374a9ea116..c9de498a6cf6 100644 --- a/keras/backend/tensorflow/rnn.py +++ b/keras/backend/tensorflow/rnn.py @@ -1,6 +1,6 @@ import tensorflow as tf -from keras.utils import tree +from keras import tree def rnn( diff --git a/keras/backend/tensorflow/trainer.py b/keras/backend/tensorflow/trainer.py index fb90b1db4a7e..e1f0afaa1cf3 100644 --- a/keras/backend/tensorflow/trainer.py +++ b/keras/backend/tensorflow/trainer.py @@ -8,12 +8,12 @@ from keras import callbacks as callbacks_module from keras import metrics as metrics_module from keras import optimizers as optimizers_module +from keras import tree from keras.trainers import trainer as base_trainer from keras.trainers.data_adapters import array_slicing from keras.trainers.data_adapters import data_adapter_utils from keras.trainers.epoch_iterator import EpochIterator from keras.utils import traceback_utils -from keras.utils import tree class TensorFlowTrainer(base_trainer.Trainer): diff --git a/keras/backend/torch/core.py b/keras/backend/torch/core.py index 76dcc95db6f7..bb9bd98ff401 100644 --- a/keras/backend/torch/core.py +++ b/keras/backend/torch/core.py @@ -5,6 +5,7 @@ import numpy as np import torch +from keras import tree from keras.backend.common import KerasVariable from keras.backend.common import global_state from keras.backend.common import standardize_dtype @@ -12,7 +13,6 @@ from keras.backend.common.keras_tensor import KerasTensor from keras.backend.common.stateless_scope import StatelessScope from keras.backend.config import floatx -from keras.utils import tree 
SUPPORTS_SPARSE_TENSORS = False diff --git a/keras/backend/torch/nn.py b/keras/backend/torch/nn.py index af7aba02ddd0..f2105b5d5e12 100644 --- a/keras/backend/torch/nn.py +++ b/keras/backend/torch/nn.py @@ -1,6 +1,7 @@ import torch import torch.nn.functional as tnn +from keras import tree from keras.backend import standardize_data_format from keras.backend import standardize_dtype from keras.backend.common.backend_utils import ( @@ -13,7 +14,6 @@ from keras.backend.torch.numpy import expand_dims from keras.backend.torch.numpy import maximum from keras.backend.torch.numpy import where -from keras.utils import tree from keras.utils.argument_validation import standardize_tuple diff --git a/keras/backend/torch/rnn.py b/keras/backend/torch/rnn.py index e508fad5891e..163f2bc26227 100644 --- a/keras/backend/torch/rnn.py +++ b/keras/backend/torch/rnn.py @@ -1,7 +1,7 @@ import torch +from keras import tree from keras.backend.torch.core import convert_to_tensor -from keras.utils import tree def rnn( diff --git a/keras/backend/torch/trainer.py b/keras/backend/torch/trainer.py index 0712421dd521..f4cca60f84a4 100644 --- a/keras/backend/torch/trainer.py +++ b/keras/backend/torch/trainer.py @@ -7,12 +7,12 @@ from keras import backend from keras import callbacks as callbacks_module from keras import optimizers as optimizers_module +from keras import tree from keras.trainers import trainer as base_trainer from keras.trainers.data_adapters import array_slicing from keras.trainers.data_adapters import data_adapter_utils from keras.trainers.epoch_iterator import EpochIterator from keras.utils import traceback_utils -from keras.utils import tree class TorchTrainer(base_trainer.Trainer): diff --git a/keras/callbacks/callback_list.py b/keras/callbacks/callback_list.py index 363dce2bfabe..55d2567dbf35 100644 --- a/keras/callbacks/callback_list.py +++ b/keras/callbacks/callback_list.py @@ -1,8 +1,8 @@ +from keras import tree from keras.api_export import keras_export from keras.callbacks.callback import Callback from keras.callbacks.history import History from keras.callbacks.progbar_logger import ProgbarLogger -from keras.utils import tree @keras_export("keras.callbacks.CallbackList") diff --git a/keras/callbacks/tensorboard.py b/keras/callbacks/tensorboard.py index 03354d76badf..0a41c2b19e04 100644 --- a/keras/callbacks/tensorboard.py +++ b/keras/callbacks/tensorboard.py @@ -6,12 +6,12 @@ from keras import backend from keras import ops +from keras import tree from keras.api_export import keras_export from keras.callbacks.callback import Callback from keras.layers import Embedding from keras.optimizers import Optimizer from keras.utils import file_utils -from keras.utils import tree @keras_export("keras.callbacks.TensorBoard") diff --git a/keras/export/export_lib.py b/keras/export/export_lib.py index b33a443f74d7..a06901a3cae0 100644 --- a/keras/export/export_lib.py +++ b/keras/export/export_lib.py @@ -7,13 +7,13 @@ from absl import logging from keras import backend +from keras import tree from keras.api_export import keras_export from keras.backend.common.stateless_scope import StatelessScope from keras.layers import Layer from keras.models import Functional from keras.models import Sequential from keras.utils import io_utils -from keras.utils import tree from keras.utils.module_utils import tensorflow as tf diff --git a/keras/export/export_lib_test.py b/keras/export/export_lib_test.py index 6e623a4526eb..e53a84ce9a4b 100644 --- a/keras/export/export_lib_test.py +++ b/keras/export/export_lib_test.py @@ -13,11 
+13,11 @@ from keras import ops from keras import random from keras import testing +from keras import tree from keras import utils from keras.export import export_lib from keras.saving import saving_lib from keras.testing.test_utils import named_product -from keras.utils import tree class CustomModel(models.Model): diff --git a/keras/layers/core/identity.py b/keras/layers/core/identity.py index 486441728351..b1e1cd50912f 100644 --- a/keras/layers/core/identity.py +++ b/keras/layers/core/identity.py @@ -1,7 +1,7 @@ +from keras import tree from keras.api_export import keras_export from keras.backend import KerasTensor from keras.layers.layer import Layer -from keras.utils import tree @keras_export("keras.layers.Identity") diff --git a/keras/layers/core/lambda_layer.py b/keras/layers/core/lambda_layer.py index 51f7ed5e7528..6ffb16ca7782 100644 --- a/keras/layers/core/lambda_layer.py +++ b/keras/layers/core/lambda_layer.py @@ -2,11 +2,11 @@ import types from keras import backend +from keras import tree from keras.api_export import keras_export from keras.layers.layer import Layer from keras.saving import serialization_lib from keras.utils import python_utils -from keras.utils import tree @keras_export("keras.layers.Lambda") diff --git a/keras/layers/input_spec.py b/keras/layers/input_spec.py index f6f53fab5330..6f47bd36aa80 100644 --- a/keras/layers/input_spec.py +++ b/keras/layers/input_spec.py @@ -1,6 +1,6 @@ from keras import backend +from keras import tree from keras.api_export import keras_export -from keras.utils import tree @keras_export(["keras.InputSpec", "keras.layers.InputSpec"]) diff --git a/keras/layers/layer.py b/keras/layers/layer.py index aaa7b1e8ef6f..a19a29f7f229 100644 --- a/keras/layers/layer.py +++ b/keras/layers/layer.py @@ -26,6 +26,7 @@ from keras import dtype_policies from keras import initializers from keras import regularizers +from keras import tree from keras import utils from keras.api_export import keras_export from keras.backend import KerasTensor @@ -39,7 +40,6 @@ from keras.utils import summary_utils from keras.utils import traceback_utils from keras.utils import tracking -from keras.utils import tree if backend.backend() == "tensorflow": from keras.backend.tensorflow.layer import TFLayer as BackendLayer diff --git a/keras/layers/preprocessing/feature_space.py b/keras/layers/preprocessing/feature_space.py index 7a0508d64590..c2b0a9b15581 100644 --- a/keras/layers/preprocessing/feature_space.py +++ b/keras/layers/preprocessing/feature_space.py @@ -1,11 +1,11 @@ from keras import backend from keras import layers +from keras import tree from keras.api_export import keras_export from keras.layers.layer import Layer from keras.saving import saving_lib from keras.saving import serialization_lib from keras.utils import backend_utils -from keras.utils import tree from keras.utils.module_utils import tensorflow as tf from keras.utils.naming import auto_name diff --git a/keras/layers/preprocessing/tf_data_layer.py b/keras/layers/preprocessing/tf_data_layer.py index 7184dd9d70b9..74cf6515ce3c 100644 --- a/keras/layers/preprocessing/tf_data_layer.py +++ b/keras/layers/preprocessing/tf_data_layer.py @@ -1,9 +1,9 @@ import keras.backend +from keras import tree from keras.layers.layer import Layer from keras.random.seed_generator import SeedGenerator from keras.utils import backend_utils from keras.utils import tracking -from keras.utils import tree class TFDataLayer(Layer): diff --git a/keras/layers/rnn/conv_lstm.py b/keras/layers/rnn/conv_lstm.py index 
6333de0db755..e8e3ee3ffebe 100644 --- a/keras/layers/rnn/conv_lstm.py +++ b/keras/layers/rnn/conv_lstm.py @@ -4,13 +4,13 @@ from keras import initializers from keras import ops from keras import regularizers +from keras import tree from keras.layers.input_spec import InputSpec from keras.layers.layer import Layer from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell from keras.layers.rnn.rnn import RNN from keras.ops import operation_utils from keras.utils import argument_validation -from keras.utils import tree class ConvLSTMCell(Layer, DropoutRNNCell): diff --git a/keras/layers/rnn/gru.py b/keras/layers/rnn/gru.py index a77ac8a6b08d..d1b03e256cb0 100644 --- a/keras/layers/rnn/gru.py +++ b/keras/layers/rnn/gru.py @@ -4,12 +4,12 @@ from keras import initializers from keras import ops from keras import regularizers +from keras import tree from keras.api_export import keras_export from keras.layers.input_spec import InputSpec from keras.layers.layer import Layer from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell from keras.layers.rnn.rnn import RNN -from keras.utils import tree @keras_export("keras.layers.GRUCell") diff --git a/keras/layers/rnn/lstm.py b/keras/layers/rnn/lstm.py index db5c13891bd6..13c32278418f 100644 --- a/keras/layers/rnn/lstm.py +++ b/keras/layers/rnn/lstm.py @@ -4,12 +4,12 @@ from keras import initializers from keras import ops from keras import regularizers +from keras import tree from keras.api_export import keras_export from keras.layers.input_spec import InputSpec from keras.layers.layer import Layer from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell from keras.layers.rnn.rnn import RNN -from keras.utils import tree @keras_export("keras.layers.LSTMCell") diff --git a/keras/layers/rnn/rnn.py b/keras/layers/rnn/rnn.py index 8ab894101766..6880ab26488a 100644 --- a/keras/layers/rnn/rnn.py +++ b/keras/layers/rnn/rnn.py @@ -1,12 +1,12 @@ from keras import backend from keras import ops +from keras import tree from keras.api_export import keras_export from keras.layers.layer import Layer from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell from keras.layers.rnn.stacked_rnn_cells import StackedRNNCells from keras.saving import serialization_lib from keras.utils import tracking -from keras.utils import tree @keras_export("keras.layers.RNN") diff --git a/keras/layers/rnn/stacked_rnn_cells.py b/keras/layers/rnn/stacked_rnn_cells.py index 921202b48fa5..20502d4655c7 100644 --- a/keras/layers/rnn/stacked_rnn_cells.py +++ b/keras/layers/rnn/stacked_rnn_cells.py @@ -1,8 +1,8 @@ from keras import ops +from keras import tree from keras.api_export import keras_export from keras.layers.layer import Layer from keras.saving import serialization_lib -from keras.utils import tree @keras_export("keras.layers.StackedRNNCells") diff --git a/keras/legacy/saving/saving_utils.py b/keras/legacy/saving/saving_utils.py index c06ea8424d07..69e3553bdf15 100644 --- a/keras/legacy/saving/saving_utils.py +++ b/keras/legacy/saving/saving_utils.py @@ -9,9 +9,9 @@ from keras import metrics as metrics_module from keras import models from keras import optimizers +from keras import tree from keras.legacy.saving import serialization from keras.saving import object_registration -from keras.utils import tree MODULE_OBJECTS = threading.local() diff --git a/keras/losses/loss.py b/keras/losses/loss.py index b6a601f60cd9..ad432e62dc37 100644 --- a/keras/losses/loss.py +++ b/keras/losses/loss.py @@ -1,7 +1,7 @@ from keras import backend from keras import ops +from keras import tree 
from keras.api_export import keras_export -from keras.utils import tree from keras.utils.naming import auto_name diff --git a/keras/models/cloning.py b/keras/models/cloning.py index fbc0ea09097e..e17655dc582e 100644 --- a/keras/models/cloning.py +++ b/keras/models/cloning.py @@ -1,4 +1,5 @@ from keras import backend +from keras import tree from keras import utils from keras.api_export import keras_export from keras.layers import Input @@ -7,7 +8,6 @@ from keras.models.functional import functional_like_constructor from keras.models.sequential import Sequential from keras.saving import serialization_lib -from keras.utils import tree @keras_export("keras.models.clone_model") diff --git a/keras/models/cloning_test.py b/keras/models/cloning_test.py index 14ad98715971..340856f33a89 100644 --- a/keras/models/cloning_test.py +++ b/keras/models/cloning_test.py @@ -5,8 +5,8 @@ from keras import layers from keras import models from keras import testing +from keras import tree from keras.models.cloning import clone_model -from keras.utils import tree def get_mlp_functional_model(shared_layers=False): diff --git a/keras/models/functional.py b/keras/models/functional.py index 68b47b3d7a99..de6c8d82a5f1 100644 --- a/keras/models/functional.py +++ b/keras/models/functional.py @@ -5,6 +5,7 @@ from keras import backend from keras import ops +from keras import tree from keras.backend.common import global_state from keras.layers.core.input_layer import Input from keras.layers.core.input_layer import InputLayer @@ -20,7 +21,6 @@ from keras.ops.node import Node from keras.saving import serialization_lib from keras.utils import tracking -from keras.utils import tree class Functional(Function, Model): diff --git a/keras/models/sequential.py b/keras/models/sequential.py index a86ea79b3b28..e094a5df5eee 100644 --- a/keras/models/sequential.py +++ b/keras/models/sequential.py @@ -2,6 +2,7 @@ import inspect import typing +from keras import tree from keras.api_export import keras_export from keras.backend.common import global_state from keras.layers.core.input_layer import InputLayer @@ -11,7 +12,6 @@ from keras.models.functional import Functional from keras.models.model import Model from keras.saving import serialization_lib -from keras.utils import tree @keras_export(["keras.Sequential", "keras.models.Sequential"]) diff --git a/keras/ops/core.py b/keras/ops/core.py index 04f0a00cc991..c542f79f8847 100644 --- a/keras/ops/core.py +++ b/keras/ops/core.py @@ -17,12 +17,12 @@ import numpy as np from keras import backend +from keras import tree from keras.api_export import keras_export from keras.backend import KerasTensor from keras.backend import any_symbolic_tensors from keras.ops.operation import Operation from keras.utils import traceback_utils -from keras.utils import tree class Scatter(Operation): diff --git a/keras/ops/core_test.py b/keras/ops/core_test.py index 1c09bd2f385d..fe1284d1252b 100644 --- a/keras/ops/core_test.py +++ b/keras/ops/core_test.py @@ -12,10 +12,10 @@ from keras import ops from keras import optimizers from keras import testing +from keras import tree from keras.backend.common import dtypes from keras.backend.common.keras_tensor import KerasTensor from keras.ops import core -from keras.utils import tree class CoreOpsStaticShapeTest(testing.TestCase): diff --git a/keras/ops/function.py b/keras/ops/function.py index 48a21a8bd3b8..7ae8244c77c2 100644 --- a/keras/ops/function.py +++ b/keras/ops/function.py @@ -1,10 +1,10 @@ import collections +from keras import tree from keras.api_export import 
keras_export from keras.backend import KerasTensor from keras.backend.config import backend from keras.ops.operation import Operation -from keras.utils import tree @keras_export("keras.Function") diff --git a/keras/ops/node.py b/keras/ops/node.py index e3c2fa219ca8..dc775920c0f9 100644 --- a/keras/ops/node.py +++ b/keras/ops/node.py @@ -1,8 +1,8 @@ import collections +from keras import tree from keras.backend import KerasTensor from keras.ops.symbolic_arguments import SymbolicArguments -from keras.utils import tree class Node: diff --git a/keras/ops/operation.py b/keras/ops/operation.py index 900833f1a2d9..547e8f3f387d 100644 --- a/keras/ops/operation.py +++ b/keras/ops/operation.py @@ -3,12 +3,12 @@ from keras import backend from keras import dtype_policies +from keras import tree from keras.api_export import keras_export from keras.backend.common.keras_tensor import any_symbolic_tensors from keras.ops.node import Node from keras.utils import python_utils from keras.utils import traceback_utils -from keras.utils import tree from keras.utils.naming import auto_name diff --git a/keras/ops/operation_utils.py b/keras/ops/operation_utils.py index b92eb48a0ae9..e8d335bb286a 100644 --- a/keras/ops/operation_utils.py +++ b/keras/ops/operation_utils.py @@ -2,10 +2,10 @@ import numpy as np +from keras import tree from keras.api_export import keras_export from keras.backend.common.backend_utils import canonicalize_axis from keras.backend.common.backend_utils import to_tuple_or_list -from keras.utils import tree def broadcast_shapes(shape1, shape2): diff --git a/keras/ops/symbolic_arguments.py b/keras/ops/symbolic_arguments.py index 49644a0c81bd..33ec86998672 100644 --- a/keras/ops/symbolic_arguments.py +++ b/keras/ops/symbolic_arguments.py @@ -1,5 +1,5 @@ +from keras import tree from keras.backend import KerasTensor -from keras.utils import tree class SymbolicArguments: diff --git a/keras/ops/symbolic_arguments_test.py b/keras/ops/symbolic_arguments_test.py index 25f615d28d3c..3fda859e6c58 100644 --- a/keras/ops/symbolic_arguments_test.py +++ b/keras/ops/symbolic_arguments_test.py @@ -1,7 +1,7 @@ from keras import testing +from keras import tree from keras.backend import KerasTensor from keras.ops.symbolic_arguments import SymbolicArguments -from keras.utils import tree class SymbolicArgumentsTest(testing.TestCase): diff --git a/keras/testing/test_case.py b/keras/testing/test_case.py index a8dd206ce739..74656e6f0efb 100644 --- a/keras/testing/test_case.py +++ b/keras/testing/test_case.py @@ -8,6 +8,7 @@ from keras import backend from keras import distribution from keras import ops +from keras import tree from keras import utils from keras.backend.common import is_float_dtype from keras.backend.common import standardize_dtype @@ -15,7 +16,6 @@ from keras.backend.common.keras_tensor import KerasTensor from keras.models import Model from keras.utils import traceback_utils -from keras.utils import tree class TestCase(unittest.TestCase): diff --git a/keras/trainers/compile_utils.py b/keras/trainers/compile_utils.py index 178790bc94da..2049d4b8546b 100644 --- a/keras/trainers/compile_utils.py +++ b/keras/trainers/compile_utils.py @@ -2,7 +2,7 @@ from keras import losses as losses_module from keras import metrics as metrics_module from keras import ops -from keras.utils import tree +from keras import tree from keras.utils.naming import get_object_name diff --git a/keras/trainers/data_adapters/array_data_adapter.py b/keras/trainers/data_adapters/array_data_adapter.py index 262f1665d784..26cc77e609e2 100644 
--- a/keras/trainers/data_adapters/array_data_adapter.py +++ b/keras/trainers/data_adapters/array_data_adapter.py @@ -3,10 +3,10 @@ import numpy as np +from keras import tree from keras.trainers.data_adapters import array_slicing from keras.trainers.data_adapters import data_adapter_utils from keras.trainers.data_adapters.data_adapter import DataAdapter -from keras.utils import tree class ArrayDataAdapter(DataAdapter): diff --git a/keras/trainers/data_adapters/array_slicing.py b/keras/trainers/data_adapters/array_slicing.py index 7735a760a74a..eab94fa3ed5b 100644 --- a/keras/trainers/data_adapters/array_slicing.py +++ b/keras/trainers/data_adapters/array_slicing.py @@ -4,8 +4,8 @@ import numpy as np from keras import backend +from keras import tree from keras.trainers.data_adapters import data_adapter_utils -from keras.utils import tree try: import pandas diff --git a/keras/trainers/data_adapters/data_adapter_utils.py b/keras/trainers/data_adapters/data_adapter_utils.py index daf344477cad..7768c9295e24 100644 --- a/keras/trainers/data_adapters/data_adapter_utils.py +++ b/keras/trainers/data_adapters/data_adapter_utils.py @@ -1,8 +1,8 @@ import numpy as np from keras import backend +from keras import tree from keras.api_export import keras_export -from keras.utils import tree NUM_BATCHES_FOR_TENSOR_SPEC = 2 diff --git a/keras/trainers/data_adapters/generator_data_adapter.py b/keras/trainers/data_adapters/generator_data_adapter.py index aa44f0350d2c..6e26c9585ccd 100644 --- a/keras/trainers/data_adapters/generator_data_adapter.py +++ b/keras/trainers/data_adapters/generator_data_adapter.py @@ -1,8 +1,8 @@ import itertools +from keras import tree from keras.trainers.data_adapters import data_adapter_utils from keras.trainers.data_adapters.data_adapter import DataAdapter -from keras.utils import tree class GeneratorDataAdapter(DataAdapter): diff --git a/keras/trainers/data_adapters/tf_dataset_adapter.py b/keras/trainers/data_adapters/tf_dataset_adapter.py index 8cc17163dfa6..6f2362dc9b4d 100644 --- a/keras/trainers/data_adapters/tf_dataset_adapter.py +++ b/keras/trainers/data_adapters/tf_dataset_adapter.py @@ -1,6 +1,6 @@ +from keras import tree from keras.trainers.data_adapters import data_adapter_utils from keras.trainers.data_adapters.data_adapter import DataAdapter -from keras.utils import tree class TFDatasetAdapter(DataAdapter): diff --git a/keras/trainers/data_adapters/torch_data_loader_adapter.py b/keras/trainers/data_adapters/torch_data_loader_adapter.py index 0c863b7ea69e..78df412c68f3 100644 --- a/keras/trainers/data_adapters/torch_data_loader_adapter.py +++ b/keras/trainers/data_adapters/torch_data_loader_adapter.py @@ -2,9 +2,9 @@ import numpy as np +from keras import tree from keras.trainers.data_adapters import data_adapter_utils from keras.trainers.data_adapters.data_adapter import DataAdapter -from keras.utils import tree class TorchDataLoaderAdapter(DataAdapter): diff --git a/keras/trainers/trainer.py b/keras/trainers/trainer.py index eaadbe42b8ff..32d818e0b641 100644 --- a/keras/trainers/trainer.py +++ b/keras/trainers/trainer.py @@ -5,6 +5,7 @@ from keras import metrics as metrics_module from keras import ops from keras import optimizers +from keras import tree from keras.optimizers.loss_scale_optimizer import LossScaleOptimizer from keras.saving import serialization_lib from keras.trainers.compile_utils import CompileLoss @@ -12,7 +13,6 @@ from keras.trainers.data_adapters import data_adapter_utils from keras.utils import traceback_utils from keras.utils import tracking 
-from keras.utils import tree class Trainer: diff --git a/keras/tree/__init__.py b/keras/tree/__init__.py new file mode 100644 index 000000000000..fc6a783879de --- /dev/null +++ b/keras/tree/__init__.py @@ -0,0 +1,10 @@ +from keras.tree.tree_api import assert_same_structure +from keras.tree.tree_api import flatten +from keras.tree.tree_api import is_nested +from keras.tree.tree_api import lists_to_tuples +from keras.tree.tree_api import map_shape_structure +from keras.tree.tree_api import map_structure +from keras.tree.tree_api import map_structure_up_to +from keras.tree.tree_api import pack_sequence_as +from keras.tree.tree_api import register_tree_node_class +from keras.tree.tree_api import traverse diff --git a/keras/tree/dmtree_impl.py b/keras/tree/dmtree_impl.py new file mode 100644 index 000000000000..916fb35b257b --- /dev/null +++ b/keras/tree/dmtree_impl.py @@ -0,0 +1,153 @@ +from keras.utils.module_utils import dmtree + + +def register_tree_node_class(cls): + return cls + + +def is_nested(structure): + return dmtree.is_nested(structure) + + +def traverse(func, structure, top_down=True): + return dmtree.traverse(func, structure, top_down=top_down) + + +def flatten(structure): + return dmtree.flatten(structure) + + +def map_structure(func, *structures): + return dmtree.map_structure(func, *structures) + + +def map_structure_up_to(shallow_structure, func, *structures): + return dmtree.map_structure_up_to(shallow_structure, func, *structures) + + +def assert_same_structure(a, b, check_types=True): + return dmtree.assert_same_structure(a, b, check_types=check_types) + + +def pack_sequence_as(structure, flat_sequence, sequence_fn=None): + is_nested_fn = dmtree.is_nested + sequence_fn = sequence_fn or dmtree._sequence_like + + def truncate(value, length): + value_str = str(value) + return value_str[:length] + (value_str[length:] and "...") + + if not is_nested_fn(flat_sequence): + raise TypeError( + "Attempted to pack value:\n {}\ninto a structure, but found " + "incompatible type `{}` instead.".format( + truncate(flat_sequence, 100), type(flat_sequence) + ) + ) + + if not is_nested_fn(structure): + if len(flat_sequence) != 1: + raise ValueError( + "The target structure is of type `{}`\n {}\nHowever the input " + "is a sequence ({}) of length {}.\n {}\nnest cannot " + "guarantee that it is safe to map one to the other.".format( + type(structure), + truncate(structure, 100), + type(flat_sequence), + len(flat_sequence), + truncate(flat_sequence, 100), + ) + ) + return flat_sequence[0] + + try: + final_index, packed = packed_nest_with_indices( + structure, flat_sequence, 0, is_nested_fn, sequence_fn + ) + if final_index < len(flat_sequence): + raise IndexError + except IndexError: + flat_structure = dmtree.flatten(structure) + if len(flat_structure) != len(flat_sequence): + # pylint: disable=raise-missing-from + raise ValueError( + "Could not pack sequence. " + f"Structure had {len(flat_structure)} atoms, but " + f"flat_sequence had {len(flat_sequence)} items. " + f"Structure: {structure}, flat_sequence: {flat_sequence}." + ) + return sequence_fn(structure, packed) + + +def packed_nest_with_indices( + structure, flat, index, is_nested_fn, sequence_fn=None +): + """Helper function for pack_sequence_as. + + Args: + structure: structure to mimic. + flat: Flattened values to output substructure for. + index: Index at which to start reading from flat. + is_nested_fn: Function used to test if a value should + be treated as a nested structure.
+ sequence_fn: Function used to generate a new structure instance. + + Returns: + The tuple (new_index, child), where: + * new_index - the updated index into `flat` + having processed `structure`. + * packed - the subset of `flat` corresponding to `structure`, + having started at `index`, and packed into the same nested + format. + """ + packed = [] + sequence_fn = sequence_fn or dmtree._sequence_like + for s in yield_value(structure): + if is_nested_fn(s): + new_index, child = packed_nest_with_indices( + s, flat, index, is_nested_fn, sequence_fn + ) + packed.append(sequence_fn(s, child)) + index = new_index + else: + packed.append(flat[index]) + index += 1 + return index, packed + + +def yield_value(iterable): + for _, v in dmtree._yield_sorted_items(iterable): + yield v + + +def lists_to_tuples(structure): + def sequence_fn(instance, args): + if isinstance(instance, list): + return tuple(args) + return dmtree._sequence_like(instance, args) + + return pack_sequence_as( + structure, + dmtree.flatten(structure), + sequence_fn=sequence_fn, + ) + + +def is_shape_tuple(x): + if isinstance(x, (list, tuple)): + if all(isinstance(e, (int, type(None))) for e in x): + return True + return False + + +def map_shape_structure(func, structure): + if is_shape_tuple(structure): + return func(tuple(structure)) + if isinstance(structure, list): + return [map_shape_structure(func, e) for e in structure] + if isinstance(structure, tuple): + return tuple(map_shape_structure(func, e) for e in structure) + if isinstance(structure, dict): + return {k: map_shape_structure(func, v) for k, v in structure.items()} + else: + raise ValueError(f"Cannot map function to unknown object {structure}") diff --git a/keras/utils/tree.py b/keras/tree/optree_impl.py similarity index 53% rename from keras/utils/tree.py rename to keras/tree/optree_impl.py index 36f22c878e90..c823381b56f3 100644 --- a/keras/utils/tree.py +++ b/keras/tree/optree_impl.py @@ -3,10 +3,15 @@ import types import optree +import optree.utils -from keras.api_export import keras_export from keras.backend.config import backend + +def register_tree_node_class(cls): + return optree.register_pytree_node_class(cls, namespace="keras") + + # Register backend-specific node classes if backend() == "tensorflow": from tensorflow.python.trackable.data_structures import ListWrapper @@ -19,77 +24,13 @@ ) -@keras_export("keras.tree.is_nested") def is_nested(structure): - """Checks if a given structure is nested. - - Examples: - - >>> keras.tree.is_nested(42) - False - >>> keras.tree.is_nested({"foo": 42}) - True - - Args: - structure: A structure to check. - - Returns: - `True` if a given structure is nested, i.e. is a sequence, a mapping, - or a namedtuple, and `False` otherwise. - """ return not optree.tree_is_leaf( structure, none_is_leaf=True, namespace="keras" ) -@keras_export("keras.tree.traverse") def traverse(func, structure, top_down=True): - """Traverses the given nested structure, applying the given function. - - The traversal is depth-first. If `top_down` is True (default), parents - are returned before their children (giving the option to avoid traversing - into a sub-tree). 
- - Examples: - - >>> v = [] - >>> keras.tree.traverse(v.append, [(1, 2), [3], {"a": 4}], top_down=True) - [(1, 2), [3], {'a': 4}] - >>> v - [[(1, 2), [3], {'a': 4}], (1, 2), 1, 2, [3], 3, {'a': 4}, 4] - - >>> v = [] - >>> keras.tree.traverse(v.append, [(1, 2), [3], {"a": 4}], top_down=False) - [(1, 2), [3], {'a': 4}] - >>> v - [1, 2, (1, 2), 3, [3], 4, {'a': 4}, [(1, 2), [3], {'a': 4}]] - - Args: - func: The function to be applied to each sub-nest of the structure. - - When traversing top-down: - If `func(subtree) is None` the traversal continues into the - sub-tree. - If `func(subtree) is not None` the traversal does not continue - into the sub-tree. The sub-tree will be replaced by `func(subtree)` - in the returned structure (to replace the sub-tree with `None`, use - the special value `_MAP_TO_NONE`). - - When traversing bottom-up: - If `func(subtree) is None` the traversed sub-tree is returned - unaltered. - If `func(subtree) is not None` the sub-tree will be replaced by - `func(subtree)` in the returned structure (to replace the sub-tree - with None, use the special value `_MAP_TO_NONE`). - - structure: The structure to traverse. - top_down: If True, parent structures will be visited before their - children. - - Returns: - The structured output from the traversal. - """ - # From https://github.com/google/jax/pull/19695 def traverse_children(): children, treedef = optree.tree_flatten( @@ -118,36 +59,7 @@ def traverse_children(): return None if ret is _MAP_TO_NONE else ret -@keras_export("keras.tree.flatten") def flatten(structure): - """Flattens a possibly nested structure into a list. - - In the case of dict instances, the sequence consists of the values, - sorted by key to ensure deterministic behavior. This is true also for - `collections.OrderedDict` instances: their sequence order is - considered. The same convention is followed in `unflatten_as`. - This correctly unflattens dicts and `OrderedDict` after they have been - flattened, or vice-versa. - - Dictionaries with non-sortable keys cannot be flattened. - - Examples: - - >>> keras.tree.flatten([[1, 2, 3], [4, [5], [[6]]]]) - [1, 2, 3, 4, 5, 6] - >>> keras.tree.flatten(None) - [None] - >>> keras.tree.flatten(1) - [1] - >>> keras.tree.flatten({100: 'world!', 6: 'Hello'}) - ['Hello', 'world!'] - - Args: - structure: An arbitrarily nested structure. - - Returns: - A list, the flattened version of the input `structure`. - """ # optree.tree_flatten returns a pair (leaves, treespec) where the first # element is a list of leaf values and the second element is a treespec # representing the structure of the pytree. @@ -157,79 +69,7 @@ def flatten(structure): return leaves -@keras_export("keras.tree.unflatten_as") -def unflatten_as(structure, flat_sequence): - """Unflattens a sequence into a given structure. - - If `structure` is a scalar, `flat_sequence` must be a single-element list; - in this case the return value is ``flat_sequence[0]``. - - If `structure` is or contains a dict instance, the keys will be sorted to - pack the flat sequence in deterministic order. This is true also for - `collections.OrderedDict` instances: their sequence order is considered. - The same convention is followed in `flatten`. This correctly unflattens - dicts and `OrderedDict` after they have been flattened, or vice-versa. - - Dictionaries with non-sortable keys cannot be unflattened. 
- - Examples: - - >>> keras.tree.unflatten_as([[1, 2], [[3], [4]]], [5, 6, 7, 8]) - [[5, 6], [[7], [8]]] - >>> keras.tree.unflatten_as(None, [1]) - 1 - >>> keras.tree.unflatten_as({1: None, 2: None}, ['Hello', 'world!']) - {1: 'Hello', 2: 'world!'} - - Args: - structure: Arbitrarily nested structure. - flat_sequence: Sequence to unflatten. - - Returns: - `flat_sequence` unflattened into `structure`. - """ - if not is_nested(flat_sequence): - raise TypeError( - f"flat_sequence must be a sequence not a {type(flat_sequence)}:\n" - f"{flat_sequence}" - ) - if not is_nested(structure): - if len(flat_sequence) != 1: - raise ValueError( - "Structure is a scalar but " - f"len(flat_sequence) == {len(flat_sequence)} > 1" - ) - return flat_sequence[0] - structure_spec = optree.tree_structure( - structure, none_is_leaf=True, namespace="keras" - ) - return structure_spec.unflatten(flat_sequence) - - -@keras_export("keras.tree.map_structure") def map_structure(func, *structures): - """Maps `func` through given structures. - - Examples: - - >>> structure = [[1], [2], [3]] - >>> keras.tree.map_structure(lambda v: v**2, structure) - [[1], [4], [9]] - >>> keras.tree.map_structure(lambda x, y: x * y, structure, structure) - [[1], [4], [9]] - - >>> Foo = collections.namedtuple('Foo', ['a', 'b']) - >>> structure = Foo(a=1, b=2) - >>> keras.tree.map_structure(lambda v: v * 2, structure) - Foo(a=2, b=4) - - Args: - func: A callable that accepts as many arguments as there are structures. - *structures: Arbitrarily nested structures of the same layout. - - Returns: - A new structure with the same layout as the given ones. - """ if not callable(func): raise TypeError(f"`func` must be callable. Received: func={func}") if not structures: @@ -241,32 +81,7 @@ def map_structure(func, *structures): ) -@keras_export("keras.tree.map_structure_up_to") def map_structure_up_to(shallow_structure, func, *structures): - """Maps `func` through given structures up to `shallow_structure`. - - This is a variant of `map_structure` which only maps the given structures - up to `shallow_structure`. All further nested components are retained as-is. - - Examples: - - >>> shallow_structure = [None, None] - >>> structure = [[1, 1], [2, 2]] - >>> keras.tree.map_structure_up_to(shallow_structure, len, structure) - [2, 2] - - >>> shallow_structure = [None, [None, None]] - >>> keras.tree.map_structure_up_to(shallow_structure, str, structure) - ['[1, 1]', ['2', '2']] - - Args: - shallow_structure: A structure with layout common to all `structures`. - func: A callable that accepts as many arguments as there are structures. - *structures: Arbitrarily nested structures of the same layout. - - Returns: - A new structure with the same layout as `shallow_structure`. - """ return _map_structure_with_path_up_to( shallow_structure, lambda _, *args: func(*args), # Discards path. @@ -274,31 +89,7 @@ def map_structure_up_to(shallow_structure, func, *structures): ) -@keras_export("keras.tree.assert_same_structure") def assert_same_structure(a, b, check_types=True): - """Asserts that two structures are nested in the same way. - - Note that namedtuples with identical name and fields will not be considered - as same structures even `check_types=False`. 
- - Examples: - - >>> keras.tree.assert_same_structure([(0, 1)], [(2, 3)]) - - >>> Foo = collections.namedtuple('Foo', ['a', 'b']) - >>> AlsoFoo = collections.namedtuple('Foo', ['a', 'b']) - >>> keras.tree.assert_same_structure(Foo(0, 1), Foo(2, 3)) - >>> keras.tree.assert_same_structure(Foo(0, 1), AlsoFoo(2, 3)) - Traceback (most recent call last): - ... - ValueError: `a` and `b` don't have the same structure. - ... - - Args: - a: an arbitrarily nested structure. - b: an arbitrarily nested structure. - check_types: if `True` (default) types of leaves are checked as well. - """ a_structure = optree.tree_structure(a, none_is_leaf=True, namespace="keras") b_structure = optree.tree_structure(b, none_is_leaf=True, namespace="keras") if a_structure != b_structure: @@ -323,60 +114,7 @@ def assert_same_structure(a, b, check_types=True): ) -@keras_export("keras.tree.pack_sequence_as") def pack_sequence_as(structure, flat_sequence, sequence_fn=None): - """Returns a given flattened sequence packed into a given structure. - - If `structure` is an atom, `flat_sequence` must be a single-item list; in - this case the return value is `flat_sequence[0]`. - - If `structure` is or contains a dict instance, the keys will be sorted to - pack the flat sequence in deterministic order. This is true also for - `OrderedDict` instances: their sequence order is considered. The same - convention is followed in `flatten`. This correctly repacks dicts and - `OrderedDicts` after they have been flattened, or vice-versa. - - Dictionaries with non-sortable keys cannot be flattened. - - Examples: - - >>> structure = {"key3": "", "key1": "", "key2": ""} - >>> flat_sequence = ["value1", "value2", "value3"] - >>> keras.tree.pack_sequence_as(structure, flat_sequence) - {"key3": "value3", "key1": "value1", "key2": "value2"} - - >>> structure = (("a", "b"), ("c", "d", "e"), "f") - >>> flat_sequence = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] - >>> keras.tree.pack_sequence_as(structure, flat_sequence) - ((1.0, 2.0), (3.0, 4.0, 5.0), 6.0) - - >>> structure = {"key3": {"c": ("alpha", "beta"), "a": ("gamma")}, - ... "key1": {"e": "val1", "d": "val2"}} - >>> flat_sequence = ["val2", "val1", 3.0, 1.0, 2.0] - >>> keras.tree.pack_sequence_as(structure, flat_sequence) - {'key3': {'c': (1.0, 2.0), 'a': 3.0}, 'key1': {'e': 'val1', 'd': 'val2'}} - - >>> structure = ["a"] - >>> flat_sequence = [np.array([[1, 2], [3, 4]])] - >>> keras.tree.pack_sequence_as(structure, flat_sequence) - [array([[1, 2], - [3, 4]])] - - >>> structure = ["a"] - >>> flat_sequence = [keras.ops.ones([2, 2])] - >>> keras.tree.pack_sequence_as(structure, flat_sequence) - [array([[1., 1.], - [1., 1.]]] - - Args: - structure: Arbitrarily nested structure. - flat_sequence: Flat sequence to pack. - sequence_fn: Defaults to `_sequence_like`. - - Returns: - `flat_sequence` converted to have the same recursive structure as - `structure`. 
- """ sequence_fn = sequence_fn or _sequence_like def truncate(value, length): @@ -425,9 +163,7 @@ def truncate(value, length): return sequence_fn(structure, packed) -@keras_export("keras.tree.lists_to_tuples") def lists_to_tuples(structure): - """Converts `list`s to `tuple`s.""" def sequence_fn(instance, args): if isinstance(instance, list): @@ -440,7 +176,6 @@ def sequence_fn(instance, args): def map_shape_structure(func, structure): - """Variant of tree.map_structure that operates on shape tuples.""" def is_shape_tuple(x): return isinstance(x, (list, tuple)) and all( diff --git a/keras/tree/tree_api.py b/keras/tree/tree_api.py new file mode 100644 index 000000000000..09a99daa10e9 --- /dev/null +++ b/keras/tree/tree_api.py @@ -0,0 +1,275 @@ +from keras.api_export import keras_export +from keras.utils.module_utils import dmtree +from keras.utils.module_utils import optree + +if optree.available: + from keras.tree import optree_impl as tree_impl +elif dmtree.available: + from keras.tree import dmtree_impl as tree_impl +else: + raise ImportError( + "To use Keras, you need to have `optree` installed. " + "Install it via `pip install optree`" + ) + + +def register_tree_node_class(cls): + return tree_impl.register_tree_node_class(cls) + + +@keras_export("keras.tree.is_nested") +def is_nested(structure): + """Checks if a given structure is nested. + + Examples: + + >>> keras.tree.is_nested(42) + False + >>> keras.tree.is_nested({"foo": 42}) + True + + Args: + structure: A structure to check. + + Returns: + `True` if a given structure is nested, i.e. is a sequence, a mapping, + or a namedtuple, and `False` otherwise. + """ + return tree_impl.is_nested(structure) + + +@keras_export("keras.tree.traverse") +def traverse(func, structure, top_down=True): + """Traverses the given nested structure, applying the given function. + + The traversal is depth-first. If `top_down` is True (default), parents + are returned before their children (giving the option to avoid traversing + into a sub-tree). + + Examples: + + >>> v = [] + >>> keras.tree.traverse(v.append, [(1, 2), [3], {"a": 4}], top_down=True) + [(1, 2), [3], {'a': 4}] + >>> v + [[(1, 2), [3], {'a': 4}], (1, 2), 1, 2, [3], 3, {'a': 4}, 4] + + >>> v = [] + >>> keras.tree.traverse(v.append, [(1, 2), [3], {"a": 4}], top_down=False) + [(1, 2), [3], {'a': 4}] + >>> v + [1, 2, (1, 2), 3, [3], 4, {'a': 4}, [(1, 2), [3], {'a': 4}]] + + Args: + func: The function to be applied to each sub-nest of the structure. + + When traversing top-down: + If `func(subtree) is None` the traversal continues into the + sub-tree. + If `func(subtree) is not None` the traversal does not continue + into the sub-tree. The sub-tree will be replaced by `func(subtree)` + in the returned structure (to replace the sub-tree with `None`, use + the special value `_MAP_TO_NONE`). + + When traversing bottom-up: + If `func(subtree) is None` the traversed sub-tree is returned + unaltered. + If `func(subtree) is not None` the sub-tree will be replaced by + `func(subtree)` in the returned structure (to replace the sub-tree + with None, use the special value `_MAP_TO_NONE`). + + structure: The structure to traverse. + top_down: If True, parent structures will be visited before their + children. + + Returns: + The structured output from the traversal. + """ + return tree_impl.traverse(func, structure, top_down=top_down) + + +@keras_export("keras.tree.flatten") +def flatten(structure): + """Flattens a possibly nested structure into a list. 
+ + In the case of dict instances, the sequence consists of the values, + sorted by key to ensure deterministic behavior. This is true also for + `collections.OrderedDict` instances: their sequence order is + considered. The same convention is followed in `pack_sequence_as`. + This correctly repacks dicts and `OrderedDict` after they have been + flattened, or vice-versa. + + Dictionaries with non-sortable keys cannot be flattened. + + Examples: + + >>> keras.tree.flatten([[1, 2, 3], [4, [5], [[6]]]]) + [1, 2, 3, 4, 5, 6] + >>> keras.tree.flatten(None) + [None] + >>> keras.tree.flatten(1) + [1] + >>> keras.tree.flatten({100: 'world!', 6: 'Hello'}) + ['Hello', 'world!'] + + Args: + structure: An arbitrarily nested structure. + + Returns: + A list, the flattened version of the input `structure`. + """ + return tree_impl.flatten(structure) + + +@keras_export("keras.tree.map_structure") +def map_structure(func, *structures): + """Maps `func` through given structures. + + Examples: + + >>> structure = [[1], [2], [3]] + >>> keras.tree.map_structure(lambda v: v**2, structure) + [[1], [4], [9]] + >>> keras.tree.map_structure(lambda x, y: x * y, structure, structure) + [[1], [4], [9]] + + >>> Foo = collections.namedtuple('Foo', ['a', 'b']) + >>> structure = Foo(a=1, b=2) + >>> keras.tree.map_structure(lambda v: v * 2, structure) + Foo(a=2, b=4) + + Args: + func: A callable that accepts as many arguments as there are structures. + *structures: Arbitrarily nested structures of the same layout. + + Returns: + A new structure with the same layout as the given ones. + """ + return tree_impl.map_structure(func, *structures) + + +@keras_export("keras.tree.map_structure_up_to") +def map_structure_up_to(shallow_structure, func, *structures): + """Maps `func` through given structures up to `shallow_structure`. + + This is a variant of `map_structure` which only maps the given structures + up to `shallow_structure`. All further nested components are retained as-is. + + Examples: + + >>> shallow_structure = [None, None] + >>> structure = [[1, 1], [2, 2]] + >>> keras.tree.map_structure_up_to(shallow_structure, len, structure) + [2, 2] + + >>> shallow_structure = [None, [None, None]] + >>> keras.tree.map_structure_up_to(shallow_structure, str, structure) + ['[1, 1]', ['2', '2']] + + Args: + shallow_structure: A structure with layout common to all `structures`. + func: A callable that accepts as many arguments as there are structures. + *structures: Arbitrarily nested structures of the same layout. + + Returns: + A new structure with the same layout as `shallow_structure`. + """ + return tree_impl.map_structure_up_to(shallow_structure, func, *structures) + + +@keras_export("keras.tree.assert_same_structure") +def assert_same_structure(a, b, check_types=True): + """Asserts that two structures are nested in the same way. + + Note that namedtuples with identical name and fields will not be considered + the same structure, even with `check_types=False`. + + Examples: + + >>> keras.tree.assert_same_structure([(0, 1)], [(2, 3)]) + + >>> Foo = collections.namedtuple('Foo', ['a', 'b']) + >>> AlsoFoo = collections.namedtuple('Foo', ['a', 'b']) + >>> keras.tree.assert_same_structure(Foo(0, 1), Foo(2, 3)) + >>> keras.tree.assert_same_structure(Foo(0, 1), AlsoFoo(2, 3)) + Traceback (most recent call last): + ... + ValueError: `a` and `b` don't have the same structure. + ... + + Args: + a: an arbitrarily nested structure. + b: an arbitrarily nested structure. + check_types: if `True` (default) types of leaves are checked as well.
+ """ + return tree_impl.assert_same_structure(a, b, check_types=check_types) + + +@keras_export("keras.tree.pack_sequence_as") +def pack_sequence_as(structure, flat_sequence, sequence_fn=None): + """Returns a given flattened sequence packed into a given structure. + + If `structure` is an atom, `flat_sequence` must be a single-item list; in + this case the return value is `flat_sequence[0]`. + + If `structure` is or contains a dict instance, the keys will be sorted to + pack the flat sequence in deterministic order. This is true also for + `OrderedDict` instances: their sequence order is considered. The same + convention is followed in `flatten`. This correctly repacks dicts and + `OrderedDicts` after they have been flattened, or vice-versa. + + Dictionaries with non-sortable keys cannot be flattened. + + Examples: + + >>> structure = {"key3": "", "key1": "", "key2": ""} + >>> flat_sequence = ["value1", "value2", "value3"] + >>> keras.tree.pack_sequence_as(structure, flat_sequence) + {"key3": "value3", "key1": "value1", "key2": "value2"} + + >>> structure = (("a", "b"), ("c", "d", "e"), "f") + >>> flat_sequence = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + >>> keras.tree.pack_sequence_as(structure, flat_sequence) + ((1.0, 2.0), (3.0, 4.0, 5.0), 6.0) + + >>> structure = {"key3": {"c": ("alpha", "beta"), "a": ("gamma")}, + ... "key1": {"e": "val1", "d": "val2"}} + >>> flat_sequence = ["val2", "val1", 3.0, 1.0, 2.0] + >>> keras.tree.pack_sequence_as(structure, flat_sequence) + {'key3': {'c': (1.0, 2.0), 'a': 3.0}, 'key1': {'e': 'val1', 'd': 'val2'}} + + >>> structure = ["a"] + >>> flat_sequence = [np.array([[1, 2], [3, 4]])] + >>> keras.tree.pack_sequence_as(structure, flat_sequence) + [array([[1, 2], + [3, 4]])] + + >>> structure = ["a"] + >>> flat_sequence = [keras.ops.ones([2, 2])] + >>> keras.tree.pack_sequence_as(structure, flat_sequence) + [array([[1., 1.], + [1., 1.]]] + + Args: + structure: Arbitrarily nested structure. + flat_sequence: Flat sequence to pack. + sequence_fn: Defaults to `_sequence_like`. + + Returns: + `flat_sequence` converted to have the same recursive structure as + `structure`. 
+ """ + return tree_impl.pack_sequence_as( + structure, flat_sequence, sequence_fn=sequence_fn + ) + + +@keras_export("keras.tree.lists_to_tuples") +def lists_to_tuples(structure): + return tree_impl.lists_to_tuples(structure) + + +@keras_export("keras.tree.map_shape_structure") +def map_shape_structure(func, structure): + """Variant of keras.tree.map_structure that operates on shape tuples.""" + return tree_impl.map_shape_structure(func, structure) diff --git a/keras/utils/tree_test.py b/keras/tree/tree_test.py similarity index 84% rename from keras/utils/tree_test.py rename to keras/tree/tree_test.py index a5ca84dab5c2..7335b7d3f377 100644 --- a/keras/utils/tree_test.py +++ b/keras/tree/tree_test.py @@ -4,7 +4,7 @@ from keras import ops from keras import testing -from keras.utils import tree +from keras import tree STRUCTURE1 = (((1, 2), 3), 4, (5, 6)) STRUCTURE2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) @@ -27,46 +27,19 @@ def test_is_nested(self): self.assertFalse(tree.is_nested(np.tanh(ones))) self.assertFalse(tree.is_nested(np.ones((4, 5)))) - def test_flatten_and_unflatten(self): + def test_flatten(self): structure = ((3, 4), 5, (6, 7, (9, 10), 8)) flat = ["a", "b", "c", "d", "e", "f", "g", "h"] self.assertEqual(tree.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8]) - self.assertEqual( - tree.unflatten_as(structure, flat), - (("a", "b"), "c", ("d", "e", ("f", "g"), "h")), - ) point = collections.namedtuple("Point", ["x", "y"]) structure = (point(x=4, y=2), ((point(x=1, y=0),),)) flat = [4, 2, 1, 0] self.assertEqual(tree.flatten(structure), flat) - restructured_from_flat = tree.unflatten_as(structure, flat) - self.assertEqual(restructured_from_flat, structure) - self.assertEqual(restructured_from_flat[0].x, 4) - self.assertEqual(restructured_from_flat[0].y, 2) - self.assertEqual(restructured_from_flat[1][0][0].x, 1) - self.assertEqual(restructured_from_flat[1][0][0].y, 0) self.assertEqual([5], tree.flatten(5)) self.assertEqual([np.array([5])], tree.flatten(np.array([5]))) - self.assertEqual("a", tree.unflatten_as(5, ["a"])) - self.assertEqual( - np.array([5]), tree.unflatten_as("scalar", [np.array([5])]) - ) - - with self.assertRaisesRegex(ValueError, "Structure is a scalar"): - tree.unflatten_as("scalar", [4, 5]) - with self.assertRaisesRegex(TypeError, "flat_sequence"): - tree.unflatten_as([4, 5], "bad_sequence") - with self.assertRaises(ValueError): - tree.unflatten_as([5, 6, [7, 8]], ["a", "b", "c"]) - - self.assertEqual( - tree.unflatten_as({1: None, 2: None}, ["Hello", "world!"]), - {1: "Hello", 2: "world!"}, - ) - def test_flatten_dict_order(self): ordered = collections.OrderedDict( [("d", 3), ("b", 1), ("a", 0), ("c", 2)] @@ -77,20 +50,6 @@ def test_flatten_dict_order(self): self.assertEqual([3, 1, 0, 2], ordered_flat) self.assertEqual([0, 1, 2, 3], plain_flat) - def test_unflatten_dict_order(self): - ordered = collections.OrderedDict( - [("d", 0), ("b", 0), ("a", 0), ("c", 0)] - ) - plain = {"d": 0, "b": 0, "a": 0, "c": 0} - seq = [0, 1, 2, 3] - ordered_reconstruction = tree.unflatten_as(ordered, seq) - plain_reconstruction = tree.unflatten_as(plain, seq) - self.assertEqual( - collections.OrderedDict([("d", 0), ("b", 1), ("a", 2), ("c", 3)]), - ordered_reconstruction, - ) - self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction) - def test_map_structure(self): structure2 = (((7, 8), 9), 10, (11, 12)) structure1_plus1 = tree.map_structure(lambda x: x + 1, STRUCTURE1) diff --git a/keras/utils/jax_layer.py b/keras/utils/jax_layer.py index 
773be24ed57c..83786b0b74f6 100644 --- a/keras/utils/jax_layer.py +++ b/keras/utils/jax_layer.py @@ -3,12 +3,12 @@ import numpy as np from keras import backend +from keras import tree from keras.api_export import keras_export from keras.layers.layer import Layer from keras.saving import serialization_lib from keras.utils import jax_utils from keras.utils import tracking -from keras.utils import tree from keras.utils.module_utils import jax diff --git a/keras/utils/jax_layer_test.py b/keras/utils/jax_layer_test.py index 59c392af20d0..ba7104946b04 100644 --- a/keras/utils/jax_layer_test.py +++ b/keras/utils/jax_layer_test.py @@ -13,10 +13,10 @@ from keras import models from keras import saving from keras import testing +from keras import tree from keras import utils from keras.export import export_lib from keras.saving import object_registration -from keras.utils import tree from keras.utils.jax_layer import FlaxLayer from keras.utils.jax_layer import JaxLayer diff --git a/keras/utils/model_visualization.py b/keras/utils/model_visualization.py index 34c3b42e72fe..332eb6d97cc6 100644 --- a/keras/utils/model_visualization.py +++ b/keras/utils/model_visualization.py @@ -3,9 +3,9 @@ import os import sys +from keras import tree from keras.api_export import keras_export from keras.utils import io_utils -from keras.utils import tree try: # pydot-ng is a fork of pydot that is better maintained. diff --git a/keras/utils/module_utils.py b/keras/utils/module_utils.py index 30c03bcc9dfe..c0991fd6bed3 100644 --- a/keras/utils/module_utils.py +++ b/keras/utils/module_utils.py @@ -41,3 +41,5 @@ def __getattr__(self, name): tensorflow_io = LazyModule("tensorflow_io") scipy = LazyModule("scipy") jax = LazyModule("jax") +optree = LazyModule("optree") +dmtree = LazyModule("tree") diff --git a/keras/utils/summary_utils.py b/keras/utils/summary_utils.py index 2fa9c7a0919f..18e3ac539cb3 100644 --- a/keras/utils/summary_utils.py +++ b/keras/utils/summary_utils.py @@ -11,9 +11,9 @@ import rich.table from keras import backend +from keras import tree from keras.utils import dtype_utils from keras.utils import io_utils -from keras.utils import tree def count_params(weights): diff --git a/keras/utils/traceback_utils.py b/keras/utils/traceback_utils.py index 53e98658d7a2..2b0bddefad46 100644 --- a/keras/utils/traceback_utils.py +++ b/keras/utils/traceback_utils.py @@ -5,9 +5,9 @@ from functools import wraps from keras import backend +from keras import tree from keras.api_export import keras_export from keras.backend.common import global_state -from keras.utils import tree _EXCLUDED_PATHS = ( os.path.abspath(os.path.join(__file__, "..", "..")), diff --git a/keras/utils/tracking.py b/keras/utils/tracking.py index 9a9fc106a7be..4a3a76c15d6c 100644 --- a/keras/utils/tracking.py +++ b/keras/utils/tracking.py @@ -1,8 +1,6 @@ from functools import wraps -import optree -import optree.utils - +from keras import tree from keras.backend.common.global_state import get_global_attribute from keras.backend.common.global_state import set_global_attribute from keras.utils import python_utils @@ -135,7 +133,7 @@ def replace_tracked_value(self, store_name, old_value, new_value): self.stored_ids[store_name].add(id(new_value)) -@optree.register_pytree_node_class(namespace="keras") +@tree.register_tree_node_class class TrackedList(list): def __init__(self, values=None, tracker=None): self.tracker = tracker @@ -196,7 +194,7 @@ def tree_unflatten(cls, metadata, children): return cls(children) 
-@optree.register_pytree_node_class(namespace="keras") +@tree.register_tree_node_class class TrackedDict(dict): def __init__(self, values=None, tracker=None): self.tracker = tracker @@ -236,6 +234,8 @@ def clear(self): super().clear() def tree_flatten(self): + from keras.utils.module_utils import optree + # For optree keys, values = optree.utils.unzip2( optree.utils.total_order_sorted(self.items(), key=lambda kv: kv[0]) @@ -244,11 +244,13 @@ def tree_flatten(self): @classmethod def tree_unflatten(cls, keys, values): + from keras.utils.module_utils import optree + # For optree return cls(optree.utils.safe_zip(keys, values)) -@optree.register_pytree_node_class(namespace="keras") +@tree.register_tree_node_class class TrackedSet(set): def __init__(self, values=None, tracker=None): self.tracker = tracker From 61bbff593a0914f5a2c426c14caadb7372f56da0 Mon Sep 17 00:00:00 2001 From: Luca Pizzini Date: Mon, 15 Apr 2024 22:31:16 +0200 Subject: [PATCH 004/101] feat(losses): add Tversky loss implementation (#19511) * feat(losses): add Tversky loss implementation * adjusted documentation --- keras/losses/__init__.py | 4 ++ keras/losses/losses.py | 90 +++++++++++++++++++++++++++++++++++++ keras/losses/losses_test.py | 37 +++++++++++++++ 3 files changed, 131 insertions(+) diff --git a/keras/losses/__init__.py b/keras/losses/__init__.py index 788b54948444..e2cc81d49cef 100644 --- a/keras/losses/__init__.py +++ b/keras/losses/__init__.py @@ -21,6 +21,7 @@ from keras.losses.losses import Poisson from keras.losses.losses import SparseCategoricalCrossentropy from keras.losses.losses import SquaredHinge +from keras.losses.losses import Tversky from keras.losses.losses import binary_crossentropy from keras.losses.losses import binary_focal_crossentropy from keras.losses.losses import categorical_crossentropy @@ -40,6 +41,7 @@ from keras.losses.losses import poisson from keras.losses.losses import sparse_categorical_crossentropy from keras.losses.losses import squared_hinge +from keras.losses.losses import tversky from keras.saving import serialization_lib ALL_OBJECTS = { @@ -68,6 +70,7 @@ CategoricalHinge, # Image segmentation Dice, + Tversky, # Probabilistic kl_divergence, poisson, @@ -90,6 +93,7 @@ categorical_hinge, # Image segmentation dice, + tversky, } ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS} diff --git a/keras/losses/losses.py b/keras/losses/losses.py index af4e68a0846e..74e4803d2435 100644 --- a/keras/losses/losses.py +++ b/keras/losses/losses.py @@ -2000,3 +2000,93 @@ def dice(y_true, y_pred): ) return 1 - dice + + +@keras_export("keras.losses.Tversky") +class Tversky(LossFunctionWrapper): + """Computes the Tversky loss value between `y_true` and `y_pred`. + + This loss function is weighted by the alpha and beta coefficients + that penalize false positives and false negatives. + + With `alpha=0.5` and `beta=0.5`, the loss value becomes equivalent to + Dice Loss. + + Args: + alpha: coefficient controlling incidence of false positives. + beta: coefficient controlling incidence of false negatives. + reduction: Type of reduction to apply to the loss. In almost all cases + this should be `"sum_over_batch_size"`. + name: Optional name for the loss instance.
+ + Reference: + + - [Salehi et al., 2017](https://arxiv.org/abs/1706.05721) + """ + + def __init__( + self, + alpha=0.5, + beta=0.5, + reduction="sum_over_batch_size", + name="tversky", + ): + super().__init__( + tversky, + alpha=alpha, + beta=beta, + name=name, + reduction=reduction, + ) + self.alpha = alpha + self.beta = beta + + def get_config(self): + return { + "name": self.name, + "alpha": self.alpha, + "beta": self.beta, + "reduction": self.reduction, + } + + +@keras_export("keras.losses.tversky") +def tversky(y_true, y_pred, alpha=0.5, beta=0.5): + """Computes the Tversky loss value between `y_true` and `y_pred`. + + This loss function is weighted by the alpha and beta coefficients + that penalize false positives and false negatives. + + With `alpha=0.5` and `beta=0.5`, the loss value becomes equivalent to + Dice Loss. + + Args: + y_true: tensor of true targets. + y_pred: tensor of predicted targets. + alpha: coefficient controlling incidence of false positives. + beta: coefficient controlling incidence of false negatives. + + Returns: + Tversky loss value. + + Reference: + + - [Salehi et al., 2017](https://arxiv.org/abs/1706.05721) + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + + inputs = ops.reshape(y_true, [-1]) + targets = ops.reshape(y_pred, [-1]) + + intersection = ops.sum(inputs * targets) + fp = ops.sum((1 - targets) * inputs) + fn = ops.sum(targets * (1 - inputs)) + tversky = ops.divide( + intersection, + intersection + fp * alpha + fn * beta + backend.epsilon(), + ) + + return 1 - tversky diff --git a/keras/losses/losses_test.py b/keras/losses/losses_test.py index c49b86b3bc4b..59af6abf9c1e 100644 --- a/keras/losses/losses_test.py +++ b/keras/losses/losses_test.py @@ -1409,3 +1409,40 @@ def test_binary_segmentation(self): ) output = losses.Dice()(y_true, y_pred) self.assertAllClose(output, 0.77777773) + + +class TverskyTest(testing.TestCase): + def test_config(self): + self.run_class_serialization_test(losses.Tversky(name="mytversky")) + + def test_correctness(self): + y_true = np.array(([[1, 2], [1, 2]])) + y_pred = np.array(([[4, 1], [6, 1]])) + output = losses.Tversky()(y_true, y_pred) + self.assertAllClose(output, -0.55555546) + + def test_correctness_custom_coefficients(self): + y_true = np.array(([[1, 2], [1, 2]])) + y_pred = np.array(([[4, 1], [6, 1]])) + output = losses.Tversky(alpha=0.2, beta=0.8)(y_true, y_pred) + self.assertAllClose(output, -0.29629636) + + def test_binary_segmentation(self): + y_true = np.array( + ([[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]]) + ) + y_pred = np.array( + ([[0, 1, 0, 1], [1, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 1]]) + ) + output = losses.Tversky()(y_true, y_pred) + self.assertAllClose(output, 0.77777773) + + def test_binary_segmentation_custom_coefficients(self): + y_true = np.array( + ([[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]]) + ) + y_pred = np.array( + ([[0, 1, 0, 1], [1, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 1]]) + ) + output = losses.Tversky(alpha=0.2, beta=0.8)(y_true, y_pred) + self.assertAllClose(output, 0.7916667) From 6503b6d988a494e4785373917ada8b66031f5fef Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Mon, 15 Apr 2024 14:55:21 -0700 Subject: [PATCH 005/101] Update KLD docs --- keras/losses/losses.py | 8 ++++++++ keras/metrics/probabilistic_metrics.py | 4 ++++ 2 files changed, 12 insertions(+) diff --git a/keras/losses/losses.py b/keras/losses/losses.py index 74e4803d2435..ef0a2229dafc 100644 --- a/keras/losses/losses.py +++ 
b/keras/losses/losses.py @@ -342,6 +342,10 @@ class KLDivergence(LossFunctionWrapper): loss = y_true * log(y_true / y_pred) ``` + `y_true` and `y_pred` are expected to be probability + distributions, with values between 0 and 1. They will get + clipped to the `[0, 1]` range. + Args: reduction: Type of reduction to apply to the loss. In almost all cases this should be `"sum_over_batch_size"`. @@ -1443,6 +1447,10 @@ def kl_divergence(y_true, y_pred): loss = y_true * log(y_true / y_pred) ``` + `y_true` and `y_pred` are expected to be probability + distributions, with values between 0 and 1. They will get + clipped to the `[0, 1]` range. + Args: y_true: Tensor of true targets. y_pred: Tensor of predicted targets. diff --git a/keras/metrics/probabilistic_metrics.py b/keras/metrics/probabilistic_metrics.py index 192c7ca01d73..b6f4551e0796 100644 --- a/keras/metrics/probabilistic_metrics.py +++ b/keras/metrics/probabilistic_metrics.py @@ -18,6 +18,10 @@ class KLDivergence(reduction_metrics.MeanMetricWrapper): metric = y_true * log(y_true / y_pred) ``` + `y_true` and `y_pred` are expected to be probability + distributions, with values between 0 and 1. They will get + clipped to the `[0, 1]` range. + Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. From 6b9430d5f7b6d3ecbb1c35a37690a661759341b5 Mon Sep 17 00:00:00 2001 From: hertschuh <1091026+hertschuh@users.noreply.github.com> Date: Mon, 15 Apr 2024 16:16:41 -0700 Subject: [PATCH 006/101] Models and layers now return owned metrics recursively. (#19522) - added `Layer.metrics` to return all metrics owned by the layer and its sub-layers recursively. - `Layer.metrics_variables` now returns variables from all metrics recursively, not just the layer and its direct sub-layers. - `Model.metrics` now returns all metrics recursively, not just the model level metrics. - `Model.metrics_variables` now returns variables from all metrics recursively, not just the model level metrics. - added test coverage to test metrics and variables 2 levels deep. This is consistent with the Keras 2 behavior and how `Model/Layer.variables` and `Model/Layer.weights` work. 
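For illustration, here is a minimal sketch of the new recursive behavior (the `Inner`/`Outer` layer names are hypothetical, mirroring the test cases added in this patch):

```python
import numpy as np
from keras import layers, metrics

class Inner(layers.Layer):  # hypothetical layer that owns a metric
    def __init__(self):
        super().__init__()
        self.dense = layers.Dense(3)
        self.mean = metrics.Mean(name="inner_mean")

    def call(self, x):
        return self.dense(x)

class Outer(layers.Layer):  # hypothetical wrapper with its own metric
    def __init__(self):
        super().__init__()
        self.inner = Inner()
        self.mse = metrics.MeanSquaredError(name="outer_mse")

    def call(self, x):
        return self.inner(x)

layer = Outer()
layer(np.zeros((1, 3)))
# `metrics` now also returns the sub-layer's metric, and
# `metrics_variables` returns the variables of both metrics.
assert len(layer.metrics) == 2
assert len(layer.metrics_variables) == 4
```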
--- keras/layers/layer.py | 13 +++++++---- keras/layers/layer_test.py | 44 +++++++++++++++++++++++++++----------- keras/trainers/trainer.py | 9 +------- 3 files changed, 41 insertions(+), 25 deletions(-) diff --git a/keras/layers/layer.py b/keras/layers/layer.py index a19a29f7f229..db91e349b987 100644 --- a/keras/layers/layer.py +++ b/keras/layers/layer.py @@ -635,15 +635,20 @@ def non_trainable_weights(self): return self.weights return [v for v in self.weights if not v.trainable] + @property + def metrics(self): + """List of all metrics.""" + metrics = list(self._metrics) + for layer in self._layers: + metrics.extend(layer.metrics) + return metrics + @property def metrics_variables(self): """List of all metric variables.""" vars = [] - for metric in self._metrics: + for metric in self.metrics: vars.extend(metric.variables) - for layer in self._layers: - for metric in layer._metrics: - vars.extend(metric.variables) return vars def get_weights(self): diff --git a/keras/layers/layer_test.py b/keras/layers/layer_test.py index ab93d808c728..0e8ca4548df5 100644 --- a/keras/layers/layer_test.py +++ b/keras/layers/layer_test.py @@ -176,7 +176,7 @@ def call(self, x): self.assertAllClose(layer.variables[1], [10, 1]) def test_layer_tracking(self): - class NestedLayer(layers.Layer): + class LayerWithDenseLayers(layers.Layer): def __init__(self, units): super().__init__() self.dense1 = layers.Dense(units) @@ -185,6 +185,7 @@ def __init__(self, units): } self.layer_list = [layers.Dense(units)] self.units = units + self.seed_generator = backend.random.SeedGenerator(seed=1) def build(self, input_shape): self.layer_list.append(layers.Dense(self.units)) @@ -196,24 +197,31 @@ def call(self, x): x = self.layer_list[1](x) return x - class DoubleNestedLayer(layers.Layer): - def __init__(self, units): + class ParentLayer(layers.Layer): + def __init__(self, inner_layer): super().__init__() - self.inner_layer = NestedLayer(units) + self.inner_layer = inner_layer def call(self, x): return self.inner_layer(x) - layer = NestedLayer(3) + layer = LayerWithDenseLayers(3) layer.build((1, 3)) self.assertLen(layer._layers, 4) layer(np.zeros((1, 3))) + self.assertLen(layer.variables, 9) + self.assertLen(layer.weights, 8) + + layer = ParentLayer(LayerWithDenseLayers(3)) + self.assertLen(layer._layers, 1) + layer(np.zeros((1, 3))) + self.assertLen(layer.variables, 9) self.assertLen(layer.weights, 8) - layer = DoubleNestedLayer(3) + layer = ParentLayer(ParentLayer(LayerWithDenseLayers(3))) self.assertLen(layer._layers, 1) layer(np.zeros((1, 3))) - self.assertLen(layer.inner_layer.weights, 8) + self.assertLen(layer.variables, 9) self.assertLen(layer.weights, 8) def test_metric_tracking(self): @@ -229,32 +237,42 @@ def build(self, input_shape): def call(self, x): return self.dense(x) - class NestedLayerWithMetric(layers.Layer): - def __init__(self, units): + class ParentLayerWithMetric(layers.Layer): + def __init__(self, inner_layer): super().__init__() - self.layer_with_metric = LayerWithMetric(units) + self.inner_layer = inner_layer self.metric = metrics.MeanSquaredError(name="my_metric") def build(self, input_shape): - self.layer_with_metric.build(input_shape) + self.inner_layer.build(input_shape) def call(self, x): - return self.layer_with_metric(x) + return self.inner_layer(x) layer = LayerWithMetric(3) layer.build((1, 3)) + self.assertLen(layer.metrics, 1) self.assertLen(layer.metrics_variables, 2) self.assertLen(layer.trainable_variables, 2) self.assertLen(layer.non_trainable_variables, 0) - layer = 
NestedLayerWithMetric(3)
+        layer = ParentLayerWithMetric(LayerWithMetric(3))
         layer.build((1, 3))
 
+        self.assertLen(layer.metrics, 2)
         self.assertLen(layer.metrics_variables, 4)
         self.assertLen(layer.trainable_variables, 2)
         self.assertLen(layer.non_trainable_variables, 0)
 
+        layer = ParentLayerWithMetric(ParentLayerWithMetric(LayerWithMetric(3)))
+        layer.build((1, 3))
+
+        self.assertLen(layer.metrics, 3)
+        self.assertLen(layer.metrics_variables, 6)
+        self.assertLen(layer.trainable_variables, 2)
+        self.assertLen(layer.non_trainable_variables, 0)
+
     def test_build_on_call(self):
         class LayerWithUnbuiltState(layers.Layer):
             def __init__(self, units):
diff --git a/keras/trainers/trainer.py b/keras/trainers/trainer.py
index 32d818e0b641..0ee156f9c5e2 100644
--- a/keras/trainers/trainer.py
+++ b/keras/trainers/trainer.py
@@ -242,7 +242,7 @@ def run_eagerly(self, value):
     @property
     def metrics(self):
         metrics = [self._loss_tracker] if self.compiled else []
-        metrics.extend(self._metrics[:])
+        metrics.extend(super().metrics)
         if self.compiled and self._compile_metrics is not None:
             metrics += [self._compile_metrics]
         return metrics
@@ -251,13 +251,6 @@ def metrics_names(self):
         return [m.name for m in self.metrics]
 
-    @property
-    def metrics_variables(self):
-        vars = []
-        for metric in self.metrics:
-            vars.extend(metric.variables)
-        return vars
-
     def reset_metrics(self):
         for m in self.metrics:
             m.reset_state()

From 2ff3f94751bec84512ad6d0452799334c275aec0 Mon Sep 17 00:00:00 2001
From: Francois Chollet
Date: Mon, 15 Apr 2024 21:30:07 -0700
Subject: [PATCH 007/101] Update IoU ignore_class handling

---
 keras/metrics/iou_metrics.py      | 8 +++++---
 keras/metrics/iou_metrics_test.py | 2 +-
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/keras/metrics/iou_metrics.py b/keras/metrics/iou_metrics.py
index 56bf3d07efe9..e8dd9594b5d7 100644
--- a/keras/metrics/iou_metrics.py
+++ b/keras/metrics/iou_metrics.py
@@ -115,10 +115,12 @@ def update_state(self, y_true, y_pred, sample_weight=None):
                 self.ignore_class, y_true.dtype
             )
             valid_mask = ops.not_equal(y_true, ignore_class)
-            y_true = y_true[valid_mask]
-            y_pred = y_pred[valid_mask]
+            y_true = y_true * ops.cast(valid_mask, y_true.dtype)
+            y_pred = y_pred * ops.cast(valid_mask, y_pred.dtype)
             if sample_weight is not None:
-                sample_weight = sample_weight[valid_mask]
+                sample_weight = sample_weight * ops.cast(
+                    valid_mask, sample_weight.dtype
+                )
 
         y_pred = ops.cast(y_pred, dtype=self.dtype)
         y_true = ops.cast(y_true, dtype=self.dtype)
diff --git a/keras/metrics/iou_metrics_test.py b/keras/metrics/iou_metrics_test.py
index 7f5941454de8..907af12ceac7 100644
--- a/keras/metrics/iou_metrics_test.py
+++ b/keras/metrics/iou_metrics_test.py
@@ -98,7 +98,7 @@ def test_zero_and_non_zero_entries(self):
 
     @pytest.mark.requires_trainable_backend
     def test_compilation(self):
-        m_obj = metrics.MeanIoU(num_classes=2)
+        m_obj = metrics.MeanIoU(num_classes=2, ignore_class=0)
         model = models.Sequential(
             [
                 layers.Dense(2, activation="softmax"),

From a4e27166eaa58b35667aedac6e2ff5c38bf9c3b0 Mon Sep 17 00:00:00 2001
From: Faisal Alsrheed <47912291+Faisal-Alsrheed@users.noreply.github.com>
Date: Tue, 16 Apr 2024 22:29:39 +0300
Subject: [PATCH 008/101] Fix `RandomBrightness`, Enhance `IndexLookup`
 Initialization and Expand Test Coverage for `Preprocessing Layers` (#19513)

* Add tests for CategoryEncoding class in category_encoding_test.py
* fix
* Fix IndexLookup class initialization and add test cases
* Add test case for IndexLookupLayerTest without vocabulary
* Fix IndexLookup class initialization
* Add normalization test cases
* Add test cases for Hashing class
* Fix value range validation error in RandomBrightness class
* Refactor IndexLookup class initialization and add test cases
* Re-fix IndexLookup class initialization and fix test cases

---
 .../preprocessing/category_encoding_test.py   |  68 +++++++
 keras/layers/preprocessing/hashing_test.py    |  37 ++++
 .../layers/preprocessing/index_lookup_test.py | 190 ++++++++++++++++++
 .../preprocessing/normalization_test.py       |  45 ++++-
 .../layers/preprocessing/random_brightness.py |   4 +-
 .../preprocessing/random_brightness_test.py   |  56 ++++++
 6 files changed, 394 insertions(+), 6 deletions(-)

diff --git a/keras/layers/preprocessing/category_encoding_test.py b/keras/layers/preprocessing/category_encoding_test.py
index 025d30da9f84..da55f9d35297 100644
--- a/keras/layers/preprocessing/category_encoding_test.py
+++ b/keras/layers/preprocessing/category_encoding_test.py
@@ -260,3 +260,71 @@ def test_tf_data_compatibility(self):
         for output in ds.take(1):
             output = output.numpy()
         self.assertAllClose(output, expected_output)
+
+    def test_category_encoding_without_num_tokens(self):
+        with self.assertRaisesRegex(
+            ValueError, r"num_tokens must be set to use this layer"
+        ):
+            layers.CategoryEncoding(output_mode="multi_hot")
+
+    def test_category_encoding_with_invalid_num_tokens(self):
+        with self.assertRaisesRegex(ValueError, r"`num_tokens` must be >= 1"):
+            layers.CategoryEncoding(num_tokens=0, output_mode="multi_hot")
+
+        with self.assertRaisesRegex(ValueError, r"`num_tokens` must be >= 1"):
+            layers.CategoryEncoding(num_tokens=-1, output_mode="multi_hot")
+
+    def test_category_encoding_with_unnecessary_count_weights(self):
+        layer = layers.CategoryEncoding(num_tokens=4, output_mode="multi_hot")
+        input_data = np.array([0, 1, 2, 3])
+        count_weights = np.array([0.1, 0.2, 0.3, 0.4])
+        with self.assertRaisesRegex(
+            ValueError, r"`count_weights` is not used when `output_mode`"
+        ):
+            layer(input_data, count_weights=count_weights)
+
+    def test_invalid_output_mode_raises_error(self):
+        with self.assertRaisesRegex(
+            ValueError, r"Unknown arg for output_mode: invalid_mode"
+        ):
+            layers.CategoryEncoding(num_tokens=4, output_mode="invalid_mode")
+
+    def test_encode_one_hot_single_sample(self):
+        layer = layers.CategoryEncoding(num_tokens=4, output_mode="one_hot")
+        input_array = np.array([1, 2, 3, 1])
+        expected_output = np.array(
+            [
+                [0, 1, 0, 0],
+                [0, 0, 1, 0],
+                [0, 0, 0, 1],
+                [0, 1, 0, 0],
+            ]
+        )
+        output = layer._encode(input_array)
+        self.assertAllClose(expected_output, output)
+
+    def test_encode_one_hot_batched_samples(self):
+        layer = layers.CategoryEncoding(num_tokens=4, output_mode="one_hot")
+        input_array = np.array([[3, 2, 0, 1], [3, 2, 0, 1]])
+        expected_output = np.array(
+            [
+                [[0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0]],
+                [[0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0]],
+            ]
+        )
+        output = layer._encode(input_array)
+        self.assertAllClose(expected_output, output)
+
+    def test_count_single_sample(self):
+        layer = layers.CategoryEncoding(num_tokens=4, output_mode="count")
+        input_array = np.array([1, 2, 3, 1])
+        expected_output = np.array([0, 2, 1, 1])
+        output = layer._count(input_array)
+        self.assertAllClose(expected_output, output)
+
+    def test_count_batched_samples(self):
+        layer = layers.CategoryEncoding(num_tokens=4, output_mode="count")
+        input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
+        expected_output = np.array([[0, 2, 1, 1], [2, 1, 0, 1]])
+        output = layer._count(input_array)
+
self.assertAllClose(expected_output, output) diff --git a/keras/layers/preprocessing/hashing_test.py b/keras/layers/preprocessing/hashing_test.py index 4cd5f71667e6..cabd567aa6bb 100644 --- a/keras/layers/preprocessing/hashing_test.py +++ b/keras/layers/preprocessing/hashing_test.py @@ -393,6 +393,43 @@ def test_hash_list_input(self, input_data, expected): expected, backend.convert_to_numpy(out_data).tolist() ) + def test_hashing_invalid_num_bins(self): + # Test with `num_bins` set to None + with self.assertRaisesRegex( + ValueError, + "The `num_bins` for `Hashing` cannot be `None` or non-positive", + ): + layers.Hashing(num_bins=None) + + # Test with `num_bins` set to 0 + with self.assertRaisesRegex( + ValueError, + "The `num_bins` for `Hashing` cannot be `None` or non-positive", + ): + layers.Hashing(num_bins=0) + + def test_hashing_invalid_output_mode(self): + # Test with an unsupported `output_mode` + with self.assertRaisesRegex( + ValueError, + "Invalid value for argument `output_mode`. Expected one of", + ): + layers.Hashing(num_bins=3, output_mode="unsupported_mode") + + def test_hashing_invalid_dtype_for_int_mode(self): + with self.assertRaisesRegex( + ValueError, + 'When `output_mode="int"`, `dtype` should be an integer type,', + ): + layers.Hashing(num_bins=3, output_mode="int", dtype="float32") + + def test_hashing_sparse_with_int_mode(self): + # Test setting `sparse=True` with `output_mode='int'` + with self.assertRaisesRegex( + ValueError, "`sparse` may only be true if `output_mode` is" + ): + layers.Hashing(num_bins=3, output_mode="int", sparse=True) + # TODO: support tf.RaggedTensor. # def test_hash_ragged_string_input_farmhash(self): diff --git a/keras/layers/preprocessing/index_lookup_test.py b/keras/layers/preprocessing/index_lookup_test.py index 7bf596e41936..1a0ef9428beb 100644 --- a/keras/layers/preprocessing/index_lookup_test.py +++ b/keras/layers/preprocessing/index_lookup_test.py @@ -427,3 +427,193 @@ def test_adapt_with_tf_data(self): self.assertEqual(list(output), [2, 3, 1]) if backend.backend() != "torch": self.run_class_serialization_test(layer) + + def test_max_tokens_less_than_two(self): + with self.assertRaisesRegex( + ValueError, + "If set, `max_tokens` must be greater than 1.", + ): + layers.IndexLookup( + max_tokens=1, + num_oov_indices=1, + mask_token=None, + oov_token=None, + vocabulary_dtype="int64", + ) + + def test_max_tokens_none_with_pad_to_max_tokens(self): + with self.assertRaisesRegex( + ValueError, + "If pad_to_max_tokens is True, must set `max_tokens`.", + ): + layers.IndexLookup( + num_oov_indices=1, + max_tokens=None, + mask_token=None, + oov_token=None, + vocabulary_dtype="int64", + pad_to_max_tokens=True, + ) + + def test_negative_num_oov_indices(self): + with self.assertRaisesRegex( + ValueError, + "`num_oov_indices` must be greater than or equal to 0.", + ): + layers.IndexLookup( + max_tokens=10, + num_oov_indices=-1, + mask_token=None, + oov_token=None, + vocabulary_dtype="int64", + ) + + def test_invert_with_non_int_output_mode(self): + with self.assertRaisesRegex( + ValueError, r"`output_mode` must be `'int'` when `invert` is true." 
+ ): + layers.IndexLookup( + num_oov_indices=1, + max_tokens=None, + mask_token=None, + oov_token=None, + vocabulary_dtype="string", + invert=True, + output_mode="one_hot", # Invalid combination + ) + + def test_sparse_true_with_int_output_mode(self): + with self.assertRaisesRegex( + ValueError, + r"`sparse` may only be true if `output_mode` is `'one_hot'`", + ): + layers.IndexLookup( + num_oov_indices=1, + max_tokens=None, + mask_token=None, + oov_token=None, + vocabulary_dtype="string", + sparse=True, + output_mode="int", # Invalid combination + ) + + def test_idf_weights_set_with_non_tfidf_output_mode(self): + with self.assertRaisesRegex( + ValueError, + r"`idf_weights` should only be set if `output_mode` is `'tf_idf'`", + ): + layers.IndexLookup( + num_oov_indices=1, + max_tokens=None, + mask_token=None, + oov_token=None, + vocabulary_dtype="string", + idf_weights=[ + 0.5, + 0.1, + 0.3, + ], # Should not be set for non-TF-IDF modes + output_mode="int", + ) + + def test_unrecognized_kwargs(self): + with self.assertRaisesRegex( + ValueError, "Unrecognized keyword argument" + ): + layers.IndexLookup( + num_oov_indices=1, + max_tokens=None, + mask_token=None, + oov_token=None, + vocabulary_dtype="string", + output_mode="int", + # This is an unrecognized argument + extra_arg=True, + ) + + def test_non_tf_idf_with_idf_weights(self): + with self.assertRaisesRegex( + ValueError, + "`idf_weights` should only be set if `output_mode` is", + ): + layers.IndexLookup( + num_oov_indices=1, + max_tokens=None, + mask_token=None, + oov_token=None, + vocabulary_dtype="string", + output_mode="multi_hot", + idf_weights=[ + 0.5, + 0.1, + 0.3, + ], # idf_weights not valid for multi_hot mode + ) + + def test_vocabulary_file_does_not_exist(self): + with self.assertRaisesRegex( + ValueError, + "Vocabulary file path/to/missing_vocab.txt does not exist", + ): + layers.IndexLookup( + num_oov_indices=1, + max_tokens=None, + mask_token=None, + oov_token=None, + vocabulary_dtype="string", + output_mode="int", + # Nonexistent file path + vocabulary="path/to/missing_vocab.txt", + ) + + def test_repeated_tokens_in_vocabulary(self): + with self.assertRaisesRegex( + ValueError, "The passed vocabulary has at least one repeated term." + ): + layers.IndexLookup( + num_oov_indices=1, + max_tokens=None, + mask_token=None, + oov_token=None, + vocabulary_dtype="string", + vocabulary=["token", "token", "unique"], + ) + + def test_mask_token_in_wrong_position(self): + with self.assertRaisesRegex( + ValueError, + "Found reserved mask token at unexpected location in `vocabulary`.", + ): + layers.IndexLookup( + num_oov_indices=1, + max_tokens=None, + mask_token="mask", + oov_token=None, + vocabulary_dtype="string", + vocabulary=[ + "token", + "mask", + "unique", + ], # 'mask' should be at the start if included explicitly + ) + + def test_ensure_known_vocab_size_without_vocabulary(self): + kwargs = { + "num_oov_indices": 1, + # Assume empty string or some default token is valid. + "mask_token": "", + # Assume [OOV] or some default token is valid. + "oov_token": "[OOV]", + "output_mode": "multi_hot", + "pad_to_max_tokens": False, + "vocabulary_dtype": "string", + "max_tokens": None, + } + layer = layers.IndexLookup(**kwargs) + + # Try calling the layer without setting the vocabulary. 
+ with self.assertRaisesRegex( + RuntimeError, "When using `output_mode=multi_hot` and" + ): + input_data = ["sample", "data"] + layer(input_data) diff --git a/keras/layers/preprocessing/normalization_test.py b/keras/layers/preprocessing/normalization_test.py index c0c402709bce..c2db784f95c9 100644 --- a/keras/layers/preprocessing/normalization_test.py +++ b/keras/layers/preprocessing/normalization_test.py @@ -91,10 +91,6 @@ def test_normalization_adapt(self, input_type): self.assertAllClose(np.var(output, axis=(0, 3)), 1.0, atol=1e-5) self.assertAllClose(np.mean(output, axis=(0, 3)), 0.0, atol=1e-5) - def test_normalization_errors(self): - # TODO - pass - @pytest.mark.skipif( backend.backend() != "torch", reason="Test symbolic call for torch meta device.", @@ -107,3 +103,44 @@ def test_call_on_meta_device_after_built(self): layer.adapt(data) with core.device_scope("meta"): layer(data) + + def test_normalization_with_mean_only_raises_error(self): + # Test error when only `mean` is provided + with self.assertRaisesRegex( + ValueError, "both `mean` and `variance` must be set" + ): + layers.Normalization(mean=0.5) + + def test_normalization_with_variance_only_raises_error(self): + # Test error when only `variance` is provided + with self.assertRaisesRegex( + ValueError, "both `mean` and `variance` must be set" + ): + layers.Normalization(variance=0.1) + + def test_normalization_axis_too_high(self): + with self.assertRaisesRegex( + ValueError, "All `axis` values must be in the range" + ): + layer = layers.Normalization(axis=3) + layer.build((2, 2)) + + def test_normalization_axis_too_low(self): + with self.assertRaisesRegex( + ValueError, "All `axis` values must be in the range" + ): + layer = layers.Normalization(axis=-4) + layer.build((2, 3, 4)) + + def test_normalization_unknown_axis_shape(self): + with self.assertRaisesRegex(ValueError, "All `axis` values to be kept"): + layer = layers.Normalization(axis=1) + layer.build((None, None)) + + def test_normalization_adapt_with_incompatible_shape(self): + layer = layers.Normalization(axis=-1) + initial_shape = (10, 5) + layer.build(initial_shape) + new_shape_data = np.random.random((10, 3)) + with self.assertRaisesRegex(ValueError, "an incompatible shape"): + layer.adapt(new_shape_data) diff --git a/keras/layers/preprocessing/random_brightness.py b/keras/layers/preprocessing/random_brightness.py index 8055dbeab9ec..16816c66d544 100644 --- a/keras/layers/preprocessing/random_brightness.py +++ b/keras/layers/preprocessing/random_brightness.py @@ -80,12 +80,12 @@ def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs): def _set_value_range(self, value_range): if not isinstance(value_range, (tuple, list)): raise ValueError( - self.value_range_VALIDATION_ERROR + self._VALUE_RANGE_VALIDATION_ERROR + f"Received: value_range={value_range}" ) if len(value_range) != 2: raise ValueError( - self.value_range_VALIDATION_ERROR + self._VALUE_RANGE_VALIDATION_ERROR + f"Received: value_range={value_range}" ) self.value_range = sorted(value_range) diff --git a/keras/layers/preprocessing/random_brightness_test.py b/keras/layers/preprocessing/random_brightness_test.py index 1044d28eace4..129ddd946642 100644 --- a/keras/layers/preprocessing/random_brightness_test.py +++ b/keras/layers/preprocessing/random_brightness_test.py @@ -58,3 +58,59 @@ def test_tf_data_compatibility(self): ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer) for output in ds.take(1): output.numpy() + + def test_value_range_incorrect_type(self): + with 
self.assertRaisesRegex( + ValueError, + "The `value_range` argument should be a list of two numbers.*", + ): + layers.RandomBrightness(factor=0.1, value_range="incorrect_type") + + def test_value_range_incorrect_length(self): + with self.assertRaisesRegex( + ValueError, + "The `value_range` argument should be a list of two numbers.*", + ): + layers.RandomBrightness(factor=0.1, value_range=[10]) + + def test_set_factor_incorrect_length(self): + layer = layers.RandomBrightness(factor=0.5) + with self.assertRaisesRegex( + ValueError, "The `factor` argument should be a number.*" + ): + layer._set_factor([0.1]) # Only one element in list + + def test_set_factor_incorrect_type(self): + layer = layers.RandomBrightness(factor=0.5) + with self.assertRaisesRegex( + ValueError, "The `factor` argument should be a number.*" + ): + layer._set_factor( + "invalid_type" + ) # Passing a string instead of a number or a list/tuple of numbers + + def test_factor_range_below_lower_bound(self): + with self.assertRaisesRegex( + ValueError, "The `factor` argument should be a number.*" + ): + # Passing a value less than -1.0 + layers.RandomBrightness(factor=-1.1) + + def test_factor_range_above_upper_bound(self): + with self.assertRaisesRegex( + ValueError, "The `factor` argument should be a number.*" + ): + # Passing a value more than 1.0 + layers.RandomBrightness(factor=1.1) + + def test_randomly_adjust_brightness_input_incorrect_rank(self): + layer = layers.RandomBrightness(factor=0.1) + wrong_rank_input = np.random.rand(10, 10) + + with self.assertRaisesRegex( + ValueError, + "Expected the input image to be rank 3 or 4.", + ): + layer( + wrong_rank_input, training=True + ) # Call the method that triggers the error From d33b5852b9f8ef7a8fe6c69ee2eae566fe6b7bf2 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Tue, 16 Apr 2024 12:30:47 -0700 Subject: [PATCH 009/101] Add test for spectral norm --- .../normalization/spectral_normalization_test.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/keras/layers/normalization/spectral_normalization_test.py b/keras/layers/normalization/spectral_normalization_test.py index 19b870b6909b..8f14ff845686 100644 --- a/keras/layers/normalization/spectral_normalization_test.py +++ b/keras/layers/normalization/spectral_normalization_test.py @@ -4,6 +4,7 @@ from keras import backend from keras import initializers from keras import layers +from keras import models from keras import testing @@ -66,3 +67,18 @@ def test_apply_layer(self): self.assertAllClose(result, expected_output) # max eigen value of 2x2 matrix of ones is 2 self.assertAllClose(result_train, expected_output / 2) + + def test_end_to_end(self): + sn_wrapper = layers.SpectralNormalization( + layers.Conv2D( + 3, + (2, 2), + padding="same", + ), + power_iterations=2, + ) + model = models.Sequential([sn_wrapper]) + model.compile("rmsprop", loss="mse") + x = np.random.random((4, 8, 8, 3)) + y = np.random.random((4, 8, 8, 3)) + model.fit(x, y) From a4e27166eaa58b35667aedac6e2ff5c38bf9c3b0 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Tue, 16 Apr 2024 13:47:12 -0700 Subject: [PATCH 010/101] Add missing test decorator --- keras/layers/normalization/spectral_normalization_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/keras/layers/normalization/spectral_normalization_test.py b/keras/layers/normalization/spectral_normalization_test.py index 8f14ff845686..efa522140d90 100644 --- a/keras/layers/normalization/spectral_normalization_test.py +++ 
b/keras/layers/normalization/spectral_normalization_test.py
@@ -68,6 +68,7 @@ def test_apply_layer(self):
         # max eigen value of 2x2 matrix of ones is 2
         self.assertAllClose(result_train, expected_output / 2)
 
+    @pytest.mark.requires_trainable_backend
     def test_end_to_end(self):
         sn_wrapper = layers.SpectralNormalization(
             layers.Conv2D(

From 344b3e3f4d03b2b76dbdfd231cecb70f6bb94ed1 Mon Sep 17 00:00:00 2001
From: Francois Chollet
Date: Tue, 16 Apr 2024 14:12:12 -0700
Subject: [PATCH 011/101] Fix torch test

---
 keras/layers/normalization/spectral_normalization_test.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/keras/layers/normalization/spectral_normalization_test.py b/keras/layers/normalization/spectral_normalization_test.py
index efa522140d90..f3c6920224cf 100644
--- a/keras/layers/normalization/spectral_normalization_test.py
+++ b/keras/layers/normalization/spectral_normalization_test.py
@@ -75,6 +75,7 @@ def test_end_to_end(self):
                 3,
                 (2, 2),
                 padding="same",
+                data_format="channels_last"
             ),
             power_iterations=2,
         )

From 559f1dd9ff3ccaedc22e217c2564964f0a119828 Mon Sep 17 00:00:00 2001
From: Francois Chollet
Date: Tue, 16 Apr 2024 14:12:28 -0700
Subject: [PATCH 012/101] Fix code format

---
 keras/layers/normalization/spectral_normalization_test.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/keras/layers/normalization/spectral_normalization_test.py b/keras/layers/normalization/spectral_normalization_test.py
index f3c6920224cf..632edd20ecbe 100644
--- a/keras/layers/normalization/spectral_normalization_test.py
+++ b/keras/layers/normalization/spectral_normalization_test.py
@@ -72,10 +72,7 @@ def test_apply_layer(self):
     def test_end_to_end(self):
         sn_wrapper = layers.SpectralNormalization(
             layers.Conv2D(
-                3,
-                (2, 2),
-                padding="same",
-                data_format="channels_last"
+                3, (2, 2), padding="same", data_format="channels_last"
             ),
             power_iterations=2,
         )

From 1937d487b0ee4cf265051c7bd0524f2fd1a2cb51 Mon Sep 17 00:00:00 2001
From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com>
Date: Tue, 16 Apr 2024 17:01:16 -0500
Subject: [PATCH 013/101] Generate API (#19530)

* API Generator for Keras
* Remove recursive import of _tf_keras
* Generate API files via api_gen.sh

---
 .github/workflows/actions.yml | 23 +-
 .github/workflows/nightly.yml | 13 +-
 api_gen.py | 175 ++++++++++
 conftest.py | 2 +-
 integration_tests/basic_full_flow.py | 10 +-
 .../dataset_tests/boston_housing_test.py | 4 +-
 .../dataset_tests/california_housing_test.py | 4 +-
 .../dataset_tests/cifar100_test.py | 4 +-
 .../dataset_tests/cifar10_test.py | 4 +-
 .../dataset_tests/fashion_mnist_test.py | 4 +-
 integration_tests/dataset_tests/imdb_test.py | 4 +-
 integration_tests/dataset_tests/mnist_test.py | 4 +-
 .../dataset_tests/reuters_test.py | 4 +-
 integration_tests/import_test.py | 19 +-
 integration_tests/model_visualization_test.py | 2 +-
 .../tf_distribute_training_test.py | 12 +-
 integration_tests/torch_workflow_test.py | 6 +-
 keras/__init__.py | 52 ++--
 keras/api/__init__.py | 58 +++++
 keras/api/_tf_keras/__init__.py | 1 +
 keras/api/_tf_keras/keras/__init__.py | 57 +++++
 .../_tf_keras/keras/activations/__init__.py | 29 +++
 .../_tf_keras/keras/applications/__init__.py | 63 +++++
 .../keras/applications/convnext/__init__.py | 13 +
 .../keras/applications/densenet/__init__.py | 11 +
 .../applications/efficientnet/__init__.py | 16 ++
 .../applications/efficientnet_v2/__init__.py | 15 ++
 .../applications/imagenet_utils/__init__.py | 8 +
.../inception_resnet_v2/__init__.py | 9 + .../applications/inception_v3/__init__.py | 9 + .../keras/applications/mobilenet/__init__.py | 9 + .../applications/mobilenet_v2/__init__.py | 9 + .../applications/mobilenet_v3/__init__.py | 8 + .../keras/applications/nasnet/__init__.py | 10 + .../keras/applications/resnet/__init__.py | 11 + .../keras/applications/resnet50/__init__.py | 9 + .../keras/applications/resnet_v2/__init__.py | 11 + .../keras/applications/vgg16/__init__.py | 9 + .../keras/applications/vgg19/__init__.py | 9 + .../keras/applications/xception/__init__.py | 9 + keras/api/_tf_keras/keras/backend/__init__.py | 20 ++ .../api/_tf_keras/keras/callbacks/__init__.py | 21 ++ keras/api/_tf_keras/keras/config/__init__.py | 23 ++ .../_tf_keras/keras/constraints/__init__.py | 18 ++ .../api/_tf_keras/keras/datasets/__init__.py | 14 ++ .../keras/datasets/boston_housing/__init__.py | 7 + .../datasets/california_housing/__init__.py | 7 + .../keras/datasets/cifar10/__init__.py | 7 + .../keras/datasets/cifar100/__init__.py | 7 + .../keras/datasets/fashion_mnist/__init__.py | 7 + .../_tf_keras/keras/datasets/imdb/__init__.py | 8 + .../keras/datasets/mnist/__init__.py | 7 + .../keras/datasets/reuters/__init__.py | 9 + .../_tf_keras/keras/distribution/__init__.py | 16 ++ .../keras/dtype_policies/__init__.py | 10 + keras/api/_tf_keras/keras/export/__init__.py | 7 + .../_tf_keras/keras/initializers/__init__.py | 64 +++++ keras/api/_tf_keras/keras/layers/__init__.py | 195 +++++++++++++++ keras/api/_tf_keras/keras/legacy/__init__.py | 7 + .../_tf_keras/keras/legacy/saving/__init__.py | 8 + keras/api/_tf_keras/keras/losses/__init__.py | 50 ++++ keras/api/_tf_keras/keras/metrics/__init__.py | 76 ++++++ .../keras/mixed_precision/__init__.py | 15 ++ keras/api/_tf_keras/keras/models/__init__.py | 12 + keras/api/_tf_keras/keras/ops/__init__.py | 223 ++++++++++++++++++ .../api/_tf_keras/keras/ops/image/__init__.py | 13 + .../_tf_keras/keras/ops/linalg/__init__.py | 16 ++ keras/api/_tf_keras/keras/ops/nn/__init__.py | 38 +++ .../api/_tf_keras/keras/ops/numpy/__init__.py | 146 ++++++++++++ .../_tf_keras/keras/optimizers/__init__.py | 24 ++ .../keras/optimizers/legacy/__init__.py | 12 + .../keras/optimizers/schedules/__init__.py | 27 +++ .../_tf_keras/keras/preprocessing/__init__.py | 13 + .../keras/preprocessing/image/__init__.py | 11 + .../keras/preprocessing/sequence/__init__.py | 7 + .../_tf_keras/keras/quantizers/__init__.py | 15 ++ keras/api/_tf_keras/keras/random/__init__.py | 17 ++ .../_tf_keras/keras/regularizers/__init__.py | 20 ++ keras/api/_tf_keras/keras/saving/__init__.py | 20 ++ keras/api/_tf_keras/keras/tree/__init__.py | 15 ++ keras/api/_tf_keras/keras/utils/__init__.py | 54 +++++ .../_tf_keras/keras/utils/legacy/__init__.py | 8 + keras/api/activations/__init__.py | 29 +++ keras/api/applications/__init__.py | 63 +++++ keras/api/applications/convnext/__init__.py | 13 + keras/api/applications/densenet/__init__.py | 11 + .../api/applications/efficientnet/__init__.py | 16 ++ .../applications/efficientnet_v2/__init__.py | 15 ++ .../applications/imagenet_utils/__init__.py | 8 + .../inception_resnet_v2/__init__.py | 9 + .../api/applications/inception_v3/__init__.py | 9 + keras/api/applications/mobilenet/__init__.py | 9 + .../api/applications/mobilenet_v2/__init__.py | 9 + .../api/applications/mobilenet_v3/__init__.py | 8 + keras/api/applications/nasnet/__init__.py | 10 + keras/api/applications/resnet/__init__.py | 11 + keras/api/applications/resnet50/__init__.py | 9 + 
keras/api/applications/resnet_v2/__init__.py | 11 + keras/api/applications/vgg16/__init__.py | 9 + keras/api/applications/vgg19/__init__.py | 9 + keras/api/applications/xception/__init__.py | 9 + keras/api/backend/__init__.py | 20 ++ keras/api/callbacks/__init__.py | 21 ++ keras/api/config/__init__.py | 23 ++ keras/api/constraints/__init__.py | 18 ++ keras/api/datasets/__init__.py | 14 ++ keras/api/datasets/boston_housing/__init__.py | 7 + .../datasets/california_housing/__init__.py | 7 + keras/api/datasets/cifar10/__init__.py | 7 + keras/api/datasets/cifar100/__init__.py | 7 + keras/api/datasets/fashion_mnist/__init__.py | 7 + keras/api/datasets/imdb/__init__.py | 8 + keras/api/datasets/mnist/__init__.py | 7 + keras/api/datasets/reuters/__init__.py | 9 + keras/api/distribution/__init__.py | 16 ++ keras/api/dtype_policies/__init__.py | 10 + keras/api/export/__init__.py | 7 + keras/api/initializers/__init__.py | 64 +++++ keras/api/layers/__init__.py | 195 +++++++++++++++ keras/api/legacy/__init__.py | 7 + keras/api/legacy/saving/__init__.py | 8 + keras/api/losses/__init__.py | 50 ++++ keras/api/metrics/__init__.py | 76 ++++++ keras/api/mixed_precision/__init__.py | 15 ++ keras/api/models/__init__.py | 12 + keras/api/ops/__init__.py | 223 ++++++++++++++++++ keras/api/ops/image/__init__.py | 13 + keras/api/ops/linalg/__init__.py | 16 ++ keras/api/ops/nn/__init__.py | 38 +++ keras/api/ops/numpy/__init__.py | 146 ++++++++++++ keras/api/optimizers/__init__.py | 24 ++ keras/api/optimizers/legacy/__init__.py | 12 + keras/api/optimizers/schedules/__init__.py | 27 +++ keras/api/preprocessing/__init__.py | 13 + keras/api/preprocessing/image/__init__.py | 11 + keras/api/preprocessing/sequence/__init__.py | 7 + keras/api/quantizers/__init__.py | 15 ++ keras/api/random/__init__.py | 17 ++ keras/api/regularizers/__init__.py | 20 ++ keras/api/saving/__init__.py | 20 ++ keras/api/tree/__init__.py | 15 ++ keras/api/utils/__init__.py | 54 +++++ keras/api/utils/legacy/__init__.py | 8 + keras/backend/__init__.py | 45 ---- keras/backend/common/__init__.py | 10 - keras/backend/jax/__init__.py | 25 -- keras/backend/numpy/__init__.py | 21 -- keras/backend/tensorflow/__init__.py | 27 --- keras/backend/torch/__init__.py | 41 ---- keras/backend/torch/optimizers/__init__.py | 1 - keras/backend/torch/optimizers/torch_adamw.py | 6 - keras/callbacks/__init__.py | 15 -- keras/datasets/__init__.py | 10 - keras/distribution/__init__.py | 11 - keras/export/__init__.py | 1 - keras/layers/__init__.py | 161 ------------- keras/layers/activations/__init__.py | 5 - keras/models/__init__.py | 3 - keras/ops/__init__.py | 16 -- keras/optimizers/schedules/__init__.py | 10 - keras/random/__init__.py | 9 - keras/saving/__init__.py | 9 - keras/src/__init__.py | 19 ++ keras/{ => src}/activations/__init__.py | 42 ++-- keras/{ => src}/activations/activations.py | 6 +- .../{ => src}/activations/activations_test.py | 6 +- keras/{ => src}/api_export.py | 0 keras/{ => src}/applications/__init__.py | 0 .../applications/applications_test.py | 40 ++-- keras/{ => src}/applications/convnext.py | 24 +- keras/{ => src}/applications/densenet.py | 14 +- keras/{ => src}/applications/efficientnet.py | 14 +- .../{ => src}/applications/efficientnet_v2.py | 16 +- .../{ => src}/applications/imagenet_utils.py | 10 +- .../applications/imagenet_utils_test.py | 8 +- .../applications/inception_resnet_v2.py | 16 +- keras/{ => src}/applications/inception_v3.py | 14 +- keras/{ => src}/applications/mobilenet.py | 14 +- keras/{ => src}/applications/mobilenet_v2.py | 
14 +- keras/{ => src}/applications/mobilenet_v3.py | 14 +- keras/{ => src}/applications/nasnet.py | 14 +- keras/{ => src}/applications/resnet.py | 14 +- keras/{ => src}/applications/resnet_v2.py | 6 +- keras/{ => src}/applications/vgg16.py | 14 +- keras/{ => src}/applications/vgg19.py | 14 +- keras/{ => src}/applications/xception.py | 14 +- keras/src/backend/__init__.py | 45 ++++ keras/src/backend/common/__init__.py | 10 + .../{ => src}/backend/common/backend_utils.py | 0 .../backend/common/backend_utils_test.py | 12 +- .../common/compute_output_spec_test.py | 4 +- keras/{ => src}/backend/common/dtypes.py | 6 +- keras/{ => src}/backend/common/dtypes_test.py | 12 +- .../{ => src}/backend/common/global_state.py | 6 +- .../backend/common/global_state_test.py | 6 +- .../{ => src}/backend/common/keras_tensor.py | 78 +++--- .../backend/common/keras_tensor_test.py | 72 +++--- keras/{ => src}/backend/common/name_scope.py | 2 +- .../backend/common/name_scope_test.py | 6 +- .../backend/common/stateless_scope.py | 12 +- .../backend/common/stateless_scope_test.py | 8 +- keras/{ => src}/backend/common/variables.py | 28 +-- .../backend/common/variables_test.py | 20 +- keras/{ => src}/backend/config.py | 2 +- keras/{ => src}/backend/exports.py | 6 +- keras/src/backend/jax/__init__.py | 25 ++ keras/{ => src}/backend/jax/core.py | 14 +- .../{ => src}/backend/jax/distribution_lib.py | 2 +- .../backend/jax/distribution_lib_test.py | 12 +- keras/{ => src}/backend/jax/image.py | 2 +- keras/{ => src}/backend/jax/layer.py | 0 keras/{ => src}/backend/jax/linalg.py | 10 +- keras/{ => src}/backend/jax/math.py | 12 +- keras/{ => src}/backend/jax/nn.py | 12 +- keras/{ => src}/backend/jax/numpy.py | 20 +- keras/{ => src}/backend/jax/optimizer.py | 2 +- keras/{ => src}/backend/jax/random.py | 8 +- keras/{ => src}/backend/jax/rnn.py | 4 +- keras/{ => src}/backend/jax/sparse.py | 2 +- keras/{ => src}/backend/jax/trainer.py | 22 +- keras/src/backend/numpy/__init__.py | 21 ++ keras/{ => src}/backend/numpy/core.py | 12 +- keras/{ => src}/backend/numpy/image.py | 4 +- keras/{ => src}/backend/numpy/layer.py | 0 keras/{ => src}/backend/numpy/linalg.py | 6 +- keras/{ => src}/backend/numpy/math.py | 12 +- keras/{ => src}/backend/numpy/nn.py | 16 +- keras/{ => src}/backend/numpy/numpy.py | 12 +- keras/{ => src}/backend/numpy/random.py | 10 +- keras/{ => src}/backend/numpy/rnn.py | 2 +- keras/{ => src}/backend/numpy/trainer.py | 20 +- keras/src/backend/tensorflow/__init__.py | 27 +++ keras/{ => src}/backend/tensorflow/core.py | 20 +- .../backend/tensorflow/distribute_test.py | 10 +- .../backend/tensorflow/distribution_lib.py | 0 keras/{ => src}/backend/tensorflow/image.py | 2 +- keras/{ => src}/backend/tensorflow/layer.py | 14 +- keras/{ => src}/backend/tensorflow/linalg.py | 10 +- keras/{ => src}/backend/tensorflow/math.py | 10 +- .../backend/tensorflow/name_scope_test.py | 4 +- keras/{ => src}/backend/tensorflow/nn.py | 12 +- keras/{ => src}/backend/tensorflow/numpy.py | 28 ++- .../{ => src}/backend/tensorflow/optimizer.py | 8 +- .../tensorflow/optimizer_distribute_test.py | 6 +- keras/{ => src}/backend/tensorflow/random.py | 10 +- keras/{ => src}/backend/tensorflow/rnn.py | 14 +- .../backend/tensorflow/saved_model_test.py | 14 +- keras/{ => src}/backend/tensorflow/sparse.py | 0 .../backend/tensorflow/tensorboard.py | 0 .../{ => src}/backend/tensorflow/trackable.py | 2 +- keras/{ => src}/backend/tensorflow/trainer.py | 18 +- .../backend/tests/compute_output_spec_test.py | 6 +- .../backend/tests/device_scope_test.py | 4 +- 
keras/src/backend/torch/__init__.py | 41 ++++ keras/{ => src}/backend/torch/core.py | 16 +- keras/{ => src}/backend/torch/image.py | 2 +- keras/{ => src}/backend/torch/layer.py | 8 +- keras/{ => src}/backend/torch/linalg.py | 10 +- keras/{ => src}/backend/torch/math.py | 14 +- keras/{ => src}/backend/torch/nn.py | 24 +- keras/{ => src}/backend/torch/numpy.py | 22 +- .../src/backend/torch/optimizers/__init__.py | 1 + .../torch/optimizers/torch_adadelta.py | 6 +- .../backend/torch/optimizers/torch_adagrad.py | 6 +- .../backend/torch/optimizers/torch_adam.py | 6 +- .../backend/torch/optimizers/torch_adamax.py | 6 +- .../backend/torch/optimizers/torch_adamw.py | 6 + .../backend/torch/optimizers/torch_lion.py | 6 +- .../backend/torch/optimizers/torch_nadam.py | 8 +- .../torch/optimizers/torch_optimizer.py | 24 +- .../optimizers/torch_parallel_optimizer.py | 4 +- .../backend/torch/optimizers/torch_rmsprop.py | 6 +- .../backend/torch/optimizers/torch_sgd.py | 4 +- keras/{ => src}/backend/torch/random.py | 14 +- keras/{ => src}/backend/torch/rnn.py | 4 +- keras/{ => src}/backend/torch/trainer.py | 18 +- keras/src/callbacks/__init__.py | 15 ++ .../{ => src}/callbacks/backup_and_restore.py | 6 +- .../callbacks/backup_and_restore_test.py | 10 +- keras/{ => src}/callbacks/callback.py | 4 +- keras/{ => src}/callbacks/callback_list.py | 10 +- keras/{ => src}/callbacks/callback_test.py | 6 +- keras/{ => src}/callbacks/csv_logger.py | 6 +- keras/{ => src}/callbacks/csv_logger_test.py | 12 +- keras/{ => src}/callbacks/early_stopping.py | 10 +- .../callbacks/early_stopping_test.py | 12 +- keras/{ => src}/callbacks/history.py | 4 +- keras/{ => src}/callbacks/lambda_callback.py | 4 +- .../callbacks/lambda_callback_test.py | 12 +- .../callbacks/learning_rate_scheduler.py | 8 +- .../callbacks/learning_rate_scheduler_test.py | 16 +- keras/{ => src}/callbacks/model_checkpoint.py | 10 +- .../callbacks/model_checkpoint_test.py | 18 +- keras/{ => src}/callbacks/progbar_logger.py | 8 +- .../callbacks/reduce_lr_on_plateau.py | 8 +- .../callbacks/reduce_lr_on_plateau_test.py | 16 +- keras/{ => src}/callbacks/remote_monitor.py | 4 +- .../callbacks/remote_monitor_test.py | 12 +- keras/{ => src}/callbacks/swap_ema_weights.py | 8 +- .../callbacks/swap_ema_weights_test.py | 22 +- keras/{ => src}/callbacks/tensorboard.py | 16 +- keras/{ => src}/callbacks/tensorboard_test.py | 18 +- keras/{ => src}/callbacks/terminate_on_nan.py | 6 +- .../callbacks/terminate_on_nan_test.py | 12 +- keras/{ => src}/constraints/__init__.py | 16 +- keras/{ => src}/constraints/constraints.py | 6 +- .../{ => src}/constraints/constraints_test.py | 6 +- keras/src/datasets/__init__.py | 10 + keras/{ => src}/datasets/boston_housing.py | 4 +- .../{ => src}/datasets/california_housing.py | 4 +- keras/{ => src}/datasets/cifar.py | 0 keras/{ => src}/datasets/cifar10.py | 8 +- keras/{ => src}/datasets/cifar100.py | 8 +- keras/{ => src}/datasets/fashion_mnist.py | 4 +- keras/{ => src}/datasets/imdb.py | 6 +- keras/{ => src}/datasets/mnist.py | 4 +- keras/{ => src}/datasets/reuters.py | 6 +- keras/src/distribution/__init__.py | 11 + .../distribution/distribution_lib.py | 12 +- .../distribution/distribution_lib_test.py | 8 +- keras/{ => src}/dtype_policies/__init__.py | 16 +- .../{ => src}/dtype_policies/dtype_policy.py | 8 +- .../dtype_policies/dtype_policy_test.py | 18 +- keras/src/export/__init__.py | 1 + keras/{ => src}/export/export_lib.py | 24 +- keras/{ => src}/export/export_lib_test.py | 22 +- keras/{ => src}/initializers/__init__.py | 38 +-- 
.../initializers/constant_initializers.py | 10 +- .../constant_initializers_test.py | 6 +- keras/{ => src}/initializers/initializer.py | 2 +- .../initializers/random_initializers.py | 10 +- .../initializers/random_initializers_test.py | 8 +- keras/src/layers/__init__.py | 175 ++++++++++++++ keras/src/layers/activations/__init__.py | 5 + .../layers/activations/activation.py | 6 +- .../layers/activations/activation_test.py | 6 +- keras/{ => src}/layers/activations/elu.py | 6 +- .../{ => src}/layers/activations/elu_test.py | 4 +- .../layers/activations/leaky_relu.py | 6 +- .../layers/activations/leaky_relu_test.py | 4 +- keras/{ => src}/layers/activations/prelu.py | 14 +- .../layers/activations/prelu_test.py | 4 +- keras/{ => src}/layers/activations/relu.py | 6 +- .../{ => src}/layers/activations/relu_test.py | 4 +- keras/{ => src}/layers/activations/softmax.py | 8 +- .../layers/activations/softmax_test.py | 4 +- keras/{ => src}/layers/attention/__init__.py | 0 .../layers/attention/additive_attention.py | 6 +- .../attention/additive_attention_test.py | 4 +- keras/{ => src}/layers/attention/attention.py | 8 +- .../layers/attention/attention_test.py | 6 +- .../attention/grouped_query_attention.py | 18 +- .../attention/grouped_query_attention_test.py | 8 +- .../layers/attention/multi_head_attention.py | 20 +- .../attention/multi_head_attention_test.py | 14 +- .../layers/convolutional/__init__.py | 0 .../layers/convolutional/base_conv.py | 22 +- .../convolutional/base_conv_transpose.py | 22 +- .../convolutional/base_depthwise_conv.py | 22 +- .../convolutional/base_separable_conv.py | 22 +- .../{ => src}/layers/convolutional/conv1d.py | 6 +- .../layers/convolutional/conv1d_transpose.py | 4 +- .../{ => src}/layers/convolutional/conv2d.py | 4 +- .../layers/convolutional/conv2d_transpose.py | 4 +- .../{ => src}/layers/convolutional/conv3d.py | 4 +- .../layers/convolutional/conv3d_transpose.py | 4 +- .../layers/convolutional/conv_test.py | 12 +- .../convolutional/conv_transpose_test.py | 12 +- .../layers/convolutional/depthwise_conv1d.py | 4 +- .../layers/convolutional/depthwise_conv2d.py | 4 +- .../convolutional/depthwise_conv_test.py | 4 +- .../layers/convolutional/separable_conv1d.py | 4 +- .../layers/convolutional/separable_conv2d.py | 4 +- .../convolutional/separable_conv_test.py | 16 +- keras/{ => src}/layers/core/__init__.py | 0 keras/{ => src}/layers/core/dense.py | 22 +- keras/{ => src}/layers/core/dense_test.py | 24 +- keras/{ => src}/layers/core/einsum_dense.py | 22 +- .../layers/core/einsum_dense_test.py | 22 +- keras/{ => src}/layers/core/embedding.py | 18 +- keras/{ => src}/layers/core/embedding_test.py | 16 +- keras/{ => src}/layers/core/identity.py | 8 +- keras/{ => src}/layers/core/identity_test.py | 6 +- keras/{ => src}/layers/core/input_layer.py | 8 +- .../{ => src}/layers/core/input_layer_test.py | 8 +- keras/{ => src}/layers/core/lambda_layer.py | 12 +- .../layers/core/lambda_layer_test.py | 6 +- keras/{ => src}/layers/core/masking.py | 8 +- keras/{ => src}/layers/core/masking_test.py | 6 +- keras/{ => src}/layers/core/wrapper.py | 6 +- keras/{ => src}/layers/core/wrapper_test.py | 6 +- keras/{ => src}/layers/input_spec.py | 6 +- keras/{ => src}/layers/layer.py | 46 ++-- keras/{ => src}/layers/layer_test.py | 14 +- keras/{ => src}/layers/merging/__init__.py | 0 keras/{ => src}/layers/merging/add.py | 6 +- keras/{ => src}/layers/merging/average.py | 6 +- keras/{ => src}/layers/merging/base_merge.py | 8 +- keras/{ => src}/layers/merging/concatenate.py | 6 +- keras/{ => 
src}/layers/merging/dot.py | 8 +- keras/{ => src}/layers/merging/maximum.py | 6 +- .../{ => src}/layers/merging/merging_test.py | 8 +- keras/{ => src}/layers/merging/minimum.py | 6 +- keras/{ => src}/layers/merging/multiply.py | 6 +- keras/{ => src}/layers/merging/subtract.py | 6 +- .../layers/normalization/__init__.py | 0 .../normalization/batch_normalization.py | 18 +- .../normalization/batch_normalization_test.py | 12 +- .../normalization/group_normalization.py | 14 +- .../normalization/group_normalization_test.py | 8 +- .../normalization/layer_normalization.py | 12 +- .../normalization/layer_normalization_test.py | 10 +- .../normalization/spectral_normalization.py | 12 +- .../spectral_normalization_test.py | 10 +- .../normalization/unit_normalization.py | 6 +- .../normalization/unit_normalization_test.py | 6 +- keras/{ => src}/layers/pooling/__init__.py | 0 .../layers/pooling/average_pooling1d.py | 4 +- .../layers/pooling/average_pooling2d.py | 4 +- .../layers/pooling/average_pooling3d.py | 4 +- .../layers/pooling/average_pooling_test.py | 6 +- .../layers/pooling/base_global_pooling.py | 6 +- .../{ => src}/layers/pooling/base_pooling.py | 12 +- .../pooling/global_average_pooling1d.py | 8 +- .../pooling/global_average_pooling2d.py | 6 +- .../pooling/global_average_pooling3d.py | 6 +- .../pooling/global_average_pooling_test.py | 4 +- .../layers/pooling/global_max_pooling1d.py | 6 +- .../layers/pooling/global_max_pooling2d.py | 6 +- .../layers/pooling/global_max_pooling3d.py | 6 +- .../layers/pooling/global_max_pooling_test.py | 4 +- .../{ => src}/layers/pooling/max_pooling1d.py | 4 +- .../{ => src}/layers/pooling/max_pooling2d.py | 4 +- .../{ => src}/layers/pooling/max_pooling3d.py | 4 +- .../layers/pooling/max_pooling_test.py | 4 +- .../layers/preprocessing/__init__.py | 0 .../preprocessing/audio_preprocessing.py | 4 +- .../preprocessing/audio_preprocessing_test.py | 4 +- .../layers/preprocessing/category_encoding.py | 8 +- .../preprocessing/category_encoding_test.py | 6 +- .../layers/preprocessing/center_crop.py | 8 +- .../layers/preprocessing/center_crop_test.py | 6 +- .../layers/preprocessing/discretization.py | 12 +- .../preprocessing/discretization_test.py | 12 +- .../layers/preprocessing/feature_space.py | 22 +- .../preprocessing/feature_space_test.py | 14 +- .../layers/preprocessing/hashed_crossing.py | 14 +- .../preprocessing/hashed_crossing_test.py | 6 +- .../{ => src}/layers/preprocessing/hashing.py | 12 +- .../layers/preprocessing/hashing_test.py | 10 +- .../layers/preprocessing/index_lookup.py | 10 +- .../layers/preprocessing/index_lookup_test.py | 10 +- .../layers/preprocessing/integer_lookup.py | 10 +- .../preprocessing/integer_lookup_test.py | 6 +- .../layers/preprocessing/normalization.py | 10 +- .../preprocessing/normalization_test.py | 8 +- .../layers/preprocessing/random_brightness.py | 6 +- .../preprocessing/random_brightness_test.py | 6 +- .../layers/preprocessing/random_contrast.py | 6 +- .../preprocessing/random_contrast_test.py | 6 +- .../layers/preprocessing/random_crop.py | 10 +- .../layers/preprocessing/random_crop_test.py | 6 +- .../layers/preprocessing/random_flip.py | 6 +- .../layers/preprocessing/random_flip_test.py | 8 +- .../layers/preprocessing/random_rotation.py | 8 +- .../preprocessing/random_rotation_test.py | 6 +- .../preprocessing/random_translation.py | 8 +- .../preprocessing/random_translation_test.py | 6 +- .../layers/preprocessing/random_zoom.py | 8 +- .../layers/preprocessing/random_zoom_test.py | 8 +- .../layers/preprocessing/rescaling.py | 6 
+- .../layers/preprocessing/rescaling_test.py | 6 +- .../layers/preprocessing/resizing.py | 6 +- .../layers/preprocessing/resizing_test.py | 8 +- .../layers/preprocessing/string_lookup.py | 10 +- .../preprocessing/string_lookup_test.py | 6 +- .../preprocessing/text_vectorization.py | 20 +- .../preprocessing/text_vectorization_test.py | 12 +- .../layers/preprocessing/tf_data_layer.py | 12 +- .../layers/regularization/__init__.py | 0 .../regularization/activity_regularization.py | 6 +- .../activity_regularization_test.py | 4 +- .../layers/regularization/alpha_dropout.py | 8 +- .../regularization/alpha_dropout_test.py | 6 +- .../layers/regularization/dropout.py | 6 +- .../layers/regularization/dropout_test.py | 6 +- .../layers/regularization/gaussian_dropout.py | 8 +- .../regularization/gaussian_dropout_test.py | 6 +- .../layers/regularization/gaussian_noise.py | 8 +- .../regularization/gaussian_noise_test.py | 6 +- .../layers/regularization/spatial_dropout.py | 10 +- .../regularization/spatial_dropout_test.py | 6 +- keras/{ => src}/layers/reshaping/__init__.py | 0 .../{ => src}/layers/reshaping/cropping1d.py | 8 +- .../layers/reshaping/cropping1d_test.py | 6 +- .../{ => src}/layers/reshaping/cropping2d.py | 10 +- .../layers/reshaping/cropping2d_test.py | 8 +- .../{ => src}/layers/reshaping/cropping3d.py | 10 +- .../layers/reshaping/cropping3d_test.py | 8 +- keras/{ => src}/layers/reshaping/flatten.py | 12 +- .../layers/reshaping/flatten_test.py | 8 +- keras/{ => src}/layers/reshaping/permute.py | 10 +- .../layers/reshaping/permute_test.py | 8 +- .../layers/reshaping/repeat_vector.py | 8 +- .../layers/reshaping/repeat_vector_test.py | 6 +- keras/{ => src}/layers/reshaping/reshape.py | 10 +- .../layers/reshaping/reshape_test.py | 8 +- .../layers/reshaping/up_sampling1d.py | 8 +- .../layers/reshaping/up_sampling1d_test.py | 6 +- .../layers/reshaping/up_sampling2d.py | 12 +- .../layers/reshaping/up_sampling2d_test.py | 6 +- .../layers/reshaping/up_sampling3d.py | 12 +- .../layers/reshaping/up_sampling3d_test.py | 6 +- .../layers/reshaping/zero_padding1d.py | 10 +- .../layers/reshaping/zero_padding1d_test.py | 4 +- .../layers/reshaping/zero_padding2d.py | 12 +- .../layers/reshaping/zero_padding2d_test.py | 6 +- .../layers/reshaping/zero_padding3d.py | 12 +- .../layers/reshaping/zero_padding3d_test.py | 6 +- keras/{ => src}/layers/rnn/__init__.py | 0 keras/{ => src}/layers/rnn/bidirectional.py | 12 +- .../layers/rnn/bidirectional_test.py | 6 +- keras/{ => src}/layers/rnn/conv_lstm.py | 26 +- keras/{ => src}/layers/rnn/conv_lstm1d.py | 4 +- .../{ => src}/layers/rnn/conv_lstm1d_test.py | 8 +- keras/{ => src}/layers/rnn/conv_lstm2d.py | 4 +- .../{ => src}/layers/rnn/conv_lstm2d_test.py | 8 +- keras/{ => src}/layers/rnn/conv_lstm3d.py | 4 +- .../{ => src}/layers/rnn/conv_lstm3d_test.py | 8 +- keras/{ => src}/layers/rnn/conv_lstm_test.py | 10 +- .../{ => src}/layers/rnn/dropout_rnn_cell.py | 4 +- .../layers/rnn/dropout_rnn_cell_test.py | 10 +- keras/{ => src}/layers/rnn/gru.py | 24 +- keras/{ => src}/layers/rnn/gru_test.py | 6 +- keras/{ => src}/layers/rnn/lstm.py | 24 +- keras/{ => src}/layers/rnn/lstm_test.py | 6 +- keras/{ => src}/layers/rnn/rnn.py | 22 +- keras/{ => src}/layers/rnn/rnn_test.py | 6 +- keras/{ => src}/layers/rnn/simple_rnn.py | 22 +- keras/{ => src}/layers/rnn/simple_rnn_test.py | 6 +- .../{ => src}/layers/rnn/stacked_rnn_cells.py | 10 +- .../layers/rnn/stacked_rnn_cells_test.py | 8 +- .../{ => src}/layers/rnn/time_distributed.py | 10 +- .../layers/rnn/time_distributed_test.py | 10 
+- keras/{ => src}/legacy/__init__.py | 0 keras/{ => src}/legacy/backend.py | 6 +- keras/{ => src}/legacy/layers.py | 8 +- keras/{ => src}/legacy/losses.py | 2 +- .../legacy/preprocessing/__init__.py | 0 keras/{ => src}/legacy/preprocessing/image.py | 12 +- .../legacy/preprocessing/sequence.py | 4 +- keras/{ => src}/legacy/preprocessing/text.py | 2 +- keras/{ => src}/legacy/saving/__init__.py | 0 keras/{ => src}/legacy/saving/json_utils.py | 6 +- .../legacy/saving/json_utils_test.py | 6 +- .../legacy/saving/legacy_h5_format.py | 18 +- .../legacy/saving/legacy_h5_format_test.py | 16 +- .../{ => src}/legacy/saving/saving_options.py | 2 +- keras/{ => src}/legacy/saving/saving_utils.py | 20 +- .../{ => src}/legacy/saving/serialization.py | 4 +- keras/{ => src}/losses/__init__.py | 86 +++---- keras/{ => src}/losses/loss.py | 10 +- keras/{ => src}/losses/loss_test.py | 12 +- keras/{ => src}/losses/losses.py | 14 +- keras/{ => src}/losses/losses_test.py | 6 +- keras/{ => src}/metrics/__init__.py | 96 ++++---- keras/{ => src}/metrics/accuracy_metrics.py | 10 +- .../metrics/accuracy_metrics_test.py | 4 +- keras/{ => src}/metrics/confusion_metrics.py | 16 +- .../metrics/confusion_metrics_test.py | 12 +- keras/{ => src}/metrics/f_score_metrics.py | 10 +- .../{ => src}/metrics/f_score_metrics_test.py | 4 +- keras/{ => src}/metrics/hinge_metrics.py | 10 +- keras/{ => src}/metrics/hinge_metrics_test.py | 4 +- keras/{ => src}/metrics/iou_metrics.py | 12 +- keras/{ => src}/metrics/iou_metrics_test.py | 8 +- keras/{ => src}/metrics/metric.py | 12 +- keras/{ => src}/metrics/metric_test.py | 12 +- keras/{ => src}/metrics/metrics_utils.py | 8 +- .../metrics/probabilistic_metrics.py | 14 +- .../metrics/probabilistic_metrics_test.py | 4 +- keras/{ => src}/metrics/reduction_metrics.py | 14 +- .../metrics/reduction_metrics_test.py | 6 +- keras/{ => src}/metrics/regression_metrics.py | 22 +- .../metrics/regression_metrics_test.py | 4 +- keras/src/models/__init__.py | 3 + keras/{ => src}/models/cloning.py | 20 +- keras/{ => src}/models/cloning_test.py | 10 +- keras/{ => src}/models/functional.py | 36 +-- keras/{ => src}/models/functional_test.py | 14 +- keras/{ => src}/models/model.py | 46 ++-- keras/{ => src}/models/model_test.py | 14 +- keras/{ => src}/models/sequential.py | 20 +- keras/{ => src}/models/sequential_test.py | 12 +- keras/{ => src}/models/variable_mapping.py | 8 +- .../{ => src}/models/variable_mapping_test.py | 4 +- keras/src/ops/__init__.py | 16 ++ keras/{ => src}/ops/core.py | 14 +- keras/{ => src}/ops/core_test.py | 22 +- keras/{ => src}/ops/function.py | 10 +- keras/{ => src}/ops/function_test.py | 14 +- keras/{ => src}/ops/image.py | 16 +- keras/{ => src}/ops/image_test.py | 8 +- keras/{ => src}/ops/linalg.py | 12 +- keras/{ => src}/ops/linalg_test.py | 12 +- keras/{ => src}/ops/math.py | 12 +- keras/{ => src}/ops/math_test.py | 10 +- keras/{ => src}/ops/nn.py | 18 +- keras/{ => src}/ops/nn_test.py | 52 ++-- keras/{ => src}/ops/node.py | 6 +- keras/{ => src}/ops/node_test.py | 8 +- keras/{ => src}/ops/numpy.py | 32 +-- keras/{ => src}/ops/numpy_test.py | 14 +- keras/{ => src}/ops/operation.py | 20 +- keras/{ => src}/ops/operation_test.py | 10 +- keras/{ => src}/ops/operation_utils.py | 8 +- keras/{ => src}/ops/operation_utils_test.py | 10 +- keras/{ => src}/ops/symbolic_arguments.py | 4 +- .../{ => src}/ops/symbolic_arguments_test.py | 8 +- keras/{ => src}/optimizers/__init__.py | 30 +-- keras/{ => src}/optimizers/adadelta.py | 6 +- keras/{ => src}/optimizers/adadelta_test.py | 8 +- keras/{ => 
src}/optimizers/adafactor.py | 8 +- keras/{ => src}/optimizers/adafactor_test.py | 6 +- keras/{ => src}/optimizers/adagrad.py | 8 +- keras/{ => src}/optimizers/adagrad_test.py | 8 +- keras/{ => src}/optimizers/adam.py | 6 +- keras/{ => src}/optimizers/adam_test.py | 8 +- keras/{ => src}/optimizers/adamax.py | 6 +- keras/{ => src}/optimizers/adamax_test.py | 8 +- keras/{ => src}/optimizers/adamw.py | 6 +- keras/{ => src}/optimizers/adamw_test.py | 8 +- keras/{ => src}/optimizers/base_optimizer.py | 14 +- keras/{ => src}/optimizers/ftrl.py | 8 +- keras/{ => src}/optimizers/ftrl_test.py | 6 +- keras/{ => src}/optimizers/lion.py | 6 +- keras/{ => src}/optimizers/lion_test.py | 8 +- .../optimizers/loss_scale_optimizer.py | 16 +- .../optimizers/loss_scale_optimizer_test.py | 10 +- keras/{ => src}/optimizers/nadam.py | 8 +- keras/{ => src}/optimizers/nadam_test.py | 8 +- keras/{ => src}/optimizers/optimizer.py | 12 +- .../optimizers/optimizer_sparse_test.py | 8 +- keras/{ => src}/optimizers/optimizer_test.py | 12 +- keras/{ => src}/optimizers/rmsprop.py | 6 +- keras/{ => src}/optimizers/rmsprop_test.py | 8 +- keras/src/optimizers/schedules/__init__.py | 16 ++ .../schedules/learning_rate_schedule.py | 6 +- .../schedules/learning_rate_schedule_test.py | 12 +- keras/{ => src}/optimizers/sgd.py | 6 +- keras/{ => src}/optimizers/sgd_test.py | 8 +- keras/{ => src}/quantizers/__init__.py | 18 +- keras/{ => src}/quantizers/quantizers.py | 6 +- keras/{ => src}/quantizers/quantizers_test.py | 8 +- keras/src/random/__init__.py | 9 + keras/{ => src}/random/random.py | 4 +- keras/{ => src}/random/random_test.py | 20 +- keras/{ => src}/random/seed_generator.py | 12 +- keras/{ => src}/random/seed_generator_test.py | 8 +- keras/{ => src}/regularizers/__init__.py | 16 +- keras/{ => src}/regularizers/regularizers.py | 6 +- .../regularizers/regularizers_test.py | 8 +- keras/src/saving/__init__.py | 9 + keras/{ => src}/saving/object_registration.py | 4 +- .../saving/object_registration_test.py | 6 +- keras/{ => src}/saving/saving_api.py | 10 +- keras/{ => src}/saving/saving_api_test.py | 10 +- keras/{ => src}/saving/saving_lib.py | 30 +-- keras/{ => src}/saving/saving_lib_test.py | 8 +- keras/{ => src}/saving/serialization_lib.py | 14 +- .../saving/serialization_lib_test.py | 6 +- keras/src/testing/__init__.py | 5 + keras/{ => src}/testing/test_case.py | 30 +-- keras/{ => src}/testing/test_utils.py | 0 keras/{ => src}/testing/test_utils_test.py | 4 +- keras/{ => src}/trainers/__init__.py | 0 keras/{ => src}/trainers/compile_utils.py | 12 +- .../{ => src}/trainers/compile_utils_test.py | 14 +- .../trainers/data_adapters/__init__.py | 16 +- .../data_adapters/array_data_adapter.py | 14 +- .../data_adapters/array_data_adapter_test.py | 8 +- .../trainers/data_adapters/array_slicing.py | 24 +- .../trainers/data_adapters/data_adapter.py | 0 .../data_adapters/data_adapter_utils.py | 16 +- .../data_adapters/generator_data_adapter.py | 10 +- .../generator_data_adapter_test.py | 6 +- .../data_adapters/py_dataset_adapter.py | 8 +- .../data_adapters/py_dataset_adapter_test.py | 8 +- .../data_adapters/tf_dataset_adapter.py | 18 +- .../data_adapters/tf_dataset_adapter_test.py | 6 +- .../torch_data_loader_adapter.py | 8 +- .../torch_data_loader_adapter_test.py | 6 +- keras/{ => src}/trainers/epoch_iterator.py | 2 +- .../{ => src}/trainers/epoch_iterator_test.py | 8 +- keras/{ => src}/trainers/trainer.py | 26 +- keras/{ => src}/trainers/trainer_test.py | 38 +-- keras/src/tree/__init__.py | 10 + keras/{ => src}/tree/dmtree_impl.py | 
2 +- keras/{ => src}/tree/optree_impl.py | 2 +- keras/{ => src}/tree/tree_api.py | 10 +- keras/{ => src}/tree/tree_test.py | 6 +- keras/src/utils/__init__.py | 26 ++ keras/{ => src}/utils/argument_validation.py | 0 keras/{ => src}/utils/audio_dataset_utils.py | 8 +- .../utils/audio_dataset_utils_test.py | 6 +- keras/{ => src}/utils/backend_utils.py | 18 +- keras/{ => src}/utils/code_stats.py | 0 keras/{ => src}/utils/code_stats_test.py | 4 +- keras/{ => src}/utils/dataset_utils.py | 10 +- keras/{ => src}/utils/dataset_utils_test.py | 6 +- keras/{ => src}/utils/dtype_utils.py | 4 +- keras/{ => src}/utils/dtype_utils_test.py | 6 +- keras/{ => src}/utils/file_utils.py | 10 +- keras/{ => src}/utils/file_utils_test.py | 4 +- keras/{ => src}/utils/image_dataset_utils.py | 12 +- .../utils/image_dataset_utils_test.py | 10 +- keras/{ => src}/utils/image_utils.py | 4 +- keras/{ => src}/utils/io_utils.py | 4 +- keras/{ => src}/utils/io_utils_test.py | 4 +- keras/{ => src}/utils/jax_layer.py | 16 +- keras/{ => src}/utils/jax_layer_test.py | 24 +- keras/{ => src}/utils/jax_utils.py | 2 +- keras/{ => src}/utils/model_visualization.py | 14 +- keras/{ => src}/utils/module_utils.py | 0 keras/{ => src}/utils/naming.py | 4 +- keras/{ => src}/utils/naming_test.py | 4 +- keras/{ => src}/utils/numerical_utils.py | 6 +- keras/{ => src}/utils/numerical_utils_test.py | 6 +- keras/{ => src}/utils/progbar.py | 6 +- keras/{ => src}/utils/python_utils.py | 0 keras/{ => src}/utils/python_utils_test.py | 4 +- keras/{ => src}/utils/rng_utils.py | 6 +- keras/{ => src}/utils/rng_utils_test.py | 6 +- keras/{ => src}/utils/sequence_utils.py | 2 +- keras/{ => src}/utils/sequence_utils_test.py | 4 +- keras/{ => src}/utils/summary_utils.py | 12 +- keras/{ => src}/utils/summary_utils_test.py | 8 +- keras/{ => src}/utils/text_dataset_utils.py | 6 +- .../utils/text_dataset_utils_test.py | 4 +- keras/{ => src}/utils/tf_utils.py | 2 +- .../utils/timeseries_dataset_utils.py | 4 +- .../utils/timeseries_dataset_utils_test.py | 4 +- keras/{ => src}/utils/torch_utils.py | 14 +- keras/{ => src}/utils/torch_utils_test.py | 12 +- keras/{ => src}/utils/traceback_utils.py | 8 +- keras/{ => src}/utils/tracking.py | 12 +- keras/{ => src}/utils/tracking_test.py | 6 +- keras/{ => src}/version.py | 2 +- keras/testing/__init__.py | 5 - keras/tree/__init__.py | 10 - keras/utils/__init__.py | 24 -- pip_build.py | 138 +---------- setup.py | 5 +- shell/api_gen.sh | 12 + 754 files changed, 6889 insertions(+), 3479 deletions(-) create mode 100644 api_gen.py create mode 100644 keras/api/__init__.py create mode 100644 keras/api/_tf_keras/__init__.py create mode 100644 keras/api/_tf_keras/keras/__init__.py create mode 100644 keras/api/_tf_keras/keras/activations/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/convnext/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/densenet/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/efficientnet/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/efficientnet_v2/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/imagenet_utils/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/inception_resnet_v2/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/inception_v3/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/mobilenet/__init__.py create mode 100644 
keras/api/_tf_keras/keras/applications/mobilenet_v2/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/mobilenet_v3/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/nasnet/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/resnet/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/resnet50/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/resnet_v2/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/vgg16/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/vgg19/__init__.py create mode 100644 keras/api/_tf_keras/keras/applications/xception/__init__.py create mode 100644 keras/api/_tf_keras/keras/backend/__init__.py create mode 100644 keras/api/_tf_keras/keras/callbacks/__init__.py create mode 100644 keras/api/_tf_keras/keras/config/__init__.py create mode 100644 keras/api/_tf_keras/keras/constraints/__init__.py create mode 100644 keras/api/_tf_keras/keras/datasets/__init__.py create mode 100644 keras/api/_tf_keras/keras/datasets/boston_housing/__init__.py create mode 100644 keras/api/_tf_keras/keras/datasets/california_housing/__init__.py create mode 100644 keras/api/_tf_keras/keras/datasets/cifar10/__init__.py create mode 100644 keras/api/_tf_keras/keras/datasets/cifar100/__init__.py create mode 100644 keras/api/_tf_keras/keras/datasets/fashion_mnist/__init__.py create mode 100644 keras/api/_tf_keras/keras/datasets/imdb/__init__.py create mode 100644 keras/api/_tf_keras/keras/datasets/mnist/__init__.py create mode 100644 keras/api/_tf_keras/keras/datasets/reuters/__init__.py create mode 100644 keras/api/_tf_keras/keras/distribution/__init__.py create mode 100644 keras/api/_tf_keras/keras/dtype_policies/__init__.py create mode 100644 keras/api/_tf_keras/keras/export/__init__.py create mode 100644 keras/api/_tf_keras/keras/initializers/__init__.py create mode 100644 keras/api/_tf_keras/keras/layers/__init__.py create mode 100644 keras/api/_tf_keras/keras/legacy/__init__.py create mode 100644 keras/api/_tf_keras/keras/legacy/saving/__init__.py create mode 100644 keras/api/_tf_keras/keras/losses/__init__.py create mode 100644 keras/api/_tf_keras/keras/metrics/__init__.py create mode 100644 keras/api/_tf_keras/keras/mixed_precision/__init__.py create mode 100644 keras/api/_tf_keras/keras/models/__init__.py create mode 100644 keras/api/_tf_keras/keras/ops/__init__.py create mode 100644 keras/api/_tf_keras/keras/ops/image/__init__.py create mode 100644 keras/api/_tf_keras/keras/ops/linalg/__init__.py create mode 100644 keras/api/_tf_keras/keras/ops/nn/__init__.py create mode 100644 keras/api/_tf_keras/keras/ops/numpy/__init__.py create mode 100644 keras/api/_tf_keras/keras/optimizers/__init__.py create mode 100644 keras/api/_tf_keras/keras/optimizers/legacy/__init__.py create mode 100644 keras/api/_tf_keras/keras/optimizers/schedules/__init__.py create mode 100644 keras/api/_tf_keras/keras/preprocessing/__init__.py create mode 100644 keras/api/_tf_keras/keras/preprocessing/image/__init__.py create mode 100644 keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py create mode 100644 keras/api/_tf_keras/keras/quantizers/__init__.py create mode 100644 keras/api/_tf_keras/keras/random/__init__.py create mode 100644 keras/api/_tf_keras/keras/regularizers/__init__.py create mode 100644 keras/api/_tf_keras/keras/saving/__init__.py create mode 100644 keras/api/_tf_keras/keras/tree/__init__.py create mode 100644 keras/api/_tf_keras/keras/utils/__init__.py create mode 
100644 keras/api/_tf_keras/keras/utils/legacy/__init__.py create mode 100644 keras/api/activations/__init__.py create mode 100644 keras/api/applications/__init__.py create mode 100644 keras/api/applications/convnext/__init__.py create mode 100644 keras/api/applications/densenet/__init__.py create mode 100644 keras/api/applications/efficientnet/__init__.py create mode 100644 keras/api/applications/efficientnet_v2/__init__.py create mode 100644 keras/api/applications/imagenet_utils/__init__.py create mode 100644 keras/api/applications/inception_resnet_v2/__init__.py create mode 100644 keras/api/applications/inception_v3/__init__.py create mode 100644 keras/api/applications/mobilenet/__init__.py create mode 100644 keras/api/applications/mobilenet_v2/__init__.py create mode 100644 keras/api/applications/mobilenet_v3/__init__.py create mode 100644 keras/api/applications/nasnet/__init__.py create mode 100644 keras/api/applications/resnet/__init__.py create mode 100644 keras/api/applications/resnet50/__init__.py create mode 100644 keras/api/applications/resnet_v2/__init__.py create mode 100644 keras/api/applications/vgg16/__init__.py create mode 100644 keras/api/applications/vgg19/__init__.py create mode 100644 keras/api/applications/xception/__init__.py create mode 100644 keras/api/backend/__init__.py create mode 100644 keras/api/callbacks/__init__.py create mode 100644 keras/api/config/__init__.py create mode 100644 keras/api/constraints/__init__.py create mode 100644 keras/api/datasets/__init__.py create mode 100644 keras/api/datasets/boston_housing/__init__.py create mode 100644 keras/api/datasets/california_housing/__init__.py create mode 100644 keras/api/datasets/cifar10/__init__.py create mode 100644 keras/api/datasets/cifar100/__init__.py create mode 100644 keras/api/datasets/fashion_mnist/__init__.py create mode 100644 keras/api/datasets/imdb/__init__.py create mode 100644 keras/api/datasets/mnist/__init__.py create mode 100644 keras/api/datasets/reuters/__init__.py create mode 100644 keras/api/distribution/__init__.py create mode 100644 keras/api/dtype_policies/__init__.py create mode 100644 keras/api/export/__init__.py create mode 100644 keras/api/initializers/__init__.py create mode 100644 keras/api/layers/__init__.py create mode 100644 keras/api/legacy/__init__.py create mode 100644 keras/api/legacy/saving/__init__.py create mode 100644 keras/api/losses/__init__.py create mode 100644 keras/api/metrics/__init__.py create mode 100644 keras/api/mixed_precision/__init__.py create mode 100644 keras/api/models/__init__.py create mode 100644 keras/api/ops/__init__.py create mode 100644 keras/api/ops/image/__init__.py create mode 100644 keras/api/ops/linalg/__init__.py create mode 100644 keras/api/ops/nn/__init__.py create mode 100644 keras/api/ops/numpy/__init__.py create mode 100644 keras/api/optimizers/__init__.py create mode 100644 keras/api/optimizers/legacy/__init__.py create mode 100644 keras/api/optimizers/schedules/__init__.py create mode 100644 keras/api/preprocessing/__init__.py create mode 100644 keras/api/preprocessing/image/__init__.py create mode 100644 keras/api/preprocessing/sequence/__init__.py create mode 100644 keras/api/quantizers/__init__.py create mode 100644 keras/api/random/__init__.py create mode 100644 keras/api/regularizers/__init__.py create mode 100644 keras/api/saving/__init__.py create mode 100644 keras/api/tree/__init__.py create mode 100644 keras/api/utils/__init__.py create mode 100644 keras/api/utils/legacy/__init__.py delete mode 100644 
keras/backend/__init__.py delete mode 100644 keras/backend/common/__init__.py delete mode 100644 keras/backend/jax/__init__.py delete mode 100644 keras/backend/numpy/__init__.py delete mode 100644 keras/backend/tensorflow/__init__.py delete mode 100644 keras/backend/torch/__init__.py delete mode 100644 keras/backend/torch/optimizers/__init__.py delete mode 100644 keras/backend/torch/optimizers/torch_adamw.py delete mode 100644 keras/callbacks/__init__.py delete mode 100644 keras/datasets/__init__.py delete mode 100644 keras/distribution/__init__.py delete mode 100644 keras/export/__init__.py delete mode 100644 keras/layers/__init__.py delete mode 100644 keras/layers/activations/__init__.py delete mode 100644 keras/models/__init__.py delete mode 100644 keras/ops/__init__.py delete mode 100644 keras/optimizers/schedules/__init__.py delete mode 100644 keras/random/__init__.py delete mode 100644 keras/saving/__init__.py create mode 100644 keras/src/__init__.py rename keras/{ => src}/activations/__init__.py (68%) rename keras/{ => src}/activations/activations.py (99%) rename keras/{ => src}/activations/activations_test.py (99%) rename keras/{ => src}/api_export.py (100%) rename keras/{ => src}/applications/__init__.py (100%) rename keras/{ => src}/applications/applications_test.py (91%) rename keras/{ => src}/applications/convnext.py (98%) rename keras/{ => src}/applications/densenet.py (98%) rename keras/{ => src}/applications/efficientnet.py (98%) rename keras/{ => src}/applications/efficientnet_v2.py (99%) rename keras/{ => src}/applications/imagenet_utils.py (98%) rename keras/{ => src}/applications/imagenet_utils_test.py (98%) rename keras/{ => src}/applications/inception_resnet_v2.py (97%) rename keras/{ => src}/applications/inception_v3.py (98%) rename keras/{ => src}/applications/mobilenet.py (98%) rename keras/{ => src}/applications/mobilenet_v2.py (98%) rename keras/{ => src}/applications/mobilenet_v3.py (98%) rename keras/{ => src}/applications/nasnet.py (99%) rename keras/{ => src}/applications/resnet.py (98%) rename keras/{ => src}/applications/resnet_v2.py (97%) rename keras/{ => src}/applications/vgg16.py (96%) rename keras/{ => src}/applications/vgg19.py (97%) rename keras/{ => src}/applications/xception.py (97%) create mode 100644 keras/src/backend/__init__.py create mode 100644 keras/src/backend/common/__init__.py rename keras/{ => src}/backend/common/backend_utils.py (100%) rename keras/{ => src}/backend/common/backend_utils_test.py (96%) rename keras/{ => src}/backend/common/compute_output_spec_test.py (97%) rename keras/{ => src}/backend/common/dtypes.py (98%) rename keras/{ => src}/backend/common/dtypes_test.py (96%) rename keras/{ => src}/backend/common/global_state.py (95%) rename keras/{ => src}/backend/common/global_state_test.py (72%) rename keras/{ => src}/backend/common/keras_tensor.py (85%) rename keras/{ => src}/backend/common/keras_tensor_test.py (88%) rename keras/{ => src}/backend/common/name_scope.py (98%) rename keras/{ => src}/backend/common/name_scope_test.py (92%) rename keras/{ => src}/backend/common/stateless_scope.py (91%) rename keras/{ => src}/backend/common/stateless_scope_test.py (92%) rename keras/{ => src}/backend/common/variables.py (96%) rename keras/{ => src}/backend/common/variables_test.py (98%) rename keras/{ => src}/backend/config.py (99%) rename keras/{ => src}/backend/exports.py (86%) create mode 100644 keras/src/backend/jax/__init__.py rename keras/{ => src}/backend/jax/core.py (97%) rename keras/{ => src}/backend/jax/distribution_lib.py 
(99%) rename keras/{ => src}/backend/jax/distribution_lib_test.py (98%) rename keras/{ => src}/backend/jax/image.py (99%) rename keras/{ => src}/backend/jax/layer.py (100%) rename keras/{ => src}/backend/jax/linalg.py (86%) rename keras/{ => src}/backend/jax/math.py (96%) rename keras/{ => src}/backend/jax/nn.py (98%) rename keras/{ => src}/backend/jax/numpy.py (98%) rename keras/{ => src}/backend/jax/optimizer.py (98%) rename keras/{ => src}/backend/jax/random.py (93%) rename keras/{ => src}/backend/jax/rnn.py (98%) rename keras/{ => src}/backend/jax/sparse.py (99%) rename keras/{ => src}/backend/jax/trainer.py (98%) create mode 100644 keras/src/backend/numpy/__init__.py rename keras/{ => src}/backend/numpy/core.py (95%) rename keras/{ => src}/backend/numpy/image.py (98%) rename keras/{ => src}/backend/numpy/layer.py (100%) rename keras/{ => src}/backend/numpy/linalg.py (92%) rename keras/{ => src}/backend/numpy/math.py (97%) rename keras/{ => src}/backend/numpy/nn.py (97%) rename keras/{ => src}/backend/numpy/numpy.py (98%) rename keras/{ => src}/backend/numpy/random.py (93%) rename keras/{ => src}/backend/numpy/rnn.py (99%) rename keras/{ => src}/backend/numpy/trainer.py (95%) create mode 100644 keras/src/backend/tensorflow/__init__.py rename keras/{ => src}/backend/tensorflow/core.py (93%) rename keras/{ => src}/backend/tensorflow/distribute_test.py (95%) rename keras/{ => src}/backend/tensorflow/distribution_lib.py (100%) rename keras/{ => src}/backend/tensorflow/image.py (99%) rename keras/{ => src}/backend/tensorflow/layer.py (92%) rename keras/{ => src}/backend/tensorflow/linalg.py (96%) rename keras/{ => src}/backend/tensorflow/math.py (97%) rename keras/{ => src}/backend/tensorflow/name_scope_test.py (94%) rename keras/{ => src}/backend/tensorflow/nn.py (98%) rename keras/{ => src}/backend/tensorflow/numpy.py (98%) rename keras/{ => src}/backend/tensorflow/optimizer.py (97%) rename keras/{ => src}/backend/tensorflow/optimizer_distribute_test.py (98%) rename keras/{ => src}/backend/tensorflow/random.py (95%) rename keras/{ => src}/backend/tensorflow/rnn.py (99%) rename keras/{ => src}/backend/tensorflow/saved_model_test.py (98%) rename keras/{ => src}/backend/tensorflow/sparse.py (100%) rename keras/{ => src}/backend/tensorflow/tensorboard.py (100%) rename keras/{ => src}/backend/tensorflow/trackable.py (98%) rename keras/{ => src}/backend/tensorflow/trainer.py (98%) rename keras/{ => src}/backend/tests/compute_output_spec_test.py (97%) rename keras/{ => src}/backend/tests/device_scope_test.py (98%) create mode 100644 keras/src/backend/torch/__init__.py rename keras/{ => src}/backend/torch/core.py (97%) rename keras/{ => src}/backend/torch/image.py (99%) rename keras/{ => src}/backend/torch/layer.py (87%) rename keras/{ => src}/backend/torch/linalg.py (86%) rename keras/{ => src}/backend/torch/math.py (97%) rename keras/{ => src}/backend/torch/nn.py (97%) rename keras/{ => src}/backend/torch/numpy.py (98%) create mode 100644 keras/src/backend/torch/optimizers/__init__.py rename keras/{ => src}/backend/torch/optimizers/torch_adadelta.py (92%) rename keras/{ => src}/backend/torch/optimizers/torch_adagrad.py (87%) rename keras/{ => src}/backend/torch/optimizers/torch_adam.py (93%) rename keras/{ => src}/backend/torch/optimizers/torch_adamax.py (91%) create mode 100644 keras/src/backend/torch/optimizers/torch_adamw.py rename keras/{ => src}/backend/torch/optimizers/torch_lion.py (87%) rename keras/{ => src}/backend/torch/optimizers/torch_nadam.py (92%) rename keras/{ => 
src}/backend/torch/optimizers/torch_optimizer.py (58%) rename keras/{ => src}/backend/torch/optimizers/torch_parallel_optimizer.py (87%) rename keras/{ => src}/backend/torch/optimizers/torch_rmsprop.py (93%) rename keras/{ => src}/backend/torch/optimizers/torch_sgd.py (91%) rename keras/{ => src}/backend/torch/random.py (95%) rename keras/{ => src}/backend/torch/rnn.py (99%) rename keras/{ => src}/backend/torch/trainer.py (97%) create mode 100644 keras/src/callbacks/__init__.py rename keras/{ => src}/callbacks/backup_and_restore.py (98%) rename keras/{ => src}/callbacks/backup_and_restore_test.py (97%) rename keras/{ => src}/callbacks/callback.py (99%) rename keras/{ => src}/callbacks/callback_list.py (95%) rename keras/{ => src}/callbacks/callback_test.py (89%) rename keras/{ => src}/callbacks/csv_logger.py (95%) rename keras/{ => src}/callbacks/csv_logger_test.py (96%) rename keras/{ => src}/callbacks/early_stopping.py (97%) rename keras/{ => src}/callbacks/early_stopping_test.py (98%) rename keras/{ => src}/callbacks/history.py (92%) rename keras/{ => src}/callbacks/lambda_callback.py (97%) rename keras/{ => src}/callbacks/lambda_callback_test.py (96%) rename keras/{ => src}/callbacks/learning_rate_scheduler.py (94%) rename keras/{ => src}/callbacks/learning_rate_scheduler_test.py (92%) rename keras/{ => src}/callbacks/model_checkpoint.py (98%) rename keras/{ => src}/callbacks/model_checkpoint_test.py (98%) rename keras/{ => src}/callbacks/progbar_logger.py (94%) rename keras/{ => src}/callbacks/reduce_lr_on_plateau.py (96%) rename keras/{ => src}/callbacks/reduce_lr_on_plateau_test.py (93%) rename keras/{ => src}/callbacks/remote_monitor.py (96%) rename keras/{ => src}/callbacks/remote_monitor_test.py (94%) rename keras/{ => src}/callbacks/swap_ema_weights.py (97%) rename keras/{ => src}/callbacks/swap_ema_weights_test.py (94%) rename keras/{ => src}/callbacks/tensorboard.py (98%) rename keras/{ => src}/callbacks/tensorboard_test.py (98%) rename keras/{ => src}/callbacks/terminate_on_nan.py (80%) rename keras/{ => src}/callbacks/terminate_on_nan_test.py (87%) rename keras/{ => src}/constraints/__init__.py (75%) rename keras/{ => src}/constraints/constraints.py (98%) rename keras/{ => src}/constraints/constraints_test.py (97%) create mode 100644 keras/src/datasets/__init__.py rename keras/{ => src}/datasets/boston_housing.py (96%) rename keras/{ => src}/datasets/california_housing.py (97%) rename keras/{ => src}/datasets/cifar.py (100%) rename keras/{ => src}/datasets/cifar10.py (94%) rename keras/{ => src}/datasets/cifar100.py (94%) rename keras/{ => src}/datasets/fashion_mnist.py (96%) rename keras/{ => src}/datasets/imdb.py (97%) rename keras/{ => src}/datasets/mnist.py (96%) rename keras/{ => src}/datasets/reuters.py (97%) create mode 100644 keras/src/distribution/__init__.py rename keras/{ => src}/distribution/distribution_lib.py (98%) rename keras/{ => src}/distribution/distribution_lib_test.py (99%) rename keras/{ => src}/dtype_policies/__init__.py (64%) rename keras/{ => src}/dtype_policies/dtype_policy.py (98%) rename keras/{ => src}/dtype_policies/dtype_policy_test.py (96%) create mode 100644 keras/src/export/__init__.py rename keras/{ => src}/export/export_lib.py (98%) rename keras/{ => src}/export/export_lib_test.py (98%) rename keras/{ => src}/initializers/__init__.py (69%) rename keras/{ => src}/initializers/constant_initializers.py (95%) rename keras/{ => src}/initializers/constant_initializers_test.py (95%) rename keras/{ => src}/initializers/initializer.py (98%) rename 
keras/{ => src}/initializers/random_initializers.py (99%) rename keras/{ => src}/initializers/random_initializers_test.py (98%) create mode 100644 keras/src/layers/__init__.py create mode 100644 keras/src/layers/activations/__init__.py rename keras/{ => src}/layers/activations/activation.py (90%) rename keras/{ => src}/layers/activations/activation_test.py (91%) rename keras/{ => src}/layers/activations/elu.py (85%) rename keras/{ => src}/layers/activations/elu_test.py (91%) rename keras/{ => src}/layers/activations/leaky_relu.py (93%) rename keras/{ => src}/layers/activations/leaky_relu_test.py (93%) rename keras/{ => src}/layers/activations/prelu.py (92%) rename keras/{ => src}/layers/activations/prelu_test.py (93%) rename keras/{ => src}/layers/activations/relu.py (95%) rename keras/{ => src}/layers/activations/relu_test.py (97%) rename keras/{ => src}/layers/activations/softmax.py (93%) rename keras/{ => src}/layers/activations/softmax_test.py (95%) rename keras/{ => src}/layers/attention/__init__.py (100%) rename keras/{ => src}/layers/attention/additive_attention.py (96%) rename keras/{ => src}/layers/attention/additive_attention_test.py (98%) rename keras/{ => src}/layers/attention/attention.py (98%) rename keras/{ => src}/layers/attention/attention_test.py (99%) rename keras/{ => src}/layers/attention/grouped_query_attention.py (97%) rename keras/{ => src}/layers/attention/grouped_query_attention_test.py (98%) rename keras/{ => src}/layers/attention/multi_head_attention.py (98%) rename keras/{ => src}/layers/attention/multi_head_attention_test.py (98%) rename keras/{ => src}/layers/convolutional/__init__.py (100%) rename keras/{ => src}/layers/convolutional/base_conv.py (97%) rename keras/{ => src}/layers/convolutional/base_conv_transpose.py (95%) rename keras/{ => src}/layers/convolutional/base_depthwise_conv.py (95%) rename keras/{ => src}/layers/convolutional/base_separable_conv.py (95%) rename keras/{ => src}/layers/convolutional/conv1d.py (98%) rename keras/{ => src}/layers/convolutional/conv1d_transpose.py (97%) rename keras/{ => src}/layers/convolutional/conv2d.py (98%) rename keras/{ => src}/layers/convolutional/conv2d_transpose.py (97%) rename keras/{ => src}/layers/convolutional/conv3d.py (98%) rename keras/{ => src}/layers/convolutional/conv3d_transpose.py (97%) rename keras/{ => src}/layers/convolutional/conv_test.py (99%) rename keras/{ => src}/layers/convolutional/conv_transpose_test.py (99%) rename keras/{ => src}/layers/convolutional/depthwise_conv1d.py (97%) rename keras/{ => src}/layers/convolutional/depthwise_conv2d.py (97%) rename keras/{ => src}/layers/convolutional/depthwise_conv_test.py (99%) rename keras/{ => src}/layers/convolutional/separable_conv1d.py (98%) rename keras/{ => src}/layers/convolutional/separable_conv2d.py (98%) rename keras/{ => src}/layers/convolutional/separable_conv_test.py (96%) rename keras/{ => src}/layers/core/__init__.py (100%) rename keras/{ => src}/layers/core/dense.py (98%) rename keras/{ => src}/layers/core/dense_test.py (98%) rename keras/{ => src}/layers/core/einsum_dense.py (99%) rename keras/{ => src}/layers/core/einsum_dense_test.py (98%) rename keras/{ => src}/layers/core/embedding.py (98%) rename keras/{ => src}/layers/core/embedding_test.py (98%) rename keras/{ => src}/layers/core/identity.py (80%) rename keras/{ => src}/layers/core/identity_test.py (91%) rename keras/{ => src}/layers/core/input_layer.py (97%) rename keras/{ => src}/layers/core/input_layer_test.py (96%) rename keras/{ => src}/layers/core/lambda_layer.py 
(97%) rename keras/{ => src}/layers/core/lambda_layer_test.py (97%) rename keras/{ => src}/layers/core/masking.py (94%) rename keras/{ => src}/layers/core/masking_test.py (94%) rename keras/{ => src}/layers/core/wrapper.py (91%) rename keras/{ => src}/layers/core/wrapper_test.py (96%) rename keras/{ => src}/layers/input_spec.py (98%) rename keras/{ => src}/layers/layer.py (98%) rename keras/{ => src}/layers/layer_test.py (99%) rename keras/{ => src}/layers/merging/__init__.py (100%) rename keras/{ => src}/layers/merging/add.py (94%) rename keras/{ => src}/layers/merging/average.py (94%) rename keras/{ => src}/layers/merging/base_merge.py (98%) rename keras/{ => src}/layers/merging/concatenate.py (98%) rename keras/{ => src}/layers/merging/dot.py (98%) rename keras/{ => src}/layers/merging/maximum.py (94%) rename keras/{ => src}/layers/merging/merging_test.py (98%) rename keras/{ => src}/layers/merging/minimum.py (94%) rename keras/{ => src}/layers/merging/multiply.py (94%) rename keras/{ => src}/layers/merging/subtract.py (95%) rename keras/{ => src}/layers/normalization/__init__.py (100%) rename keras/{ => src}/layers/normalization/batch_normalization.py (97%) rename keras/{ => src}/layers/normalization/batch_normalization_test.py (97%) rename keras/{ => src}/layers/normalization/group_normalization.py (96%) rename keras/{ => src}/layers/normalization/group_normalization_test.py (97%) rename keras/{ => src}/layers/normalization/layer_normalization.py (97%) rename keras/{ => src}/layers/normalization/layer_normalization_test.py (96%) rename keras/{ => src}/layers/normalization/spectral_normalization.py (94%) rename keras/{ => src}/layers/normalization/spectral_normalization_test.py (94%) rename keras/{ => src}/layers/normalization/unit_normalization.py (93%) rename keras/{ => src}/layers/normalization/unit_normalization_test.py (95%) rename keras/{ => src}/layers/pooling/__init__.py (100%) rename keras/{ => src}/layers/pooling/average_pooling1d.py (96%) rename keras/{ => src}/layers/pooling/average_pooling2d.py (97%) rename keras/{ => src}/layers/pooling/average_pooling3d.py (96%) rename keras/{ => src}/layers/pooling/average_pooling_test.py (99%) rename keras/{ => src}/layers/pooling/base_global_pooling.py (91%) rename keras/{ => src}/layers/pooling/base_pooling.py (89%) rename keras/{ => src}/layers/pooling/global_average_pooling1d.py (94%) rename keras/{ => src}/layers/pooling/global_average_pooling2d.py (94%) rename keras/{ => src}/layers/pooling/global_average_pooling3d.py (94%) rename keras/{ => src}/layers/pooling/global_average_pooling_test.py (99%) rename keras/{ => src}/layers/pooling/global_max_pooling1d.py (93%) rename keras/{ => src}/layers/pooling/global_max_pooling2d.py (94%) rename keras/{ => src}/layers/pooling/global_max_pooling3d.py (94%) rename keras/{ => src}/layers/pooling/global_max_pooling_test.py (98%) rename keras/{ => src}/layers/pooling/max_pooling1d.py (96%) rename keras/{ => src}/layers/pooling/max_pooling2d.py (97%) rename keras/{ => src}/layers/pooling/max_pooling3d.py (96%) rename keras/{ => src}/layers/pooling/max_pooling_test.py (99%) rename keras/{ => src}/layers/preprocessing/__init__.py (100%) rename keras/{ => src}/layers/preprocessing/audio_preprocessing.py (99%) rename keras/{ => src}/layers/preprocessing/audio_preprocessing_test.py (98%) rename keras/{ => src}/layers/preprocessing/category_encoding.py (97%) rename keras/{ => src}/layers/preprocessing/category_encoding_test.py (99%) rename keras/{ => src}/layers/preprocessing/center_crop.py (96%) 
rename keras/{ => src}/layers/preprocessing/center_crop_test.py (98%) rename keras/{ => src}/layers/preprocessing/discretization.py (97%) rename keras/{ => src}/layers/preprocessing/discretization_test.py (96%) rename keras/{ => src}/layers/preprocessing/feature_space.py (98%) rename keras/{ => src}/layers/preprocessing/feature_space_test.py (98%) rename keras/{ => src}/layers/preprocessing/hashed_crossing.py (96%) rename keras/{ => src}/layers/preprocessing/hashed_crossing_test.py (98%) rename keras/{ => src}/layers/preprocessing/hashing.py (97%) rename keras/{ => src}/layers/preprocessing/hashing_test.py (99%) rename keras/{ => src}/layers/preprocessing/index_lookup.py (99%) rename keras/{ => src}/layers/preprocessing/index_lookup_test.py (99%) rename keras/{ => src}/layers/preprocessing/integer_lookup.py (98%) rename keras/{ => src}/layers/preprocessing/integer_lookup_test.py (97%) rename keras/{ => src}/layers/preprocessing/normalization.py (98%) rename keras/{ => src}/layers/preprocessing/normalization_test.py (97%) rename keras/{ => src}/layers/preprocessing/random_brightness.py (97%) rename keras/{ => src}/layers/preprocessing/random_brightness_test.py (97%) rename keras/{ => src}/layers/preprocessing/random_contrast.py (95%) rename keras/{ => src}/layers/preprocessing/random_contrast_test.py (94%) rename keras/{ => src}/layers/preprocessing/random_crop.py (96%) rename keras/{ => src}/layers/preprocessing/random_crop_test.py (97%) rename keras/{ => src}/layers/preprocessing/random_flip.py (95%) rename keras/{ => src}/layers/preprocessing/random_flip_test.py (97%) rename keras/{ => src}/layers/preprocessing/random_rotation.py (97%) rename keras/{ => src}/layers/preprocessing/random_rotation_test.py (96%) rename keras/{ => src}/layers/preprocessing/random_translation.py (98%) rename keras/{ => src}/layers/preprocessing/random_translation_test.py (99%) rename keras/{ => src}/layers/preprocessing/random_zoom.py (98%) rename keras/{ => src}/layers/preprocessing/random_zoom_test.py (97%) rename keras/{ => src}/layers/preprocessing/rescaling.py (93%) rename keras/{ => src}/layers/preprocessing/rescaling_test.py (97%) rename keras/{ => src}/layers/preprocessing/resizing.py (96%) rename keras/{ => src}/layers/preprocessing/resizing_test.py (98%) rename keras/{ => src}/layers/preprocessing/string_lookup.py (98%) rename keras/{ => src}/layers/preprocessing/string_lookup_test.py (95%) rename keras/{ => src}/layers/preprocessing/text_vectorization.py (98%) rename keras/{ => src}/layers/preprocessing/text_vectorization_test.py (96%) rename keras/{ => src}/layers/preprocessing/tf_data_layer.py (89%) rename keras/{ => src}/layers/regularization/__init__.py (100%) rename keras/{ => src}/layers/regularization/activity_regularization.py (89%) rename keras/{ => src}/layers/regularization/activity_regularization_test.py (92%) rename keras/{ => src}/layers/regularization/alpha_dropout.py (96%) rename keras/{ => src}/layers/regularization/alpha_dropout_test.py (95%) rename keras/{ => src}/layers/regularization/dropout.py (96%) rename keras/{ => src}/layers/regularization/dropout_test.py (95%) rename keras/{ => src}/layers/regularization/gaussian_dropout.py (93%) rename keras/{ => src}/layers/regularization/gaussian_dropout_test.py (91%) rename keras/{ => src}/layers/regularization/gaussian_noise.py (93%) rename keras/{ => src}/layers/regularization/gaussian_noise_test.py (91%) rename keras/{ => src}/layers/regularization/spatial_dropout.py (97%) rename keras/{ => 
src}/layers/regularization/spatial_dropout_test.py (97%) rename keras/{ => src}/layers/reshaping/__init__.py (100%) rename keras/{ => src}/layers/reshaping/cropping1d.py (93%) rename keras/{ => src}/layers/reshaping/cropping1d_test.py (97%) rename keras/{ => src}/layers/reshaping/cropping2d.py (97%) rename keras/{ => src}/layers/reshaping/cropping2d_test.py (97%) rename keras/{ => src}/layers/reshaping/cropping3d.py (98%) rename keras/{ => src}/layers/reshaping/cropping3d_test.py (98%) rename keras/{ => src}/layers/reshaping/flatten.py (91%) rename keras/{ => src}/layers/reshaping/flatten_test.py (97%) rename keras/{ => src}/layers/reshaping/permute.py (89%) rename keras/{ => src}/layers/reshaping/permute_test.py (95%) rename keras/{ => src}/layers/reshaping/repeat_vector.py (87%) rename keras/{ => src}/layers/reshaping/repeat_vector_test.py (94%) rename keras/{ => src}/layers/reshaping/reshape.py (90%) rename keras/{ => src}/layers/reshaping/reshape_test.py (96%) rename keras/{ => src}/layers/reshaping/up_sampling1d.py (89%) rename keras/{ => src}/layers/reshaping/up_sampling1d_test.py (94%) rename keras/{ => src}/layers/reshaping/up_sampling2d.py (95%) rename keras/{ => src}/layers/reshaping/up_sampling2d_test.py (98%) rename keras/{ => src}/layers/reshaping/up_sampling3d.py (95%) rename keras/{ => src}/layers/reshaping/up_sampling3d_test.py (98%) rename keras/{ => src}/layers/reshaping/zero_padding1d.py (89%) rename keras/{ => src}/layers/reshaping/zero_padding1d_test.py (96%) rename keras/{ => src}/layers/reshaping/zero_padding2d.py (94%) rename keras/{ => src}/layers/reshaping/zero_padding2d_test.py (97%) rename keras/{ => src}/layers/reshaping/zero_padding3d.py (95%) rename keras/{ => src}/layers/reshaping/zero_padding3d_test.py (97%) rename keras/{ => src}/layers/rnn/__init__.py (100%) rename keras/{ => src}/layers/rnn/bidirectional.py (98%) rename keras/{ => src}/layers/rnn/bidirectional_test.py (98%) rename keras/{ => src}/layers/rnn/conv_lstm.py (98%) rename keras/{ => src}/layers/rnn/conv_lstm1d.py (98%) rename keras/{ => src}/layers/rnn/conv_lstm1d_test.py (95%) rename keras/{ => src}/layers/rnn/conv_lstm2d.py (98%) rename keras/{ => src}/layers/rnn/conv_lstm2d_test.py (96%) rename keras/{ => src}/layers/rnn/conv_lstm3d.py (98%) rename keras/{ => src}/layers/rnn/conv_lstm3d_test.py (96%) rename keras/{ => src}/layers/rnn/conv_lstm_test.py (91%) rename keras/{ => src}/layers/rnn/dropout_rnn_cell.py (97%) rename keras/{ => src}/layers/rnn/dropout_rnn_cell_test.py (94%) rename keras/{ => src}/layers/rnn/gru.py (98%) rename keras/{ => src}/layers/rnn/gru_test.py (99%) rename keras/{ => src}/layers/rnn/lstm.py (98%) rename keras/{ => src}/layers/rnn/lstm_test.py (99%) rename keras/{ => src}/layers/rnn/rnn.py (97%) rename keras/{ => src}/layers/rnn/rnn_test.py (99%) rename keras/{ => src}/layers/rnn/simple_rnn.py (97%) rename keras/{ => src}/layers/rnn/simple_rnn_test.py (98%) rename keras/{ => src}/layers/rnn/stacked_rnn_cells.py (96%) rename keras/{ => src}/layers/rnn/stacked_rnn_cells_test.py (98%) rename keras/{ => src}/layers/rnn/time_distributed.py (95%) rename keras/{ => src}/layers/rnn/time_distributed_test.py (94%) rename keras/{ => src}/legacy/__init__.py (100%) rename keras/{ => src}/legacy/backend.py (99%) rename keras/{ => src}/legacy/layers.py (97%) rename keras/{ => src}/legacy/losses.py (91%) rename keras/{ => src}/legacy/preprocessing/__init__.py (100%) rename keras/{ => src}/legacy/preprocessing/image.py (99%) rename keras/{ => src}/legacy/preprocessing/sequence.py 
(98%) rename keras/{ => src}/legacy/preprocessing/text.py (99%) rename keras/{ => src}/legacy/saving/__init__.py (100%) rename keras/{ => src}/legacy/saving/json_utils.py (97%) rename keras/{ => src}/legacy/saving/json_utils_test.py (96%) rename keras/{ => src}/legacy/saving/legacy_h5_format.py (98%) rename keras/{ => src}/legacy/saving/legacy_h5_format_test.py (98%) rename keras/{ => src}/legacy/saving/saving_options.py (89%) rename keras/{ => src}/legacy/saving/saving_utils.py (95%) rename keras/{ => src}/legacy/saving/serialization.py (99%) rename keras/{ => src}/losses/__init__.py (64%) rename keras/{ => src}/losses/loss.py (97%) rename keras/{ => src}/losses/loss_test.py (97%) rename keras/{ => src}/losses/losses.py (99%) rename keras/{ => src}/losses/losses_test.py (99%) rename keras/{ => src}/metrics/__init__.py (59%) rename keras/{ => src}/metrics/accuracy_metrics.py (98%) rename keras/{ => src}/metrics/accuracy_metrics_test.py (99%) rename keras/{ => src}/metrics/confusion_metrics.py (99%) rename keras/{ => src}/metrics/confusion_metrics_test.py (99%) rename keras/{ => src}/metrics/f_score_metrics.py (98%) rename keras/{ => src}/metrics/f_score_metrics_test.py (99%) rename keras/{ => src}/metrics/hinge_metrics.py (92%) rename keras/{ => src}/metrics/hinge_metrics_test.py (98%) rename keras/{ => src}/metrics/iou_metrics.py (99%) rename keras/{ => src}/metrics/iou_metrics_test.py (99%) rename keras/{ => src}/metrics/metric.py (97%) rename keras/{ => src}/metrics/metric_test.py (96%) rename keras/{ => src}/metrics/metrics_utils.py (99%) rename keras/{ => src}/metrics/probabilistic_metrics.py (96%) rename keras/{ => src}/metrics/probabilistic_metrics_test.py (99%) rename keras/{ => src}/metrics/reduction_metrics.py (96%) rename keras/{ => src}/metrics/reduction_metrics_test.py (97%) rename keras/{ => src}/metrics/regression_metrics.py (97%) rename keras/{ => src}/metrics/regression_metrics_test.py (99%) create mode 100644 keras/src/models/__init__.py rename keras/{ => src}/models/cloning.py (96%) rename keras/{ => src}/models/cloning_test.py (96%) rename keras/{ => src}/models/functional.py (97%) rename keras/{ => src}/models/functional_test.py (98%) rename keras/{ => src}/models/model.py (94%) rename keras/{ => src}/models/model_test.py (98%) rename keras/{ => src}/models/sequential.py (96%) rename keras/{ => src}/models/sequential_test.py (97%) rename keras/{ => src}/models/variable_mapping.py (91%) rename keras/{ => src}/models/variable_mapping_test.py (94%) create mode 100644 keras/src/ops/__init__.py rename keras/{ => src}/ops/core.py (98%) rename keras/{ => src}/ops/core_test.py (98%) rename keras/{ => src}/ops/function.py (98%) rename keras/{ => src}/ops/function_test.py (94%) rename keras/{ => src}/ops/image.py (99%) rename keras/{ => src}/ops/image_test.py (99%) rename keras/{ => src}/ops/linalg.py (98%) rename keras/{ => src}/ops/linalg_test.py (98%) rename keras/{ => src}/ops/math.py (99%) rename keras/{ => src}/ops/math_test.py (99%) rename keras/{ => src}/ops/nn.py (99%) rename keras/{ => src}/ops/nn_test.py (98%) rename keras/{ => src}/ops/node.py (97%) rename keras/{ => src}/ops/node_test.py (93%) rename keras/{ => src}/ops/numpy.py (99%) rename keras/{ => src}/ops/numpy_test.py (99%) rename keras/{ => src}/ops/operation.py (95%) rename keras/{ => src}/ops/operation_test.py (96%) rename keras/{ => src}/ops/operation_utils.py (98%) rename keras/{ => src}/ops/operation_utils_test.py (97%) rename keras/{ => src}/ops/symbolic_arguments.py (95%) rename keras/{ => 
src}/ops/symbolic_arguments_test.py (95%) rename keras/{ => src}/optimizers/__init__.py (81%) rename keras/{ => src}/optimizers/adadelta.py (97%) rename keras/{ => src}/optimizers/adadelta_test.py (95%) rename keras/{ => src}/optimizers/adafactor.py (98%) rename keras/{ => src}/optimizers/adafactor_test.py (97%) rename keras/{ => src}/optimizers/adagrad.py (95%) rename keras/{ => src}/optimizers/adagrad_test.py (95%) rename keras/{ => src}/optimizers/adam.py (97%) rename keras/{ => src}/optimizers/adam_test.py (96%) rename keras/{ => src}/optimizers/adamax.py (97%) rename keras/{ => src}/optimizers/adamax_test.py (95%) rename keras/{ => src}/optimizers/adamw.py (96%) rename keras/{ => src}/optimizers/adamw_test.py (95%) rename keras/{ => src}/optimizers/base_optimizer.py (99%) rename keras/{ => src}/optimizers/ftrl.py (98%) rename keras/{ => src}/optimizers/ftrl_test.py (96%) rename keras/{ => src}/optimizers/lion.py (97%) rename keras/{ => src}/optimizers/lion_test.py (95%) rename keras/{ => src}/optimizers/loss_scale_optimizer.py (97%) rename keras/{ => src}/optimizers/loss_scale_optimizer_test.py (95%) rename keras/{ => src}/optimizers/nadam.py (97%) rename keras/{ => src}/optimizers/nadam_test.py (96%) rename keras/{ => src}/optimizers/optimizer.py (61%) rename keras/{ => src}/optimizers/optimizer_sparse_test.py (98%) rename keras/{ => src}/optimizers/optimizer_test.py (98%) rename keras/{ => src}/optimizers/rmsprop.py (98%) rename keras/{ => src}/optimizers/rmsprop_test.py (95%) create mode 100644 keras/src/optimizers/schedules/__init__.py rename keras/{ => src}/optimizers/schedules/learning_rate_schedule.py (99%) rename keras/{ => src}/optimizers/schedules/learning_rate_schedule_test.py (98%) rename keras/{ => src}/optimizers/sgd.py (97%) rename keras/{ => src}/optimizers/sgd_test.py (96%) rename keras/{ => src}/quantizers/__init__.py (70%) rename keras/{ => src}/quantizers/quantizers.py (97%) rename keras/{ => src}/quantizers/quantizers_test.py (96%) create mode 100644 keras/src/random/__init__.py rename keras/{ => src}/random/random.py (99%) rename keras/{ => src}/random/random_test.py (97%) rename keras/{ => src}/random/seed_generator.py (94%) rename keras/{ => src}/random/seed_generator_test.py (95%) rename keras/{ => src}/regularizers/__init__.py (75%) rename keras/{ => src}/regularizers/regularizers.py (98%) rename keras/{ => src}/regularizers/regularizers_test.py (97%) create mode 100644 keras/src/saving/__init__.py rename keras/{ => src}/saving/object_registration.py (98%) rename keras/{ => src}/saving/object_registration_test.py (97%) rename keras/{ => src}/saving/saving_api.py (97%) rename keras/{ => src}/saving/saving_api_test.py (97%) rename keras/{ => src}/saving/saving_lib.py (97%) rename keras/{ => src}/saving/saving_lib_test.py (99%) rename keras/{ => src}/saving/serialization_lib.py (98%) rename keras/{ => src}/saving/serialization_lib_test.py (99%) create mode 100644 keras/src/testing/__init__.py rename keras/{ => src}/testing/test_case.py (97%) rename keras/{ => src}/testing/test_utils.py (100%) rename keras/{ => src}/testing/test_utils_test.py (99%) rename keras/{ => src}/trainers/__init__.py (100%) rename keras/{ => src}/trainers/compile_utils.py (99%) rename keras/{ => src}/trainers/compile_utils_test.py (97%) rename keras/{ => src}/trainers/data_adapters/__init__.py (89%) rename keras/{ => src}/trainers/data_adapters/array_data_adapter.py (97%) rename keras/{ => src}/trainers/data_adapters/array_data_adapter_test.py (98%) rename keras/{ => 
src}/trainers/data_adapters/array_slicing.py (95%) rename keras/{ => src}/trainers/data_adapters/data_adapter.py (100%) rename keras/{ => src}/trainers/data_adapters/data_adapter_utils.py (95%) rename keras/{ => src}/trainers/data_adapters/generator_data_adapter.py (89%) rename keras/{ => src}/trainers/data_adapters/generator_data_adapter_test.py (97%) rename keras/{ => src}/trainers/data_adapters/py_dataset_adapter.py (98%) rename keras/{ => src}/trainers/data_adapters/py_dataset_adapter_test.py (97%) rename keras/{ => src}/trainers/data_adapters/tf_dataset_adapter.py (89%) rename keras/{ => src}/trainers/data_adapters/tf_dataset_adapter_test.py (98%) rename keras/{ => src}/trainers/data_adapters/torch_data_loader_adapter.py (91%) rename keras/{ => src}/trainers/data_adapters/torch_data_loader_adapter_test.py (97%) rename keras/{ => src}/trainers/epoch_iterator.py (98%) rename keras/{ => src}/trainers/epoch_iterator_test.py (97%) rename keras/{ => src}/trainers/trainer.py (98%) rename keras/{ => src}/trainers/trainer_test.py (98%) create mode 100644 keras/src/tree/__init__.py rename keras/{ => src}/tree/dmtree_impl.py (99%) rename keras/{ => src}/tree/optree_impl.py (99%) rename keras/{ => src}/tree/tree_api.py (97%) rename keras/{ => src}/tree/tree_test.py (99%) create mode 100644 keras/src/utils/__init__.py rename keras/{ => src}/utils/argument_validation.py (100%) rename keras/{ => src}/utils/audio_dataset_utils.py (98%) rename keras/{ => src}/utils/audio_dataset_utils_test.py (99%) rename keras/{ => src}/utils/backend_utils.py (86%) rename keras/{ => src}/utils/code_stats.py (100%) rename keras/{ => src}/utils/code_stats_test.py (98%) rename keras/{ => src}/utils/dataset_utils.py (99%) rename keras/{ => src}/utils/dataset_utils_test.py (98%) rename keras/{ => src}/utils/dtype_utils.py (96%) rename keras/{ => src}/utils/dtype_utils_test.py (97%) rename keras/{ => src}/utils/file_utils.py (98%) rename keras/{ => src}/utils/file_utils_test.py (99%) rename keras/{ => src}/utils/image_dataset_utils.py (98%) rename keras/{ => src}/utils/image_dataset_utils_test.py (98%) rename keras/{ => src}/utils/image_utils.py (99%) rename keras/{ => src}/utils/io_utils.py (97%) rename keras/{ => src}/utils/io_utils_test.py (96%) rename keras/{ => src}/utils/jax_layer.py (98%) rename keras/{ => src}/utils/jax_layer_test.py (98%) rename keras/{ => src}/utils/jax_utils.py (88%) rename keras/{ => src}/utils/model_visualization.py (98%) rename keras/{ => src}/utils/module_utils.py (100%) rename keras/{ => src}/utils/naming.py (94%) rename keras/{ => src}/utils/naming_test.py (98%) rename keras/{ => src}/utils/numerical_utils.py (97%) rename keras/{ => src}/utils/numerical_utils_test.py (96%) rename keras/{ => src}/utils/progbar.py (98%) rename keras/{ => src}/utils/python_utils.py (100%) rename keras/{ => src}/utils/python_utils_test.py (97%) rename keras/{ => src}/utils/rng_utils.py (92%) rename keras/{ => src}/utils/rng_utils_test.py (89%) rename keras/{ => src}/utils/sequence_utils.py (99%) rename keras/{ => src}/utils/sequence_utils_test.py (98%) rename keras/{ => src}/utils/summary_utils.py (98%) rename keras/{ => src}/utils/summary_utils_test.py (92%) rename keras/{ => src}/utils/text_dataset_utils.py (98%) rename keras/{ => src}/utils/text_dataset_utils_test.py (99%) rename keras/{ => src}/utils/tf_utils.py (98%) rename keras/{ => src}/utils/timeseries_dataset_utils.py (98%) rename keras/{ => src}/utils/timeseries_dataset_utils_test.py (98%) rename keras/{ => src}/utils/torch_utils.py (93%) rename 
keras/{ => src}/utils/torch_utils_test.py (97%)
 rename keras/{ => src}/utils/traceback_utils.py (98%)
 rename keras/{ => src}/utils/tracking.py (96%)
 rename keras/{ => src}/utils/tracking_test.py (96%)
 rename keras/{ => src}/version.py (75%)
 delete mode 100644 keras/testing/__init__.py
 delete mode 100644 keras/tree/__init__.py
 delete mode 100644 keras/utils/__init__.py
 create mode 100755 shell/api_gen.sh

diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml
index d4d6a1710dc8..2d2e17e1e025 100644
--- a/.github/workflows/actions.yml
+++ b/.github/workflows/actions.yml
@@ -24,13 +24,13 @@ jobs:
       KERAS_HOME: .github/workflows/config/${{ matrix.backend }}
     steps:
       - uses: actions/checkout@v4
-      - name: Check for changes in keras/applications
+      - name: Check for changes in keras/src/applications
        uses: dorny/paths-filter@v3
        id: filter
        with:
          filters: |
            applications:
-              - 'keras/applications/**'
+              - 'keras/src/applications/**'
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
@@ -49,13 +49,13 @@ jobs:
        run: |
          pip install -r requirements.txt --progress-bar off --upgrade
          pip uninstall -y keras keras-nightly
-          pip install tf_keras==2.16.0rc0 --progress-bar off --upgrade
+          pip install tf_keras==2.16.0 --progress-bar off --upgrade
          pip install -e "." --progress-bar off --upgrade
      - name: Test applications with pytest
        if: ${{ steps.filter.outputs.applications == 'true' }}
        run: |
-          pytest keras/applications --cov=keras/applications
-          coverage xml --include='keras/applications/*' -o apps-coverage.xml
+          pytest keras/src/applications --cov=keras/src/applications
+          coverage xml --include='keras/src/applications/*' -o apps-coverage.xml
      - name: Codecov keras.applications
        if: ${{ steps.filter.outputs.applications == 'true' }}
        uses: codecov/codecov-action@v4
@@ -80,8 +80,8 @@ jobs:
          pytest integration_tests/torch_workflow_test.py
      - name: Test with pytest
        run: |
-          pytest keras --ignore keras/applications --cov=keras
-          coverage xml --omit='keras/applications/*' -o core-coverage.xml
+          pytest keras --ignore keras/src/applications --cov=keras
+          coverage xml --omit='keras/src/applications/*,keras/api' -o core-coverage.xml
      - name: Codecov keras
        uses: codecov/codecov-action@v4
        with:
@@ -115,5 +115,14 @@ jobs:
          pip install -r requirements.txt --progress-bar off --upgrade
          pip uninstall -y keras keras-nightly
          pip install -e "." --progress-bar off --upgrade
+      - name: Check for API changes
+        run: |
+          bash shell/api_gen.sh
+          git status
+          clean=$(git status | grep "nothing to commit")
+          if [ -z "$clean" ]; then
+            echo "Please run shell/api_gen.sh to generate API."
+            exit 1
+          fi
      - name: Lint
        run: bash shell/lint.sh
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index 4d34c8bfab82..5edfd2c988b1 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -55,7 +55,7 @@ jobs:
          pytest integration_tests/torch_workflow_test.py
      - name: Test with pytest
        run: |
-          pytest keras --ignore keras/applications --cov=keras
+          pytest keras --ignore keras/src/applications --cov=keras

  format:
    name: Check the code format
@@ -81,6 +81,15 @@ jobs:
          pip install -r requirements.txt --progress-bar off --upgrade
          pip uninstall -y keras keras-nightly
          pip install -e "." --progress-bar off --upgrade
+      - name: Check for API changes
+        run: |
+          bash shell/api_gen.sh
+          git status
+          clean=$(git status | grep "nothing to commit")
+          if [ -z "$clean" ]; then
+            echo "Please run shell/api_gen.sh to generate API."
+            exit 1
+          fi
      - name: Lint
        run: bash shell/lint.sh
@@ -108,4 +117,4 @@ jobs:
    with:
      password: ${{ secrets.PYPI_NIGHTLY_API_TOKEN }}
      packages-dir: dist/
-      verbose: true
\ No newline at end of file
+      verbose: true
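The new "Check for API changes" step in both workflows regenerates the exported API with `shell/api_gen.sh` and fails the build if the working tree is left dirty, i.e. if a contributor changed something under `keras/src` without regenerating `keras/api`. A minimal standalone sketch of the same staleness check (a hypothetical helper script, not part of this patch; it assumes `git` and `shell/api_gen.sh` are available and uses `git status --porcelain` instead of grepping for "nothing to commit"):

    import subprocess
    import sys

    # Regenerate the public API files, then fail if anything changed on disk.
    subprocess.run(["bash", "shell/api_gen.sh"], check=True)
    status = subprocess.run(
        ["git", "status", "--porcelain"],
        capture_output=True,
        text=True,
        check=True,
    )
    if status.stdout.strip():
        print("Please run shell/api_gen.sh to generate API.")
        sys.exit(1)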
diff --git a/api_gen.py b/api_gen.py
new file mode 100644
index 000000000000..51a0128861ac
--- /dev/null
+++ b/api_gen.py
@@ -0,0 +1,175 @@
+"""Script to generate keras public API in `keras/api` directory.
+
+Usage:
+
+Run via `./shell/api_gen.sh`.
+It generates API and formats user and generated APIs.
+"""
+
+import os
+import shutil
+
+import namex
+
+package = "keras"
+
+
+def ignore_files(_, filenames):
+    return [f for f in filenames if f.endswith("_test.py")]
+
+
+def create_legacy_directory():
+    API_DIR = os.path.join(package, "api")
+    # Make keras/_tf_keras/ by copying keras/
+    tf_keras_dirpath_parent = os.path.join(API_DIR, "_tf_keras")
+    tf_keras_dirpath = os.path.join(tf_keras_dirpath_parent, "keras")
+    os.makedirs(tf_keras_dirpath, exist_ok=True)
+    with open(os.path.join(tf_keras_dirpath_parent, "__init__.py"), "w") as f:
+        f.write("from keras.api._tf_keras import keras\n")
+    with open(os.path.join(API_DIR, "__init__.py")) as f:
+        init_file = f.read()
+    init_file = init_file.replace(
+        "from keras.api import _legacy",
+        "from keras.api import _tf_keras",
+    )
+    with open(os.path.join(API_DIR, "__init__.py"), "w") as f:
+        f.write(init_file)
+    # Remove the import of `_tf_keras` in `keras/_tf_keras/keras/__init__.py`
+    init_file = init_file.replace("from keras.api import _tf_keras\n", "\n")
+    with open(os.path.join(tf_keras_dirpath, "__init__.py"), "w") as f:
+        f.write(init_file)
+    for dirname in os.listdir(API_DIR):
+        dirpath = os.path.join(API_DIR, dirname)
+        if os.path.isdir(dirpath) and dirname not in (
+            "_legacy",
+            "_tf_keras",
+            "src",
+        ):
+            destpath = os.path.join(tf_keras_dirpath, dirname)
+            if os.path.exists(destpath):
+                shutil.rmtree(destpath)
+            shutil.copytree(
+                dirpath,
+                destpath,
+                ignore=ignore_files,
+            )
+
+    # Copy keras/_legacy/ file contents to keras/_tf_keras/keras
+    legacy_submodules = [
+        path[:-3]
+        for path in os.listdir(os.path.join(package, "src", "legacy"))
+        if path.endswith(".py")
+    ]
+    legacy_submodules += [
+        path
+        for path in os.listdir(os.path.join(package, "src", "legacy"))
+        if os.path.isdir(os.path.join(package, "src", "legacy", path))
+    ]
+
+    for root, _, fnames in os.walk(os.path.join(package, "_legacy")):
+        for fname in fnames:
+            if fname.endswith(".py"):
+                legacy_fpath = os.path.join(root, fname)
+                tf_keras_root = root.replace("/_legacy", "/_tf_keras/keras")
+                core_api_fpath = os.path.join(
+                    root.replace("/_legacy", ""), fname
+                )
+                if not os.path.exists(tf_keras_root):
+                    os.makedirs(tf_keras_root)
+                tf_keras_fpath = os.path.join(tf_keras_root, fname)
+                with open(legacy_fpath) as f:
+                    legacy_contents = f.read()
+                legacy_contents = legacy_contents.replace(
+                    "keras.api._legacy", "keras.api._tf_keras.keras"
+                )
+                if os.path.exists(core_api_fpath):
+                    with open(core_api_fpath) as f:
+                        core_api_contents = f.read()
+                    core_api_contents = core_api_contents.replace(
+                        "from keras.api import _tf_keras\n", ""
+                    )
+                    for legacy_submodule in legacy_submodules:
+                        core_api_contents = core_api_contents.replace(
+                            f"from keras.api import {legacy_submodule}\n",
+                            "",
+                        )
+                        core_api_contents = core_api_contents.replace(
+                            f"keras.api.{legacy_submodule}",
+                            f"keras.api._tf_keras.keras.{legacy_submodule}",
+                        )
+                    legacy_contents = core_api_contents + "\n" + legacy_contents
+                with open(tf_keras_fpath, "w") as f:
+                    f.write(legacy_contents)
+
+    # Delete keras/api/_legacy/
+    shutil.rmtree(os.path.join(API_DIR, "_legacy"))
+
+
+def export_version_string():
+    API_INIT = os.path.join(package, "api", "__init__.py")
+    with open(API_INIT) as f:
+        contents = f.read()
+    with open(API_INIT, "w") as f:
+        contents += "from keras.src.version import __version__\n"
+        f.write(contents)
+
+
+def update_package_init():
+    contents = """
+# Import everything from /api/ into keras.
+from keras.api import *  # noqa: F403
+from keras.api import __version__  # Import * ignores names start with "_".
+
+import os
+
+# Add everything in /api/ to the module search path.
+__path__.append(os.path.join(os.path.dirname(__file__), "api"))  # noqa: F405
+
+# Don't pollute namespace.
+del os
+
+# Never autocomplete `.src` or `.api` on an imported keras object.
+def __dir__():
+    keys = dict.fromkeys((globals().keys()))
+    keys.pop("src")
+    keys.pop("api")
+    return list(keys)
+
+
+# Don't import `.src` or `.api` during `from keras import *`.
+__all__ = [
+    name
+    for name in globals().keys()
+    if not (name.startswith("_") or name in ("src", "api"))
+]"""
+    with open(os.path.join(package, "__init__.py")) as f:
+        init_contents = f.read()
+    with open(os.path.join(package, "__init__.py"), "w") as f:
+        f.write(init_contents.replace("\nfrom keras import api", contents))
+
+
+if __name__ == "__main__":
+    # Backup the `keras/__init__.py` and restore it on error in api gen.
+    os.makedirs(os.path.join(package, "api"), exist_ok=True)
+    init_fname = os.path.join(package, "__init__.py")
+    backup_init_fname = os.path.join(package, "__init__.py.bak")
+    try:
+        if os.path.exists(init_fname):
+            shutil.move(init_fname, backup_init_fname)
+        # Generates `keras/api` directory.
+        namex.generate_api_files(
+            "keras", code_directory="src", target_directory="api"
+        )
+        # Creates `keras/__init__.py` importing from `keras/api`
+        update_package_init()
+    except Exception as e:
+        if os.path.exists(backup_init_fname):
+            shutil.move(backup_init_fname, init_fname)
+        raise e
+    finally:
+        if os.path.exists(backup_init_fname):
+            os.remove(backup_init_fname)
+    # Add __version__ to keras package
+    export_version_string()
+    # Creates `_tf_keras` with full keras API
+    create_legacy_directory()
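`api_gen.py` delegates the actual export walk to namex: it scans `keras/src` and writes import shims into `keras/api` for every symbol registered with `keras_export`. A hedged sketch of how a symbol opts in (the decorator and its module are real; `my_op` and its export path are illustrative only):

    from keras.src.api_export import keras_export

    @keras_export(["keras.ops.my_op"])  # illustrative export path
    def my_op(x):
        # After running api_gen.py, this would be reachable as
        # `keras.ops.my_op` via a generated shim under `keras/api`.
        return x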
diff --git a/conftest.py b/conftest.py
index 2fcc51ec0062..5c27d947c13b 100644
--- a/conftest.py
+++ b/conftest.py
@@ -14,7 +14,7 @@
 
 import pytest  # noqa: E402
 
-from keras.backend import backend  # noqa: E402
+from keras.src.backend import backend  # noqa: E402
 
 
 def pytest_configure(config):
diff --git a/integration_tests/basic_full_flow.py b/integration_tests/basic_full_flow.py
index e7e2d0fbd09c..6361b32d4794 100644
--- a/integration_tests/basic_full_flow.py
+++ b/integration_tests/basic_full_flow.py
@@ -2,11 +2,11 @@
 import pytest
 
 import keras
-from keras import layers
-from keras import losses
-from keras import metrics
-from keras import optimizers
-from keras import testing
+from keras.src import layers
+from keras.src import losses
+from keras.src import metrics
+from keras.src import optimizers
+from keras.src import testing
 
 
 class MyModel(keras.Model):
diff --git a/integration_tests/dataset_tests/boston_housing_test.py b/integration_tests/dataset_tests/boston_housing_test.py
index eb5ac411cac9..4d4c3399beb6 100644
--- a/integration_tests/dataset_tests/boston_housing_test.py
+++ b/integration_tests/dataset_tests/boston_housing_test.py
@@ -1,5 +1,5 @@
-from keras import testing
-from keras.datasets import boston_housing
+from keras.src import testing
+from keras.src.datasets import boston_housing
 
 
 class BostonHousingTest(testing.TestCase):
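These conftest and integration-test updates show the convention the move establishes: user-facing code imports the generated public surface, while Keras-internal code and tests import the private source tree directly. Roughly (sketch, not taken from the patch):

    # Third-party / user code: public, stable surface.
    import keras

    layer = keras.layers.Dense(4)

    # Keras-internal code and tests: private tree, not a stable API.
    from keras.src import layers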
diff --git a/integration_tests/dataset_tests/california_housing_test.py b/integration_tests/dataset_tests/california_housing_test.py
index ec63578b709f..d49abb7c0142 100644
--- a/integration_tests/dataset_tests/california_housing_test.py
+++ b/integration_tests/dataset_tests/california_housing_test.py
@@ -1,5 +1,5 @@
-from keras import testing
-from keras.datasets import california_housing
+from keras.src import testing
+from keras.src.datasets import california_housing
 
 
 class CaliforniaHousingTest(testing.TestCase):
diff --git a/integration_tests/dataset_tests/cifar100_test.py b/integration_tests/dataset_tests/cifar100_test.py
index d2ef4f7edeea..3a497dd205ac 100644
--- a/integration_tests/dataset_tests/cifar100_test.py
+++ b/integration_tests/dataset_tests/cifar100_test.py
@@ -1,7 +1,7 @@
 import numpy as np
 
-from keras import testing
-from keras.datasets import cifar100
+from keras.src import testing
+from keras.src.datasets import cifar100
 
 
 class Cifar100LoadDataTest(testing.TestCase):
diff --git a/integration_tests/dataset_tests/cifar10_test.py b/integration_tests/dataset_tests/cifar10_test.py
index 58185d230a09..fe1c20319b00 100644
--- a/integration_tests/dataset_tests/cifar10_test.py
+++ b/integration_tests/dataset_tests/cifar10_test.py
@@ -1,7 +1,7 @@
 import numpy as np
 
-from keras import testing
-from keras.datasets import cifar10
+from keras.src import testing
+from keras.src.datasets import cifar10
 
 
 class Cifar10LoadDataTest(testing.TestCase):
diff --git a/integration_tests/dataset_tests/fashion_mnist_test.py b/integration_tests/dataset_tests/fashion_mnist_test.py
index e3374730dfbc..92c43eeefe32 100644
--- a/integration_tests/dataset_tests/fashion_mnist_test.py
+++ b/integration_tests/dataset_tests/fashion_mnist_test.py
@@ -1,7 +1,7 @@
 import numpy as np
 
-from keras import testing
-from keras.datasets import fashion_mnist
+from keras.src import testing
+from keras.src.datasets import fashion_mnist
 
 
 class FashionMnistLoadDataTest(testing.TestCase):
diff --git a/integration_tests/dataset_tests/imdb_test.py b/integration_tests/dataset_tests/imdb_test.py
index 8d740904a23c..e2971c4709b6 100644
--- a/integration_tests/dataset_tests/imdb_test.py
+++ b/integration_tests/dataset_tests/imdb_test.py
@@ -1,7 +1,7 @@
 import numpy as np
 
-from keras import testing
-from keras.datasets import imdb
+from keras.src import testing
+from keras.src.datasets import imdb
 
 
 class ImdbLoadDataTest(testing.TestCase):
diff --git a/integration_tests/dataset_tests/mnist_test.py b/integration_tests/dataset_tests/mnist_test.py
index aed6ef5e8654..5aeaae4548bd 100644
--- a/integration_tests/dataset_tests/mnist_test.py
+++ b/integration_tests/dataset_tests/mnist_test.py
@@ -1,7 +1,7 @@
 import numpy as np
 
-from keras import testing
-from keras.datasets import mnist
+from keras.src import testing
+from keras.src.datasets import mnist
 
 
 class MnistLoadDataTest(testing.TestCase):
diff --git a/integration_tests/dataset_tests/reuters_test.py b/integration_tests/dataset_tests/reuters_test.py
index 14b1eb0c1f05..3d83de560869 100644
--- a/integration_tests/dataset_tests/reuters_test.py
+++ b/integration_tests/dataset_tests/reuters_test.py
@@ -1,7 +1,7 @@
 import numpy as np
 
-from keras import testing
-from keras.datasets import reuters
+from keras.src import testing
+from keras.src.datasets import reuters
 
 
 class ReutersLoadDataTest(testing.TestCase):
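Each dataset test above changes only its import paths; the test bodies stay the same. In the same style, a minimal smoke test against the private path could look like this (a sketch, not part of the patch; the MNIST shapes are the standard ones):

    from keras.src import testing
    from keras.src.datasets import mnist


    class MnistSmokeTest(testing.TestCase):
        def test_shapes(self):
            # load_data downloads and caches the arrays on first use.
            (x_train, y_train), (x_test, y_test) = mnist.load_data()
            self.assertEqual(x_train.shape, (60000, 28, 28))
            self.assertEqual(x_test.shape, (10000, 28, 28))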
diff --git a/integration_tests/import_test.py b/integration_tests/import_test.py
index 54509a88492f..9330b834e0a1 100644
--- a/integration_tests/import_test.py
+++ b/integration_tests/import_test.py
@@ -2,12 +2,16 @@
 import re
 import subprocess
 
-from keras import backend
+from keras.src import backend
 
+# For torch, use index url to avoid installing nvidia drivers for the test.
 BACKEND_REQ = {
-    "tensorflow": "tensorflow",
-    "torch": "torch torchvision",
-    "jax": "jax jaxlib",
+    "tensorflow": ("tensorflow-cpu", ""),
+    "torch": (
+        "torch torchvision",
+        "--extra-index-url https://download.pytorch.org/whl/cpu ",
+    ),
+    "jax": ("jax[cpu]", ""),
 }
 
 
@@ -43,16 +47,17 @@ def create_virtualenv():
 
 def manage_venv_installs(whl_path):
     other_backends = list(set(BACKEND_REQ.keys()) - {backend.backend()})
+    backend_pkg, backend_extra_url = BACKEND_REQ[backend.backend()]
     install_setup = [
         # Installs the backend's package and common requirements
-        "pip install " + BACKEND_REQ[backend.backend()],
+        "pip install " + backend_extra_url + backend_pkg,
         "pip install -r requirements-common.txt",
         "pip install pytest",
         # Ensure other backends are uninstalled
         "pip uninstall -y "
-        + BACKEND_REQ[other_backends[0]]
+        + BACKEND_REQ[other_backends[0]][0]
         + " "
-        + BACKEND_REQ[other_backends[1]],
+        + BACKEND_REQ[other_backends[1]][0],
         # Install `.whl` package
         "pip install " + whl_path,
     ]
diff --git a/integration_tests/model_visualization_test.py b/integration_tests/model_visualization_test.py
index ed5c4e87ee52..29c666aee6fc 100644
--- a/integration_tests/model_visualization_test.py
+++ b/integration_tests/model_visualization_test.py
@@ -1,5 +1,5 @@
 import keras
-from keras.utils import plot_model
+from keras.src.utils import plot_model
 
 
 def plot_sequential_model():
diff --git a/integration_tests/tf_distribute_training_test.py b/integration_tests/tf_distribute_training_test.py
index 76fa55ed64f4..ec2a7d5bfb92 100644
--- a/integration_tests/tf_distribute_training_test.py
+++ b/integration_tests/tf_distribute_training_test.py
@@ -2,12 +2,12 @@
 import tensorflow as tf
 
 import keras
-from keras import layers
-from keras import losses
-from keras import metrics
-from keras import models
-from keras import optimizers
-from keras.callbacks import LearningRateScheduler
+from keras.src import layers
+from keras.src import losses
+from keras.src import metrics
+from keras.src import models
+from keras.src import optimizers
+from keras.src.callbacks import LearningRateScheduler
 
 
 def test_model_fit():
diff --git a/integration_tests/torch_workflow_test.py b/integration_tests/torch_workflow_test.py
index 1bab5a4b7e46..3737197b86e5 100644
--- a/integration_tests/torch_workflow_test.py
+++ b/integration_tests/torch_workflow_test.py
@@ -1,8 +1,8 @@
 import torch
 
-from keras import layers
-from keras import testing
-from keras.backend.common import KerasVariable
+from keras.src import layers
+from keras.src import testing
+from keras.src.backend.common import KerasVariable
 
 
 class Net(torch.nn.Module):
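The `BACKEND_REQ` change in `import_test.py` turns each entry into a `(package, extra_index_url)` pair so the torch CPU wheel index can be injected into the install command. The pair is consumed exactly as in `manage_venv_installs` above:

    backend_pkg, backend_extra_url = BACKEND_REQ["torch"]
    install_cmd = "pip install " + backend_extra_url + backend_pkg
    # -> "pip install --extra-index-url https://download.pytorch.org/whl/cpu
    #     torch torchvision"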
diff --git a/keras/__init__.py b/keras/__init__.py
index 4cda3efc0e7f..6276b51e1f85 100644
--- a/keras/__init__.py
+++ b/keras/__init__.py
@@ -1,19 +1,33 @@
-from keras import activations
-from keras import applications
-from keras import backend
-from keras import constraints
-from keras import datasets
-from keras import initializers
-from keras import layers
-from keras import models
-from keras import ops
-from keras import optimizers
-from keras import regularizers
-from keras import utils
-from keras.backend import KerasTensor
-from keras.layers import Input
-from keras.layers import Layer
-from keras.models import Functional
-from keras.models import Model
-from keras.models import Sequential
-from keras.version import __version__
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+import os
+
+# Import everything from /api/ into keras.
+from keras.api import *  # noqa: F403
+from keras.api import __version__  # Import * ignores names start with "_".
+
+# Add everything in /api/ to the module search path.
+__path__.append(os.path.join(os.path.dirname(__file__), "api"))  # noqa: F405
+
+# Don't pollute namespace.
+del os
+
+
+# Never autocomplete `.src` or `.api` on an imported keras object.
+def __dir__():
+    keys = dict.fromkeys((globals().keys()))
+    keys.pop("src")
+    keys.pop("api")
+    return list(keys)
+
+
+# Don't import `.src` or `.api` during `from keras import *`.
+__all__ = [
+    name
+    for name in globals().keys()
+    if not (name.startswith("_") or name in ("src", "api"))
+]
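The `__dir__` override and the filtered `__all__` above are what keep the internal `keras.src` and `keras.api` trees out of autocompletion and star-imports. A minimal sketch of the intended behavior (illustrative check, not part of the patch):

    import keras

    assert "src" not in dir(keras) and "api" not in dir(keras)
    assert "src" not in keras.__all__  # skipped by `from keras import *`
    assert "Model" in dir(keras)  # public symbols re-exported from keras.api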
diff --git a/keras/api/__init__.py b/keras/api/__init__.py
new file mode 100644
index 000000000000..d93460c26c53
--- /dev/null
+++ b/keras/api/__init__.py
@@ -0,0 +1,58 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.api import _tf_keras
+from keras.api import activations
+from keras.api import applications
+from keras.api import backend
+from keras.api import callbacks
+from keras.api import config
+from keras.api import constraints
+from keras.api import datasets
+from keras.api import distribution
+from keras.api import dtype_policies
+from keras.api import export
+from keras.api import initializers
+from keras.api import layers
+from keras.api import legacy
+from keras.api import losses
+from keras.api import metrics
+from keras.api import mixed_precision
+from keras.api import models
+from keras.api import ops
+from keras.api import optimizers
+from keras.api import preprocessing
+from keras.api import quantizers
+from keras.api import random
+from keras.api import regularizers
+from keras.api import saving
+from keras.api import tree
+from keras.api import utils
+from keras.src.backend.common.keras_tensor import KerasTensor
+from keras.src.backend.common.stateless_scope import StatelessScope
+from keras.src.backend.exports import Variable
+from keras.src.backend.exports import device
+from keras.src.backend.exports import name_scope
+from keras.src.dtype_policies.dtype_policy import DTypePolicy
+from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
+from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
+from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
+from keras.src.initializers.initializer import Initializer
+from keras.src.layers.core.input_layer import Input
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+from keras.src.losses.loss import Loss
+from keras.src.metrics.metric import Metric
+from keras.src.models.model import Model
+from keras.src.models.sequential import Sequential
+from keras.src.ops.function import Function
+from keras.src.ops.operation import Operation
+from keras.src.optimizers.optimizer import Optimizer
+from keras.src.quantizers.quantizers import AbsMaxQuantizer
+from keras.src.quantizers.quantizers import Quantizer
+from keras.src.regularizers.regularizers import Regularizer
+from keras.src.version import __version__
+from keras.src.version import version
diff --git a/keras/api/_tf_keras/__init__.py b/keras/api/_tf_keras/__init__.py
new file mode 100644
index 000000000000..249c46d892a7
--- /dev/null
+++ b/keras/api/_tf_keras/__init__.py
@@ -0,0 +1 @@
+from keras.api._tf_keras import keras
diff --git a/keras/api/_tf_keras/keras/__init__.py b/keras/api/_tf_keras/keras/__init__.py
new file mode 100644
index 000000000000..767853b2be3b
--- /dev/null
+++ b/keras/api/_tf_keras/keras/__init__.py
@@ -0,0 +1,57 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.api import activations
+from keras.api import applications
+from keras.api import backend
+from keras.api import callbacks
+from keras.api import config
+from keras.api import constraints
+from keras.api import datasets
+from keras.api import distribution
+from keras.api import dtype_policies
+from keras.api import export
+from keras.api import initializers
+from keras.api import layers
+from keras.api import legacy
+from keras.api import losses
+from keras.api import metrics
+from keras.api import mixed_precision
+from keras.api import models
+from keras.api import ops
+from keras.api import optimizers
+from keras.api import preprocessing
+from keras.api import quantizers
+from keras.api import random
+from keras.api import regularizers
+from keras.api import saving
+from keras.api import tree
+from keras.api import utils
+from keras.src.backend.common.keras_tensor import KerasTensor
+from keras.src.backend.common.stateless_scope import StatelessScope
+from keras.src.backend.exports import Variable
+from keras.src.backend.exports import device
+from keras.src.backend.exports import name_scope
+from keras.src.dtype_policies.dtype_policy import DTypePolicy
+from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
+from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
+from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
+from keras.src.initializers.initializer import Initializer
+from keras.src.layers.core.input_layer import Input
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+from keras.src.losses.loss import Loss
+from keras.src.metrics.metric import Metric
+from keras.src.models.model import Model
+from keras.src.models.sequential import Sequential
+from keras.src.ops.function import Function
+from keras.src.ops.operation import Operation
+from keras.src.optimizers.optimizer import Optimizer
+from keras.src.quantizers.quantizers import AbsMaxQuantizer
+from keras.src.quantizers.quantizers import Quantizer
+from keras.src.regularizers.regularizers import Regularizer
+from keras.src.version import __version__
+from keras.src.version import version
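The `_tf_keras` tree generated above mirrors the public API (plus legacy additions in later hunks) so that `tf.keras`-style imports keep resolving to the same objects. An illustrative check, assuming both trees are importable:

    import keras
    from keras.api._tf_keras import keras as tf_keras

    assert tf_keras.Model is keras.Model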
+""" + +from keras.src.activations import deserialize +from keras.src.activations import get +from keras.src.activations import serialize +from keras.src.activations.activations import elu +from keras.src.activations.activations import exponential +from keras.src.activations.activations import gelu +from keras.src.activations.activations import hard_sigmoid +from keras.src.activations.activations import hard_silu +from keras.src.activations.activations import hard_silu as hard_swish +from keras.src.activations.activations import leaky_relu +from keras.src.activations.activations import linear +from keras.src.activations.activations import log_softmax +from keras.src.activations.activations import mish +from keras.src.activations.activations import relu +from keras.src.activations.activations import relu6 +from keras.src.activations.activations import selu +from keras.src.activations.activations import sigmoid +from keras.src.activations.activations import silu +from keras.src.activations.activations import silu as swish +from keras.src.activations.activations import softmax +from keras.src.activations.activations import softplus +from keras.src.activations.activations import softsign +from keras.src.activations.activations import tanh diff --git a/keras/api/_tf_keras/keras/applications/__init__.py b/keras/api/_tf_keras/keras/applications/__init__.py new file mode 100644 index 000000000000..183b3ca66142 --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/__init__.py @@ -0,0 +1,63 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.api.applications import convnext +from keras.api.applications import densenet +from keras.api.applications import efficientnet +from keras.api.applications import efficientnet_v2 +from keras.api.applications import imagenet_utils +from keras.api.applications import inception_resnet_v2 +from keras.api.applications import inception_v3 +from keras.api.applications import mobilenet +from keras.api.applications import mobilenet_v2 +from keras.api.applications import mobilenet_v3 +from keras.api.applications import nasnet +from keras.api.applications import resnet +from keras.api.applications import resnet50 +from keras.api.applications import resnet_v2 +from keras.api.applications import vgg16 +from keras.api.applications import vgg19 +from keras.api.applications import xception +from keras.src.applications.convnext import ConvNeXtBase +from keras.src.applications.convnext import ConvNeXtLarge +from keras.src.applications.convnext import ConvNeXtSmall +from keras.src.applications.convnext import ConvNeXtTiny +from keras.src.applications.convnext import ConvNeXtXLarge +from keras.src.applications.densenet import DenseNet121 +from keras.src.applications.densenet import DenseNet169 +from keras.src.applications.densenet import DenseNet201 +from keras.src.applications.efficientnet import EfficientNetB0 +from keras.src.applications.efficientnet import EfficientNetB1 +from keras.src.applications.efficientnet import EfficientNetB2 +from keras.src.applications.efficientnet import EfficientNetB3 +from keras.src.applications.efficientnet import EfficientNetB4 +from keras.src.applications.efficientnet import EfficientNetB5 +from keras.src.applications.efficientnet import EfficientNetB6 +from keras.src.applications.efficientnet import EfficientNetB7 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B0 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B1 
+from keras.src.applications.efficientnet_v2 import EfficientNetV2B2
+from keras.src.applications.efficientnet_v2 import EfficientNetV2B3
+from keras.src.applications.efficientnet_v2 import EfficientNetV2L
+from keras.src.applications.efficientnet_v2 import EfficientNetV2M
+from keras.src.applications.efficientnet_v2 import EfficientNetV2S
+from keras.src.applications.inception_resnet_v2 import InceptionResNetV2
+from keras.src.applications.inception_v3 import InceptionV3
+from keras.src.applications.mobilenet import MobileNet
+from keras.src.applications.mobilenet_v2 import MobileNetV2
+from keras.src.applications.mobilenet_v3 import MobileNetV3Large
+from keras.src.applications.mobilenet_v3 import MobileNetV3Small
+from keras.src.applications.nasnet import NASNetLarge
+from keras.src.applications.nasnet import NASNetMobile
+from keras.src.applications.resnet import ResNet50
+from keras.src.applications.resnet import ResNet101
+from keras.src.applications.resnet import ResNet152
+from keras.src.applications.resnet_v2 import ResNet50V2
+from keras.src.applications.resnet_v2 import ResNet101V2
+from keras.src.applications.resnet_v2 import ResNet152V2
+from keras.src.applications.vgg16 import VGG16
+from keras.src.applications.vgg19 import VGG19
+from keras.src.applications.xception import Xception
diff --git a/keras/api/_tf_keras/keras/applications/convnext/__init__.py b/keras/api/_tf_keras/keras/applications/convnext/__init__.py
new file mode 100644
index 000000000000..b4eaaa3834b1
--- /dev/null
+++ b/keras/api/_tf_keras/keras/applications/convnext/__init__.py
@@ -0,0 +1,13 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.convnext import ConvNeXtBase
+from keras.src.applications.convnext import ConvNeXtLarge
+from keras.src.applications.convnext import ConvNeXtSmall
+from keras.src.applications.convnext import ConvNeXtTiny
+from keras.src.applications.convnext import ConvNeXtXLarge
+from keras.src.applications.convnext import decode_predictions
+from keras.src.applications.convnext import preprocess_input
diff --git a/keras/api/_tf_keras/keras/applications/densenet/__init__.py b/keras/api/_tf_keras/keras/applications/densenet/__init__.py
new file mode 100644
index 000000000000..0173a2c3ed9d
--- /dev/null
+++ b/keras/api/_tf_keras/keras/applications/densenet/__init__.py
@@ -0,0 +1,11 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.densenet import DenseNet121
+from keras.src.applications.densenet import DenseNet169
+from keras.src.applications.densenet import DenseNet201
+from keras.src.applications.densenet import decode_predictions
+from keras.src.applications.densenet import preprocess_input
diff --git a/keras/api/_tf_keras/keras/applications/efficientnet/__init__.py b/keras/api/_tf_keras/keras/applications/efficientnet/__init__.py
new file mode 100644
index 000000000000..c4af0199bea6
--- /dev/null
+++ b/keras/api/_tf_keras/keras/applications/efficientnet/__init__.py
@@ -0,0 +1,16 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+""" + +from keras.src.applications.efficientnet import EfficientNetB0 +from keras.src.applications.efficientnet import EfficientNetB1 +from keras.src.applications.efficientnet import EfficientNetB2 +from keras.src.applications.efficientnet import EfficientNetB3 +from keras.src.applications.efficientnet import EfficientNetB4 +from keras.src.applications.efficientnet import EfficientNetB5 +from keras.src.applications.efficientnet import EfficientNetB6 +from keras.src.applications.efficientnet import EfficientNetB7 +from keras.src.applications.efficientnet import decode_predictions +from keras.src.applications.efficientnet import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/efficientnet_v2/__init__.py b/keras/api/_tf_keras/keras/applications/efficientnet_v2/__init__.py new file mode 100644 index 000000000000..ee85821a1d74 --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/efficientnet_v2/__init__.py @@ -0,0 +1,15 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.applications.efficientnet_v2 import EfficientNetV2B0 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B1 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B2 +from keras.src.applications.efficientnet_v2 import EfficientNetV2B3 +from keras.src.applications.efficientnet_v2 import EfficientNetV2L +from keras.src.applications.efficientnet_v2 import EfficientNetV2M +from keras.src.applications.efficientnet_v2 import EfficientNetV2S +from keras.src.applications.efficientnet_v2 import decode_predictions +from keras.src.applications.efficientnet_v2 import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/imagenet_utils/__init__.py b/keras/api/_tf_keras/keras/applications/imagenet_utils/__init__.py new file mode 100644 index 000000000000..81a923e55b9e --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/imagenet_utils/__init__.py @@ -0,0 +1,8 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.applications.imagenet_utils import decode_predictions +from keras.src.applications.imagenet_utils import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/inception_resnet_v2/__init__.py b/keras/api/_tf_keras/keras/applications/inception_resnet_v2/__init__.py new file mode 100644 index 000000000000..b710829bd377 --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/inception_resnet_v2/__init__.py @@ -0,0 +1,9 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.applications.inception_resnet_v2 import InceptionResNetV2 +from keras.src.applications.inception_resnet_v2 import decode_predictions +from keras.src.applications.inception_resnet_v2 import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/inception_v3/__init__.py b/keras/api/_tf_keras/keras/applications/inception_v3/__init__.py new file mode 100644 index 000000000000..8a2379ca1b13 --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/inception_v3/__init__.py @@ -0,0 +1,9 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.src.applications.inception_v3 import InceptionV3 +from keras.src.applications.inception_v3 import decode_predictions +from keras.src.applications.inception_v3 import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/mobilenet/__init__.py b/keras/api/_tf_keras/keras/applications/mobilenet/__init__.py new file mode 100644 index 000000000000..0194cdfd0ac6 --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/mobilenet/__init__.py @@ -0,0 +1,9 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.applications.mobilenet import MobileNet +from keras.src.applications.mobilenet import decode_predictions +from keras.src.applications.mobilenet import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/mobilenet_v2/__init__.py b/keras/api/_tf_keras/keras/applications/mobilenet_v2/__init__.py new file mode 100644 index 000000000000..ceb0625e3519 --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/mobilenet_v2/__init__.py @@ -0,0 +1,9 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.applications.mobilenet_v2 import MobileNetV2 +from keras.src.applications.mobilenet_v2 import decode_predictions +from keras.src.applications.mobilenet_v2 import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/mobilenet_v3/__init__.py b/keras/api/_tf_keras/keras/applications/mobilenet_v3/__init__.py new file mode 100644 index 000000000000..c27e6669f0f1 --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/mobilenet_v3/__init__.py @@ -0,0 +1,8 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.applications.mobilenet_v3 import decode_predictions +from keras.src.applications.mobilenet_v3 import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/nasnet/__init__.py b/keras/api/_tf_keras/keras/applications/nasnet/__init__.py new file mode 100644 index 000000000000..874de61f00ab --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/nasnet/__init__.py @@ -0,0 +1,10 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.applications.nasnet import NASNetLarge +from keras.src.applications.nasnet import NASNetMobile +from keras.src.applications.nasnet import decode_predictions +from keras.src.applications.nasnet import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/resnet/__init__.py b/keras/api/_tf_keras/keras/applications/resnet/__init__.py new file mode 100644 index 000000000000..5aaa3ee0e5e2 --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/resnet/__init__.py @@ -0,0 +1,11 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.src.applications.resnet import ResNet50 +from keras.src.applications.resnet import ResNet101 +from keras.src.applications.resnet import ResNet152 +from keras.src.applications.resnet import decode_predictions +from keras.src.applications.resnet import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/resnet50/__init__.py b/keras/api/_tf_keras/keras/applications/resnet50/__init__.py new file mode 100644 index 000000000000..ac08b5322682 --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/resnet50/__init__.py @@ -0,0 +1,9 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.applications.resnet import ResNet50 +from keras.src.applications.resnet import decode_predictions +from keras.src.applications.resnet import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/resnet_v2/__init__.py b/keras/api/_tf_keras/keras/applications/resnet_v2/__init__.py new file mode 100644 index 000000000000..273dd3019d85 --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/resnet_v2/__init__.py @@ -0,0 +1,11 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.applications.resnet_v2 import ResNet50V2 +from keras.src.applications.resnet_v2 import ResNet101V2 +from keras.src.applications.resnet_v2 import ResNet152V2 +from keras.src.applications.resnet_v2 import decode_predictions +from keras.src.applications.resnet_v2 import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/vgg16/__init__.py b/keras/api/_tf_keras/keras/applications/vgg16/__init__.py new file mode 100644 index 000000000000..5a31084a4676 --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/vgg16/__init__.py @@ -0,0 +1,9 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.applications.vgg16 import VGG16 +from keras.src.applications.vgg16 import decode_predictions +from keras.src.applications.vgg16 import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/vgg19/__init__.py b/keras/api/_tf_keras/keras/applications/vgg19/__init__.py new file mode 100644 index 000000000000..14355514d7cf --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/vgg19/__init__.py @@ -0,0 +1,9 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.applications.vgg19 import VGG19 +from keras.src.applications.vgg19 import decode_predictions +from keras.src.applications.vgg19 import preprocess_input diff --git a/keras/api/_tf_keras/keras/applications/xception/__init__.py b/keras/api/_tf_keras/keras/applications/xception/__init__.py new file mode 100644 index 000000000000..c200dc66df35 --- /dev/null +++ b/keras/api/_tf_keras/keras/applications/xception/__init__.py @@ -0,0 +1,9 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.src.applications.xception import Xception +from keras.src.applications.xception import decode_predictions +from keras.src.applications.xception import preprocess_input diff --git a/keras/api/_tf_keras/keras/backend/__init__.py b/keras/api/_tf_keras/keras/backend/__init__.py new file mode 100644 index 000000000000..840bde6e4ded --- /dev/null +++ b/keras/api/_tf_keras/keras/backend/__init__.py @@ -0,0 +1,20 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.backend.common.dtypes import result_type +from keras.src.backend.common.global_state import clear_session +from keras.src.backend.common.keras_tensor import is_keras_tensor +from keras.src.backend.common.variables import is_float_dtype +from keras.src.backend.common.variables import is_int_dtype +from keras.src.backend.common.variables import standardize_dtype +from keras.src.backend.config import backend +from keras.src.backend.config import epsilon +from keras.src.backend.config import floatx +from keras.src.backend.config import image_data_format +from keras.src.backend.config import set_epsilon +from keras.src.backend.config import set_floatx +from keras.src.backend.config import set_image_data_format +from keras.src.utils.naming import get_uid diff --git a/keras/api/_tf_keras/keras/callbacks/__init__.py b/keras/api/_tf_keras/keras/callbacks/__init__.py new file mode 100644 index 000000000000..42ba958b9bb3 --- /dev/null +++ b/keras/api/_tf_keras/keras/callbacks/__init__.py @@ -0,0 +1,21 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.callbacks.backup_and_restore import BackupAndRestore +from keras.src.callbacks.callback import Callback +from keras.src.callbacks.callback_list import CallbackList +from keras.src.callbacks.csv_logger import CSVLogger +from keras.src.callbacks.early_stopping import EarlyStopping +from keras.src.callbacks.history import History +from keras.src.callbacks.lambda_callback import LambdaCallback +from keras.src.callbacks.learning_rate_scheduler import LearningRateScheduler +from keras.src.callbacks.model_checkpoint import ModelCheckpoint +from keras.src.callbacks.progbar_logger import ProgbarLogger +from keras.src.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau +from keras.src.callbacks.remote_monitor import RemoteMonitor +from keras.src.callbacks.swap_ema_weights import SwapEMAWeights +from keras.src.callbacks.tensorboard import TensorBoard +from keras.src.callbacks.terminate_on_nan import TerminateOnNaN diff --git a/keras/api/_tf_keras/keras/config/__init__.py b/keras/api/_tf_keras/keras/config/__init__.py new file mode 100644 index 000000000000..13e334cb7c06 --- /dev/null +++ b/keras/api/_tf_keras/keras/config/__init__.py @@ -0,0 +1,23 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.src.backend.config import backend +from keras.src.backend.config import epsilon +from keras.src.backend.config import floatx +from keras.src.backend.config import image_data_format +from keras.src.backend.config import set_epsilon +from keras.src.backend.config import set_floatx +from keras.src.backend.config import set_image_data_format +from keras.src.dtype_policies.dtype_policy import dtype_policy +from keras.src.dtype_policies.dtype_policy import set_dtype_policy +from keras.src.saving.serialization_lib import enable_unsafe_deserialization +from keras.src.utils.backend_utils import set_backend +from keras.src.utils.io_utils import disable_interactive_logging +from keras.src.utils.io_utils import enable_interactive_logging +from keras.src.utils.io_utils import is_interactive_logging_enabled +from keras.src.utils.traceback_utils import disable_traceback_filtering +from keras.src.utils.traceback_utils import enable_traceback_filtering +from keras.src.utils.traceback_utils import is_traceback_filtering_enabled diff --git a/keras/api/_tf_keras/keras/constraints/__init__.py b/keras/api/_tf_keras/keras/constraints/__init__.py new file mode 100644 index 000000000000..6372e149d3ba --- /dev/null +++ b/keras/api/_tf_keras/keras/constraints/__init__.py @@ -0,0 +1,18 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.constraints import deserialize +from keras.src.constraints import get +from keras.src.constraints import serialize +from keras.src.constraints.constraints import Constraint +from keras.src.constraints.constraints import MaxNorm +from keras.src.constraints.constraints import MaxNorm as max_norm +from keras.src.constraints.constraints import MinMaxNorm +from keras.src.constraints.constraints import MinMaxNorm as min_max_norm +from keras.src.constraints.constraints import NonNeg +from keras.src.constraints.constraints import NonNeg as non_neg +from keras.src.constraints.constraints import UnitNorm +from keras.src.constraints.constraints import UnitNorm as unit_norm diff --git a/keras/api/_tf_keras/keras/datasets/__init__.py b/keras/api/_tf_keras/keras/datasets/__init__.py new file mode 100644 index 000000000000..cf153fefcd4d --- /dev/null +++ b/keras/api/_tf_keras/keras/datasets/__init__.py @@ -0,0 +1,14 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.api.datasets import boston_housing +from keras.api.datasets import california_housing +from keras.api.datasets import cifar10 +from keras.api.datasets import cifar100 +from keras.api.datasets import fashion_mnist +from keras.api.datasets import imdb +from keras.api.datasets import mnist +from keras.api.datasets import reuters diff --git a/keras/api/_tf_keras/keras/datasets/boston_housing/__init__.py b/keras/api/_tf_keras/keras/datasets/boston_housing/__init__.py new file mode 100644 index 000000000000..f5a179db9968 --- /dev/null +++ b/keras/api/_tf_keras/keras/datasets/boston_housing/__init__.py @@ -0,0 +1,7 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.src.datasets.boston_housing import load_data diff --git a/keras/api/_tf_keras/keras/datasets/california_housing/__init__.py b/keras/api/_tf_keras/keras/datasets/california_housing/__init__.py new file mode 100644 index 000000000000..52b6157dcf28 --- /dev/null +++ b/keras/api/_tf_keras/keras/datasets/california_housing/__init__.py @@ -0,0 +1,7 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.datasets.california_housing import load_data diff --git a/keras/api/_tf_keras/keras/datasets/cifar10/__init__.py b/keras/api/_tf_keras/keras/datasets/cifar10/__init__.py new file mode 100644 index 000000000000..68c72a91b495 --- /dev/null +++ b/keras/api/_tf_keras/keras/datasets/cifar10/__init__.py @@ -0,0 +1,7 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.datasets.cifar10 import load_data diff --git a/keras/api/_tf_keras/keras/datasets/cifar100/__init__.py b/keras/api/_tf_keras/keras/datasets/cifar100/__init__.py new file mode 100644 index 000000000000..e49e67faeecf --- /dev/null +++ b/keras/api/_tf_keras/keras/datasets/cifar100/__init__.py @@ -0,0 +1,7 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.datasets.cifar100 import load_data diff --git a/keras/api/_tf_keras/keras/datasets/fashion_mnist/__init__.py b/keras/api/_tf_keras/keras/datasets/fashion_mnist/__init__.py new file mode 100644 index 000000000000..33512169fc9f --- /dev/null +++ b/keras/api/_tf_keras/keras/datasets/fashion_mnist/__init__.py @@ -0,0 +1,7 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.datasets.fashion_mnist import load_data diff --git a/keras/api/_tf_keras/keras/datasets/imdb/__init__.py b/keras/api/_tf_keras/keras/datasets/imdb/__init__.py new file mode 100644 index 000000000000..6bcddbd11dbe --- /dev/null +++ b/keras/api/_tf_keras/keras/datasets/imdb/__init__.py @@ -0,0 +1,8 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.datasets.imdb import get_word_index +from keras.src.datasets.imdb import load_data diff --git a/keras/api/_tf_keras/keras/datasets/mnist/__init__.py b/keras/api/_tf_keras/keras/datasets/mnist/__init__.py new file mode 100644 index 000000000000..45568c463ba8 --- /dev/null +++ b/keras/api/_tf_keras/keras/datasets/mnist/__init__.py @@ -0,0 +1,7 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.datasets.mnist import load_data diff --git a/keras/api/_tf_keras/keras/datasets/reuters/__init__.py b/keras/api/_tf_keras/keras/datasets/reuters/__init__.py new file mode 100644 index 000000000000..cdc9b68cff93 --- /dev/null +++ b/keras/api/_tf_keras/keras/datasets/reuters/__init__.py @@ -0,0 +1,9 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.src.datasets.reuters import get_label_names +from keras.src.datasets.reuters import get_word_index +from keras.src.datasets.reuters import load_data diff --git a/keras/api/_tf_keras/keras/distribution/__init__.py b/keras/api/_tf_keras/keras/distribution/__init__.py new file mode 100644 index 000000000000..b56806af9fac --- /dev/null +++ b/keras/api/_tf_keras/keras/distribution/__init__.py @@ -0,0 +1,16 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.distribution.distribution_lib import DataParallel +from keras.src.distribution.distribution_lib import DeviceMesh +from keras.src.distribution.distribution_lib import LayoutMap +from keras.src.distribution.distribution_lib import ModelParallel +from keras.src.distribution.distribution_lib import TensorLayout +from keras.src.distribution.distribution_lib import distribute_tensor +from keras.src.distribution.distribution_lib import distribution +from keras.src.distribution.distribution_lib import initialize +from keras.src.distribution.distribution_lib import list_devices +from keras.src.distribution.distribution_lib import set_distribution diff --git a/keras/api/_tf_keras/keras/dtype_policies/__init__.py b/keras/api/_tf_keras/keras/dtype_policies/__init__.py new file mode 100644 index 000000000000..da8364263a22 --- /dev/null +++ b/keras/api/_tf_keras/keras/dtype_policies/__init__.py @@ -0,0 +1,10 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.dtype_policies.dtype_policy import DTypePolicy +from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy +from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy +from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy diff --git a/keras/api/_tf_keras/keras/export/__init__.py b/keras/api/_tf_keras/keras/export/__init__.py new file mode 100644 index 000000000000..68fa60293961 --- /dev/null +++ b/keras/api/_tf_keras/keras/export/__init__.py @@ -0,0 +1,7 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.export.export_lib import ExportArchive diff --git a/keras/api/_tf_keras/keras/initializers/__init__.py b/keras/api/_tf_keras/keras/initializers/__init__.py new file mode 100644 index 000000000000..5819d1b285eb --- /dev/null +++ b/keras/api/_tf_keras/keras/initializers/__init__.py @@ -0,0 +1,64 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.src.initializers import deserialize +from keras.src.initializers import get +from keras.src.initializers import serialize +from keras.src.initializers.constant_initializers import Constant +from keras.src.initializers.constant_initializers import Constant as constant +from keras.src.initializers.constant_initializers import Identity +from keras.src.initializers.constant_initializers import ( + Identity as IdentityInitializer, +) +from keras.src.initializers.constant_initializers import Identity as identity +from keras.src.initializers.constant_initializers import Ones +from keras.src.initializers.constant_initializers import Ones as ones +from keras.src.initializers.constant_initializers import Zeros +from keras.src.initializers.constant_initializers import Zeros as zeros +from keras.src.initializers.initializer import Initializer +from keras.src.initializers.random_initializers import GlorotNormal +from keras.src.initializers.random_initializers import ( + GlorotNormal as glorot_normal, +) +from keras.src.initializers.random_initializers import GlorotUniform +from keras.src.initializers.random_initializers import ( + GlorotUniform as glorot_uniform, +) +from keras.src.initializers.random_initializers import HeNormal +from keras.src.initializers.random_initializers import HeNormal as he_normal +from keras.src.initializers.random_initializers import HeUniform +from keras.src.initializers.random_initializers import HeUniform as he_uniform +from keras.src.initializers.random_initializers import LecunNormal +from keras.src.initializers.random_initializers import ( + LecunNormal as lecun_normal, +) +from keras.src.initializers.random_initializers import LecunUniform +from keras.src.initializers.random_initializers import ( + LecunUniform as lecun_uniform, +) +from keras.src.initializers.random_initializers import OrthogonalInitializer +from keras.src.initializers.random_initializers import ( + OrthogonalInitializer as Orthogonal, +) +from keras.src.initializers.random_initializers import ( + OrthogonalInitializer as orthogonal, +) +from keras.src.initializers.random_initializers import RandomNormal +from keras.src.initializers.random_initializers import ( + RandomNormal as random_normal, +) +from keras.src.initializers.random_initializers import RandomUniform +from keras.src.initializers.random_initializers import ( + RandomUniform as random_uniform, +) +from keras.src.initializers.random_initializers import TruncatedNormal +from keras.src.initializers.random_initializers import ( + TruncatedNormal as truncated_normal, +) +from keras.src.initializers.random_initializers import VarianceScaling +from keras.src.initializers.random_initializers import ( + VarianceScaling as variance_scaling, +) diff --git a/keras/api/_tf_keras/keras/layers/__init__.py b/keras/api/_tf_keras/keras/layers/__init__.py new file mode 100644 index 000000000000..a4e1bf9a6bbd --- /dev/null +++ b/keras/api/_tf_keras/keras/layers/__init__.py @@ -0,0 +1,195 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.src.export.export_lib import TFSMLayer +from keras.src.layers import deserialize +from keras.src.layers import serialize +from keras.src.layers.activations.activation import Activation +from keras.src.layers.activations.elu import ELU +from keras.src.layers.activations.leaky_relu import LeakyReLU +from keras.src.layers.activations.prelu import PReLU +from keras.src.layers.activations.relu import ReLU +from keras.src.layers.activations.softmax import Softmax +from keras.src.layers.attention.additive_attention import AdditiveAttention +from keras.src.layers.attention.attention import Attention +from keras.src.layers.attention.grouped_query_attention import ( + GroupedQueryAttention as GroupQueryAttention, +) +from keras.src.layers.attention.multi_head_attention import MultiHeadAttention +from keras.src.layers.convolutional.conv1d import Conv1D +from keras.src.layers.convolutional.conv1d import Conv1D as Convolution1D +from keras.src.layers.convolutional.conv1d_transpose import Conv1DTranspose +from keras.src.layers.convolutional.conv1d_transpose import ( + Conv1DTranspose as Convolution1DTranspose, +) +from keras.src.layers.convolutional.conv2d import Conv2D +from keras.src.layers.convolutional.conv2d import Conv2D as Convolution2D +from keras.src.layers.convolutional.conv2d_transpose import Conv2DTranspose +from keras.src.layers.convolutional.conv2d_transpose import ( + Conv2DTranspose as Convolution2DTranspose, +) +from keras.src.layers.convolutional.conv3d import Conv3D +from keras.src.layers.convolutional.conv3d import Conv3D as Convolution3D +from keras.src.layers.convolutional.conv3d_transpose import Conv3DTranspose +from keras.src.layers.convolutional.conv3d_transpose import ( + Conv3DTranspose as Convolution3DTranspose, +) +from keras.src.layers.convolutional.depthwise_conv1d import DepthwiseConv1D +from keras.src.layers.convolutional.depthwise_conv2d import DepthwiseConv2D +from keras.src.layers.convolutional.separable_conv1d import SeparableConv1D +from keras.src.layers.convolutional.separable_conv1d import ( + SeparableConv1D as SeparableConvolution1D, +) +from keras.src.layers.convolutional.separable_conv2d import SeparableConv2D +from keras.src.layers.convolutional.separable_conv2d import ( + SeparableConv2D as SeparableConvolution2D, +) +from keras.src.layers.core.dense import Dense +from keras.src.layers.core.einsum_dense import EinsumDense +from keras.src.layers.core.embedding import Embedding +from keras.src.layers.core.identity import Identity +from keras.src.layers.core.input_layer import Input +from keras.src.layers.core.input_layer import InputLayer +from keras.src.layers.core.lambda_layer import Lambda +from keras.src.layers.core.masking import Masking +from keras.src.layers.core.wrapper import Wrapper +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.layers.merging.add import Add +from keras.src.layers.merging.add import add +from keras.src.layers.merging.average import Average +from keras.src.layers.merging.average import average +from keras.src.layers.merging.concatenate import Concatenate +from keras.src.layers.merging.concatenate import concatenate +from keras.src.layers.merging.dot import Dot +from keras.src.layers.merging.dot import dot +from keras.src.layers.merging.maximum import Maximum +from keras.src.layers.merging.maximum import maximum +from keras.src.layers.merging.minimum import Minimum +from keras.src.layers.merging.minimum import minimum +from 
+from keras.src.layers.merging.multiply import multiply
+from keras.src.layers.merging.subtract import Subtract
+from keras.src.layers.merging.subtract import subtract
+from keras.src.layers.normalization.batch_normalization import (
+    BatchNormalization,
+)
+from keras.src.layers.normalization.group_normalization import (
+    GroupNormalization,
+)
+from keras.src.layers.normalization.layer_normalization import (
+    LayerNormalization,
+)
+from keras.src.layers.normalization.spectral_normalization import (
+    SpectralNormalization,
+)
+from keras.src.layers.normalization.unit_normalization import UnitNormalization
+from keras.src.layers.pooling.average_pooling1d import AveragePooling1D
+from keras.src.layers.pooling.average_pooling1d import (
+    AveragePooling1D as AvgPool1D,
+)
+from keras.src.layers.pooling.average_pooling2d import AveragePooling2D
+from keras.src.layers.pooling.average_pooling2d import (
+    AveragePooling2D as AvgPool2D,
+)
+from keras.src.layers.pooling.average_pooling3d import AveragePooling3D
+from keras.src.layers.pooling.average_pooling3d import (
+    AveragePooling3D as AvgPool3D,
+)
+from keras.src.layers.pooling.global_average_pooling1d import (
+    GlobalAveragePooling1D,
+)
+from keras.src.layers.pooling.global_average_pooling1d import (
+    GlobalAveragePooling1D as GlobalAvgPool1D,
+)
+from keras.src.layers.pooling.global_average_pooling2d import (
+    GlobalAveragePooling2D,
+)
+from keras.src.layers.pooling.global_average_pooling2d import (
+    GlobalAveragePooling2D as GlobalAvgPool2D,
+)
+from keras.src.layers.pooling.global_average_pooling3d import (
+    GlobalAveragePooling3D,
+)
+from keras.src.layers.pooling.global_average_pooling3d import (
+    GlobalAveragePooling3D as GlobalAvgPool3D,
+)
+from keras.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D
+from keras.src.layers.pooling.global_max_pooling1d import (
+    GlobalMaxPooling1D as GlobalMaxPool1D,
+)
+from keras.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D
+from keras.src.layers.pooling.global_max_pooling2d import (
+    GlobalMaxPooling2D as GlobalMaxPool2D,
+)
+from keras.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D
+from keras.src.layers.pooling.global_max_pooling3d import (
+    GlobalMaxPooling3D as GlobalMaxPool3D,
+)
+from keras.src.layers.pooling.max_pooling1d import MaxPooling1D
+from keras.src.layers.pooling.max_pooling1d import MaxPooling1D as MaxPool1D
+from keras.src.layers.pooling.max_pooling2d import MaxPooling2D
+from keras.src.layers.pooling.max_pooling2d import MaxPooling2D as MaxPool2D
+from keras.src.layers.pooling.max_pooling3d import MaxPooling3D
+from keras.src.layers.pooling.max_pooling3d import MaxPooling3D as MaxPool3D
+from keras.src.layers.preprocessing.audio_preprocessing import MelSpectrogram
+from keras.src.layers.preprocessing.category_encoding import CategoryEncoding
+from keras.src.layers.preprocessing.center_crop import CenterCrop
+from keras.src.layers.preprocessing.discretization import Discretization
+from keras.src.layers.preprocessing.hashed_crossing import HashedCrossing
+from keras.src.layers.preprocessing.hashing import Hashing
+from keras.src.layers.preprocessing.integer_lookup import IntegerLookup
+from keras.src.layers.preprocessing.normalization import Normalization
+from keras.src.layers.preprocessing.random_brightness import RandomBrightness
+from keras.src.layers.preprocessing.random_contrast import RandomContrast
+from keras.src.layers.preprocessing.random_crop import RandomCrop
+from keras.src.layers.preprocessing.random_flip import RandomFlip
+from keras.src.layers.preprocessing.random_rotation import RandomRotation
+from keras.src.layers.preprocessing.random_translation import RandomTranslation
+from keras.src.layers.preprocessing.random_zoom import RandomZoom
+from keras.src.layers.preprocessing.rescaling import Rescaling
+from keras.src.layers.preprocessing.resizing import Resizing
+from keras.src.layers.preprocessing.string_lookup import StringLookup
+from keras.src.layers.preprocessing.text_vectorization import TextVectorization
+from keras.src.layers.regularization.activity_regularization import (
+    ActivityRegularization,
+)
+from keras.src.layers.regularization.alpha_dropout import AlphaDropout
+from keras.src.layers.regularization.dropout import Dropout
+from keras.src.layers.regularization.gaussian_dropout import GaussianDropout
+from keras.src.layers.regularization.gaussian_noise import GaussianNoise
+from keras.src.layers.regularization.spatial_dropout import SpatialDropout1D
+from keras.src.layers.regularization.spatial_dropout import SpatialDropout2D
+from keras.src.layers.regularization.spatial_dropout import SpatialDropout3D
+from keras.src.layers.reshaping.cropping1d import Cropping1D
+from keras.src.layers.reshaping.cropping2d import Cropping2D
+from keras.src.layers.reshaping.cropping3d import Cropping3D
+from keras.src.layers.reshaping.flatten import Flatten
+from keras.src.layers.reshaping.permute import Permute
+from keras.src.layers.reshaping.repeat_vector import RepeatVector
+from keras.src.layers.reshaping.reshape import Reshape
+from keras.src.layers.reshaping.up_sampling1d import UpSampling1D
+from keras.src.layers.reshaping.up_sampling2d import UpSampling2D
+from keras.src.layers.reshaping.up_sampling3d import UpSampling3D
+from keras.src.layers.reshaping.zero_padding1d import ZeroPadding1D
+from keras.src.layers.reshaping.zero_padding2d import ZeroPadding2D
+from keras.src.layers.reshaping.zero_padding3d import ZeroPadding3D
+from keras.src.layers.rnn.bidirectional import Bidirectional
+from keras.src.layers.rnn.conv_lstm1d import ConvLSTM1D
+from keras.src.layers.rnn.conv_lstm2d import ConvLSTM2D
+from keras.src.layers.rnn.conv_lstm3d import ConvLSTM3D
+from keras.src.layers.rnn.gru import GRU
+from keras.src.layers.rnn.gru import GRUCell
+from keras.src.layers.rnn.lstm import LSTM
+from keras.src.layers.rnn.lstm import LSTMCell
+from keras.src.layers.rnn.rnn import RNN
+from keras.src.layers.rnn.simple_rnn import SimpleRNN
+from keras.src.layers.rnn.simple_rnn import SimpleRNNCell
+from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells
+from keras.src.layers.rnn.time_distributed import TimeDistributed
+from keras.src.utils.jax_layer import FlaxLayer
+from keras.src.utils.jax_layer import JaxLayer
+from keras.src.utils.torch_utils import TorchModuleWrapper
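Many layers in the list above are exported under two names via `import ... as` aliases, so both names resolve to the same class object. An illustrative check (not part of the patch):

    from keras import layers

    assert layers.MaxPool2D is layers.MaxPooling2D
    assert layers.Convolution2D is layers.Conv2D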
+""" + +from keras.api.legacy import saving diff --git a/keras/api/_tf_keras/keras/legacy/saving/__init__.py b/keras/api/_tf_keras/keras/legacy/saving/__init__.py new file mode 100644 index 000000000000..ac4d2d43dd9a --- /dev/null +++ b/keras/api/_tf_keras/keras/legacy/saving/__init__.py @@ -0,0 +1,8 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.legacy.saving.serialization import deserialize_keras_object +from keras.src.legacy.saving.serialization import serialize_keras_object diff --git a/keras/api/_tf_keras/keras/losses/__init__.py b/keras/api/_tf_keras/keras/losses/__init__.py new file mode 100644 index 000000000000..ecaadddf6b7e --- /dev/null +++ b/keras/api/_tf_keras/keras/losses/__init__.py @@ -0,0 +1,50 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.losses import deserialize +from keras.src.losses import get +from keras.src.losses import serialize +from keras.src.losses.loss import Loss +from keras.src.losses.losses import CTC +from keras.src.losses.losses import BinaryCrossentropy +from keras.src.losses.losses import BinaryFocalCrossentropy +from keras.src.losses.losses import CategoricalCrossentropy +from keras.src.losses.losses import CategoricalFocalCrossentropy +from keras.src.losses.losses import CategoricalHinge +from keras.src.losses.losses import CosineSimilarity +from keras.src.losses.losses import Dice +from keras.src.losses.losses import Hinge +from keras.src.losses.losses import Huber +from keras.src.losses.losses import KLDivergence +from keras.src.losses.losses import LogCosh +from keras.src.losses.losses import MeanAbsoluteError +from keras.src.losses.losses import MeanAbsolutePercentageError +from keras.src.losses.losses import MeanSquaredError +from keras.src.losses.losses import MeanSquaredLogarithmicError +from keras.src.losses.losses import Poisson +from keras.src.losses.losses import SparseCategoricalCrossentropy +from keras.src.losses.losses import SquaredHinge +from keras.src.losses.losses import Tversky +from keras.src.losses.losses import binary_crossentropy +from keras.src.losses.losses import binary_focal_crossentropy +from keras.src.losses.losses import categorical_crossentropy +from keras.src.losses.losses import categorical_focal_crossentropy +from keras.src.losses.losses import categorical_hinge +from keras.src.losses.losses import cosine_similarity +from keras.src.losses.losses import ctc +from keras.src.losses.losses import dice +from keras.src.losses.losses import hinge +from keras.src.losses.losses import huber +from keras.src.losses.losses import kl_divergence +from keras.src.losses.losses import log_cosh +from keras.src.losses.losses import mean_absolute_error +from keras.src.losses.losses import mean_absolute_percentage_error +from keras.src.losses.losses import mean_squared_error +from keras.src.losses.losses import mean_squared_logarithmic_error +from keras.src.losses.losses import poisson +from keras.src.losses.losses import sparse_categorical_crossentropy +from keras.src.losses.losses import squared_hinge +from keras.src.losses.losses import tversky diff --git a/keras/api/_tf_keras/keras/metrics/__init__.py b/keras/api/_tf_keras/keras/metrics/__init__.py new file mode 100644 index 000000000000..dc59b32a46c3 --- /dev/null +++ b/keras/api/_tf_keras/keras/metrics/__init__.py @@ -0,0 +1,76 @@ +"""DO NOT EDIT. + +This file was autogenerated. 
+since your modifications would be overwritten.
+"""
+
+from keras.src.losses.losses import binary_crossentropy
+from keras.src.losses.losses import binary_focal_crossentropy
+from keras.src.losses.losses import categorical_crossentropy
+from keras.src.losses.losses import categorical_focal_crossentropy
+from keras.src.losses.losses import categorical_hinge
+from keras.src.losses.losses import hinge
+from keras.src.losses.losses import huber
+from keras.src.losses.losses import kl_divergence
+from keras.src.losses.losses import log_cosh
+from keras.src.losses.losses import mean_absolute_error
+from keras.src.losses.losses import mean_absolute_percentage_error
+from keras.src.losses.losses import mean_squared_error
+from keras.src.losses.losses import mean_squared_logarithmic_error
+from keras.src.losses.losses import poisson
+from keras.src.losses.losses import sparse_categorical_crossentropy
+from keras.src.losses.losses import squared_hinge
+from keras.src.metrics import deserialize
+from keras.src.metrics import get
+from keras.src.metrics import serialize
+from keras.src.metrics.accuracy_metrics import Accuracy
+from keras.src.metrics.accuracy_metrics import BinaryAccuracy
+from keras.src.metrics.accuracy_metrics import CategoricalAccuracy
+from keras.src.metrics.accuracy_metrics import SparseCategoricalAccuracy
+from keras.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy
+from keras.src.metrics.accuracy_metrics import TopKCategoricalAccuracy
+from keras.src.metrics.accuracy_metrics import binary_accuracy
+from keras.src.metrics.accuracy_metrics import categorical_accuracy
+from keras.src.metrics.accuracy_metrics import sparse_categorical_accuracy
+from keras.src.metrics.accuracy_metrics import sparse_top_k_categorical_accuracy
+from keras.src.metrics.accuracy_metrics import top_k_categorical_accuracy
+from keras.src.metrics.confusion_metrics import AUC
+from keras.src.metrics.confusion_metrics import FalseNegatives
+from keras.src.metrics.confusion_metrics import FalsePositives
+from keras.src.metrics.confusion_metrics import Precision
+from keras.src.metrics.confusion_metrics import PrecisionAtRecall
+from keras.src.metrics.confusion_metrics import Recall
+from keras.src.metrics.confusion_metrics import RecallAtPrecision
+from keras.src.metrics.confusion_metrics import SensitivityAtSpecificity
+from keras.src.metrics.confusion_metrics import SpecificityAtSensitivity
+from keras.src.metrics.confusion_metrics import TrueNegatives
+from keras.src.metrics.confusion_metrics import TruePositives
+from keras.src.metrics.f_score_metrics import F1Score
+from keras.src.metrics.f_score_metrics import FBetaScore
+from keras.src.metrics.hinge_metrics import CategoricalHinge
+from keras.src.metrics.hinge_metrics import Hinge
+from keras.src.metrics.hinge_metrics import SquaredHinge
+from keras.src.metrics.iou_metrics import BinaryIoU
+from keras.src.metrics.iou_metrics import IoU
+from keras.src.metrics.iou_metrics import MeanIoU
+from keras.src.metrics.iou_metrics import OneHotIoU
+from keras.src.metrics.iou_metrics import OneHotMeanIoU
+from keras.src.metrics.metric import Metric
+from keras.src.metrics.probabilistic_metrics import BinaryCrossentropy
+from keras.src.metrics.probabilistic_metrics import CategoricalCrossentropy
+from keras.src.metrics.probabilistic_metrics import KLDivergence
+from keras.src.metrics.probabilistic_metrics import Poisson
+from keras.src.metrics.probabilistic_metrics import (
+    SparseCategoricalCrossentropy,
+)
+from keras.src.metrics.reduction_metrics import Mean
+from keras.src.metrics.reduction_metrics import MeanMetricWrapper
+from keras.src.metrics.reduction_metrics import Sum
+from keras.src.metrics.regression_metrics import CosineSimilarity
+from keras.src.metrics.regression_metrics import LogCoshError
+from keras.src.metrics.regression_metrics import MeanAbsoluteError
+from keras.src.metrics.regression_metrics import MeanAbsolutePercentageError
+from keras.src.metrics.regression_metrics import MeanSquaredError
+from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError
+from keras.src.metrics.regression_metrics import R2Score
+from keras.src.metrics.regression_metrics import RootMeanSquaredError
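In the `_tf_keras` metrics namespace above, the functional losses are re-exported alongside the metric classes, so a name like `mean_squared_error` resolves to the same callable whether it is used as a loss or as a metric. Illustrative check (not part of the patch):

    from keras.api._tf_keras.keras import losses, metrics

    assert metrics.mean_squared_error is losses.mean_squared_error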
+"""
+
+from keras.api.ops import image
+from keras.api.ops import linalg
+from keras.api.ops import nn
+from keras.api.ops import numpy
+from keras.src.ops.core import cast
+from keras.src.ops.core import cond
+from keras.src.ops.core import convert_to_numpy
+from keras.src.ops.core import convert_to_tensor
+from keras.src.ops.core import custom_gradient
+from keras.src.ops.core import fori_loop
+from keras.src.ops.core import is_tensor
+from keras.src.ops.core import scatter
+from keras.src.ops.core import scatter_update
+from keras.src.ops.core import shape
+from keras.src.ops.core import slice
+from keras.src.ops.core import slice_update
+from keras.src.ops.core import stop_gradient
+from keras.src.ops.core import unstack
+from keras.src.ops.core import vectorized_map
+from keras.src.ops.core import while_loop
+from keras.src.ops.linalg import cholesky
+from keras.src.ops.linalg import det
+from keras.src.ops.linalg import eig
+from keras.src.ops.linalg import inv
+from keras.src.ops.linalg import lu_factor
+from keras.src.ops.linalg import norm
+from keras.src.ops.linalg import qr
+from keras.src.ops.linalg import solve
+from keras.src.ops.linalg import solve_triangular
+from keras.src.ops.linalg import svd
+from keras.src.ops.math import erf
+from keras.src.ops.math import erfinv
+from keras.src.ops.math import extract_sequences
+from keras.src.ops.math import fft
+from keras.src.ops.math import fft2
+from keras.src.ops.math import in_top_k
+from keras.src.ops.math import irfft
+from keras.src.ops.math import istft
+from keras.src.ops.math import logsumexp
+from keras.src.ops.math import rfft
+from keras.src.ops.math import rsqrt
+from keras.src.ops.math import segment_max
+from keras.src.ops.math import segment_sum
+from keras.src.ops.math import stft
+from keras.src.ops.math import top_k
+from keras.src.ops.nn import average_pool
+from keras.src.ops.nn import batch_normalization
+from keras.src.ops.nn import binary_crossentropy
+from keras.src.ops.nn import categorical_crossentropy
+from keras.src.ops.nn import conv
+from keras.src.ops.nn import conv_transpose
+from keras.src.ops.nn import ctc_loss
+from keras.src.ops.nn import depthwise_conv
+from keras.src.ops.nn import elu
+from keras.src.ops.nn import gelu
+from keras.src.ops.nn import hard_sigmoid
+from keras.src.ops.nn import hard_silu
+from keras.src.ops.nn import hard_silu as hard_swish
+from keras.src.ops.nn import leaky_relu
+from keras.src.ops.nn import log_sigmoid
+from keras.src.ops.nn import log_softmax
+from keras.src.ops.nn import max_pool
+from keras.src.ops.nn import moments
+from keras.src.ops.nn import multi_hot
+from keras.src.ops.nn import normalize
+from keras.src.ops.nn import one_hot
+from keras.src.ops.nn import relu
+from keras.src.ops.nn import relu6
+from keras.src.ops.nn import selu
+from keras.src.ops.nn import separable_conv
+from keras.src.ops.nn import sigmoid
+from keras.src.ops.nn import silu
+from keras.src.ops.nn import silu as swish
+from keras.src.ops.nn import softmax
+from keras.src.ops.nn import softplus
+from keras.src.ops.nn import softsign
+from keras.src.ops.nn import sparse_categorical_crossentropy
+from keras.src.ops.numpy import abs
+from keras.src.ops.numpy import absolute
+from keras.src.ops.numpy import add
+from keras.src.ops.numpy import all
+from keras.src.ops.numpy import amax
+from keras.src.ops.numpy import amin
+from keras.src.ops.numpy import any
+from keras.src.ops.numpy import append
+from keras.src.ops.numpy import arange
+from keras.src.ops.numpy import arccos
+from keras.src.ops.numpy import arccosh
+from keras.src.ops.numpy import arcsin
+from keras.src.ops.numpy import arcsinh
+from keras.src.ops.numpy import arctan
+from keras.src.ops.numpy import arctan2
+from keras.src.ops.numpy import arctanh
+from keras.src.ops.numpy import argmax
+from keras.src.ops.numpy import argmin
+from keras.src.ops.numpy import argsort
+from keras.src.ops.numpy import array
+from keras.src.ops.numpy import average
+from keras.src.ops.numpy import bincount
+from keras.src.ops.numpy import broadcast_to
+from keras.src.ops.numpy import ceil
+from keras.src.ops.numpy import clip
+from keras.src.ops.numpy import concatenate
+from keras.src.ops.numpy import conj
+from keras.src.ops.numpy import conjugate
+from keras.src.ops.numpy import copy
+from keras.src.ops.numpy import correlate
+from keras.src.ops.numpy import cos
+from keras.src.ops.numpy import cosh
+from keras.src.ops.numpy import count_nonzero
+from keras.src.ops.numpy import cross
+from keras.src.ops.numpy import cumprod
+from keras.src.ops.numpy import cumsum
+from keras.src.ops.numpy import diag
+from keras.src.ops.numpy import diagonal
+from keras.src.ops.numpy import diff
+from keras.src.ops.numpy import digitize
+from keras.src.ops.numpy import divide
+from keras.src.ops.numpy import divide_no_nan
+from keras.src.ops.numpy import dot
+from keras.src.ops.numpy import einsum
+from keras.src.ops.numpy import empty
+from keras.src.ops.numpy import equal
+from keras.src.ops.numpy import exp
+from keras.src.ops.numpy import expand_dims
+from keras.src.ops.numpy import expm1
+from keras.src.ops.numpy import eye
+from keras.src.ops.numpy import flip
+from keras.src.ops.numpy import floor
+from keras.src.ops.numpy import floor_divide
+from keras.src.ops.numpy import full
+from keras.src.ops.numpy import full_like
+from keras.src.ops.numpy import get_item
+from keras.src.ops.numpy import greater
+from keras.src.ops.numpy import greater_equal
+from keras.src.ops.numpy import hstack
+from keras.src.ops.numpy import identity
+from keras.src.ops.numpy import imag
+from keras.src.ops.numpy import isclose
+from keras.src.ops.numpy import isfinite
+from keras.src.ops.numpy import isinf
+from keras.src.ops.numpy import isnan
+from keras.src.ops.numpy import less
+from keras.src.ops.numpy import less_equal
+from keras.src.ops.numpy import linspace
+from keras.src.ops.numpy import log
+from keras.src.ops.numpy import log1p
+from keras.src.ops.numpy import log2
+from keras.src.ops.numpy import log10
+from keras.src.ops.numpy import logaddexp
+from keras.src.ops.numpy import logical_and
+from keras.src.ops.numpy import logical_not
+from keras.src.ops.numpy import logical_or
+from keras.src.ops.numpy import logical_xor
+from keras.src.ops.numpy import logspace
+from keras.src.ops.numpy import matmul
+from keras.src.ops.numpy import max
+from keras.src.ops.numpy import maximum
+from keras.src.ops.numpy import mean
+from keras.src.ops.numpy import median
+from keras.src.ops.numpy import meshgrid
+from keras.src.ops.numpy import min
+from keras.src.ops.numpy import minimum
+from keras.src.ops.numpy import mod
+from keras.src.ops.numpy import moveaxis
+from keras.src.ops.numpy import multiply
+from keras.src.ops.numpy import nan_to_num
+from keras.src.ops.numpy import ndim
+from keras.src.ops.numpy import negative
+from keras.src.ops.numpy import nonzero
+from keras.src.ops.numpy import not_equal
+from keras.src.ops.numpy import ones
+from keras.src.ops.numpy import ones_like
+from keras.src.ops.numpy import outer
+from keras.src.ops.numpy import pad
+from keras.src.ops.numpy import power
+from keras.src.ops.numpy import prod
+from keras.src.ops.numpy import quantile
+from keras.src.ops.numpy import ravel
+from keras.src.ops.numpy import real
+from keras.src.ops.numpy import reciprocal
+from keras.src.ops.numpy import repeat
+from keras.src.ops.numpy import reshape
+from keras.src.ops.numpy import roll
+from keras.src.ops.numpy import round
+from keras.src.ops.numpy import sign
+from keras.src.ops.numpy import sin
+from keras.src.ops.numpy import sinh
+from keras.src.ops.numpy import size
+from keras.src.ops.numpy import sort
+from keras.src.ops.numpy import split
+from keras.src.ops.numpy import sqrt
+from keras.src.ops.numpy import square
+from keras.src.ops.numpy import squeeze
+from keras.src.ops.numpy import stack
+from keras.src.ops.numpy import std
+from keras.src.ops.numpy import subtract
+from keras.src.ops.numpy import sum
+from keras.src.ops.numpy import swapaxes
+from keras.src.ops.numpy import take
+from keras.src.ops.numpy import take_along_axis
+from keras.src.ops.numpy import tan
+from keras.src.ops.numpy import tanh
+from keras.src.ops.numpy import tensordot
+from keras.src.ops.numpy import tile
+from keras.src.ops.numpy import trace
+from keras.src.ops.numpy import transpose
+from keras.src.ops.numpy import tri
+from keras.src.ops.numpy import tril
+from keras.src.ops.numpy import triu
+from keras.src.ops.numpy import true_divide
+from keras.src.ops.numpy import var
+from keras.src.ops.numpy import vdot
+from keras.src.ops.numpy import vstack
+from keras.src.ops.numpy import where
+from keras.src.ops.numpy import zeros
+from keras.src.ops.numpy import zeros_like
diff --git a/keras/api/_tf_keras/keras/ops/image/__init__.py b/keras/api/_tf_keras/keras/ops/image/__init__.py
new file mode 100644
index 000000000000..e4c8464c2195
--- /dev/null
+++ b/keras/api/_tf_keras/keras/ops/image/__init__.py
@@ -0,0 +1,13 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.ops.image import affine_transform
+from keras.src.ops.image import crop_images
+from keras.src.ops.image import extract_patches
+from keras.src.ops.image import map_coordinates
+from keras.src.ops.image import pad_images
+from keras.src.ops.image import resize
+from keras.src.ops.image import rgb_to_grayscale
diff --git a/keras/api/_tf_keras/keras/ops/linalg/__init__.py b/keras/api/_tf_keras/keras/ops/linalg/__init__.py
new file mode 100644
index 000000000000..da392d6c2490
--- /dev/null
+++ b/keras/api/_tf_keras/keras/ops/linalg/__init__.py
@@ -0,0 +1,16 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.ops.linalg import cholesky
+from keras.src.ops.linalg import det
+from keras.src.ops.linalg import eig
+from keras.src.ops.linalg import inv
+from keras.src.ops.linalg import lu_factor
+from keras.src.ops.linalg import norm
+from keras.src.ops.linalg import qr
+from keras.src.ops.linalg import solve
+from keras.src.ops.linalg import solve_triangular
+from keras.src.ops.linalg import svd
diff --git a/keras/api/_tf_keras/keras/ops/nn/__init__.py b/keras/api/_tf_keras/keras/ops/nn/__init__.py
new file mode 100644
index 000000000000..9452ea18a766
--- /dev/null
+++ b/keras/api/_tf_keras/keras/ops/nn/__init__.py
@@ -0,0 +1,38 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.ops.nn import average_pool
+from keras.src.ops.nn import batch_normalization
+from keras.src.ops.nn import binary_crossentropy
+from keras.src.ops.nn import categorical_crossentropy
+from keras.src.ops.nn import conv
+from keras.src.ops.nn import conv_transpose
+from keras.src.ops.nn import ctc_loss
+from keras.src.ops.nn import depthwise_conv
+from keras.src.ops.nn import elu
+from keras.src.ops.nn import gelu
+from keras.src.ops.nn import hard_sigmoid
+from keras.src.ops.nn import hard_silu
+from keras.src.ops.nn import hard_silu as hard_swish
+from keras.src.ops.nn import leaky_relu
+from keras.src.ops.nn import log_sigmoid
+from keras.src.ops.nn import log_softmax
+from keras.src.ops.nn import max_pool
+from keras.src.ops.nn import moments
+from keras.src.ops.nn import multi_hot
+from keras.src.ops.nn import normalize
+from keras.src.ops.nn import one_hot
+from keras.src.ops.nn import relu
+from keras.src.ops.nn import relu6
+from keras.src.ops.nn import selu
+from keras.src.ops.nn import separable_conv
+from keras.src.ops.nn import sigmoid
+from keras.src.ops.nn import silu
+from keras.src.ops.nn import silu as swish
+from keras.src.ops.nn import softmax
+from keras.src.ops.nn import softplus
+from keras.src.ops.nn import softsign
+from keras.src.ops.nn import sparse_categorical_crossentropy
diff --git a/keras/api/_tf_keras/keras/ops/numpy/__init__.py b/keras/api/_tf_keras/keras/ops/numpy/__init__.py
new file mode 100644
index 000000000000..1d5434e40288
--- /dev/null
+++ b/keras/api/_tf_keras/keras/ops/numpy/__init__.py
@@ -0,0 +1,146 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.ops.numpy import abs
+from keras.src.ops.numpy import absolute
+from keras.src.ops.numpy import add
+from keras.src.ops.numpy import all
+from keras.src.ops.numpy import amax
+from keras.src.ops.numpy import amin
+from keras.src.ops.numpy import any
+from keras.src.ops.numpy import append
+from keras.src.ops.numpy import arange
+from keras.src.ops.numpy import arccos
+from keras.src.ops.numpy import arccosh
+from keras.src.ops.numpy import arcsin
+from keras.src.ops.numpy import arcsinh
+from keras.src.ops.numpy import arctan
+from keras.src.ops.numpy import arctan2
+from keras.src.ops.numpy import arctanh
+from keras.src.ops.numpy import argmax
+from keras.src.ops.numpy import argmin
+from keras.src.ops.numpy import argsort
+from keras.src.ops.numpy import array
+from keras.src.ops.numpy import average
+from keras.src.ops.numpy import bincount
+from keras.src.ops.numpy import broadcast_to
+from keras.src.ops.numpy import ceil
+from keras.src.ops.numpy import clip
+from keras.src.ops.numpy import concatenate
+from keras.src.ops.numpy import conj
+from keras.src.ops.numpy import conjugate
+from keras.src.ops.numpy import copy
+from keras.src.ops.numpy import correlate
+from keras.src.ops.numpy import cos
+from keras.src.ops.numpy import cosh
+from keras.src.ops.numpy import count_nonzero
+from keras.src.ops.numpy import cross
+from keras.src.ops.numpy import cumprod
+from keras.src.ops.numpy import cumsum
+from keras.src.ops.numpy import diag
+from keras.src.ops.numpy import diagonal
+from keras.src.ops.numpy import diff
+from keras.src.ops.numpy import digitize
+from keras.src.ops.numpy import divide
+from keras.src.ops.numpy import divide_no_nan
+from keras.src.ops.numpy import dot
+from keras.src.ops.numpy import einsum
+from keras.src.ops.numpy import empty
+from keras.src.ops.numpy import equal
+from keras.src.ops.numpy import exp
+from keras.src.ops.numpy import expand_dims
+from keras.src.ops.numpy import expm1
+from keras.src.ops.numpy import eye
+from keras.src.ops.numpy import flip
+from keras.src.ops.numpy import floor
+from keras.src.ops.numpy import floor_divide
+from keras.src.ops.numpy import full
+from keras.src.ops.numpy import full_like
+from keras.src.ops.numpy import get_item
+from keras.src.ops.numpy import greater
+from keras.src.ops.numpy import greater_equal
+from keras.src.ops.numpy import hstack
+from keras.src.ops.numpy import identity
+from keras.src.ops.numpy import imag
+from keras.src.ops.numpy import isclose
+from keras.src.ops.numpy import isfinite
+from keras.src.ops.numpy import isinf
+from keras.src.ops.numpy import isnan
+from keras.src.ops.numpy import less
+from keras.src.ops.numpy import less_equal
+from keras.src.ops.numpy import linspace
+from keras.src.ops.numpy import log
+from keras.src.ops.numpy import log1p
+from keras.src.ops.numpy import log2
+from keras.src.ops.numpy import log10
+from keras.src.ops.numpy import logaddexp
+from keras.src.ops.numpy import logical_and
+from keras.src.ops.numpy import logical_not
+from keras.src.ops.numpy import logical_or
+from keras.src.ops.numpy import logical_xor
+from keras.src.ops.numpy import logspace
+from keras.src.ops.numpy import matmul
+from keras.src.ops.numpy import max
+from keras.src.ops.numpy import maximum
+from keras.src.ops.numpy import mean
+from keras.src.ops.numpy import median
+from keras.src.ops.numpy import meshgrid
+from keras.src.ops.numpy import min
+from keras.src.ops.numpy import minimum
+from keras.src.ops.numpy import mod
+from keras.src.ops.numpy import moveaxis
+from keras.src.ops.numpy import multiply
+from keras.src.ops.numpy import nan_to_num
+from keras.src.ops.numpy import ndim
+from keras.src.ops.numpy import negative
+from keras.src.ops.numpy import nonzero
+from keras.src.ops.numpy import not_equal
+from keras.src.ops.numpy import ones
+from keras.src.ops.numpy import ones_like
+from keras.src.ops.numpy import outer
+from keras.src.ops.numpy import pad
+from keras.src.ops.numpy import power
+from keras.src.ops.numpy import prod
+from keras.src.ops.numpy import quantile
+from keras.src.ops.numpy import ravel
+from keras.src.ops.numpy import real
+from keras.src.ops.numpy import reciprocal
+from keras.src.ops.numpy import repeat
+from keras.src.ops.numpy import reshape
+from keras.src.ops.numpy import roll
+from keras.src.ops.numpy import round
+from keras.src.ops.numpy import sign
+from keras.src.ops.numpy import sin
+from keras.src.ops.numpy import sinh
+from keras.src.ops.numpy import size
+from keras.src.ops.numpy import sort
+from keras.src.ops.numpy import split
+from keras.src.ops.numpy import sqrt
+from keras.src.ops.numpy import square
+from keras.src.ops.numpy import squeeze
+from keras.src.ops.numpy import stack
+from keras.src.ops.numpy import std
+from keras.src.ops.numpy import subtract
+from keras.src.ops.numpy import sum
+from keras.src.ops.numpy import swapaxes
+from keras.src.ops.numpy import take
+from keras.src.ops.numpy import take_along_axis
+from keras.src.ops.numpy import tan
+from keras.src.ops.numpy import tanh
+from keras.src.ops.numpy import tensordot
+from keras.src.ops.numpy import tile
+from keras.src.ops.numpy import trace
+from keras.src.ops.numpy import transpose
+from keras.src.ops.numpy import tri
+from keras.src.ops.numpy import tril
+from keras.src.ops.numpy import triu
+from keras.src.ops.numpy import true_divide
+from keras.src.ops.numpy import var
+from keras.src.ops.numpy import vdot
+from keras.src.ops.numpy import vstack
+from keras.src.ops.numpy import where
+from keras.src.ops.numpy import zeros
+from keras.src.ops.numpy import zeros_like
diff --git a/keras/api/_tf_keras/keras/optimizers/__init__.py b/keras/api/_tf_keras/keras/optimizers/__init__.py
new file mode 100644
index 000000000000..5dab6705b58d
--- /dev/null
+++ b/keras/api/_tf_keras/keras/optimizers/__init__.py
@@ -0,0 +1,24 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.api.optimizers import legacy
+from keras.api.optimizers import schedules
+from keras.src.optimizers import deserialize
+from keras.src.optimizers import get
+from keras.src.optimizers import serialize
+from keras.src.optimizers.adadelta import Adadelta
+from keras.src.optimizers.adafactor import Adafactor
+from keras.src.optimizers.adagrad import Adagrad
+from keras.src.optimizers.adam import Adam
+from keras.src.optimizers.adamax import Adamax
+from keras.src.optimizers.adamw import AdamW
+from keras.src.optimizers.ftrl import Ftrl
+from keras.src.optimizers.lion import Lion
+from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
+from keras.src.optimizers.nadam import Nadam
+from keras.src.optimizers.optimizer import Optimizer
+from keras.src.optimizers.rmsprop import RMSprop
+from keras.src.optimizers.sgd import SGD
diff --git a/keras/api/_tf_keras/keras/optimizers/legacy/__init__.py b/keras/api/_tf_keras/keras/optimizers/legacy/__init__.py
new file mode 100644
index 000000000000..bff1a0313630
--- /dev/null
+++ b/keras/api/_tf_keras/keras/optimizers/legacy/__init__.py
@@ -0,0 +1,12 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.optimizers import LegacyOptimizerWarning as Adagrad
+from keras.src.optimizers import LegacyOptimizerWarning as Adam
+from keras.src.optimizers import LegacyOptimizerWarning as Ftrl
+from keras.src.optimizers import LegacyOptimizerWarning as Optimizer
+from keras.src.optimizers import LegacyOptimizerWarning as RMSprop
+from keras.src.optimizers import LegacyOptimizerWarning as SGD
diff --git a/keras/api/_tf_keras/keras/optimizers/schedules/__init__.py b/keras/api/_tf_keras/keras/optimizers/schedules/__init__.py
new file mode 100644
index 000000000000..6178626258ed
--- /dev/null
+++ b/keras/api/_tf_keras/keras/optimizers/schedules/__init__.py
@@ -0,0 +1,27 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecay
+from keras.src.optimizers.schedules.learning_rate_schedule import (
+    CosineDecayRestarts,
+)
+from keras.src.optimizers.schedules.learning_rate_schedule import (
+    ExponentialDecay,
+)
+from keras.src.optimizers.schedules.learning_rate_schedule import (
+    InverseTimeDecay,
+)
+from keras.src.optimizers.schedules.learning_rate_schedule import (
+    LearningRateSchedule,
+)
+from keras.src.optimizers.schedules.learning_rate_schedule import (
+    PiecewiseConstantDecay,
+)
+from keras.src.optimizers.schedules.learning_rate_schedule import (
+    PolynomialDecay,
+)
+from keras.src.optimizers.schedules.learning_rate_schedule import deserialize
+from keras.src.optimizers.schedules.learning_rate_schedule import serialize
diff --git a/keras/api/_tf_keras/keras/preprocessing/__init__.py b/keras/api/_tf_keras/keras/preprocessing/__init__.py
new file mode 100644
index 000000000000..c9ed7fd664c2
--- /dev/null
+++ b/keras/api/_tf_keras/keras/preprocessing/__init__.py
@@ -0,0 +1,13 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.api.preprocessing import image
+from keras.api.preprocessing import sequence
+from keras.src.utils.image_dataset_utils import image_dataset_from_directory
+from keras.src.utils.text_dataset_utils import text_dataset_from_directory
+from keras.src.utils.timeseries_dataset_utils import (
+    timeseries_dataset_from_array,
+)
diff --git a/keras/api/_tf_keras/keras/preprocessing/image/__init__.py b/keras/api/_tf_keras/keras/preprocessing/image/__init__.py
new file mode 100644
index 000000000000..f68afe8789d5
--- /dev/null
+++ b/keras/api/_tf_keras/keras/preprocessing/image/__init__.py
@@ -0,0 +1,11 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.utils.image_utils import array_to_img
+from keras.src.utils.image_utils import img_to_array
+from keras.src.utils.image_utils import load_img
+from keras.src.utils.image_utils import save_img
+from keras.src.utils.image_utils import smart_resize
diff --git a/keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py b/keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py
new file mode 100644
index 000000000000..188e01af9c48
--- /dev/null
+++ b/keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py
@@ -0,0 +1,7 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.utils.sequence_utils import pad_sequences
diff --git a/keras/api/_tf_keras/keras/quantizers/__init__.py b/keras/api/_tf_keras/keras/quantizers/__init__.py
new file mode 100644
index 000000000000..d8a209bbb623
--- /dev/null
+++ b/keras/api/_tf_keras/keras/quantizers/__init__.py
@@ -0,0 +1,15 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.quantizers import deserialize
+from keras.src.quantizers import get
+from keras.src.quantizers import serialize
+from keras.src.quantizers.quantizers import AbsMaxQuantizer
+from keras.src.quantizers.quantizers import Quantizer
+from keras.src.quantizers.quantizers import abs_max_quantize
+from keras.src.quantizers.quantizers import compute_float8_amax_history
+from keras.src.quantizers.quantizers import compute_float8_scale
+from keras.src.quantizers.quantizers import quantize_and_dequantize
diff --git a/keras/api/_tf_keras/keras/random/__init__.py b/keras/api/_tf_keras/keras/random/__init__.py
new file mode 100644
index 000000000000..faf9c67f3fc4
--- /dev/null
+++ b/keras/api/_tf_keras/keras/random/__init__.py
@@ -0,0 +1,17 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.random.random import beta
+from keras.src.random.random import binomial
+from keras.src.random.random import categorical
+from keras.src.random.random import dropout
+from keras.src.random.random import gamma
+from keras.src.random.random import normal
+from keras.src.random.random import randint
+from keras.src.random.random import shuffle
+from keras.src.random.random import truncated_normal
+from keras.src.random.random import uniform
+from keras.src.random.seed_generator import SeedGenerator
diff --git a/keras/api/_tf_keras/keras/regularizers/__init__.py b/keras/api/_tf_keras/keras/regularizers/__init__.py
new file mode 100644
index 000000000000..93b51eaa51bd
--- /dev/null
+++ b/keras/api/_tf_keras/keras/regularizers/__init__.py
@@ -0,0 +1,20 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.regularizers import deserialize
+from keras.src.regularizers import get
+from keras.src.regularizers import serialize
+from keras.src.regularizers.regularizers import L1
+from keras.src.regularizers.regularizers import L1 as l1
+from keras.src.regularizers.regularizers import L1L2
+from keras.src.regularizers.regularizers import L1L2 as l1_l2
+from keras.src.regularizers.regularizers import L2
+from keras.src.regularizers.regularizers import L2 as l2
+from keras.src.regularizers.regularizers import OrthogonalRegularizer
+from keras.src.regularizers.regularizers import (
+    OrthogonalRegularizer as orthogonal_regularizer,
+)
+from keras.src.regularizers.regularizers import Regularizer
diff --git a/keras/api/_tf_keras/keras/saving/__init__.py b/keras/api/_tf_keras/keras/saving/__init__.py
new file mode 100644
index 000000000000..2f772922f8d1
--- /dev/null
+++ b/keras/api/_tf_keras/keras/saving/__init__.py
@@ -0,0 +1,20 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.saving.object_registration import CustomObjectScope
+from keras.src.saving.object_registration import (
+    CustomObjectScope as custom_object_scope,
+)
+from keras.src.saving.object_registration import get_custom_objects
+from keras.src.saving.object_registration import get_registered_name
+from keras.src.saving.object_registration import get_registered_object
+from keras.src.saving.object_registration import register_keras_serializable
+from keras.src.saving.saving_api import load_model
+from keras.src.saving.saving_api import load_weights
+from keras.src.saving.saving_api import save_model
+from keras.src.saving.saving_api import save_weights
+from keras.src.saving.serialization_lib import deserialize_keras_object
+from keras.src.saving.serialization_lib import serialize_keras_object
diff --git a/keras/api/_tf_keras/keras/tree/__init__.py b/keras/api/_tf_keras/keras/tree/__init__.py
new file mode 100644
index 000000000000..388d19a0ec26
--- /dev/null
+++ b/keras/api/_tf_keras/keras/tree/__init__.py
@@ -0,0 +1,15 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.tree.tree_api import assert_same_structure
+from keras.src.tree.tree_api import flatten
+from keras.src.tree.tree_api import is_nested
+from keras.src.tree.tree_api import lists_to_tuples
+from keras.src.tree.tree_api import map_shape_structure
+from keras.src.tree.tree_api import map_structure
+from keras.src.tree.tree_api import map_structure_up_to
+from keras.src.tree.tree_api import pack_sequence_as
+from keras.src.tree.tree_api import traverse
diff --git a/keras/api/_tf_keras/keras/utils/__init__.py b/keras/api/_tf_keras/keras/utils/__init__.py
new file mode 100644
index 000000000000..aab787cc930f
--- /dev/null
+++ b/keras/api/_tf_keras/keras/utils/__init__.py
@@ -0,0 +1,54 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.api.utils import legacy
+from keras.src.backend.common.global_state import clear_session
+from keras.src.backend.common.keras_tensor import is_keras_tensor
+from keras.src.backend.common.variables import standardize_dtype
+from keras.src.layers.preprocessing.feature_space import FeatureSpace
+from keras.src.ops.operation_utils import get_source_inputs
+from keras.src.saving.object_registration import CustomObjectScope
+from keras.src.saving.object_registration import (
+    CustomObjectScope as custom_object_scope,
+)
+from keras.src.saving.object_registration import get_custom_objects
+from keras.src.saving.object_registration import get_registered_name
+from keras.src.saving.object_registration import get_registered_object
+from keras.src.saving.object_registration import register_keras_serializable
+from keras.src.saving.serialization_lib import deserialize_keras_object
+from keras.src.saving.serialization_lib import serialize_keras_object
+from keras.src.trainers.data_adapters.data_adapter_utils import (
+    pack_x_y_sample_weight,
+)
+from keras.src.trainers.data_adapters.data_adapter_utils import (
+    unpack_x_y_sample_weight,
+)
+from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
+from keras.src.trainers.data_adapters.py_dataset_adapter import (
+    PyDataset as Sequence,
+)
+from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
+from keras.src.utils.dataset_utils import split_dataset
+from keras.src.utils.file_utils import get_file
+from keras.src.utils.image_dataset_utils import image_dataset_from_directory
+from keras.src.utils.image_utils import array_to_img
+from keras.src.utils.image_utils import img_to_array
+from keras.src.utils.image_utils import load_img
+from keras.src.utils.image_utils import save_img
+from keras.src.utils.io_utils import disable_interactive_logging
+from keras.src.utils.io_utils import enable_interactive_logging
+from keras.src.utils.io_utils import is_interactive_logging_enabled
+from keras.src.utils.model_visualization import model_to_dot
+from keras.src.utils.model_visualization import plot_model
+from keras.src.utils.numerical_utils import normalize
+from keras.src.utils.numerical_utils import to_categorical
+from keras.src.utils.progbar import Progbar
+from keras.src.utils.rng_utils import set_random_seed
+from keras.src.utils.sequence_utils import pad_sequences
+from keras.src.utils.text_dataset_utils import text_dataset_from_directory
+from keras.src.utils.timeseries_dataset_utils import (
+    timeseries_dataset_from_array,
+)
diff --git a/keras/api/_tf_keras/keras/utils/legacy/__init__.py b/keras/api/_tf_keras/keras/utils/legacy/__init__.py
new file mode 100644
index 000000000000..ac4d2d43dd9a
--- /dev/null
+++ b/keras/api/_tf_keras/keras/utils/legacy/__init__.py
@@ -0,0 +1,8 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.legacy.saving.serialization import deserialize_keras_object
+from keras.src.legacy.saving.serialization import serialize_keras_object
diff --git a/keras/api/activations/__init__.py b/keras/api/activations/__init__.py
new file mode 100644
index 000000000000..17624b6ba5dc
--- /dev/null
+++ b/keras/api/activations/__init__.py
@@ -0,0 +1,29 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.activations import deserialize
+from keras.src.activations import get
+from keras.src.activations import serialize
+from keras.src.activations.activations import elu
+from keras.src.activations.activations import exponential
+from keras.src.activations.activations import gelu
+from keras.src.activations.activations import hard_sigmoid
+from keras.src.activations.activations import hard_silu
+from keras.src.activations.activations import hard_silu as hard_swish
+from keras.src.activations.activations import leaky_relu
+from keras.src.activations.activations import linear
+from keras.src.activations.activations import log_softmax
+from keras.src.activations.activations import mish
+from keras.src.activations.activations import relu
+from keras.src.activations.activations import relu6
+from keras.src.activations.activations import selu
+from keras.src.activations.activations import sigmoid
+from keras.src.activations.activations import silu
+from keras.src.activations.activations import silu as swish
+from keras.src.activations.activations import softmax
+from keras.src.activations.activations import softplus
+from keras.src.activations.activations import softsign
+from keras.src.activations.activations import tanh
diff --git a/keras/api/applications/__init__.py b/keras/api/applications/__init__.py
new file mode 100644
index 000000000000..183b3ca66142
--- /dev/null
+++ b/keras/api/applications/__init__.py
@@ -0,0 +1,63 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.api.applications import convnext
+from keras.api.applications import densenet
+from keras.api.applications import efficientnet
+from keras.api.applications import efficientnet_v2
+from keras.api.applications import imagenet_utils
+from keras.api.applications import inception_resnet_v2
+from keras.api.applications import inception_v3
+from keras.api.applications import mobilenet
+from keras.api.applications import mobilenet_v2
+from keras.api.applications import mobilenet_v3
+from keras.api.applications import nasnet
+from keras.api.applications import resnet
+from keras.api.applications import resnet50
+from keras.api.applications import resnet_v2
+from keras.api.applications import vgg16
+from keras.api.applications import vgg19
+from keras.api.applications import xception
+from keras.src.applications.convnext import ConvNeXtBase
+from keras.src.applications.convnext import ConvNeXtLarge
+from keras.src.applications.convnext import ConvNeXtSmall
+from keras.src.applications.convnext import ConvNeXtTiny
+from keras.src.applications.convnext import ConvNeXtXLarge
+from keras.src.applications.densenet import DenseNet121
+from keras.src.applications.densenet import DenseNet169
+from keras.src.applications.densenet import DenseNet201
+from keras.src.applications.efficientnet import EfficientNetB0
+from keras.src.applications.efficientnet import EfficientNetB1
+from keras.src.applications.efficientnet import EfficientNetB2
+from keras.src.applications.efficientnet import EfficientNetB3
+from keras.src.applications.efficientnet import EfficientNetB4
+from keras.src.applications.efficientnet import EfficientNetB5
+from keras.src.applications.efficientnet import EfficientNetB6
+from keras.src.applications.efficientnet import EfficientNetB7
+from keras.src.applications.efficientnet_v2 import EfficientNetV2B0
+from keras.src.applications.efficientnet_v2 import EfficientNetV2B1
+from keras.src.applications.efficientnet_v2 import EfficientNetV2B2
+from keras.src.applications.efficientnet_v2 import EfficientNetV2B3
+from keras.src.applications.efficientnet_v2 import EfficientNetV2L
+from keras.src.applications.efficientnet_v2 import EfficientNetV2M
+from keras.src.applications.efficientnet_v2 import EfficientNetV2S
+from keras.src.applications.inception_resnet_v2 import InceptionResNetV2
+from keras.src.applications.inception_v3 import InceptionV3
+from keras.src.applications.mobilenet import MobileNet
+from keras.src.applications.mobilenet_v2 import MobileNetV2
+from keras.src.applications.mobilenet_v3 import MobileNetV3Large
+from keras.src.applications.mobilenet_v3 import MobileNetV3Small
+from keras.src.applications.nasnet import NASNetLarge
+from keras.src.applications.nasnet import NASNetMobile
+from keras.src.applications.resnet import ResNet50
+from keras.src.applications.resnet import ResNet101
+from keras.src.applications.resnet import ResNet152
+from keras.src.applications.resnet_v2 import ResNet50V2
+from keras.src.applications.resnet_v2 import ResNet101V2
+from keras.src.applications.resnet_v2 import ResNet152V2
+from keras.src.applications.vgg16 import VGG16
+from keras.src.applications.vgg19 import VGG19
+from keras.src.applications.xception import Xception
diff --git a/keras/api/applications/convnext/__init__.py b/keras/api/applications/convnext/__init__.py
new file mode 100644
index 000000000000..b4eaaa3834b1
--- /dev/null
+++ b/keras/api/applications/convnext/__init__.py
@@ -0,0 +1,13 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.convnext import ConvNeXtBase
+from keras.src.applications.convnext import ConvNeXtLarge
+from keras.src.applications.convnext import ConvNeXtSmall
+from keras.src.applications.convnext import ConvNeXtTiny
+from keras.src.applications.convnext import ConvNeXtXLarge
+from keras.src.applications.convnext import decode_predictions
+from keras.src.applications.convnext import preprocess_input
diff --git a/keras/api/applications/densenet/__init__.py b/keras/api/applications/densenet/__init__.py
new file mode 100644
index 000000000000..0173a2c3ed9d
--- /dev/null
+++ b/keras/api/applications/densenet/__init__.py
@@ -0,0 +1,11 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.densenet import DenseNet121
+from keras.src.applications.densenet import DenseNet169
+from keras.src.applications.densenet import DenseNet201
+from keras.src.applications.densenet import decode_predictions
+from keras.src.applications.densenet import preprocess_input
diff --git a/keras/api/applications/efficientnet/__init__.py b/keras/api/applications/efficientnet/__init__.py
new file mode 100644
index 000000000000..c4af0199bea6
--- /dev/null
+++ b/keras/api/applications/efficientnet/__init__.py
@@ -0,0 +1,16 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.efficientnet import EfficientNetB0
+from keras.src.applications.efficientnet import EfficientNetB1
+from keras.src.applications.efficientnet import EfficientNetB2
+from keras.src.applications.efficientnet import EfficientNetB3
+from keras.src.applications.efficientnet import EfficientNetB4
+from keras.src.applications.efficientnet import EfficientNetB5
+from keras.src.applications.efficientnet import EfficientNetB6
+from keras.src.applications.efficientnet import EfficientNetB7
+from keras.src.applications.efficientnet import decode_predictions
+from keras.src.applications.efficientnet import preprocess_input
diff --git a/keras/api/applications/efficientnet_v2/__init__.py b/keras/api/applications/efficientnet_v2/__init__.py
new file mode 100644
index 000000000000..ee85821a1d74
--- /dev/null
+++ b/keras/api/applications/efficientnet_v2/__init__.py
@@ -0,0 +1,15 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.efficientnet_v2 import EfficientNetV2B0
+from keras.src.applications.efficientnet_v2 import EfficientNetV2B1
+from keras.src.applications.efficientnet_v2 import EfficientNetV2B2
+from keras.src.applications.efficientnet_v2 import EfficientNetV2B3
+from keras.src.applications.efficientnet_v2 import EfficientNetV2L
+from keras.src.applications.efficientnet_v2 import EfficientNetV2M
+from keras.src.applications.efficientnet_v2 import EfficientNetV2S
+from keras.src.applications.efficientnet_v2 import decode_predictions
+from keras.src.applications.efficientnet_v2 import preprocess_input
diff --git a/keras/api/applications/imagenet_utils/__init__.py b/keras/api/applications/imagenet_utils/__init__.py
new file mode 100644
index 000000000000..81a923e55b9e
--- /dev/null
+++ b/keras/api/applications/imagenet_utils/__init__.py
@@ -0,0 +1,8 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.imagenet_utils import decode_predictions
+from keras.src.applications.imagenet_utils import preprocess_input
diff --git a/keras/api/applications/inception_resnet_v2/__init__.py b/keras/api/applications/inception_resnet_v2/__init__.py
new file mode 100644
index 000000000000..b710829bd377
--- /dev/null
+++ b/keras/api/applications/inception_resnet_v2/__init__.py
@@ -0,0 +1,9 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.inception_resnet_v2 import InceptionResNetV2
+from keras.src.applications.inception_resnet_v2 import decode_predictions
+from keras.src.applications.inception_resnet_v2 import preprocess_input
diff --git a/keras/api/applications/inception_v3/__init__.py b/keras/api/applications/inception_v3/__init__.py
new file mode 100644
index 000000000000..8a2379ca1b13
--- /dev/null
+++ b/keras/api/applications/inception_v3/__init__.py
@@ -0,0 +1,9 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.inception_v3 import InceptionV3
+from keras.src.applications.inception_v3 import decode_predictions
+from keras.src.applications.inception_v3 import preprocess_input
diff --git a/keras/api/applications/mobilenet/__init__.py b/keras/api/applications/mobilenet/__init__.py
new file mode 100644
index 000000000000..0194cdfd0ac6
--- /dev/null
+++ b/keras/api/applications/mobilenet/__init__.py
@@ -0,0 +1,9 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.mobilenet import MobileNet
+from keras.src.applications.mobilenet import decode_predictions
+from keras.src.applications.mobilenet import preprocess_input
diff --git a/keras/api/applications/mobilenet_v2/__init__.py b/keras/api/applications/mobilenet_v2/__init__.py
new file mode 100644
index 000000000000..ceb0625e3519
--- /dev/null
+++ b/keras/api/applications/mobilenet_v2/__init__.py
@@ -0,0 +1,9 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.mobilenet_v2 import MobileNetV2
+from keras.src.applications.mobilenet_v2 import decode_predictions
+from keras.src.applications.mobilenet_v2 import preprocess_input
diff --git a/keras/api/applications/mobilenet_v3/__init__.py b/keras/api/applications/mobilenet_v3/__init__.py
new file mode 100644
index 000000000000..c27e6669f0f1
--- /dev/null
+++ b/keras/api/applications/mobilenet_v3/__init__.py
@@ -0,0 +1,8 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.mobilenet_v3 import decode_predictions
+from keras.src.applications.mobilenet_v3 import preprocess_input
diff --git a/keras/api/applications/nasnet/__init__.py b/keras/api/applications/nasnet/__init__.py
new file mode 100644
index 000000000000..874de61f00ab
--- /dev/null
+++ b/keras/api/applications/nasnet/__init__.py
@@ -0,0 +1,10 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.nasnet import NASNetLarge
+from keras.src.applications.nasnet import NASNetMobile
+from keras.src.applications.nasnet import decode_predictions
+from keras.src.applications.nasnet import preprocess_input
diff --git a/keras/api/applications/resnet/__init__.py b/keras/api/applications/resnet/__init__.py
new file mode 100644
index 000000000000..5aaa3ee0e5e2
--- /dev/null
+++ b/keras/api/applications/resnet/__init__.py
@@ -0,0 +1,11 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.resnet import ResNet50
+from keras.src.applications.resnet import ResNet101
+from keras.src.applications.resnet import ResNet152
+from keras.src.applications.resnet import decode_predictions
+from keras.src.applications.resnet import preprocess_input
diff --git a/keras/api/applications/resnet50/__init__.py b/keras/api/applications/resnet50/__init__.py
new file mode 100644
index 000000000000..ac08b5322682
--- /dev/null
+++ b/keras/api/applications/resnet50/__init__.py
@@ -0,0 +1,9 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.resnet import ResNet50
+from keras.src.applications.resnet import decode_predictions
+from keras.src.applications.resnet import preprocess_input
diff --git a/keras/api/applications/resnet_v2/__init__.py b/keras/api/applications/resnet_v2/__init__.py
new file mode 100644
index 000000000000..273dd3019d85
--- /dev/null
+++ b/keras/api/applications/resnet_v2/__init__.py
@@ -0,0 +1,11 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.resnet_v2 import ResNet50V2
+from keras.src.applications.resnet_v2 import ResNet101V2
+from keras.src.applications.resnet_v2 import ResNet152V2
+from keras.src.applications.resnet_v2 import decode_predictions
+from keras.src.applications.resnet_v2 import preprocess_input
diff --git a/keras/api/applications/vgg16/__init__.py b/keras/api/applications/vgg16/__init__.py
new file mode 100644
index 000000000000..5a31084a4676
--- /dev/null
+++ b/keras/api/applications/vgg16/__init__.py
@@ -0,0 +1,9 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.vgg16 import VGG16
+from keras.src.applications.vgg16 import decode_predictions
+from keras.src.applications.vgg16 import preprocess_input
diff --git a/keras/api/applications/vgg19/__init__.py b/keras/api/applications/vgg19/__init__.py
new file mode 100644
index 000000000000..14355514d7cf
--- /dev/null
+++ b/keras/api/applications/vgg19/__init__.py
@@ -0,0 +1,9 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.vgg19 import VGG19
+from keras.src.applications.vgg19 import decode_predictions
+from keras.src.applications.vgg19 import preprocess_input
diff --git a/keras/api/applications/xception/__init__.py b/keras/api/applications/xception/__init__.py
new file mode 100644
index 000000000000..c200dc66df35
--- /dev/null
+++ b/keras/api/applications/xception/__init__.py
@@ -0,0 +1,9 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.applications.xception import Xception
+from keras.src.applications.xception import decode_predictions
+from keras.src.applications.xception import preprocess_input
diff --git a/keras/api/backend/__init__.py b/keras/api/backend/__init__.py
new file mode 100644
index 000000000000..840bde6e4ded
--- /dev/null
+++ b/keras/api/backend/__init__.py
@@ -0,0 +1,20 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.backend.common.dtypes import result_type
+from keras.src.backend.common.global_state import clear_session
+from keras.src.backend.common.keras_tensor import is_keras_tensor
+from keras.src.backend.common.variables import is_float_dtype
+from keras.src.backend.common.variables import is_int_dtype
+from keras.src.backend.common.variables import standardize_dtype
+from keras.src.backend.config import backend
+from keras.src.backend.config import epsilon
+from keras.src.backend.config import floatx
+from keras.src.backend.config import image_data_format
+from keras.src.backend.config import set_epsilon
+from keras.src.backend.config import set_floatx
+from keras.src.backend.config import set_image_data_format
+from keras.src.utils.naming import get_uid
diff --git a/keras/api/callbacks/__init__.py b/keras/api/callbacks/__init__.py
new file mode 100644
index 000000000000..42ba958b9bb3
--- /dev/null
+++ b/keras/api/callbacks/__init__.py
@@ -0,0 +1,21 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.callbacks.backup_and_restore import BackupAndRestore
+from keras.src.callbacks.callback import Callback
+from keras.src.callbacks.callback_list import CallbackList
+from keras.src.callbacks.csv_logger import CSVLogger
+from keras.src.callbacks.early_stopping import EarlyStopping
+from keras.src.callbacks.history import History
+from keras.src.callbacks.lambda_callback import LambdaCallback
+from keras.src.callbacks.learning_rate_scheduler import LearningRateScheduler
+from keras.src.callbacks.model_checkpoint import ModelCheckpoint
+from keras.src.callbacks.progbar_logger import ProgbarLogger
+from keras.src.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau
+from keras.src.callbacks.remote_monitor import RemoteMonitor
+from keras.src.callbacks.swap_ema_weights import SwapEMAWeights
+from keras.src.callbacks.tensorboard import TensorBoard
+from keras.src.callbacks.terminate_on_nan import TerminateOnNaN
diff --git a/keras/api/config/__init__.py b/keras/api/config/__init__.py
new file mode 100644
index 000000000000..13e334cb7c06
--- /dev/null
+++ b/keras/api/config/__init__.py
@@ -0,0 +1,23 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.backend.config import backend
+from keras.src.backend.config import epsilon
+from keras.src.backend.config import floatx
+from keras.src.backend.config import image_data_format
+from keras.src.backend.config import set_epsilon
+from keras.src.backend.config import set_floatx
+from keras.src.backend.config import set_image_data_format
+from keras.src.dtype_policies.dtype_policy import dtype_policy
+from keras.src.dtype_policies.dtype_policy import set_dtype_policy
+from keras.src.saving.serialization_lib import enable_unsafe_deserialization
+from keras.src.utils.backend_utils import set_backend
+from keras.src.utils.io_utils import disable_interactive_logging
+from keras.src.utils.io_utils import enable_interactive_logging
+from keras.src.utils.io_utils import is_interactive_logging_enabled
+from keras.src.utils.traceback_utils import disable_traceback_filtering
+from keras.src.utils.traceback_utils import enable_traceback_filtering
+from keras.src.utils.traceback_utils import is_traceback_filtering_enabled
diff --git a/keras/api/constraints/__init__.py b/keras/api/constraints/__init__.py
new file mode 100644
index 000000000000..6372e149d3ba
--- /dev/null
+++ b/keras/api/constraints/__init__.py
@@ -0,0 +1,18 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.constraints import deserialize
+from keras.src.constraints import get
+from keras.src.constraints import serialize
+from keras.src.constraints.constraints import Constraint
+from keras.src.constraints.constraints import MaxNorm
+from keras.src.constraints.constraints import MaxNorm as max_norm
+from keras.src.constraints.constraints import MinMaxNorm
+from keras.src.constraints.constraints import MinMaxNorm as min_max_norm
+from keras.src.constraints.constraints import NonNeg
+from keras.src.constraints.constraints import NonNeg as non_neg
+from keras.src.constraints.constraints import UnitNorm
+from keras.src.constraints.constraints import UnitNorm as unit_norm
diff --git a/keras/api/datasets/__init__.py b/keras/api/datasets/__init__.py
new file mode 100644
index 000000000000..cf153fefcd4d
--- /dev/null
+++ b/keras/api/datasets/__init__.py
@@ -0,0 +1,14 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.api.datasets import boston_housing
+from keras.api.datasets import california_housing
+from keras.api.datasets import cifar10
+from keras.api.datasets import cifar100
+from keras.api.datasets import fashion_mnist
+from keras.api.datasets import imdb
+from keras.api.datasets import mnist
+from keras.api.datasets import reuters
diff --git a/keras/api/datasets/boston_housing/__init__.py b/keras/api/datasets/boston_housing/__init__.py
new file mode 100644
index 000000000000..f5a179db9968
--- /dev/null
+++ b/keras/api/datasets/boston_housing/__init__.py
@@ -0,0 +1,7 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.datasets.boston_housing import load_data
diff --git a/keras/api/datasets/california_housing/__init__.py b/keras/api/datasets/california_housing/__init__.py
new file mode 100644
index 000000000000..52b6157dcf28
--- /dev/null
+++ b/keras/api/datasets/california_housing/__init__.py
@@ -0,0 +1,7 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.datasets.california_housing import load_data
diff --git a/keras/api/datasets/cifar10/__init__.py b/keras/api/datasets/cifar10/__init__.py
new file mode 100644
index 000000000000..68c72a91b495
--- /dev/null
+++ b/keras/api/datasets/cifar10/__init__.py
@@ -0,0 +1,7 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.datasets.cifar10 import load_data
diff --git a/keras/api/datasets/cifar100/__init__.py b/keras/api/datasets/cifar100/__init__.py
new file mode 100644
index 000000000000..e49e67faeecf
--- /dev/null
+++ b/keras/api/datasets/cifar100/__init__.py
@@ -0,0 +1,7 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.datasets.cifar100 import load_data
diff --git a/keras/api/datasets/fashion_mnist/__init__.py b/keras/api/datasets/fashion_mnist/__init__.py
new file mode 100644
index 000000000000..33512169fc9f
--- /dev/null
+++ b/keras/api/datasets/fashion_mnist/__init__.py
@@ -0,0 +1,7 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.datasets.fashion_mnist import load_data
diff --git a/keras/api/datasets/imdb/__init__.py b/keras/api/datasets/imdb/__init__.py
new file mode 100644
index 000000000000..6bcddbd11dbe
--- /dev/null
+++ b/keras/api/datasets/imdb/__init__.py
@@ -0,0 +1,8 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.datasets.imdb import get_word_index
+from keras.src.datasets.imdb import load_data
diff --git a/keras/api/datasets/mnist/__init__.py b/keras/api/datasets/mnist/__init__.py
new file mode 100644
index 000000000000..45568c463ba8
--- /dev/null
+++ b/keras/api/datasets/mnist/__init__.py
@@ -0,0 +1,7 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.datasets.mnist import load_data
diff --git a/keras/api/datasets/reuters/__init__.py b/keras/api/datasets/reuters/__init__.py
new file mode 100644
index 000000000000..cdc9b68cff93
--- /dev/null
+++ b/keras/api/datasets/reuters/__init__.py
@@ -0,0 +1,9 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.datasets.reuters import get_label_names
+from keras.src.datasets.reuters import get_word_index
+from keras.src.datasets.reuters import load_data
diff --git a/keras/api/distribution/__init__.py b/keras/api/distribution/__init__.py
new file mode 100644
index 000000000000..b56806af9fac
--- /dev/null
+++ b/keras/api/distribution/__init__.py
@@ -0,0 +1,16 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.distribution.distribution_lib import DataParallel
+from keras.src.distribution.distribution_lib import DeviceMesh
+from keras.src.distribution.distribution_lib import LayoutMap
+from keras.src.distribution.distribution_lib import ModelParallel
+from keras.src.distribution.distribution_lib import TensorLayout
+from keras.src.distribution.distribution_lib import distribute_tensor
+from keras.src.distribution.distribution_lib import distribution
+from keras.src.distribution.distribution_lib import initialize
+from keras.src.distribution.distribution_lib import list_devices
+from keras.src.distribution.distribution_lib import set_distribution
diff --git a/keras/api/dtype_policies/__init__.py b/keras/api/dtype_policies/__init__.py
new file mode 100644
index 000000000000..da8364263a22
--- /dev/null
+++ b/keras/api/dtype_policies/__init__.py
@@ -0,0 +1,10 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.dtype_policies.dtype_policy import DTypePolicy
+from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
+from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
+from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
diff --git a/keras/api/export/__init__.py b/keras/api/export/__init__.py
new file mode 100644
index 000000000000..68fa60293961
--- /dev/null
+++ b/keras/api/export/__init__.py
@@ -0,0 +1,7 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.export.export_lib import ExportArchive
diff --git a/keras/api/initializers/__init__.py b/keras/api/initializers/__init__.py
new file mode 100644
index 000000000000..5819d1b285eb
--- /dev/null
+++ b/keras/api/initializers/__init__.py
@@ -0,0 +1,64 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+""" + +from keras.src.initializers import deserialize +from keras.src.initializers import get +from keras.src.initializers import serialize +from keras.src.initializers.constant_initializers import Constant +from keras.src.initializers.constant_initializers import Constant as constant +from keras.src.initializers.constant_initializers import Identity +from keras.src.initializers.constant_initializers import ( + Identity as IdentityInitializer, +) +from keras.src.initializers.constant_initializers import Identity as identity +from keras.src.initializers.constant_initializers import Ones +from keras.src.initializers.constant_initializers import Ones as ones +from keras.src.initializers.constant_initializers import Zeros +from keras.src.initializers.constant_initializers import Zeros as zeros +from keras.src.initializers.initializer import Initializer +from keras.src.initializers.random_initializers import GlorotNormal +from keras.src.initializers.random_initializers import ( + GlorotNormal as glorot_normal, +) +from keras.src.initializers.random_initializers import GlorotUniform +from keras.src.initializers.random_initializers import ( + GlorotUniform as glorot_uniform, +) +from keras.src.initializers.random_initializers import HeNormal +from keras.src.initializers.random_initializers import HeNormal as he_normal +from keras.src.initializers.random_initializers import HeUniform +from keras.src.initializers.random_initializers import HeUniform as he_uniform +from keras.src.initializers.random_initializers import LecunNormal +from keras.src.initializers.random_initializers import ( + LecunNormal as lecun_normal, +) +from keras.src.initializers.random_initializers import LecunUniform +from keras.src.initializers.random_initializers import ( + LecunUniform as lecun_uniform, +) +from keras.src.initializers.random_initializers import OrthogonalInitializer +from keras.src.initializers.random_initializers import ( + OrthogonalInitializer as Orthogonal, +) +from keras.src.initializers.random_initializers import ( + OrthogonalInitializer as orthogonal, +) +from keras.src.initializers.random_initializers import RandomNormal +from keras.src.initializers.random_initializers import ( + RandomNormal as random_normal, +) +from keras.src.initializers.random_initializers import RandomUniform +from keras.src.initializers.random_initializers import ( + RandomUniform as random_uniform, +) +from keras.src.initializers.random_initializers import TruncatedNormal +from keras.src.initializers.random_initializers import ( + TruncatedNormal as truncated_normal, +) +from keras.src.initializers.random_initializers import VarianceScaling +from keras.src.initializers.random_initializers import ( + VarianceScaling as variance_scaling, +) diff --git a/keras/api/layers/__init__.py b/keras/api/layers/__init__.py new file mode 100644 index 000000000000..a4e1bf9a6bbd --- /dev/null +++ b/keras/api/layers/__init__.py @@ -0,0 +1,195 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.src.export.export_lib import TFSMLayer +from keras.src.layers import deserialize +from keras.src.layers import serialize +from keras.src.layers.activations.activation import Activation +from keras.src.layers.activations.elu import ELU +from keras.src.layers.activations.leaky_relu import LeakyReLU +from keras.src.layers.activations.prelu import PReLU +from keras.src.layers.activations.relu import ReLU +from keras.src.layers.activations.softmax import Softmax +from keras.src.layers.attention.additive_attention import AdditiveAttention +from keras.src.layers.attention.attention import Attention +from keras.src.layers.attention.grouped_query_attention import ( + GroupedQueryAttention as GroupQueryAttention, +) +from keras.src.layers.attention.multi_head_attention import MultiHeadAttention +from keras.src.layers.convolutional.conv1d import Conv1D +from keras.src.layers.convolutional.conv1d import Conv1D as Convolution1D +from keras.src.layers.convolutional.conv1d_transpose import Conv1DTranspose +from keras.src.layers.convolutional.conv1d_transpose import ( + Conv1DTranspose as Convolution1DTranspose, +) +from keras.src.layers.convolutional.conv2d import Conv2D +from keras.src.layers.convolutional.conv2d import Conv2D as Convolution2D +from keras.src.layers.convolutional.conv2d_transpose import Conv2DTranspose +from keras.src.layers.convolutional.conv2d_transpose import ( + Conv2DTranspose as Convolution2DTranspose, +) +from keras.src.layers.convolutional.conv3d import Conv3D +from keras.src.layers.convolutional.conv3d import Conv3D as Convolution3D +from keras.src.layers.convolutional.conv3d_transpose import Conv3DTranspose +from keras.src.layers.convolutional.conv3d_transpose import ( + Conv3DTranspose as Convolution3DTranspose, +) +from keras.src.layers.convolutional.depthwise_conv1d import DepthwiseConv1D +from keras.src.layers.convolutional.depthwise_conv2d import DepthwiseConv2D +from keras.src.layers.convolutional.separable_conv1d import SeparableConv1D +from keras.src.layers.convolutional.separable_conv1d import ( + SeparableConv1D as SeparableConvolution1D, +) +from keras.src.layers.convolutional.separable_conv2d import SeparableConv2D +from keras.src.layers.convolutional.separable_conv2d import ( + SeparableConv2D as SeparableConvolution2D, +) +from keras.src.layers.core.dense import Dense +from keras.src.layers.core.einsum_dense import EinsumDense +from keras.src.layers.core.embedding import Embedding +from keras.src.layers.core.identity import Identity +from keras.src.layers.core.input_layer import Input +from keras.src.layers.core.input_layer import InputLayer +from keras.src.layers.core.lambda_layer import Lambda +from keras.src.layers.core.masking import Masking +from keras.src.layers.core.wrapper import Wrapper +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.layers.merging.add import Add +from keras.src.layers.merging.add import add +from keras.src.layers.merging.average import Average +from keras.src.layers.merging.average import average +from keras.src.layers.merging.concatenate import Concatenate +from keras.src.layers.merging.concatenate import concatenate +from keras.src.layers.merging.dot import Dot +from keras.src.layers.merging.dot import dot +from keras.src.layers.merging.maximum import Maximum +from keras.src.layers.merging.maximum import maximum +from keras.src.layers.merging.minimum import Minimum +from keras.src.layers.merging.minimum import minimum +from 
keras.src.layers.merging.multiply import Multiply +from keras.src.layers.merging.multiply import multiply +from keras.src.layers.merging.subtract import Subtract +from keras.src.layers.merging.subtract import subtract +from keras.src.layers.normalization.batch_normalization import ( + BatchNormalization, +) +from keras.src.layers.normalization.group_normalization import ( + GroupNormalization, +) +from keras.src.layers.normalization.layer_normalization import ( + LayerNormalization, +) +from keras.src.layers.normalization.spectral_normalization import ( + SpectralNormalization, +) +from keras.src.layers.normalization.unit_normalization import UnitNormalization +from keras.src.layers.pooling.average_pooling1d import AveragePooling1D +from keras.src.layers.pooling.average_pooling1d import ( + AveragePooling1D as AvgPool1D, +) +from keras.src.layers.pooling.average_pooling2d import AveragePooling2D +from keras.src.layers.pooling.average_pooling2d import ( + AveragePooling2D as AvgPool2D, +) +from keras.src.layers.pooling.average_pooling3d import AveragePooling3D +from keras.src.layers.pooling.average_pooling3d import ( + AveragePooling3D as AvgPool3D, +) +from keras.src.layers.pooling.global_average_pooling1d import ( + GlobalAveragePooling1D, +) +from keras.src.layers.pooling.global_average_pooling1d import ( + GlobalAveragePooling1D as GlobalAvgPool1D, +) +from keras.src.layers.pooling.global_average_pooling2d import ( + GlobalAveragePooling2D, +) +from keras.src.layers.pooling.global_average_pooling2d import ( + GlobalAveragePooling2D as GlobalAvgPool2D, +) +from keras.src.layers.pooling.global_average_pooling3d import ( + GlobalAveragePooling3D, +) +from keras.src.layers.pooling.global_average_pooling3d import ( + GlobalAveragePooling3D as GlobalAvgPool3D, +) +from keras.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D +from keras.src.layers.pooling.global_max_pooling1d import ( + GlobalMaxPooling1D as GlobalMaxPool1D, +) +from keras.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D +from keras.src.layers.pooling.global_max_pooling2d import ( + GlobalMaxPooling2D as GlobalMaxPool2D, +) +from keras.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D +from keras.src.layers.pooling.global_max_pooling3d import ( + GlobalMaxPooling3D as GlobalMaxPool3D, +) +from keras.src.layers.pooling.max_pooling1d import MaxPooling1D +from keras.src.layers.pooling.max_pooling1d import MaxPooling1D as MaxPool1D +from keras.src.layers.pooling.max_pooling2d import MaxPooling2D +from keras.src.layers.pooling.max_pooling2d import MaxPooling2D as MaxPool2D +from keras.src.layers.pooling.max_pooling3d import MaxPooling3D +from keras.src.layers.pooling.max_pooling3d import MaxPooling3D as MaxPool3D +from keras.src.layers.preprocessing.audio_preprocessing import MelSpectrogram +from keras.src.layers.preprocessing.category_encoding import CategoryEncoding +from keras.src.layers.preprocessing.center_crop import CenterCrop +from keras.src.layers.preprocessing.discretization import Discretization +from keras.src.layers.preprocessing.hashed_crossing import HashedCrossing +from keras.src.layers.preprocessing.hashing import Hashing +from keras.src.layers.preprocessing.integer_lookup import IntegerLookup +from keras.src.layers.preprocessing.normalization import Normalization +from keras.src.layers.preprocessing.random_brightness import RandomBrightness +from keras.src.layers.preprocessing.random_contrast import RandomContrast +from keras.src.layers.preprocessing.random_crop 
import RandomCrop +from keras.src.layers.preprocessing.random_flip import RandomFlip +from keras.src.layers.preprocessing.random_rotation import RandomRotation +from keras.src.layers.preprocessing.random_translation import RandomTranslation +from keras.src.layers.preprocessing.random_zoom import RandomZoom +from keras.src.layers.preprocessing.rescaling import Rescaling +from keras.src.layers.preprocessing.resizing import Resizing +from keras.src.layers.preprocessing.string_lookup import StringLookup +from keras.src.layers.preprocessing.text_vectorization import TextVectorization +from keras.src.layers.regularization.activity_regularization import ( + ActivityRegularization, +) +from keras.src.layers.regularization.alpha_dropout import AlphaDropout +from keras.src.layers.regularization.dropout import Dropout +from keras.src.layers.regularization.gaussian_dropout import GaussianDropout +from keras.src.layers.regularization.gaussian_noise import GaussianNoise +from keras.src.layers.regularization.spatial_dropout import SpatialDropout1D +from keras.src.layers.regularization.spatial_dropout import SpatialDropout2D +from keras.src.layers.regularization.spatial_dropout import SpatialDropout3D +from keras.src.layers.reshaping.cropping1d import Cropping1D +from keras.src.layers.reshaping.cropping2d import Cropping2D +from keras.src.layers.reshaping.cropping3d import Cropping3D +from keras.src.layers.reshaping.flatten import Flatten +from keras.src.layers.reshaping.permute import Permute +from keras.src.layers.reshaping.repeat_vector import RepeatVector +from keras.src.layers.reshaping.reshape import Reshape +from keras.src.layers.reshaping.up_sampling1d import UpSampling1D +from keras.src.layers.reshaping.up_sampling2d import UpSampling2D +from keras.src.layers.reshaping.up_sampling3d import UpSampling3D +from keras.src.layers.reshaping.zero_padding1d import ZeroPadding1D +from keras.src.layers.reshaping.zero_padding2d import ZeroPadding2D +from keras.src.layers.reshaping.zero_padding3d import ZeroPadding3D +from keras.src.layers.rnn.bidirectional import Bidirectional +from keras.src.layers.rnn.conv_lstm1d import ConvLSTM1D +from keras.src.layers.rnn.conv_lstm2d import ConvLSTM2D +from keras.src.layers.rnn.conv_lstm3d import ConvLSTM3D +from keras.src.layers.rnn.gru import GRU +from keras.src.layers.rnn.gru import GRUCell +from keras.src.layers.rnn.lstm import LSTM +from keras.src.layers.rnn.lstm import LSTMCell +from keras.src.layers.rnn.rnn import RNN +from keras.src.layers.rnn.simple_rnn import SimpleRNN +from keras.src.layers.rnn.simple_rnn import SimpleRNNCell +from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells +from keras.src.layers.rnn.time_distributed import TimeDistributed +from keras.src.utils.jax_layer import FlaxLayer +from keras.src.utils.jax_layer import JaxLayer +from keras.src.utils.torch_utils import TorchModuleWrapper diff --git a/keras/api/legacy/__init__.py b/keras/api/legacy/__init__.py new file mode 100644 index 000000000000..96347e2c32bf --- /dev/null +++ b/keras/api/legacy/__init__.py @@ -0,0 +1,7 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.api.legacy import saving diff --git a/keras/api/legacy/saving/__init__.py b/keras/api/legacy/saving/__init__.py new file mode 100644 index 000000000000..ac4d2d43dd9a --- /dev/null +++ b/keras/api/legacy/saving/__init__.py @@ -0,0 +1,8 @@ +"""DO NOT EDIT. + +This file was autogenerated. 
Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.legacy.saving.serialization import deserialize_keras_object +from keras.src.legacy.saving.serialization import serialize_keras_object diff --git a/keras/api/losses/__init__.py b/keras/api/losses/__init__.py new file mode 100644 index 000000000000..ecaadddf6b7e --- /dev/null +++ b/keras/api/losses/__init__.py @@ -0,0 +1,50 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.losses import deserialize +from keras.src.losses import get +from keras.src.losses import serialize +from keras.src.losses.loss import Loss +from keras.src.losses.losses import CTC +from keras.src.losses.losses import BinaryCrossentropy +from keras.src.losses.losses import BinaryFocalCrossentropy +from keras.src.losses.losses import CategoricalCrossentropy +from keras.src.losses.losses import CategoricalFocalCrossentropy +from keras.src.losses.losses import CategoricalHinge +from keras.src.losses.losses import CosineSimilarity +from keras.src.losses.losses import Dice +from keras.src.losses.losses import Hinge +from keras.src.losses.losses import Huber +from keras.src.losses.losses import KLDivergence +from keras.src.losses.losses import LogCosh +from keras.src.losses.losses import MeanAbsoluteError +from keras.src.losses.losses import MeanAbsolutePercentageError +from keras.src.losses.losses import MeanSquaredError +from keras.src.losses.losses import MeanSquaredLogarithmicError +from keras.src.losses.losses import Poisson +from keras.src.losses.losses import SparseCategoricalCrossentropy +from keras.src.losses.losses import SquaredHinge +from keras.src.losses.losses import Tversky +from keras.src.losses.losses import binary_crossentropy +from keras.src.losses.losses import binary_focal_crossentropy +from keras.src.losses.losses import categorical_crossentropy +from keras.src.losses.losses import categorical_focal_crossentropy +from keras.src.losses.losses import categorical_hinge +from keras.src.losses.losses import cosine_similarity +from keras.src.losses.losses import ctc +from keras.src.losses.losses import dice +from keras.src.losses.losses import hinge +from keras.src.losses.losses import huber +from keras.src.losses.losses import kl_divergence +from keras.src.losses.losses import log_cosh +from keras.src.losses.losses import mean_absolute_error +from keras.src.losses.losses import mean_absolute_percentage_error +from keras.src.losses.losses import mean_squared_error +from keras.src.losses.losses import mean_squared_logarithmic_error +from keras.src.losses.losses import poisson +from keras.src.losses.losses import sparse_categorical_crossentropy +from keras.src.losses.losses import squared_hinge +from keras.src.losses.losses import tversky diff --git a/keras/api/metrics/__init__.py b/keras/api/metrics/__init__.py new file mode 100644 index 000000000000..dc59b32a46c3 --- /dev/null +++ b/keras/api/metrics/__init__.py @@ -0,0 +1,76 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.src.losses.losses import binary_crossentropy +from keras.src.losses.losses import binary_focal_crossentropy +from keras.src.losses.losses import categorical_crossentropy +from keras.src.losses.losses import categorical_focal_crossentropy +from keras.src.losses.losses import categorical_hinge +from keras.src.losses.losses import hinge +from keras.src.losses.losses import huber +from keras.src.losses.losses import kl_divergence +from keras.src.losses.losses import log_cosh +from keras.src.losses.losses import mean_absolute_error +from keras.src.losses.losses import mean_absolute_percentage_error +from keras.src.losses.losses import mean_squared_error +from keras.src.losses.losses import mean_squared_logarithmic_error +from keras.src.losses.losses import poisson +from keras.src.losses.losses import sparse_categorical_crossentropy +from keras.src.losses.losses import squared_hinge +from keras.src.metrics import deserialize +from keras.src.metrics import get +from keras.src.metrics import serialize +from keras.src.metrics.accuracy_metrics import Accuracy +from keras.src.metrics.accuracy_metrics import BinaryAccuracy +from keras.src.metrics.accuracy_metrics import CategoricalAccuracy +from keras.src.metrics.accuracy_metrics import SparseCategoricalAccuracy +from keras.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy +from keras.src.metrics.accuracy_metrics import TopKCategoricalAccuracy +from keras.src.metrics.accuracy_metrics import binary_accuracy +from keras.src.metrics.accuracy_metrics import categorical_accuracy +from keras.src.metrics.accuracy_metrics import sparse_categorical_accuracy +from keras.src.metrics.accuracy_metrics import sparse_top_k_categorical_accuracy +from keras.src.metrics.accuracy_metrics import top_k_categorical_accuracy +from keras.src.metrics.confusion_metrics import AUC +from keras.src.metrics.confusion_metrics import FalseNegatives +from keras.src.metrics.confusion_metrics import FalsePositives +from keras.src.metrics.confusion_metrics import Precision +from keras.src.metrics.confusion_metrics import PrecisionAtRecall +from keras.src.metrics.confusion_metrics import Recall +from keras.src.metrics.confusion_metrics import RecallAtPrecision +from keras.src.metrics.confusion_metrics import SensitivityAtSpecificity +from keras.src.metrics.confusion_metrics import SpecificityAtSensitivity +from keras.src.metrics.confusion_metrics import TrueNegatives +from keras.src.metrics.confusion_metrics import TruePositives +from keras.src.metrics.f_score_metrics import F1Score +from keras.src.metrics.f_score_metrics import FBetaScore +from keras.src.metrics.hinge_metrics import CategoricalHinge +from keras.src.metrics.hinge_metrics import Hinge +from keras.src.metrics.hinge_metrics import SquaredHinge +from keras.src.metrics.iou_metrics import BinaryIoU +from keras.src.metrics.iou_metrics import IoU +from keras.src.metrics.iou_metrics import MeanIoU +from keras.src.metrics.iou_metrics import OneHotIoU +from keras.src.metrics.iou_metrics import OneHotMeanIoU +from keras.src.metrics.metric import Metric +from keras.src.metrics.probabilistic_metrics import BinaryCrossentropy +from keras.src.metrics.probabilistic_metrics import CategoricalCrossentropy +from keras.src.metrics.probabilistic_metrics import KLDivergence +from keras.src.metrics.probabilistic_metrics import Poisson +from keras.src.metrics.probabilistic_metrics import ( + SparseCategoricalCrossentropy, +) +from keras.src.metrics.reduction_metrics import Mean +from 
keras.src.metrics.reduction_metrics import MeanMetricWrapper +from keras.src.metrics.reduction_metrics import Sum +from keras.src.metrics.regression_metrics import CosineSimilarity +from keras.src.metrics.regression_metrics import LogCoshError +from keras.src.metrics.regression_metrics import MeanAbsoluteError +from keras.src.metrics.regression_metrics import MeanAbsolutePercentageError +from keras.src.metrics.regression_metrics import MeanSquaredError +from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError +from keras.src.metrics.regression_metrics import R2Score +from keras.src.metrics.regression_metrics import RootMeanSquaredError diff --git a/keras/api/mixed_precision/__init__.py b/keras/api/mixed_precision/__init__.py new file mode 100644 index 000000000000..85a421651d16 --- /dev/null +++ b/keras/api/mixed_precision/__init__.py @@ -0,0 +1,15 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.dtype_policies.dtype_policy import DTypePolicy +from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy +from keras.src.dtype_policies.dtype_policy import dtype_policy +from keras.src.dtype_policies.dtype_policy import dtype_policy as global_policy +from keras.src.dtype_policies.dtype_policy import set_dtype_policy +from keras.src.dtype_policies.dtype_policy import ( + set_dtype_policy as set_global_policy, +) +from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer diff --git a/keras/api/models/__init__.py b/keras/api/models/__init__.py new file mode 100644 index 000000000000..48760da64791 --- /dev/null +++ b/keras/api/models/__init__.py @@ -0,0 +1,12 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.models.cloning import clone_model +from keras.src.models.model import Model +from keras.src.models.model import model_from_json +from keras.src.models.sequential import Sequential +from keras.src.saving.saving_api import load_model +from keras.src.saving.saving_api import save_model diff --git a/keras/api/ops/__init__.py b/keras/api/ops/__init__.py new file mode 100644 index 000000000000..1253650e9bc0 --- /dev/null +++ b/keras/api/ops/__init__.py @@ -0,0 +1,223 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.api.ops import image +from keras.api.ops import linalg +from keras.api.ops import nn +from keras.api.ops import numpy +from keras.src.ops.core import cast +from keras.src.ops.core import cond +from keras.src.ops.core import convert_to_numpy +from keras.src.ops.core import convert_to_tensor +from keras.src.ops.core import custom_gradient +from keras.src.ops.core import fori_loop +from keras.src.ops.core import is_tensor +from keras.src.ops.core import scatter +from keras.src.ops.core import scatter_update +from keras.src.ops.core import shape +from keras.src.ops.core import slice +from keras.src.ops.core import slice_update +from keras.src.ops.core import stop_gradient +from keras.src.ops.core import unstack +from keras.src.ops.core import vectorized_map +from keras.src.ops.core import while_loop +from keras.src.ops.linalg import cholesky +from keras.src.ops.linalg import det +from keras.src.ops.linalg import eig +from keras.src.ops.linalg import inv +from keras.src.ops.linalg import lu_factor +from keras.src.ops.linalg import norm +from keras.src.ops.linalg import qr +from keras.src.ops.linalg import solve +from keras.src.ops.linalg import solve_triangular +from keras.src.ops.linalg import svd +from keras.src.ops.math import erf +from keras.src.ops.math import erfinv +from keras.src.ops.math import extract_sequences +from keras.src.ops.math import fft +from keras.src.ops.math import fft2 +from keras.src.ops.math import in_top_k +from keras.src.ops.math import irfft +from keras.src.ops.math import istft +from keras.src.ops.math import logsumexp +from keras.src.ops.math import rfft +from keras.src.ops.math import rsqrt +from keras.src.ops.math import segment_max +from keras.src.ops.math import segment_sum +from keras.src.ops.math import stft +from keras.src.ops.math import top_k +from keras.src.ops.nn import average_pool +from keras.src.ops.nn import batch_normalization +from keras.src.ops.nn import binary_crossentropy +from keras.src.ops.nn import categorical_crossentropy +from keras.src.ops.nn import conv +from keras.src.ops.nn import conv_transpose +from keras.src.ops.nn import ctc_loss +from keras.src.ops.nn import depthwise_conv +from keras.src.ops.nn import elu +from keras.src.ops.nn import gelu +from keras.src.ops.nn import hard_sigmoid +from keras.src.ops.nn import hard_silu +from keras.src.ops.nn import hard_silu as hard_swish +from keras.src.ops.nn import leaky_relu +from keras.src.ops.nn import log_sigmoid +from keras.src.ops.nn import log_softmax +from keras.src.ops.nn import max_pool +from keras.src.ops.nn import moments +from keras.src.ops.nn import multi_hot +from keras.src.ops.nn import normalize +from keras.src.ops.nn import one_hot +from keras.src.ops.nn import relu +from keras.src.ops.nn import relu6 +from keras.src.ops.nn import selu +from keras.src.ops.nn import separable_conv +from keras.src.ops.nn import sigmoid +from keras.src.ops.nn import silu +from keras.src.ops.nn import silu as swish +from keras.src.ops.nn import softmax +from keras.src.ops.nn import softplus +from keras.src.ops.nn import softsign +from keras.src.ops.nn import sparse_categorical_crossentropy +from keras.src.ops.numpy import abs +from keras.src.ops.numpy import absolute +from keras.src.ops.numpy import add +from keras.src.ops.numpy import all +from keras.src.ops.numpy import amax +from keras.src.ops.numpy import amin +from keras.src.ops.numpy import any +from keras.src.ops.numpy import append +from keras.src.ops.numpy import arange +from keras.src.ops.numpy import arccos +from 
keras.src.ops.numpy import arccosh +from keras.src.ops.numpy import arcsin +from keras.src.ops.numpy import arcsinh +from keras.src.ops.numpy import arctan +from keras.src.ops.numpy import arctan2 +from keras.src.ops.numpy import arctanh +from keras.src.ops.numpy import argmax +from keras.src.ops.numpy import argmin +from keras.src.ops.numpy import argsort +from keras.src.ops.numpy import array +from keras.src.ops.numpy import average +from keras.src.ops.numpy import bincount +from keras.src.ops.numpy import broadcast_to +from keras.src.ops.numpy import ceil +from keras.src.ops.numpy import clip +from keras.src.ops.numpy import concatenate +from keras.src.ops.numpy import conj +from keras.src.ops.numpy import conjugate +from keras.src.ops.numpy import copy +from keras.src.ops.numpy import correlate +from keras.src.ops.numpy import cos +from keras.src.ops.numpy import cosh +from keras.src.ops.numpy import count_nonzero +from keras.src.ops.numpy import cross +from keras.src.ops.numpy import cumprod +from keras.src.ops.numpy import cumsum +from keras.src.ops.numpy import diag +from keras.src.ops.numpy import diagonal +from keras.src.ops.numpy import diff +from keras.src.ops.numpy import digitize +from keras.src.ops.numpy import divide +from keras.src.ops.numpy import divide_no_nan +from keras.src.ops.numpy import dot +from keras.src.ops.numpy import einsum +from keras.src.ops.numpy import empty +from keras.src.ops.numpy import equal +from keras.src.ops.numpy import exp +from keras.src.ops.numpy import expand_dims +from keras.src.ops.numpy import expm1 +from keras.src.ops.numpy import eye +from keras.src.ops.numpy import flip +from keras.src.ops.numpy import floor +from keras.src.ops.numpy import floor_divide +from keras.src.ops.numpy import full +from keras.src.ops.numpy import full_like +from keras.src.ops.numpy import get_item +from keras.src.ops.numpy import greater +from keras.src.ops.numpy import greater_equal +from keras.src.ops.numpy import hstack +from keras.src.ops.numpy import identity +from keras.src.ops.numpy import imag +from keras.src.ops.numpy import isclose +from keras.src.ops.numpy import isfinite +from keras.src.ops.numpy import isinf +from keras.src.ops.numpy import isnan +from keras.src.ops.numpy import less +from keras.src.ops.numpy import less_equal +from keras.src.ops.numpy import linspace +from keras.src.ops.numpy import log +from keras.src.ops.numpy import log1p +from keras.src.ops.numpy import log2 +from keras.src.ops.numpy import log10 +from keras.src.ops.numpy import logaddexp +from keras.src.ops.numpy import logical_and +from keras.src.ops.numpy import logical_not +from keras.src.ops.numpy import logical_or +from keras.src.ops.numpy import logical_xor +from keras.src.ops.numpy import logspace +from keras.src.ops.numpy import matmul +from keras.src.ops.numpy import max +from keras.src.ops.numpy import maximum +from keras.src.ops.numpy import mean +from keras.src.ops.numpy import median +from keras.src.ops.numpy import meshgrid +from keras.src.ops.numpy import min +from keras.src.ops.numpy import minimum +from keras.src.ops.numpy import mod +from keras.src.ops.numpy import moveaxis +from keras.src.ops.numpy import multiply +from keras.src.ops.numpy import nan_to_num +from keras.src.ops.numpy import ndim +from keras.src.ops.numpy import negative +from keras.src.ops.numpy import nonzero +from keras.src.ops.numpy import not_equal +from keras.src.ops.numpy import ones +from keras.src.ops.numpy import ones_like +from keras.src.ops.numpy import outer +from 
keras.src.ops.numpy import pad +from keras.src.ops.numpy import power +from keras.src.ops.numpy import prod +from keras.src.ops.numpy import quantile +from keras.src.ops.numpy import ravel +from keras.src.ops.numpy import real +from keras.src.ops.numpy import reciprocal +from keras.src.ops.numpy import repeat +from keras.src.ops.numpy import reshape +from keras.src.ops.numpy import roll +from keras.src.ops.numpy import round +from keras.src.ops.numpy import sign +from keras.src.ops.numpy import sin +from keras.src.ops.numpy import sinh +from keras.src.ops.numpy import size +from keras.src.ops.numpy import sort +from keras.src.ops.numpy import split +from keras.src.ops.numpy import sqrt +from keras.src.ops.numpy import square +from keras.src.ops.numpy import squeeze +from keras.src.ops.numpy import stack +from keras.src.ops.numpy import std +from keras.src.ops.numpy import subtract +from keras.src.ops.numpy import sum +from keras.src.ops.numpy import swapaxes +from keras.src.ops.numpy import take +from keras.src.ops.numpy import take_along_axis +from keras.src.ops.numpy import tan +from keras.src.ops.numpy import tanh +from keras.src.ops.numpy import tensordot +from keras.src.ops.numpy import tile +from keras.src.ops.numpy import trace +from keras.src.ops.numpy import transpose +from keras.src.ops.numpy import tri +from keras.src.ops.numpy import tril +from keras.src.ops.numpy import triu +from keras.src.ops.numpy import true_divide +from keras.src.ops.numpy import var +from keras.src.ops.numpy import vdot +from keras.src.ops.numpy import vstack +from keras.src.ops.numpy import where +from keras.src.ops.numpy import zeros +from keras.src.ops.numpy import zeros_like diff --git a/keras/api/ops/image/__init__.py b/keras/api/ops/image/__init__.py new file mode 100644 index 000000000000..e4c8464c2195 --- /dev/null +++ b/keras/api/ops/image/__init__.py @@ -0,0 +1,13 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.ops.image import affine_transform +from keras.src.ops.image import crop_images +from keras.src.ops.image import extract_patches +from keras.src.ops.image import map_coordinates +from keras.src.ops.image import pad_images +from keras.src.ops.image import resize +from keras.src.ops.image import rgb_to_grayscale diff --git a/keras/api/ops/linalg/__init__.py b/keras/api/ops/linalg/__init__.py new file mode 100644 index 000000000000..da392d6c2490 --- /dev/null +++ b/keras/api/ops/linalg/__init__.py @@ -0,0 +1,16 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.ops.linalg import cholesky +from keras.src.ops.linalg import det +from keras.src.ops.linalg import eig +from keras.src.ops.linalg import inv +from keras.src.ops.linalg import lu_factor +from keras.src.ops.linalg import norm +from keras.src.ops.linalg import qr +from keras.src.ops.linalg import solve +from keras.src.ops.linalg import solve_triangular +from keras.src.ops.linalg import svd diff --git a/keras/api/ops/nn/__init__.py b/keras/api/ops/nn/__init__.py new file mode 100644 index 000000000000..9452ea18a766 --- /dev/null +++ b/keras/api/ops/nn/__init__.py @@ -0,0 +1,38 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.src.ops.nn import average_pool +from keras.src.ops.nn import batch_normalization +from keras.src.ops.nn import binary_crossentropy +from keras.src.ops.nn import categorical_crossentropy +from keras.src.ops.nn import conv +from keras.src.ops.nn import conv_transpose +from keras.src.ops.nn import ctc_loss +from keras.src.ops.nn import depthwise_conv +from keras.src.ops.nn import elu +from keras.src.ops.nn import gelu +from keras.src.ops.nn import hard_sigmoid +from keras.src.ops.nn import hard_silu +from keras.src.ops.nn import hard_silu as hard_swish +from keras.src.ops.nn import leaky_relu +from keras.src.ops.nn import log_sigmoid +from keras.src.ops.nn import log_softmax +from keras.src.ops.nn import max_pool +from keras.src.ops.nn import moments +from keras.src.ops.nn import multi_hot +from keras.src.ops.nn import normalize +from keras.src.ops.nn import one_hot +from keras.src.ops.nn import relu +from keras.src.ops.nn import relu6 +from keras.src.ops.nn import selu +from keras.src.ops.nn import separable_conv +from keras.src.ops.nn import sigmoid +from keras.src.ops.nn import silu +from keras.src.ops.nn import silu as swish +from keras.src.ops.nn import softmax +from keras.src.ops.nn import softplus +from keras.src.ops.nn import softsign +from keras.src.ops.nn import sparse_categorical_crossentropy diff --git a/keras/api/ops/numpy/__init__.py b/keras/api/ops/numpy/__init__.py new file mode 100644 index 000000000000..1d5434e40288 --- /dev/null +++ b/keras/api/ops/numpy/__init__.py @@ -0,0 +1,146 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.ops.numpy import abs +from keras.src.ops.numpy import absolute +from keras.src.ops.numpy import add +from keras.src.ops.numpy import all +from keras.src.ops.numpy import amax +from keras.src.ops.numpy import amin +from keras.src.ops.numpy import any +from keras.src.ops.numpy import append +from keras.src.ops.numpy import arange +from keras.src.ops.numpy import arccos +from keras.src.ops.numpy import arccosh +from keras.src.ops.numpy import arcsin +from keras.src.ops.numpy import arcsinh +from keras.src.ops.numpy import arctan +from keras.src.ops.numpy import arctan2 +from keras.src.ops.numpy import arctanh +from keras.src.ops.numpy import argmax +from keras.src.ops.numpy import argmin +from keras.src.ops.numpy import argsort +from keras.src.ops.numpy import array +from keras.src.ops.numpy import average +from keras.src.ops.numpy import bincount +from keras.src.ops.numpy import broadcast_to +from keras.src.ops.numpy import ceil +from keras.src.ops.numpy import clip +from keras.src.ops.numpy import concatenate +from keras.src.ops.numpy import conj +from keras.src.ops.numpy import conjugate +from keras.src.ops.numpy import copy +from keras.src.ops.numpy import correlate +from keras.src.ops.numpy import cos +from keras.src.ops.numpy import cosh +from keras.src.ops.numpy import count_nonzero +from keras.src.ops.numpy import cross +from keras.src.ops.numpy import cumprod +from keras.src.ops.numpy import cumsum +from keras.src.ops.numpy import diag +from keras.src.ops.numpy import diagonal +from keras.src.ops.numpy import diff +from keras.src.ops.numpy import digitize +from keras.src.ops.numpy import divide +from keras.src.ops.numpy import divide_no_nan +from keras.src.ops.numpy import dot +from keras.src.ops.numpy import einsum +from keras.src.ops.numpy import empty +from keras.src.ops.numpy import equal +from keras.src.ops.numpy import exp 
+from keras.src.ops.numpy import expand_dims
+from keras.src.ops.numpy import expm1
+from keras.src.ops.numpy import eye
+from keras.src.ops.numpy import flip
+from keras.src.ops.numpy import floor
+from keras.src.ops.numpy import floor_divide
+from keras.src.ops.numpy import full
+from keras.src.ops.numpy import full_like
+from keras.src.ops.numpy import get_item
+from keras.src.ops.numpy import greater
+from keras.src.ops.numpy import greater_equal
+from keras.src.ops.numpy import hstack
+from keras.src.ops.numpy import identity
+from keras.src.ops.numpy import imag
+from keras.src.ops.numpy import isclose
+from keras.src.ops.numpy import isfinite
+from keras.src.ops.numpy import isinf
+from keras.src.ops.numpy import isnan
+from keras.src.ops.numpy import less
+from keras.src.ops.numpy import less_equal
+from keras.src.ops.numpy import linspace
+from keras.src.ops.numpy import log
+from keras.src.ops.numpy import log1p
+from keras.src.ops.numpy import log2
+from keras.src.ops.numpy import log10
+from keras.src.ops.numpy import logaddexp
+from keras.src.ops.numpy import logical_and
+from keras.src.ops.numpy import logical_not
+from keras.src.ops.numpy import logical_or
+from keras.src.ops.numpy import logical_xor
+from keras.src.ops.numpy import logspace
+from keras.src.ops.numpy import matmul
+from keras.src.ops.numpy import max
+from keras.src.ops.numpy import maximum
+from keras.src.ops.numpy import mean
+from keras.src.ops.numpy import median
+from keras.src.ops.numpy import meshgrid
+from keras.src.ops.numpy import min
+from keras.src.ops.numpy import minimum
+from keras.src.ops.numpy import mod
+from keras.src.ops.numpy import moveaxis
+from keras.src.ops.numpy import multiply
+from keras.src.ops.numpy import nan_to_num
+from keras.src.ops.numpy import ndim
+from keras.src.ops.numpy import negative
+from keras.src.ops.numpy import nonzero
+from keras.src.ops.numpy import not_equal
+from keras.src.ops.numpy import ones
+from keras.src.ops.numpy import ones_like
+from keras.src.ops.numpy import outer
+from keras.src.ops.numpy import pad
+from keras.src.ops.numpy import power
+from keras.src.ops.numpy import prod
+from keras.src.ops.numpy import quantile
+from keras.src.ops.numpy import ravel
+from keras.src.ops.numpy import real
+from keras.src.ops.numpy import reciprocal
+from keras.src.ops.numpy import repeat
+from keras.src.ops.numpy import reshape
+from keras.src.ops.numpy import roll
+from keras.src.ops.numpy import round
+from keras.src.ops.numpy import sign
+from keras.src.ops.numpy import sin
+from keras.src.ops.numpy import sinh
+from keras.src.ops.numpy import size
+from keras.src.ops.numpy import sort
+from keras.src.ops.numpy import split
+from keras.src.ops.numpy import sqrt
+from keras.src.ops.numpy import square
+from keras.src.ops.numpy import squeeze
+from keras.src.ops.numpy import stack
+from keras.src.ops.numpy import std
+from keras.src.ops.numpy import subtract
+from keras.src.ops.numpy import sum
+from keras.src.ops.numpy import swapaxes
+from keras.src.ops.numpy import take
+from keras.src.ops.numpy import take_along_axis
+from keras.src.ops.numpy import tan
+from keras.src.ops.numpy import tanh
+from keras.src.ops.numpy import tensordot
+from keras.src.ops.numpy import tile
+from keras.src.ops.numpy import trace
+from keras.src.ops.numpy import transpose
+from keras.src.ops.numpy import tri
+from keras.src.ops.numpy import tril
+from keras.src.ops.numpy import triu
+from keras.src.ops.numpy import true_divide
+from keras.src.ops.numpy import var
+from keras.src.ops.numpy import vdot
+from keras.src.ops.numpy import vstack
+from keras.src.ops.numpy import where
+from keras.src.ops.numpy import zeros
+from keras.src.ops.numpy import zeros_like
diff --git a/keras/api/optimizers/__init__.py b/keras/api/optimizers/__init__.py
new file mode 100644
index 000000000000..5dab6705b58d
--- /dev/null
+++ b/keras/api/optimizers/__init__.py
@@ -0,0 +1,24 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.api.optimizers import legacy
+from keras.api.optimizers import schedules
+from keras.src.optimizers import deserialize
+from keras.src.optimizers import get
+from keras.src.optimizers import serialize
+from keras.src.optimizers.adadelta import Adadelta
+from keras.src.optimizers.adafactor import Adafactor
+from keras.src.optimizers.adagrad import Adagrad
+from keras.src.optimizers.adam import Adam
+from keras.src.optimizers.adamax import Adamax
+from keras.src.optimizers.adamw import AdamW
+from keras.src.optimizers.ftrl import Ftrl
+from keras.src.optimizers.lion import Lion
+from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
+from keras.src.optimizers.nadam import Nadam
+from keras.src.optimizers.optimizer import Optimizer
+from keras.src.optimizers.rmsprop import RMSprop
+from keras.src.optimizers.sgd import SGD
diff --git a/keras/api/optimizers/legacy/__init__.py b/keras/api/optimizers/legacy/__init__.py
new file mode 100644
index 000000000000..bff1a0313630
--- /dev/null
+++ b/keras/api/optimizers/legacy/__init__.py
@@ -0,0 +1,12 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.optimizers import LegacyOptimizerWarning as Adagrad
+from keras.src.optimizers import LegacyOptimizerWarning as Adam
+from keras.src.optimizers import LegacyOptimizerWarning as Ftrl
+from keras.src.optimizers import LegacyOptimizerWarning as Optimizer
+from keras.src.optimizers import LegacyOptimizerWarning as RMSprop
+from keras.src.optimizers import LegacyOptimizerWarning as SGD
diff --git a/keras/api/optimizers/schedules/__init__.py b/keras/api/optimizers/schedules/__init__.py
new file mode 100644
index 000000000000..6178626258ed
--- /dev/null
+++ b/keras/api/optimizers/schedules/__init__.py
@@ -0,0 +1,27 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecay
+from keras.src.optimizers.schedules.learning_rate_schedule import (
+    CosineDecayRestarts,
+)
+from keras.src.optimizers.schedules.learning_rate_schedule import (
+    ExponentialDecay,
+)
+from keras.src.optimizers.schedules.learning_rate_schedule import (
+    InverseTimeDecay,
+)
+from keras.src.optimizers.schedules.learning_rate_schedule import (
+    LearningRateSchedule,
+)
+from keras.src.optimizers.schedules.learning_rate_schedule import (
+    PiecewiseConstantDecay,
+)
+from keras.src.optimizers.schedules.learning_rate_schedule import (
+    PolynomialDecay,
+)
+from keras.src.optimizers.schedules.learning_rate_schedule import deserialize
+from keras.src.optimizers.schedules.learning_rate_schedule import serialize
diff --git a/keras/api/preprocessing/__init__.py b/keras/api/preprocessing/__init__.py
new file mode 100644
index 000000000000..c9ed7fd664c2
--- /dev/null
+++ b/keras/api/preprocessing/__init__.py
@@ -0,0 +1,13 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.api.preprocessing import image
+from keras.api.preprocessing import sequence
+from keras.src.utils.image_dataset_utils import image_dataset_from_directory
+from keras.src.utils.text_dataset_utils import text_dataset_from_directory
+from keras.src.utils.timeseries_dataset_utils import (
+    timeseries_dataset_from_array,
+)
diff --git a/keras/api/preprocessing/image/__init__.py b/keras/api/preprocessing/image/__init__.py
new file mode 100644
index 000000000000..f68afe8789d5
--- /dev/null
+++ b/keras/api/preprocessing/image/__init__.py
@@ -0,0 +1,11 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.utils.image_utils import array_to_img
+from keras.src.utils.image_utils import img_to_array
+from keras.src.utils.image_utils import load_img
+from keras.src.utils.image_utils import save_img
+from keras.src.utils.image_utils import smart_resize
diff --git a/keras/api/preprocessing/sequence/__init__.py b/keras/api/preprocessing/sequence/__init__.py
new file mode 100644
index 000000000000..188e01af9c48
--- /dev/null
+++ b/keras/api/preprocessing/sequence/__init__.py
@@ -0,0 +1,7 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.utils.sequence_utils import pad_sequences
diff --git a/keras/api/quantizers/__init__.py b/keras/api/quantizers/__init__.py
new file mode 100644
index 000000000000..d8a209bbb623
--- /dev/null
+++ b/keras/api/quantizers/__init__.py
@@ -0,0 +1,15 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.quantizers import deserialize
+from keras.src.quantizers import get
+from keras.src.quantizers import serialize
+from keras.src.quantizers.quantizers import AbsMaxQuantizer
+from keras.src.quantizers.quantizers import Quantizer
+from keras.src.quantizers.quantizers import abs_max_quantize
+from keras.src.quantizers.quantizers import compute_float8_amax_history
+from keras.src.quantizers.quantizers import compute_float8_scale
+from keras.src.quantizers.quantizers import quantize_and_dequantize
diff --git a/keras/api/random/__init__.py b/keras/api/random/__init__.py
new file mode 100644
index 000000000000..faf9c67f3fc4
--- /dev/null
+++ b/keras/api/random/__init__.py
@@ -0,0 +1,17 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+"""
+
+from keras.src.random.random import beta
+from keras.src.random.random import binomial
+from keras.src.random.random import categorical
+from keras.src.random.random import dropout
+from keras.src.random.random import gamma
+from keras.src.random.random import normal
+from keras.src.random.random import randint
+from keras.src.random.random import shuffle
+from keras.src.random.random import truncated_normal
+from keras.src.random.random import uniform
+from keras.src.random.seed_generator import SeedGenerator
diff --git a/keras/api/regularizers/__init__.py b/keras/api/regularizers/__init__.py
new file mode 100644
index 000000000000..93b51eaa51bd
--- /dev/null
+++ b/keras/api/regularizers/__init__.py
@@ -0,0 +1,20 @@
+"""DO NOT EDIT.
+
+This file was autogenerated. Do not edit it by hand,
+since your modifications would be overwritten.
+""" + +from keras.src.regularizers import deserialize +from keras.src.regularizers import get +from keras.src.regularizers import serialize +from keras.src.regularizers.regularizers import L1 +from keras.src.regularizers.regularizers import L1 as l1 +from keras.src.regularizers.regularizers import L1L2 +from keras.src.regularizers.regularizers import L1L2 as l1_l2 +from keras.src.regularizers.regularizers import L2 +from keras.src.regularizers.regularizers import L2 as l2 +from keras.src.regularizers.regularizers import OrthogonalRegularizer +from keras.src.regularizers.regularizers import ( + OrthogonalRegularizer as orthogonal_regularizer, +) +from keras.src.regularizers.regularizers import Regularizer diff --git a/keras/api/saving/__init__.py b/keras/api/saving/__init__.py new file mode 100644 index 000000000000..2f772922f8d1 --- /dev/null +++ b/keras/api/saving/__init__.py @@ -0,0 +1,20 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.saving.object_registration import CustomObjectScope +from keras.src.saving.object_registration import ( + CustomObjectScope as custom_object_scope, +) +from keras.src.saving.object_registration import get_custom_objects +from keras.src.saving.object_registration import get_registered_name +from keras.src.saving.object_registration import get_registered_object +from keras.src.saving.object_registration import register_keras_serializable +from keras.src.saving.saving_api import load_model +from keras.src.saving.saving_api import load_weights +from keras.src.saving.saving_api import save_model +from keras.src.saving.saving_api import save_weights +from keras.src.saving.serialization_lib import deserialize_keras_object +from keras.src.saving.serialization_lib import serialize_keras_object diff --git a/keras/api/tree/__init__.py b/keras/api/tree/__init__.py new file mode 100644 index 000000000000..388d19a0ec26 --- /dev/null +++ b/keras/api/tree/__init__.py @@ -0,0 +1,15 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.tree.tree_api import assert_same_structure +from keras.src.tree.tree_api import flatten +from keras.src.tree.tree_api import is_nested +from keras.src.tree.tree_api import lists_to_tuples +from keras.src.tree.tree_api import map_shape_structure +from keras.src.tree.tree_api import map_structure +from keras.src.tree.tree_api import map_structure_up_to +from keras.src.tree.tree_api import pack_sequence_as +from keras.src.tree.tree_api import traverse diff --git a/keras/api/utils/__init__.py b/keras/api/utils/__init__.py new file mode 100644 index 000000000000..aab787cc930f --- /dev/null +++ b/keras/api/utils/__init__.py @@ -0,0 +1,54 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + +from keras.api.utils import legacy +from keras.src.backend.common.global_state import clear_session +from keras.src.backend.common.keras_tensor import is_keras_tensor +from keras.src.backend.common.variables import standardize_dtype +from keras.src.layers.preprocessing.feature_space import FeatureSpace +from keras.src.ops.operation_utils import get_source_inputs +from keras.src.saving.object_registration import CustomObjectScope +from keras.src.saving.object_registration import ( + CustomObjectScope as custom_object_scope, +) +from keras.src.saving.object_registration import get_custom_objects +from keras.src.saving.object_registration import get_registered_name +from keras.src.saving.object_registration import get_registered_object +from keras.src.saving.object_registration import register_keras_serializable +from keras.src.saving.serialization_lib import deserialize_keras_object +from keras.src.saving.serialization_lib import serialize_keras_object +from keras.src.trainers.data_adapters.data_adapter_utils import ( + pack_x_y_sample_weight, +) +from keras.src.trainers.data_adapters.data_adapter_utils import ( + unpack_x_y_sample_weight, +) +from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset +from keras.src.trainers.data_adapters.py_dataset_adapter import ( + PyDataset as Sequence, +) +from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory +from keras.src.utils.dataset_utils import split_dataset +from keras.src.utils.file_utils import get_file +from keras.src.utils.image_dataset_utils import image_dataset_from_directory +from keras.src.utils.image_utils import array_to_img +from keras.src.utils.image_utils import img_to_array +from keras.src.utils.image_utils import load_img +from keras.src.utils.image_utils import save_img +from keras.src.utils.io_utils import disable_interactive_logging +from keras.src.utils.io_utils import enable_interactive_logging +from keras.src.utils.io_utils import is_interactive_logging_enabled +from keras.src.utils.model_visualization import model_to_dot +from keras.src.utils.model_visualization import plot_model +from keras.src.utils.numerical_utils import normalize +from keras.src.utils.numerical_utils import to_categorical +from keras.src.utils.progbar import Progbar +from keras.src.utils.rng_utils import set_random_seed +from keras.src.utils.sequence_utils import pad_sequences +from keras.src.utils.text_dataset_utils import text_dataset_from_directory +from keras.src.utils.timeseries_dataset_utils import ( + timeseries_dataset_from_array, +) diff --git a/keras/api/utils/legacy/__init__.py b/keras/api/utils/legacy/__init__.py new file mode 100644 index 000000000000..ac4d2d43dd9a --- /dev/null +++ b/keras/api/utils/legacy/__init__.py @@ -0,0 +1,8 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.legacy.saving.serialization import deserialize_keras_object +from keras.src.legacy.saving.serialization import serialize_keras_object diff --git a/keras/backend/__init__.py b/keras/backend/__init__.py deleted file mode 100644 index 66e8bcb9ded9..000000000000 --- a/keras/backend/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -from keras.backend.config import backend - -if backend() == "torch": - # When using the torch backend, - # torch needs to be imported first, otherwise it will segfault - # upon import. 
-    import torch
-
-from keras.backend.common.dtypes import result_type
-from keras.backend.common.keras_tensor import KerasTensor
-from keras.backend.common.keras_tensor import any_symbolic_tensors
-from keras.backend.common.keras_tensor import is_keras_tensor
-from keras.backend.common.name_scope import name_scope
-from keras.backend.common.stateless_scope import StatelessScope
-from keras.backend.common.stateless_scope import get_stateless_scope
-from keras.backend.common.stateless_scope import in_stateless_scope
-from keras.backend.common.variables import AutocastScope
-from keras.backend.common.variables import get_autocast_scope
-from keras.backend.common.variables import is_float_dtype
-from keras.backend.common.variables import is_int_dtype
-from keras.backend.common.variables import standardize_dtype
-from keras.backend.common.variables import standardize_shape
-from keras.backend.config import epsilon
-from keras.backend.config import floatx
-from keras.backend.config import image_data_format
-from keras.backend.config import set_epsilon
-from keras.backend.config import set_floatx
-from keras.backend.config import set_image_data_format
-from keras.backend.config import standardize_data_format
-
-# Import backend functions.
-if backend() == "tensorflow":
-    from keras.backend.tensorflow import *  # noqa: F403
-elif backend() == "jax":
-    from keras.backend.jax import *  # noqa: F403
-elif backend() == "torch":
-    from keras.backend.torch import *  # noqa: F403
-
-    distribution_lib = None
-elif backend() == "numpy":
-    from keras.backend.numpy import *  # noqa: F403
-
-    distribution_lib = None
-else:
-    raise ValueError(f"Unable to import backend : {backend()}")
diff --git a/keras/backend/common/__init__.py b/keras/backend/common/__init__.py
deleted file mode 100644
index a29e86ce15ab..000000000000
--- a/keras/backend/common/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from keras.backend.common import backend_utils
-from keras.backend.common.dtypes import result_type
-from keras.backend.common.variables import AutocastScope
-from keras.backend.common.variables import KerasVariable
-from keras.backend.common.variables import get_autocast_scope
-from keras.backend.common.variables import is_float_dtype
-from keras.backend.common.variables import is_int_dtype
-from keras.backend.common.variables import standardize_dtype
-from keras.backend.common.variables import standardize_shape
-from keras.random import random
diff --git a/keras/backend/jax/__init__.py b/keras/backend/jax/__init__.py
deleted file mode 100644
index 327bd95dc0de..000000000000
--- a/keras/backend/jax/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from keras.backend.jax import core
-from keras.backend.jax import distribution_lib
-from keras.backend.jax import image
-from keras.backend.jax import linalg
-from keras.backend.jax import math
-from keras.backend.jax import nn
-from keras.backend.jax import numpy
-from keras.backend.jax import random
-from keras.backend.jax.core import SUPPORTS_SPARSE_TENSORS
-from keras.backend.jax.core import Variable
-from keras.backend.jax.core import cast
-from keras.backend.jax.core import compute_output_spec
-from keras.backend.jax.core import cond
-from keras.backend.jax.core import convert_to_numpy
-from keras.backend.jax.core import convert_to_tensor
-from keras.backend.jax.core import device_scope
-from keras.backend.jax.core import is_tensor
-from keras.backend.jax.core import scatter
-from keras.backend.jax.core import shape
-from keras.backend.jax.core import stop_gradient
-from keras.backend.jax.core import vectorized_map
-from keras.backend.jax.rnn import cudnn_ok
-from keras.backend.jax.rnn import gru
-from keras.backend.jax.rnn import lstm
-from keras.backend.jax.rnn import rnn
diff --git a/keras/backend/numpy/__init__.py b/keras/backend/numpy/__init__.py
deleted file mode 100644
index f84ec2e32291..000000000000
--- a/keras/backend/numpy/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from keras.backend.numpy import core
-from keras.backend.numpy import image
-from keras.backend.numpy import linalg
-from keras.backend.numpy import math
-from keras.backend.numpy import nn
-from keras.backend.numpy import numpy
-from keras.backend.numpy import random
-from keras.backend.numpy.core import SUPPORTS_SPARSE_TENSORS
-from keras.backend.numpy.core import Variable
-from keras.backend.numpy.core import cast
-from keras.backend.numpy.core import compute_output_spec
-from keras.backend.numpy.core import cond
-from keras.backend.numpy.core import convert_to_numpy
-from keras.backend.numpy.core import convert_to_tensor
-from keras.backend.numpy.core import is_tensor
-from keras.backend.numpy.core import shape
-from keras.backend.numpy.core import vectorized_map
-from keras.backend.numpy.rnn import cudnn_ok
-from keras.backend.numpy.rnn import gru
-from keras.backend.numpy.rnn import lstm
-from keras.backend.numpy.rnn import rnn
diff --git a/keras/backend/tensorflow/__init__.py b/keras/backend/tensorflow/__init__.py
deleted file mode 100644
index 2791de505385..000000000000
--- a/keras/backend/tensorflow/__init__.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from keras.backend.tensorflow import core
-from keras.backend.tensorflow import distribution_lib
-from keras.backend.tensorflow import image
-from keras.backend.tensorflow import linalg
-from keras.backend.tensorflow import math
-from keras.backend.tensorflow import nn
-from keras.backend.tensorflow import numpy
-from keras.backend.tensorflow import random
-from keras.backend.tensorflow import tensorboard
-from keras.backend.tensorflow.core import SUPPORTS_SPARSE_TENSORS
-from keras.backend.tensorflow.core import Variable
-from keras.backend.tensorflow.core import cast
-from keras.backend.tensorflow.core import compute_output_spec
-from keras.backend.tensorflow.core import cond
-from keras.backend.tensorflow.core import convert_to_numpy
-from keras.backend.tensorflow.core import convert_to_tensor
-from keras.backend.tensorflow.core import device_scope
-from keras.backend.tensorflow.core import is_tensor
-from keras.backend.tensorflow.core import name_scope
-from keras.backend.tensorflow.core import scatter
-from keras.backend.tensorflow.core import shape
-from keras.backend.tensorflow.core import stop_gradient
-from keras.backend.tensorflow.core import vectorized_map
-from keras.backend.tensorflow.rnn import cudnn_ok
-from keras.backend.tensorflow.rnn import gru
-from keras.backend.tensorflow.rnn import lstm
-from keras.backend.tensorflow.rnn import rnn
diff --git a/keras/backend/torch/__init__.py b/keras/backend/torch/__init__.py
deleted file mode 100644
index 1358104f6f4b..000000000000
--- a/keras/backend/torch/__init__.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""Torch backend APIs.
-
-# Note on device placement
-
-Torch has a different device placement style compared to TF and JAX.
-In short, variables/tensors are not created on GPU by default,
-and the GPU cannot directly communicate with the CPU.
-To bring Torch behavior in line with TF and JAX automated device placement,
-we are doing the following to automate device placement if a GPU is available:
-
-- Variables are created on GPU.
-- Input data will be placed on GPU at the first `keras.layers.Layer` call.
-- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
-- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
-"""
-
-from keras.backend.torch import core
-from keras.backend.torch import image
-from keras.backend.torch import linalg
-from keras.backend.torch import math
-from keras.backend.torch import nn
-from keras.backend.torch import numpy
-from keras.backend.torch import random
-from keras.backend.torch.core import SUPPORTS_SPARSE_TENSORS
-from keras.backend.torch.core import Variable
-from keras.backend.torch.core import cast
-from keras.backend.torch.core import compute_output_spec
-from keras.backend.torch.core import cond
-from keras.backend.torch.core import convert_to_numpy
-from keras.backend.torch.core import convert_to_tensor
-from keras.backend.torch.core import device_scope
-from keras.backend.torch.core import is_tensor
-from keras.backend.torch.core import scatter
-from keras.backend.torch.core import shape
-from keras.backend.torch.core import stop_gradient
-from keras.backend.torch.core import to_torch_dtype
-from keras.backend.torch.core import vectorized_map
-from keras.backend.torch.rnn import cudnn_ok
-from keras.backend.torch.rnn import gru
-from keras.backend.torch.rnn import lstm
-from keras.backend.torch.rnn import rnn
diff --git a/keras/backend/torch/optimizers/__init__.py b/keras/backend/torch/optimizers/__init__.py
deleted file mode 100644
index d067b931b625..000000000000
--- a/keras/backend/torch/optimizers/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from keras.backend.torch.optimizers.torch_optimizer import TorchOptimizer
diff --git a/keras/backend/torch/optimizers/torch_adamw.py b/keras/backend/torch/optimizers/torch_adamw.py
deleted file mode 100644
index c43ed17bce75..000000000000
--- a/keras/backend/torch/optimizers/torch_adamw.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from keras import optimizers
-from keras.backend.torch.optimizers import torch_adam
-
-
-class AdamW(torch_adam.Adam, optimizers.AdamW):
-    pass
diff --git a/keras/callbacks/__init__.py b/keras/callbacks/__init__.py
deleted file mode 100644
index 42a6dd4948d9..000000000000
--- a/keras/callbacks/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from keras.callbacks.backup_and_restore import BackupAndRestore
-from keras.callbacks.callback import Callback
-from keras.callbacks.callback_list import CallbackList
-from keras.callbacks.csv_logger import CSVLogger
-from keras.callbacks.early_stopping import EarlyStopping
-from keras.callbacks.history import History
-from keras.callbacks.lambda_callback import LambdaCallback
-from keras.callbacks.learning_rate_scheduler import LearningRateScheduler
-from keras.callbacks.model_checkpoint import ModelCheckpoint
-from keras.callbacks.progbar_logger import ProgbarLogger
-from keras.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau
-from keras.callbacks.remote_monitor import RemoteMonitor
-from keras.callbacks.swap_ema_weights import SwapEMAWeights
-from keras.callbacks.tensorboard import TensorBoard
-from keras.callbacks.terminate_on_nan import TerminateOnNaN
diff --git a/keras/datasets/__init__.py b/keras/datasets/__init__.py
deleted file mode 100644
index 8fb280792f88..000000000000
--- a/keras/datasets/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""Small NumPy datasets for debugging/testing.""" - -from keras.datasets import boston_housing -from keras.datasets import california_housing -from keras.datasets import cifar10 -from keras.datasets import cifar100 -from keras.datasets import fashion_mnist -from keras.datasets import imdb -from keras.datasets import mnist -from keras.datasets import reuters diff --git a/keras/distribution/__init__.py b/keras/distribution/__init__.py deleted file mode 100644 index 880faf6d8165..000000000000 --- a/keras/distribution/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from keras.distribution.distribution_lib import DataParallel -from keras.distribution.distribution_lib import DeviceMesh -from keras.distribution.distribution_lib import Distribution -from keras.distribution.distribution_lib import LayoutMap -from keras.distribution.distribution_lib import ModelParallel -from keras.distribution.distribution_lib import TensorLayout -from keras.distribution.distribution_lib import distribute_tensor -from keras.distribution.distribution_lib import distribution -from keras.distribution.distribution_lib import initialize -from keras.distribution.distribution_lib import list_devices -from keras.distribution.distribution_lib import set_distribution diff --git a/keras/export/__init__.py b/keras/export/__init__.py deleted file mode 100644 index f59fb3dd04d9..000000000000 --- a/keras/export/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from keras.export.export_lib import ExportArchive diff --git a/keras/layers/__init__.py b/keras/layers/__init__.py deleted file mode 100644 index 9f3f95743024..000000000000 --- a/keras/layers/__init__.py +++ /dev/null @@ -1,161 +0,0 @@ -from keras.api_export import keras_export -from keras.layers.activations.activation import Activation -from keras.layers.activations.elu import ELU -from keras.layers.activations.leaky_relu import LeakyReLU -from keras.layers.activations.prelu import PReLU -from keras.layers.activations.relu import ReLU -from keras.layers.activations.softmax import Softmax -from keras.layers.attention.additive_attention import AdditiveAttention -from keras.layers.attention.attention import Attention -from keras.layers.attention.grouped_query_attention import GroupedQueryAttention -from keras.layers.attention.multi_head_attention import MultiHeadAttention -from keras.layers.convolutional.conv1d import Conv1D -from keras.layers.convolutional.conv1d_transpose import Conv1DTranspose -from keras.layers.convolutional.conv2d import Conv2D -from keras.layers.convolutional.conv2d_transpose import Conv2DTranspose -from keras.layers.convolutional.conv3d import Conv3D -from keras.layers.convolutional.conv3d_transpose import Conv3DTranspose -from keras.layers.convolutional.depthwise_conv1d import DepthwiseConv1D -from keras.layers.convolutional.depthwise_conv2d import DepthwiseConv2D -from keras.layers.convolutional.separable_conv1d import SeparableConv1D -from keras.layers.convolutional.separable_conv2d import SeparableConv2D -from keras.layers.core.dense import Dense -from keras.layers.core.einsum_dense import EinsumDense -from keras.layers.core.embedding import Embedding -from keras.layers.core.identity import Identity -from keras.layers.core.input_layer import Input -from keras.layers.core.input_layer import InputLayer -from keras.layers.core.lambda_layer import Lambda -from keras.layers.core.masking import Masking -from keras.layers.core.wrapper import Wrapper -from keras.layers.layer import Layer -from keras.layers.merging.add import Add -from keras.layers.merging.add import 
add -from keras.layers.merging.average import Average -from keras.layers.merging.average import average -from keras.layers.merging.concatenate import Concatenate -from keras.layers.merging.concatenate import concatenate -from keras.layers.merging.dot import Dot -from keras.layers.merging.dot import dot -from keras.layers.merging.maximum import Maximum -from keras.layers.merging.maximum import maximum -from keras.layers.merging.minimum import Minimum -from keras.layers.merging.minimum import minimum -from keras.layers.merging.multiply import Multiply -from keras.layers.merging.multiply import multiply -from keras.layers.merging.subtract import Subtract -from keras.layers.merging.subtract import subtract -from keras.layers.normalization.batch_normalization import BatchNormalization -from keras.layers.normalization.group_normalization import GroupNormalization -from keras.layers.normalization.layer_normalization import LayerNormalization -from keras.layers.normalization.spectral_normalization import ( - SpectralNormalization, -) -from keras.layers.normalization.unit_normalization import UnitNormalization -from keras.layers.pooling.average_pooling1d import AveragePooling1D -from keras.layers.pooling.average_pooling2d import AveragePooling2D -from keras.layers.pooling.average_pooling3d import AveragePooling3D -from keras.layers.pooling.global_average_pooling1d import GlobalAveragePooling1D -from keras.layers.pooling.global_average_pooling2d import GlobalAveragePooling2D -from keras.layers.pooling.global_average_pooling3d import GlobalAveragePooling3D -from keras.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D -from keras.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D -from keras.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D -from keras.layers.pooling.max_pooling1d import MaxPooling1D -from keras.layers.pooling.max_pooling2d import MaxPooling2D -from keras.layers.pooling.max_pooling3d import MaxPooling3D -from keras.layers.preprocessing.audio_preprocessing import MelSpectrogram -from keras.layers.preprocessing.category_encoding import CategoryEncoding -from keras.layers.preprocessing.center_crop import CenterCrop -from keras.layers.preprocessing.discretization import Discretization -from keras.layers.preprocessing.hashed_crossing import HashedCrossing -from keras.layers.preprocessing.hashing import Hashing -from keras.layers.preprocessing.index_lookup import IndexLookup -from keras.layers.preprocessing.integer_lookup import IntegerLookup -from keras.layers.preprocessing.normalization import Normalization -from keras.layers.preprocessing.random_brightness import RandomBrightness -from keras.layers.preprocessing.random_contrast import RandomContrast -from keras.layers.preprocessing.random_crop import RandomCrop -from keras.layers.preprocessing.random_flip import RandomFlip -from keras.layers.preprocessing.random_rotation import RandomRotation -from keras.layers.preprocessing.random_translation import RandomTranslation -from keras.layers.preprocessing.random_zoom import RandomZoom -from keras.layers.preprocessing.rescaling import Rescaling -from keras.layers.preprocessing.resizing import Resizing -from keras.layers.preprocessing.string_lookup import StringLookup -from keras.layers.preprocessing.text_vectorization import TextVectorization -from keras.layers.regularization.activity_regularization import ( - ActivityRegularization, -) -from keras.layers.regularization.alpha_dropout import AlphaDropout -from keras.layers.regularization.dropout import Dropout 
-from keras.layers.regularization.gaussian_dropout import GaussianDropout -from keras.layers.regularization.gaussian_noise import GaussianNoise -from keras.layers.regularization.spatial_dropout import SpatialDropout1D -from keras.layers.regularization.spatial_dropout import SpatialDropout2D -from keras.layers.regularization.spatial_dropout import SpatialDropout3D -from keras.layers.reshaping.cropping1d import Cropping1D -from keras.layers.reshaping.cropping2d import Cropping2D -from keras.layers.reshaping.cropping3d import Cropping3D -from keras.layers.reshaping.flatten import Flatten -from keras.layers.reshaping.permute import Permute -from keras.layers.reshaping.repeat_vector import RepeatVector -from keras.layers.reshaping.reshape import Reshape -from keras.layers.reshaping.up_sampling1d import UpSampling1D -from keras.layers.reshaping.up_sampling2d import UpSampling2D -from keras.layers.reshaping.up_sampling3d import UpSampling3D -from keras.layers.reshaping.zero_padding1d import ZeroPadding1D -from keras.layers.reshaping.zero_padding2d import ZeroPadding2D -from keras.layers.reshaping.zero_padding3d import ZeroPadding3D -from keras.layers.rnn.bidirectional import Bidirectional -from keras.layers.rnn.conv_lstm1d import ConvLSTM1D -from keras.layers.rnn.conv_lstm2d import ConvLSTM2D -from keras.layers.rnn.conv_lstm3d import ConvLSTM3D -from keras.layers.rnn.gru import GRU -from keras.layers.rnn.gru import GRUCell -from keras.layers.rnn.lstm import LSTM -from keras.layers.rnn.lstm import LSTMCell -from keras.layers.rnn.rnn import RNN -from keras.layers.rnn.simple_rnn import SimpleRNN -from keras.layers.rnn.simple_rnn import SimpleRNNCell -from keras.layers.rnn.stacked_rnn_cells import StackedRNNCells -from keras.layers.rnn.time_distributed import TimeDistributed -from keras.saving import serialization_lib - - -@keras_export("keras.layers.serialize") -def serialize(layer): - """Returns the layer configuration as a Python dict. - - Args: - layer: A `keras.layers.Layer` instance to serialize. - - Returns: - Python dict which contains the configuration of the layer. - """ - return serialization_lib.serialize_keras_object(layer) - - -@keras_export("keras.layers.deserialize") -def deserialize(config, custom_objects=None): - """Returns a Keras layer object via its configuration. - - Args: - config: A python dict containing a serialized layer configuration. - custom_objects: Optional dictionary mapping names (strings) to custom - objects (classes and functions) to be considered during - deserialization. - - Returns: - A Keras layer instance. - """ - obj = serialization_lib.deserialize_keras_object( - config, - custom_objects=custom_objects, - ) - if not isinstance(obj, Layer): - raise ValueError( - "`keras.layers.deserialize` was passed a `config` object that is " - f"not a `keras.layers.Layer`. 
Received: {config}" - ) - return obj diff --git a/keras/layers/activations/__init__.py b/keras/layers/activations/__init__.py deleted file mode 100644 index 176e43fe183d..000000000000 --- a/keras/layers/activations/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from keras.layers.activations.elu import ELU -from keras.layers.activations.leaky_relu import LeakyReLU -from keras.layers.activations.prelu import PReLU -from keras.layers.activations.relu import ReLU -from keras.layers.activations.softmax import Softmax diff --git a/keras/models/__init__.py b/keras/models/__init__.py deleted file mode 100644 index 21d90b45f81c..000000000000 --- a/keras/models/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from keras.models.functional import Functional -from keras.models.model import Model -from keras.models.sequential import Sequential diff --git a/keras/ops/__init__.py b/keras/ops/__init__.py deleted file mode 100644 index 3afb84a2b296..000000000000 --- a/keras/ops/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# from keras.ops.numpy import Matmul, matmul -# from keras.ops.numpy import Add, add -# from keras.ops.numpy import Multiply, multiply - -from keras.backend import cast -from keras.backend import cond -from keras.backend import is_tensor -from keras.backend import name_scope -from keras.backend import random -from keras.ops import image -from keras.ops import operation_utils -from keras.ops.core import * # noqa: F403 -from keras.ops.linalg import * # noqa: F403 -from keras.ops.math import * # noqa: F403 -from keras.ops.nn import * # noqa: F403 -from keras.ops.numpy import * # noqa: F403 diff --git a/keras/optimizers/schedules/__init__.py b/keras/optimizers/schedules/__init__.py deleted file mode 100644 index 480e06da3a4f..000000000000 --- a/keras/optimizers/schedules/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from keras.optimizers.schedules.learning_rate_schedule import CosineDecay -from keras.optimizers.schedules.learning_rate_schedule import ( - CosineDecayRestarts, -) -from keras.optimizers.schedules.learning_rate_schedule import ExponentialDecay -from keras.optimizers.schedules.learning_rate_schedule import InverseTimeDecay -from keras.optimizers.schedules.learning_rate_schedule import ( - PiecewiseConstantDecay, -) -from keras.optimizers.schedules.learning_rate_schedule import PolynomialDecay diff --git a/keras/random/__init__.py b/keras/random/__init__.py deleted file mode 100644 index 292f299ba7fd..000000000000 --- a/keras/random/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from keras.random.random import categorical -from keras.random.random import dropout -from keras.random.random import gamma -from keras.random.random import normal -from keras.random.random import randint -from keras.random.random import shuffle -from keras.random.random import truncated_normal -from keras.random.random import uniform -from keras.random.seed_generator import SeedGenerator diff --git a/keras/saving/__init__.py b/keras/saving/__init__.py deleted file mode 100644 index 9f709a3c1ba9..000000000000 --- a/keras/saving/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from keras.saving.object_registration import CustomObjectScope -from keras.saving.object_registration import custom_object_scope -from keras.saving.object_registration import get_custom_objects -from keras.saving.object_registration import get_registered_name -from keras.saving.object_registration import get_registered_object -from keras.saving.object_registration import register_keras_serializable -from keras.saving.saving_api import load_model -from 
keras.saving.serialization_lib import deserialize_keras_object -from keras.saving.serialization_lib import serialize_keras_object diff --git a/keras/src/__init__.py b/keras/src/__init__.py new file mode 100644 index 000000000000..d4cd3c0829a1 --- /dev/null +++ b/keras/src/__init__.py @@ -0,0 +1,19 @@ +from keras.src import activations +from keras.src import applications +from keras.src import backend +from keras.src import constraints +from keras.src import datasets +from keras.src import initializers +from keras.src import layers +from keras.src import models +from keras.src import ops +from keras.src import optimizers +from keras.src import regularizers +from keras.src import utils +from keras.src.backend import KerasTensor +from keras.src.layers import Input +from keras.src.layers import Layer +from keras.src.models import Functional +from keras.src.models import Model +from keras.src.models import Sequential +from keras.src.version import __version__ diff --git a/keras/activations/__init__.py b/keras/src/activations/__init__.py similarity index 68% rename from keras/activations/__init__.py rename to keras/src/activations/__init__.py index 8d9aaa5403eb..13bc6de5dba3 100644 --- a/keras/activations/__init__.py +++ b/keras/src/activations/__init__.py @@ -1,26 +1,26 @@ import types -from keras.activations.activations import elu -from keras.activations.activations import exponential -from keras.activations.activations import gelu -from keras.activations.activations import hard_sigmoid -from keras.activations.activations import hard_silu -from keras.activations.activations import leaky_relu -from keras.activations.activations import linear -from keras.activations.activations import log_softmax -from keras.activations.activations import mish -from keras.activations.activations import relu -from keras.activations.activations import relu6 -from keras.activations.activations import selu -from keras.activations.activations import sigmoid -from keras.activations.activations import silu -from keras.activations.activations import softmax -from keras.activations.activations import softplus -from keras.activations.activations import softsign -from keras.activations.activations import tanh -from keras.api_export import keras_export -from keras.saving import object_registration -from keras.saving import serialization_lib +from keras.src.activations.activations import elu +from keras.src.activations.activations import exponential +from keras.src.activations.activations import gelu +from keras.src.activations.activations import hard_sigmoid +from keras.src.activations.activations import hard_silu +from keras.src.activations.activations import leaky_relu +from keras.src.activations.activations import linear +from keras.src.activations.activations import log_softmax +from keras.src.activations.activations import mish +from keras.src.activations.activations import relu +from keras.src.activations.activations import relu6 +from keras.src.activations.activations import selu +from keras.src.activations.activations import sigmoid +from keras.src.activations.activations import silu +from keras.src.activations.activations import softmax +from keras.src.activations.activations import softplus +from keras.src.activations.activations import softsign +from keras.src.activations.activations import tanh +from keras.src.api_export import keras_export +from keras.src.saving import object_registration +from keras.src.saving import serialization_lib ALL_OBJECTS = { relu, diff --git a/keras/activations/activations.py 
b/keras/src/activations/activations.py similarity index 99% rename from keras/activations/activations.py rename to keras/src/activations/activations.py index b9617c80c20a..32c3f5855d71 100644 --- a/keras/activations/activations.py +++ b/keras/src/activations/activations.py @@ -1,6 +1,6 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export @keras_export("keras.activations.relu") diff --git a/keras/activations/activations_test.py b/keras/src/activations/activations_test.py similarity index 99% rename from keras/activations/activations_test.py rename to keras/src/activations/activations_test.py index c752468d34e1..c0ae34a1739f 100644 --- a/keras/activations/activations_test.py +++ b/keras/src/activations/activations_test.py @@ -1,8 +1,8 @@ import numpy as np -from keras import activations -from keras import backend -from keras import testing +from keras.src import activations +from keras.src import backend +from keras.src import testing def _ref_softmax(values): diff --git a/keras/api_export.py b/keras/src/api_export.py similarity index 100% rename from keras/api_export.py rename to keras/src/api_export.py diff --git a/keras/applications/__init__.py b/keras/src/applications/__init__.py similarity index 100% rename from keras/applications/__init__.py rename to keras/src/applications/__init__.py diff --git a/keras/applications/applications_test.py b/keras/src/applications/applications_test.py similarity index 91% rename from keras/applications/applications_test.py rename to keras/src/applications/applications_test.py index 8397cd275f0a..bf0d1cdb82f0 100644 --- a/keras/applications/applications_test.py +++ b/keras/src/applications/applications_test.py @@ -4,26 +4,26 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import testing -from keras.applications import convnext -from keras.applications import densenet -from keras.applications import efficientnet -from keras.applications import efficientnet_v2 -from keras.applications import inception_resnet_v2 -from keras.applications import inception_v3 -from keras.applications import mobilenet -from keras.applications import mobilenet_v2 -from keras.applications import mobilenet_v3 -from keras.applications import nasnet -from keras.applications import resnet -from keras.applications import resnet_v2 -from keras.applications import vgg16 -from keras.applications import vgg19 -from keras.applications import xception -from keras.saving import serialization_lib -from keras.utils import file_utils -from keras.utils import image_utils +from keras.src import backend +from keras.src import testing +from keras.src.applications import convnext +from keras.src.applications import densenet +from keras.src.applications import efficientnet +from keras.src.applications import efficientnet_v2 +from keras.src.applications import inception_resnet_v2 +from keras.src.applications import inception_v3 +from keras.src.applications import mobilenet +from keras.src.applications import mobilenet_v2 +from keras.src.applications import mobilenet_v3 +from keras.src.applications import nasnet +from keras.src.applications import resnet +from keras.src.applications import resnet_v2 +from keras.src.applications import vgg16 +from keras.src.applications import vgg19 +from keras.src.applications import xception +from keras.src.saving import serialization_lib +from keras.src.utils import file_utils 
+from keras.src.utils import image_utils try: import PIL diff --git a/keras/applications/convnext.py b/keras/src/applications/convnext.py similarity index 98% rename from keras/applications/convnext.py rename to keras/src/applications/convnext.py index f658a2da1425..2e464be0c7f1 100644 --- a/keras/applications/convnext.py +++ b/keras/src/applications/convnext.py @@ -1,17 +1,17 @@ import numpy as np -from keras import backend -from keras import initializers -from keras import layers -from keras import ops -from keras import random -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.layers.layer import Layer -from keras.models import Functional -from keras.models import Sequential -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import initializers +from keras.src import layers +from keras.src import ops +from keras.src import random +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.layers.layer import Layer +from keras.src.models import Functional +from keras.src.models import Sequential +from keras.src.ops import operation_utils +from keras.src.utils import file_utils BASE_WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/convnext/" diff --git a/keras/applications/densenet.py b/keras/src/applications/densenet.py similarity index 98% rename from keras/applications/densenet.py rename to keras/src/applications/densenet.py index d9bca939c986..e7d2accb4d33 100644 --- a/keras/applications/densenet.py +++ b/keras/src/applications/densenet.py @@ -1,10 +1,10 @@ -from keras import backend -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.models import Functional -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils BASE_WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/densenet/" diff --git a/keras/applications/efficientnet.py b/keras/src/applications/efficientnet.py similarity index 98% rename from keras/applications/efficientnet.py rename to keras/src/applications/efficientnet.py index eae679b5b05f..5d61bc904b19 100644 --- a/keras/applications/efficientnet.py +++ b/keras/src/applications/efficientnet.py @@ -1,13 +1,13 @@ import copy import math -from keras import backend -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.models import Functional -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils BASE_WEIGHTS_PATH = "https://storage.googleapis.com/keras-applications/" diff --git a/keras/applications/efficientnet_v2.py b/keras/src/applications/efficientnet_v2.py similarity index 99% rename from keras/applications/efficientnet_v2.py rename to keras/src/applications/efficientnet_v2.py index 
2cd7cde1ac80..e9b626081c57 100644 --- a/keras/applications/efficientnet_v2.py +++ b/keras/src/applications/efficientnet_v2.py @@ -1,14 +1,14 @@ import copy import math -from keras import backend -from keras import initializers -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.models import Functional -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import initializers +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils BASE_WEIGHTS_PATH = "https://storage.googleapis.com/tensorflow/keras-applications/efficientnet_v2/" # noqa: E501 diff --git a/keras/applications/imagenet_utils.py b/keras/src/applications/imagenet_utils.py similarity index 98% rename from keras/applications/imagenet_utils.py rename to keras/src/applications/imagenet_utils.py index 9e3f11bb1b16..f88c0af64d88 100644 --- a/keras/applications/imagenet_utils.py +++ b/keras/src/applications/imagenet_utils.py @@ -3,11 +3,11 @@ import numpy as np -from keras import activations -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.utils import file_utils +from keras.src import activations +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.utils import file_utils CLASS_INDEX = None CLASS_INDEX_PATH = ( diff --git a/keras/applications/imagenet_utils_test.py b/keras/src/applications/imagenet_utils_test.py similarity index 98% rename from keras/applications/imagenet_utils_test.py rename to keras/src/applications/imagenet_utils_test.py index dc09911ed44f..861632a9eb05 100644 --- a/keras/applications/imagenet_utils_test.py +++ b/keras/src/applications/imagenet_utils_test.py @@ -3,10 +3,10 @@ from absl.testing import parameterized import keras -from keras import backend -from keras import testing -from keras.applications import imagenet_utils as utils -from keras.dtype_policies.dtype_policy import set_dtype_policy +from keras.src import backend +from keras.src import testing +from keras.src.applications import imagenet_utils as utils +from keras.src.dtype_policies.dtype_policy import set_dtype_policy class TestImageNetUtils(testing.TestCase, parameterized.TestCase): diff --git a/keras/applications/inception_resnet_v2.py b/keras/src/applications/inception_resnet_v2.py similarity index 97% rename from keras/applications/inception_resnet_v2.py rename to keras/src/applications/inception_resnet_v2.py index 46b008324023..ca01832b2d48 100644 --- a/keras/applications/inception_resnet_v2.py +++ b/keras/src/applications/inception_resnet_v2.py @@ -1,11 +1,11 @@ -from keras import backend -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.layers.layer import Layer -from keras.models import Functional -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.layers.layer import Layer +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils BASE_WEIGHT_URL = ( 
"https://storage.googleapis.com/tensorflow/" diff --git a/keras/applications/inception_v3.py b/keras/src/applications/inception_v3.py similarity index 98% rename from keras/applications/inception_v3.py rename to keras/src/applications/inception_v3.py index beab281500f3..7eef55b60518 100644 --- a/keras/applications/inception_v3.py +++ b/keras/src/applications/inception_v3.py @@ -1,10 +1,10 @@ -from keras import backend -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.models import Functional -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/" diff --git a/keras/applications/mobilenet.py b/keras/src/applications/mobilenet.py similarity index 98% rename from keras/applications/mobilenet.py rename to keras/src/applications/mobilenet.py index c748c11db617..7c01779405ff 100644 --- a/keras/applications/mobilenet.py +++ b/keras/src/applications/mobilenet.py @@ -1,12 +1,12 @@ import warnings -from keras import backend -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.models import Functional -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils BASE_WEIGHT_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/" diff --git a/keras/applications/mobilenet_v2.py b/keras/src/applications/mobilenet_v2.py similarity index 98% rename from keras/applications/mobilenet_v2.py rename to keras/src/applications/mobilenet_v2.py index 96db08c32e6c..3a6a8f0e14db 100644 --- a/keras/applications/mobilenet_v2.py +++ b/keras/src/applications/mobilenet_v2.py @@ -1,12 +1,12 @@ import warnings -from keras import backend -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.models import Functional -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils BASE_WEIGHT_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/" diff --git a/keras/applications/mobilenet_v3.py b/keras/src/applications/mobilenet_v3.py similarity index 98% rename from keras/applications/mobilenet_v3.py rename to keras/src/applications/mobilenet_v3.py index 96f27d16a7cc..8f8b1422761b 100644 --- a/keras/applications/mobilenet_v3.py +++ b/keras/src/applications/mobilenet_v3.py @@ -1,12 +1,12 @@ import warnings -from keras import backend -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.models import Functional -from 
keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils BASE_WEIGHT_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v3/" diff --git a/keras/applications/nasnet.py b/keras/src/applications/nasnet.py similarity index 99% rename from keras/applications/nasnet.py rename to keras/src/applications/nasnet.py index 4f1271de492c..5769adb14eb9 100644 --- a/keras/applications/nasnet.py +++ b/keras/src/applications/nasnet.py @@ -1,12 +1,12 @@ import warnings -from keras import backend -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.models import Functional -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils BASE_WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/nasnet/" diff --git a/keras/applications/resnet.py b/keras/src/applications/resnet.py similarity index 98% rename from keras/applications/resnet.py rename to keras/src/applications/resnet.py index a5230e2da551..b752be1b208c 100644 --- a/keras/applications/resnet.py +++ b/keras/src/applications/resnet.py @@ -1,10 +1,10 @@ -from keras import backend -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.models import Functional -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils BASE_WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/resnet/" diff --git a/keras/applications/resnet_v2.py b/keras/src/applications/resnet_v2.py similarity index 97% rename from keras/applications/resnet_v2.py rename to keras/src/applications/resnet_v2.py index 21652ffc121f..9bdd09091481 100644 --- a/keras/applications/resnet_v2.py +++ b/keras/src/applications/resnet_v2.py @@ -1,6 +1,6 @@ -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.applications import resnet +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.applications import resnet @keras_export( diff --git a/keras/applications/vgg16.py b/keras/src/applications/vgg16.py similarity index 96% rename from keras/applications/vgg16.py rename to keras/src/applications/vgg16.py index f83983d8001b..af39dffc8df0 100644 --- a/keras/applications/vgg16.py +++ b/keras/src/applications/vgg16.py @@ -1,10 +1,10 @@ -from keras import backend -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.models import Functional -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src 
import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/" diff --git a/keras/applications/vgg19.py b/keras/src/applications/vgg19.py similarity index 97% rename from keras/applications/vgg19.py rename to keras/src/applications/vgg19.py index 48abebdfd005..0d416523138f 100644 --- a/keras/applications/vgg19.py +++ b/keras/src/applications/vgg19.py @@ -1,10 +1,10 @@ -from keras import backend -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.models import Functional -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/" diff --git a/keras/applications/xception.py b/keras/src/applications/xception.py similarity index 97% rename from keras/applications/xception.py rename to keras/src/applications/xception.py index 93d0a4ac7078..0841321cd413 100644 --- a/keras/applications/xception.py +++ b/keras/src/applications/xception.py @@ -1,10 +1,10 @@ -from keras import backend -from keras import layers -from keras.api_export import keras_export -from keras.applications import imagenet_utils -from keras.models import Functional -from keras.ops import operation_utils -from keras.utils import file_utils +from keras.src import backend +from keras.src import layers +from keras.src.api_export import keras_export +from keras.src.applications import imagenet_utils +from keras.src.models import Functional +from keras.src.ops import operation_utils +from keras.src.utils import file_utils WEIGHTS_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/" diff --git a/keras/src/backend/__init__.py b/keras/src/backend/__init__.py new file mode 100644 index 000000000000..5c7fa2235207 --- /dev/null +++ b/keras/src/backend/__init__.py @@ -0,0 +1,45 @@ +from keras.src.backend.config import backend + +if backend() == "torch": + # When using the torch backend, + # torch needs to be imported first, otherwise it will segfault + # upon import. 
+ import torch + +from keras.src.backend.common.dtypes import result_type +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.common.keras_tensor import any_symbolic_tensors +from keras.src.backend.common.keras_tensor import is_keras_tensor +from keras.src.backend.common.name_scope import name_scope +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.backend.common.stateless_scope import get_stateless_scope +from keras.src.backend.common.stateless_scope import in_stateless_scope +from keras.src.backend.common.variables import AutocastScope +from keras.src.backend.common.variables import get_autocast_scope +from keras.src.backend.common.variables import is_float_dtype +from keras.src.backend.common.variables import is_int_dtype +from keras.src.backend.common.variables import standardize_dtype +from keras.src.backend.common.variables import standardize_shape +from keras.src.backend.config import epsilon +from keras.src.backend.config import floatx +from keras.src.backend.config import image_data_format +from keras.src.backend.config import set_epsilon +from keras.src.backend.config import set_floatx +from keras.src.backend.config import set_image_data_format +from keras.src.backend.config import standardize_data_format + +# Import backend functions. +if backend() == "tensorflow": + from keras.src.backend.tensorflow import * # noqa: F403 +elif backend() == "jax": + from keras.src.backend.jax import * # noqa: F403 +elif backend() == "torch": + from keras.src.backend.torch import * # noqa: F403 + + distribution_lib = None +elif backend() == "numpy": + from keras.src.backend.numpy import * # noqa: F403 + + distribution_lib = None +else: + raise ValueError(f"Unable to import backend : {backend()}") diff --git a/keras/src/backend/common/__init__.py b/keras/src/backend/common/__init__.py new file mode 100644 index 000000000000..fabac625b5a6 --- /dev/null +++ b/keras/src/backend/common/__init__.py @@ -0,0 +1,10 @@ +from keras.src.backend.common import backend_utils +from keras.src.backend.common.dtypes import result_type +from keras.src.backend.common.variables import AutocastScope +from keras.src.backend.common.variables import KerasVariable +from keras.src.backend.common.variables import get_autocast_scope +from keras.src.backend.common.variables import is_float_dtype +from keras.src.backend.common.variables import is_int_dtype +from keras.src.backend.common.variables import standardize_dtype +from keras.src.backend.common.variables import standardize_shape +from keras.src.random import random diff --git a/keras/backend/common/backend_utils.py b/keras/src/backend/common/backend_utils.py similarity index 100% rename from keras/backend/common/backend_utils.py rename to keras/src/backend/common/backend_utils.py diff --git a/keras/backend/common/backend_utils_test.py b/keras/src/backend/common/backend_utils_test.py similarity index 96% rename from keras/backend/common/backend_utils_test.py rename to keras/src/backend/common/backend_utils_test.py index 3c737f4baa14..68b0bbddda0c 100644 --- a/keras/backend/common/backend_utils_test.py +++ b/keras/src/backend/common/backend_utils_test.py @@ -1,19 +1,19 @@ -from keras.backend.common.backend_utils import ( +from keras.src.backend.common.backend_utils import ( _convert_conv_tranpose_padding_args_from_keras_to_jax, ) -from keras.backend.common.backend_utils import ( +from keras.src.backend.common.backend_utils import ( _convert_conv_tranpose_padding_args_from_keras_to_torch, ) -from 
keras.backend.common.backend_utils import ( +from keras.src.backend.common.backend_utils import ( _get_output_shape_given_tf_padding, ) -from keras.backend.common.backend_utils import ( +from keras.src.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_jax, ) -from keras.backend.common.backend_utils import ( +from keras.src.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_torch, ) -from keras.testing import test_case +from keras.src.testing import test_case class ConvertConvTransposePaddingArgsJAXTest(test_case.TestCase): diff --git a/keras/backend/common/compute_output_spec_test.py b/keras/src/backend/common/compute_output_spec_test.py similarity index 97% rename from keras/backend/common/compute_output_spec_test.py rename to keras/src/backend/common/compute_output_spec_test.py index def1a942cdc5..8ee856f4d31b 100644 --- a/keras/backend/common/compute_output_spec_test.py +++ b/keras/src/backend/common/compute_output_spec_test.py @@ -1,7 +1,7 @@ import pytest -from keras import backend -from keras import testing +from keras.src import backend +from keras.src import testing def example_fn(x): diff --git a/keras/backend/common/dtypes.py b/keras/src/backend/common/dtypes.py similarity index 98% rename from keras/backend/common/dtypes.py rename to keras/src/backend/common/dtypes.py index dcb922db2156..87253c87b8c4 100644 --- a/keras/backend/common/dtypes.py +++ b/keras/src/backend/common/dtypes.py @@ -1,8 +1,8 @@ import functools -from keras.api_export import keras_export -from keras.backend import config -from keras.backend.common.variables import standardize_dtype +from keras.src.api_export import keras_export +from keras.src.backend import config +from keras.src.backend.common.variables import standardize_dtype BOOL_TYPES = ("bool",) INT_TYPES = ( diff --git a/keras/backend/common/dtypes_test.py b/keras/src/backend/common/dtypes_test.py similarity index 96% rename from keras/backend/common/dtypes_test.py rename to keras/src/backend/common/dtypes_test.py index 200e4fa2b9e3..bc6dbd74bbbe 100644 --- a/keras/backend/common/dtypes_test.py +++ b/keras/src/backend/common/dtypes_test.py @@ -2,18 +2,18 @@ from absl.testing import parameterized -from keras import backend -from keras import ops -from keras.backend.common import dtypes -from keras.testing import test_case -from keras.testing.test_utils import named_product +from keras.src import backend +from keras.src import ops +from keras.src.backend.common import dtypes +from keras.src.testing import test_case +from keras.src.testing.test_utils import named_product class DtypesTest(test_case.TestCase, parameterized.TestCase): """Test the dtype to verify that the behavior matches JAX.""" if backend.backend() == "torch": - from keras.backend.torch.core import to_torch_dtype + from keras.src.backend.torch.core import to_torch_dtype # TODO: torch doesn't support uint64. 
ALL_DTYPES = [] diff --git a/keras/backend/common/global_state.py b/keras/src/backend/common/global_state.py similarity index 95% rename from keras/backend/common/global_state.py rename to keras/src/backend/common/global_state.py index c0ed8c1e5a51..8ecf11b95056 100644 --- a/keras/backend/common/global_state.py +++ b/keras/src/backend/common/global_state.py @@ -1,8 +1,8 @@ import gc import threading -from keras import backend -from keras.api_export import keras_export +from keras.src import backend +from keras.src.api_export import keras_export GLOBAL_STATE_TRACKER = threading.local() GLOBAL_SETTINGS_TRACKER = threading.local() @@ -77,7 +77,7 @@ def clear_session(free_memory=True): GLOBAL_SETTINGS_TRACKER = threading.local() if backend.backend() == "tensorflow": - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf tf.compat.v1.reset_default_graph() if tf.executing_eagerly(): diff --git a/keras/backend/common/global_state_test.py b/keras/src/backend/common/global_state_test.py similarity index 72% rename from keras/backend/common/global_state_test.py rename to keras/src/backend/common/global_state_test.py index 880886a7d98b..5f2a05ba15a4 100644 --- a/keras/backend/common/global_state_test.py +++ b/keras/src/backend/common/global_state_test.py @@ -1,6 +1,6 @@ -from keras.backend.common import global_state -from keras.testing import test_case -from keras.utils.naming import auto_name +from keras.src.backend.common import global_state +from keras.src.testing import test_case +from keras.src.utils.naming import auto_name class GlobalStateTest(test_case.TestCase): diff --git a/keras/backend/common/keras_tensor.py b/keras/src/backend/common/keras_tensor.py similarity index 85% rename from keras/backend/common/keras_tensor.py rename to keras/src/backend/common/keras_tensor.py index af030407f231..2876a57cffb4 100644 --- a/keras/backend/common/keras_tensor.py +++ b/keras/src/backend/common/keras_tensor.py @@ -1,6 +1,6 @@ -from keras import tree -from keras.api_export import keras_export -from keras.utils.naming import auto_name +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.utils.naming import auto_name @keras_export("keras.KerasTensor") @@ -35,7 +35,7 @@ def __init__( record_history=True, name=None, ): - from keras import backend + from keras.src import backend self.shape = backend.standardize_shape(shape) self.dtype = backend.standardize_dtype(dtype) @@ -48,12 +48,12 @@ def ndim(self): return len(self.shape) def reshape(self, newshape): - from keras import ops + from keras.src import ops return ops.Reshape(newshape)(self) def squeeze(self, axis=None): - from keras import ops + from keras.src import ops return ops.Squeeze(axis)(self) @@ -125,167 +125,167 @@ def __bool__(self): raise TypeError("A symbolic KerasTensor cannot be used as a boolean.") def __add__(self, other): - from keras import ops + from keras.src import ops return ops.Add().symbolic_call(self, other) def __radd__(self, other): - from keras import ops + from keras.src import ops return ops.Add().symbolic_call(other, self) def __sub__(self, other): - from keras import ops + from keras.src import ops return ops.Subtract().symbolic_call(self, other) def __rsub__(self, other): - from keras import ops + from keras.src import ops return ops.Subtract().symbolic_call(other, self) def __mul__(self, other): - from keras import ops + from keras.src import ops return ops.Multiply().symbolic_call(self, other) def __rmul__(self, other): - 
from keras import ops + from keras.src import ops return ops.Multiply().symbolic_call(other, self) def __matmul__(self, other): - from keras import ops + from keras.src import ops return ops.Matmul().symbolic_call(self, other) def __rmatmul__(self, other): - from keras import ops + from keras.src import ops return ops.Matmul().symbolic_call(other, self) def __div__(self, other): - from keras import ops + from keras.src import ops return ops.Divide().symbolic_call(self, other) def __rdiv__(self, other): - from keras import ops + from keras.src import ops return ops.Divide().symbolic_call(other, self) def __truediv__(self, other): - from keras import ops + from keras.src import ops return ops.TrueDivide().symbolic_call(self, other) def __rtruediv__(self, other): - from keras import ops + from keras.src import ops return ops.TrueDivide().symbolic_call(other, self) def __neg__(self): - from keras import ops + from keras.src import ops return ops.Negative().symbolic_call(self) def __abs__(self): - from keras import ops + from keras.src import ops return ops.Absolute().symbolic_call(self) def __pow__(self, other): - from keras import ops + from keras.src import ops return ops.Power().symbolic_call(self, other) def __rpow__(self, other): - from keras import ops + from keras.src import ops return ops.Power().symbolic_call(other, self) def __floordiv__(self, other): - from keras import ops + from keras.src import ops return ops.FloorDivide().symbolic_call(self, other) def __rfloordiv__(self, other): - from keras import ops + from keras.src import ops return ops.FloorDivide().symbolic_call(other, self) def __mod__(self, other): - from keras import ops + from keras.src import ops return ops.Mod().symbolic_call(self, other) def __rmod__(self, other): - from keras import ops + from keras.src import ops return ops.Mod().symbolic_call(other, self) def __lt__(self, other): - from keras import ops + from keras.src import ops return ops.Less().symbolic_call(self, other) def __le__(self, other): - from keras import ops + from keras.src import ops return ops.LessEqual().symbolic_call(self, other) def __gt__(self, other): - from keras import ops + from keras.src import ops return ops.Greater().symbolic_call(self, other) def __ge__(self, other): - from keras import ops + from keras.src import ops return ops.GreaterEqual().symbolic_call(self, other) def __ne__(self, other): - from keras import ops + from keras.src import ops return ops.NotEqual().symbolic_call(self, other) def __and__(self, other): - from keras import ops + from keras.src import ops return ops.LogicalAnd().symbolic_call(self, other) def __rand__(self, other): - from keras import ops + from keras.src import ops return ops.LogicalAnd().symbolic_call(other, self) def __or__(self, other): - from keras import ops + from keras.src import ops return ops.LogicalOr().symbolic_call(self, other) def __ror__(self, other): - from keras import ops + from keras.src import ops return ops.LogicalOr().symbolic_call(other, self) def __invert__(self): - from keras import ops + from keras.src import ops return ops.LogicalNot().symbolic_call(self) def __xor__(self, other): - from keras import ops + from keras.src import ops return ops.LogicalXor().symbolic_call(self, other) def __rxor__(self, other): - from keras import ops + from keras.src import ops return ops.LogicalXor().symbolic_call(other, self) def __getitem__(self, key): - from keras import ops + from keras.src import ops return ops.GetItem().symbolic_call(self, key) diff --git 
a/keras/backend/common/keras_tensor_test.py b/keras/src/backend/common/keras_tensor_test.py similarity index 88% rename from keras/backend/common/keras_tensor_test.py rename to keras/src/backend/common/keras_tensor_test.py index 5b63f4961ce2..ca6391024f87 100644 --- a/keras/backend/common/keras_tensor_test.py +++ b/keras/src/backend/common/keras_tensor_test.py @@ -4,10 +4,10 @@ import numpy as np import tensorflow as tf -from keras import backend -from keras import ops -from keras import testing -from keras.backend.common import keras_tensor +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.backend.common import keras_tensor class KerasTensorTest(testing.TestCase): @@ -80,7 +80,7 @@ def test_is_keras_tensor(self): y = np.array([1, 2, 3]) self.assertFalse(keras_tensor.is_keras_tensor(y)) - @patch("keras.ops.Absolute.symbolic_call") + @patch("keras.src.ops.Absolute.symbolic_call") def test_abs_method(self, mock_symbolic_call): mock_tensor = Mock() mock_symbolic_call.return_value = mock_tensor @@ -89,51 +89,51 @@ def test_abs_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(x) self.assertEqual(abs_x, mock_tensor) - @patch("keras.ops.Negative.symbolic_call") + @patch("keras.src.ops.Negative.symbolic_call") def test_neg_method(self, mock_method): self._test_unary_op_method(mock_method, lambda x: -x) - @patch("keras.ops.Subtract.symbolic_call") + @patch("keras.src.ops.Subtract.symbolic_call") def test_sub_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x - y) - @patch("keras.ops.Multiply.symbolic_call") + @patch("keras.src.ops.Multiply.symbolic_call") def test_mul_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x * y) - @patch("keras.ops.Matmul.symbolic_call") + @patch("keras.src.ops.Matmul.symbolic_call") def test_matmul_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x @ y) - @patch("keras.ops.Power.symbolic_call") + @patch("keras.src.ops.Power.symbolic_call") def test_pow_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x**y) - @patch("keras.ops.Mod.symbolic_call") + @patch("keras.src.ops.Mod.symbolic_call") def test_mod_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x % y) - @patch("keras.ops.Less.symbolic_call") + @patch("keras.src.ops.Less.symbolic_call") def test_lt_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x < y) - @patch("keras.ops.LogicalAnd.symbolic_call") + @patch("keras.src.ops.LogicalAnd.symbolic_call") def test_and_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x & y) - @patch("keras.ops.LogicalOr.symbolic_call") + @patch("keras.src.ops.LogicalOr.symbolic_call") def test_or_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x | y) - @patch("keras.ops.GetItem.symbolic_call") + @patch("keras.src.ops.GetItem.symbolic_call") def test_getitem_method(self, mock_method): y = Mock() self._test_binary_op_method(mock_method, y, lambda x, y: x[y]) @@ -154,7 +154,7 @@ def _test_binary_op_method(self, mock_method, other, operator): mock_method.assert_called_once_with(x, other) self.assertEqual(result, mock_tensor) - @patch("keras.ops.Add.symbolic_call") + @patch("keras.src.ops.Add.symbolic_call") def test_radd_method(self, 
mock_symbolic_call): """Test __radd__ method""" mock_tensor = Mock() @@ -165,7 +165,7 @@ def test_radd_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(y, x) self.assertEqual(result, mock_tensor) - @patch("keras.ops.Subtract.symbolic_call") + @patch("keras.src.ops.Subtract.symbolic_call") def test_rsub_method(self, mock_symbolic_call): """Test __rsub__ method""" mock_tensor = Mock() @@ -176,7 +176,7 @@ def test_rsub_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(y, x) self.assertEqual(result, mock_tensor) - @patch("keras.ops.Multiply.symbolic_call") + @patch("keras.src.ops.Multiply.symbolic_call") def test_rmul_method(self, mock_symbolic_call): """Test __rmul__ method""" mock_tensor = Mock() @@ -187,7 +187,7 @@ def test_rmul_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(y, x) self.assertEqual(result, mock_tensor) - @patch("keras.ops.Matmul.symbolic_call") + @patch("keras.src.ops.Matmul.symbolic_call") def test_rmatmul_method(self, mock_symbolic_call): """Test __rmatmul__ method""" mock_tensor = Mock() @@ -198,7 +198,7 @@ def test_rmatmul_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(y, x) self.assertEqual(result, mock_tensor) - @patch("keras.ops.Power.symbolic_call") + @patch("keras.src.ops.Power.symbolic_call") def test_rpow_method(self, mock_symbolic_call): """Test __rpow__ method""" mock_tensor = Mock() @@ -209,7 +209,7 @@ def test_rpow_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(y, x) self.assertEqual(result, mock_tensor) - @patch("keras.ops.FloorDivide.symbolic_call") + @patch("keras.src.ops.FloorDivide.symbolic_call") def test_floordiv_method(self, mock_symbolic_call): """Test __floordiv__ method""" mock_tensor = Mock() @@ -220,7 +220,7 @@ def test_floordiv_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(x, y) self.assertEqual(result, mock_tensor) - @patch("keras.ops.FloorDivide.symbolic_call") + @patch("keras.src.ops.FloorDivide.symbolic_call") def test_rfloordiv_method(self, mock_symbolic_call): """Test __rfloordiv__ method""" mock_tensor = Mock() @@ -231,7 +231,7 @@ def test_rfloordiv_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(y, x) self.assertEqual(result, mock_tensor) - @patch("keras.ops.Mod.symbolic_call") + @patch("keras.src.ops.Mod.symbolic_call") def test_rmod_method(self, mock_symbolic_call): """Test __rmod__ method""" mock_tensor = Mock() @@ -242,7 +242,7 @@ def test_rmod_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(y, x) self.assertEqual(result, mock_tensor) - @patch("keras.ops.LessEqual.symbolic_call") + @patch("keras.src.ops.LessEqual.symbolic_call") def test_le_method(self, mock_symbolic_call): """Test __le__ method""" mock_tensor = Mock() @@ -253,7 +253,7 @@ def test_le_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(x, y) self.assertEqual(result, mock_tensor) - @patch("keras.ops.Greater.symbolic_call") + @patch("keras.src.ops.Greater.symbolic_call") def test_gt_method(self, mock_symbolic_call): """Test __gt__ method""" mock_tensor = Mock() @@ -264,7 +264,7 @@ def test_gt_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(x, y) self.assertEqual(result, mock_tensor) - @patch("keras.ops.GreaterEqual.symbolic_call") + @patch("keras.src.ops.GreaterEqual.symbolic_call") def test_ge_method(self, mock_symbolic_call): """Test __ge__ method""" mock_tensor = Mock() @@ -275,7 
+275,7 @@ def test_ge_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(x, y) self.assertEqual(result, mock_tensor) - @patch("keras.ops.NotEqual.symbolic_call") + @patch("keras.src.ops.NotEqual.symbolic_call") def test_ne_method(self, mock_symbolic_call): """Test __ne__ method""" mock_tensor = Mock() @@ -286,7 +286,7 @@ def test_ne_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(x, y) self.assertEqual(result, mock_tensor) - @patch("keras.ops.LogicalAnd.symbolic_call") + @patch("keras.src.ops.LogicalAnd.symbolic_call") def test_rand_method(self, mock_symbolic_call): """Test __rand__ method""" mock_tensor = Mock() @@ -297,7 +297,7 @@ def test_rand_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(y, x) self.assertEqual(result, mock_tensor) - @patch("keras.ops.LogicalOr.symbolic_call") + @patch("keras.src.ops.LogicalOr.symbolic_call") def test_ror_method(self, mock_symbolic_call): """Test __ror__ method""" mock_tensor = Mock() @@ -308,7 +308,7 @@ def test_ror_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(y, x) self.assertEqual(result, mock_tensor) - @patch("keras.ops.LogicalNot.symbolic_call") + @patch("keras.src.ops.LogicalNot.symbolic_call") def test_invert_method(self, mock_symbolic_call): """Test __invert__ method""" mock_tensor = Mock() @@ -318,7 +318,7 @@ def test_invert_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(x) self.assertEqual(result, mock_tensor) - @patch("keras.ops.LogicalXor.symbolic_call") + @patch("keras.src.ops.LogicalXor.symbolic_call") def test_xor_method(self, mock_symbolic_call): """Test __xor__ method""" mock_tensor = Mock() @@ -329,7 +329,7 @@ def test_xor_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(x, y) self.assertEqual(result, mock_tensor) - @patch("keras.ops.LogicalXor.symbolic_call") + @patch("keras.src.ops.LogicalXor.symbolic_call") def test_rxor_method(self, mock_symbolic_call): """Test __rxor__ method""" mock_tensor = Mock() @@ -340,7 +340,7 @@ def test_rxor_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(y, x) self.assertEqual(result, mock_tensor) - @patch("keras.ops.TrueDivide.symbolic_call") + @patch("keras.src.ops.TrueDivide.symbolic_call") def test_truediv_method(self, mock_symbolic_call): """Test __truediv__ method""" mock_tensor = Mock() @@ -351,7 +351,7 @@ def test_truediv_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(x, y) self.assertEqual(result, mock_tensor) - @patch("keras.ops.TrueDivide.symbolic_call") + @patch("keras.src.ops.TrueDivide.symbolic_call") def test_rtruediv_method(self, mock_symbolic_call): """Test __rtruediv__ method""" mock_tensor = Mock() @@ -362,7 +362,7 @@ def test_rtruediv_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(y, x) self.assertEqual(result, mock_tensor) - @patch("keras.ops.Divide.symbolic_call") + @patch("keras.src.ops.Divide.symbolic_call") def test_div_method(self, mock_symbolic_call): """Test __div__ method""" mock_tensor = Mock() @@ -374,7 +374,7 @@ def test_div_method(self, mock_symbolic_call): mock_symbolic_call.assert_called_once_with(x, y) self.assertEqual(result, mock_tensor) - @patch("keras.ops.Divide.symbolic_call") + @patch("keras.src.ops.Divide.symbolic_call") def test_rdiv_method(self, mock_symbolic_call): """Test __rdiv__ method""" mock_tensor = Mock() diff --git a/keras/backend/common/name_scope.py b/keras/src/backend/common/name_scope.py 
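# --- Illustrative sketch, not part of this patch: the keras_tensor tests
# above all patch `keras.src.ops.<Op>.symbolic_call` because every
# KerasTensor dunder defers to a symbolic op through a function-local
# `from keras.src import ops` import (kept local, presumably to avoid a
# circular import at module load time). A minimal runnable sketch of that
# dispatch pattern; `SymbolicTensor` and `Add` are hypothetical stand-ins
# for KerasTensor and ops.Add.
class Add:
    def symbolic_call(self, x, y):
        # Real Keras would build a graph node and return a new KerasTensor.
        return ("add", x, y)


class SymbolicTensor:
    def __add__(self, other):
        return Add().symbolic_call(self, other)

    def __radd__(self, other):
        # Reflected variants swap the operand order, mirroring the diff above.
        return Add().symbolic_call(other, self)


x = SymbolicTensor()
print(x + 1)  # ('add', <SymbolicTensor ...>, 1) -- dispatched symbolically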
similarity index 98% rename from keras/backend/common/name_scope.py rename to keras/src/backend/common/name_scope.py index 02768fd43d0d..71a8408767b6 100644 --- a/keras/backend/common/name_scope.py +++ b/keras/src/backend/common/name_scope.py @@ -1,4 +1,4 @@ -from keras.backend.common import global_state +from keras.src.backend.common import global_state class name_scope: diff --git a/keras/backend/common/name_scope_test.py b/keras/src/backend/common/name_scope_test.py similarity index 92% rename from keras/backend/common/name_scope_test.py rename to keras/src/backend/common/name_scope_test.py index 0650fadec093..2e79f2146958 100644 --- a/keras/backend/common/name_scope_test.py +++ b/keras/src/backend/common/name_scope_test.py @@ -1,6 +1,6 @@ -from keras import testing -from keras.backend.common.name_scope import current_path -from keras.backend.common.name_scope import name_scope +from keras.src import testing +from keras.src.backend.common.name_scope import current_path +from keras.src.backend.common.name_scope import name_scope class NameScopeTest(testing.TestCase): diff --git a/keras/backend/common/stateless_scope.py b/keras/src/backend/common/stateless_scope.py similarity index 91% rename from keras/backend/common/stateless_scope.py rename to keras/src/backend/common/stateless_scope.py index a3abf758e2c4..e3f4f9d69693 100644 --- a/keras/backend/common/stateless_scope.py +++ b/keras/src/backend/common/stateless_scope.py @@ -1,5 +1,5 @@ -from keras.api_export import keras_export -from keras.backend.common import global_state +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state @keras_export("keras.StatelessScope") @@ -38,8 +38,8 @@ def __init__( collect_losses=False, initialize_variables=True, ): - from keras import backend - from keras.backend.common.variables import KerasVariable + from keras.src import backend + from keras.src.backend.common.variables import KerasVariable self.collect_losses = collect_losses self.initialize_variables = initialize_variables @@ -90,7 +90,9 @@ def __exit__(self, *args, **kwargs): # We're back in eager scope; # if any variables were created within the stateless # scope, we initialize them here. 
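# --- Illustrative sketch, not part of this patch: what the comment above
# means for users of `keras.StatelessScope` (exported earlier in this hunk).
# Inside the scope, `assign` records a new value instead of mutating the
# variable; the recorded value can be read back with `get_current_value`,
# and only variables *created* inside the scope are initialized on exit.
# A minimal sketch, assuming Keras 3's stateless-update semantics:
import numpy as np
import keras

v = keras.Variable(initializer="zeros", shape=(2,))
with keras.StatelessScope() as scope:
    v.assign(np.ones((2,)))                # recorded in the scope only
    assert scope.get_current_value(v) is not None
assert float(np.sum(v.numpy())) == 0.0     # v itself is unchanged outside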
- from keras.backend.common.variables import initialize_all_variables + from keras.src.backend.common.variables import ( + initialize_all_variables, + ) initialize_all_variables() diff --git a/keras/backend/common/stateless_scope_test.py b/keras/src/backend/common/stateless_scope_test.py similarity index 92% rename from keras/backend/common/stateless_scope_test.py rename to keras/src/backend/common/stateless_scope_test.py index f3f917620db9..295c6ffb091d 100644 --- a/keras/backend/common/stateless_scope_test.py +++ b/keras/src/backend/common/stateless_scope_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras import backend -from keras import ops -from keras import testing -from keras.backend.common.stateless_scope import StatelessScope +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.backend.common.stateless_scope import StatelessScope class TestStatelessScope(testing.TestCase): diff --git a/keras/backend/common/variables.py b/keras/src/backend/common/variables.py similarity index 96% rename from keras/backend/common/variables.py rename to keras/src/backend/common/variables.py index 3d9c80989687..156d50eba4bf 100644 --- a/keras/backend/common/variables.py +++ b/keras/src/backend/common/variables.py @@ -1,14 +1,14 @@ import numpy as np -from keras.api_export import keras_export -from keras.backend import config -from keras.backend.common import dtypes -from keras.backend.common import global_state -from keras.backend.common.name_scope import current_path -from keras.backend.common.stateless_scope import get_stateless_scope -from keras.backend.common.stateless_scope import in_stateless_scope -from keras.utils.module_utils import tensorflow as tf -from keras.utils.naming import auto_name +from keras.src.api_export import keras_export +from keras.src.backend import config +from keras.src.backend.common import dtypes +from keras.src.backend.common import global_state +from keras.src.backend.common.name_scope import current_path +from keras.src.backend.common.stateless_scope import get_stateless_scope +from keras.src.backend.common.stateless_scope import in_stateless_scope +from keras.src.utils.module_utils import tensorflow as tf +from keras.src.utils.naming import auto_name class KerasVariable: @@ -55,7 +55,7 @@ class KerasVariable: **Using a Keras initializer to create a `Variable`:** ```python - from keras.initializers import Ones + from keras.src.initializers import Ones variable_from_initializer = keras.Variable( initializer=Ones(), shape=(3, 3), dtype="float32" ) @@ -120,7 +120,7 @@ def __init__( # Ref: https://github.com/google/flax/blob/main/flax/linen/fp8_ops.py self._overwrite_with_gradient = False if isinstance(initializer, str): - from keras import initializers + from keras.src import initializers initializer = initializers.get(initializer) if callable(initializer): @@ -298,7 +298,7 @@ def regularizer(self): @regularizer.setter def regularizer(self, value): - from keras.regularizers import Regularizer + from keras.src.regularizers import Regularizer if value is not None and not isinstance(value, Regularizer): raise ValueError( @@ -314,7 +314,7 @@ def constraint(self): @constraint.setter def constraint(self, value): - from keras.constraints import Constraint + from keras.src.constraints import Constraint if value is not None and not isinstance(value, Constraint): raise ValueError( @@ -605,7 +605,7 @@ def __init__(self, dtype): self.original_scope = None def maybe_cast(self, value): - from keras import backend + from keras.src 
import backend if self.dtype is not None and is_float_dtype(value.dtype): return backend.cast(value, dtype=self.dtype) diff --git a/keras/backend/common/variables_test.py b/keras/src/backend/common/variables_test.py similarity index 98% rename from keras/backend/common/variables_test.py rename to keras/src/backend/common/variables_test.py index 1062aa4ac76c..7ece491e8d05 100644 --- a/keras/backend/common/variables_test.py +++ b/keras/src/backend/common/variables_test.py @@ -2,15 +2,15 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import initializers -from keras.backend.common import dtypes -from keras.backend.common.variables import AutocastScope -from keras.backend.common.variables import KerasVariable -from keras.backend.common.variables import shape_equal -from keras.backend.common.variables import standardize_dtype -from keras.backend.common.variables import standardize_shape -from keras.testing import test_case +from keras.src import backend +from keras.src import initializers +from keras.src.backend.common import dtypes +from keras.src.backend.common.variables import AutocastScope +from keras.src.backend.common.variables import KerasVariable +from keras.src.backend.common.variables import shape_equal +from keras.src.backend.common.variables import standardize_dtype +from keras.src.backend.common.variables import standardize_shape +from keras.src.testing import test_case class VariableInitializationTest(test_case.TestCase): @@ -267,7 +267,7 @@ def test_variable_numpy(self): reason="Tests for MirroredVariable under tf backend", ) def test_variable_numpy_scalar(self): - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf strategy = tf.distribute.MirroredStrategy(["cpu:0", "cpu:1"]) with strategy.scope(): diff --git a/keras/backend/config.py b/keras/src/backend/config.py similarity index 99% rename from keras/backend/config.py rename to keras/src/backend/config.py index 8725e27b56ff..19af01fe83a2 100644 --- a/keras/backend/config.py +++ b/keras/src/backend/config.py @@ -1,7 +1,7 @@ import json import os -from keras.api_export import keras_export +from keras.src.api_export import keras_export # The type of float to use throughout a session. 
_FLOATX = "float32" diff --git a/keras/backend/exports.py b/keras/src/backend/exports.py similarity index 86% rename from keras/backend/exports.py rename to keras/src/backend/exports.py index 2c02eed603d4..54ee1c74bb8a 100644 --- a/keras/backend/exports.py +++ b/keras/src/backend/exports.py @@ -1,5 +1,5 @@ -from keras import backend -from keras.api_export import keras_export +from keras.src import backend +from keras.src.api_export import keras_export if backend.backend() == "tensorflow": BackendVariable = backend.tensorflow.core.Variable @@ -11,7 +11,7 @@ BackendVariable = backend.torch.core.Variable backend_name_scope = backend.common.name_scope.name_scope elif backend.backend() == "numpy": - from keras.backend.numpy.core import Variable as NumpyVariable + from keras.src.backend.numpy.core import Variable as NumpyVariable BackendVariable = NumpyVariable backend_name_scope = backend.common.name_scope.name_scope diff --git a/keras/src/backend/jax/__init__.py b/keras/src/backend/jax/__init__.py new file mode 100644 index 000000000000..934bf5f4b159 --- /dev/null +++ b/keras/src/backend/jax/__init__.py @@ -0,0 +1,25 @@ +from keras.src.backend.jax import core +from keras.src.backend.jax import distribution_lib +from keras.src.backend.jax import image +from keras.src.backend.jax import linalg +from keras.src.backend.jax import math +from keras.src.backend.jax import nn +from keras.src.backend.jax import numpy +from keras.src.backend.jax import random +from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS +from keras.src.backend.jax.core import Variable +from keras.src.backend.jax.core import cast +from keras.src.backend.jax.core import compute_output_spec +from keras.src.backend.jax.core import cond +from keras.src.backend.jax.core import convert_to_numpy +from keras.src.backend.jax.core import convert_to_tensor +from keras.src.backend.jax.core import device_scope +from keras.src.backend.jax.core import is_tensor +from keras.src.backend.jax.core import scatter +from keras.src.backend.jax.core import shape +from keras.src.backend.jax.core import stop_gradient +from keras.src.backend.jax.core import vectorized_map +from keras.src.backend.jax.rnn import cudnn_ok +from keras.src.backend.jax.rnn import gru +from keras.src.backend.jax.rnn import lstm +from keras.src.backend.jax.rnn import rnn diff --git a/keras/backend/jax/core.py b/keras/src/backend/jax/core.py similarity index 97% rename from keras/backend/jax/core.py rename to keras/src/backend/jax/core.py index 7b37807239f4..4d95bcaa7004 100644 --- a/keras/backend/jax/core.py +++ b/keras/src/backend/jax/core.py @@ -4,13 +4,13 @@ import ml_dtypes import numpy as np -from keras import tree -from keras.backend.common import KerasVariable -from keras.backend.common import global_state -from keras.backend.common import standardize_dtype -from keras.backend.common.keras_tensor import KerasTensor -from keras.backend.common.stateless_scope import StatelessScope -from keras.backend.jax import distribution_lib +from keras.src import tree +from keras.src.backend.common import KerasVariable +from keras.src.backend.common import global_state +from keras.src.backend.common import standardize_dtype +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.backend.jax import distribution_lib SUPPORTS_SPARSE_TENSORS = True diff --git a/keras/backend/jax/distribution_lib.py b/keras/src/backend/jax/distribution_lib.py similarity index 99% rename from 
keras/backend/jax/distribution_lib.py rename to keras/src/backend/jax/distribution_lib.py index 8e15e3c96fb0..8dee363b86a1 100644 --- a/keras/backend/jax/distribution_lib.py +++ b/keras/src/backend/jax/distribution_lib.py @@ -9,7 +9,7 @@ import jax import numpy as np -from keras.utils import jax_utils +from keras.src.utils import jax_utils def list_devices(device_type=None): diff --git a/keras/backend/jax/distribution_lib_test.py b/keras/src/backend/jax/distribution_lib_test.py similarity index 98% rename from keras/backend/jax/distribution_lib_test.py rename to keras/src/backend/jax/distribution_lib_test.py index 130954fe050d..b3e6b014b70b 100644 --- a/keras/backend/jax/distribution_lib_test.py +++ b/keras/src/backend/jax/distribution_lib_test.py @@ -8,12 +8,12 @@ import numpy as np import pytest -from keras import backend -from keras import layers -from keras import models -from keras import testing -from keras.backend import distribution_lib as backend_dlib -from keras.distribution import distribution_lib +from keras.src import backend +from keras.src import layers +from keras.src import models +from keras.src import testing +from keras.src.backend import distribution_lib as backend_dlib +from keras.src.distribution import distribution_lib if backend.backend() == "jax": # Due to https://github.com/google/jax/issues/17188, we can't diff --git a/keras/backend/jax/image.py b/keras/src/backend/jax/image.py similarity index 99% rename from keras/backend/jax/image.py rename to keras/src/backend/jax/image.py index 462611ba968d..7bba72e3f927 100644 --- a/keras/backend/jax/image.py +++ b/keras/src/backend/jax/image.py @@ -3,7 +3,7 @@ import jax import jax.numpy as jnp -from keras.backend.jax.core import convert_to_tensor +from keras.src.backend.jax.core import convert_to_tensor RESIZE_INTERPOLATIONS = ( "bilinear", diff --git a/keras/backend/jax/layer.py b/keras/src/backend/jax/layer.py similarity index 100% rename from keras/backend/jax/layer.py rename to keras/src/backend/jax/layer.py diff --git a/keras/backend/jax/linalg.py b/keras/src/backend/jax/linalg.py similarity index 86% rename from keras/backend/jax/linalg.py rename to keras/src/backend/jax/linalg.py index 73d548a2d91d..bffdeba67dcc 100644 --- a/keras/backend/jax/linalg.py +++ b/keras/src/backend/jax/linalg.py @@ -2,11 +2,11 @@ import jax.numpy as jnp import jax.scipy as jsp -from keras.backend import config -from keras.backend import standardize_dtype -from keras.backend.common import dtypes -from keras.backend.jax.core import cast -from keras.backend.jax.core import convert_to_tensor +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.jax.core import cast +from keras.src.backend.jax.core import convert_to_tensor def cholesky(a): diff --git a/keras/backend/jax/math.py b/keras/src/backend/jax/math.py similarity index 96% rename from keras/backend/jax/math.py rename to keras/src/backend/jax/math.py index 70add7674269..361eeee89173 100644 --- a/keras/backend/jax/math.py +++ b/keras/src/backend/jax/math.py @@ -3,12 +3,12 @@ import jax import jax.numpy as jnp -from keras.backend import config -from keras.backend import standardize_dtype -from keras.backend.common import dtypes -from keras.backend.jax.core import cast -from keras.backend.jax.core import convert_to_tensor -from keras.utils.module_utils import scipy +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import 
dtypes +from keras.src.backend.jax.core import cast +from keras.src.backend.jax.core import convert_to_tensor +from keras.src.utils.module_utils import scipy def segment_sum(data, segment_ids, num_segments=None, sorted=False): diff --git a/keras/backend/jax/nn.py b/keras/src/backend/jax/nn.py similarity index 98% rename from keras/backend/jax/nn.py rename to keras/src/backend/jax/nn.py index 608a0dadc7fc..740c9b17e5a2 100644 --- a/keras/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -5,14 +5,14 @@ from jax import lax from jax import nn as jnn -from keras.backend import standardize_data_format -from keras.backend import standardize_dtype -from keras.backend.common.backend_utils import ( +from keras.src.backend import standardize_data_format +from keras.src.backend import standardize_dtype +from keras.src.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_jax, ) -from keras.backend.config import epsilon -from keras.backend.jax.core import cast -from keras.backend.jax.core import convert_to_tensor +from keras.src.backend.config import epsilon +from keras.src.backend.jax.core import cast +from keras.src.backend.jax.core import convert_to_tensor def relu(x): diff --git a/keras/backend/jax/numpy.py b/keras/src/backend/jax/numpy.py similarity index 98% rename from keras/backend/jax/numpy.py rename to keras/src/backend/jax/numpy.py index 70435d144d32..9520b21ec281 100644 --- a/keras/backend/jax/numpy.py +++ b/keras/src/backend/jax/numpy.py @@ -4,15 +4,15 @@ import jax.experimental.sparse as jax_sparse import jax.numpy as jnp -from keras.backend import config -from keras.backend.common import dtypes -from keras.backend.common.backend_utils import canonicalize_axis -from keras.backend.common.backend_utils import to_tuple_or_list -from keras.backend.common.variables import standardize_dtype -from keras.backend.jax import nn -from keras.backend.jax import sparse -from keras.backend.jax.core import cast -from keras.backend.jax.core import convert_to_tensor +from keras.src.backend import config +from keras.src.backend.common import dtypes +from keras.src.backend.common.backend_utils import canonicalize_axis +from keras.src.backend.common.backend_utils import to_tuple_or_list +from keras.src.backend.common.variables import standardize_dtype +from keras.src.backend.jax import nn +from keras.src.backend.jax import sparse +from keras.src.backend.jax.core import cast +from keras.src.backend.jax.core import convert_to_tensor @sparse.elementwise_binary_union(linear=True, use_sparsify=True) @@ -860,7 +860,7 @@ def repeat(x, repeats, axis=None): def reshape(x, newshape): if isinstance(x, jax_sparse.BCOO): - from keras.ops import operation_utils + from keras.src.ops import operation_utils # Resolve the -1 in `new_shape` if applicable and possible output_shape = operation_utils.compute_reshape_output_shape( diff --git a/keras/backend/jax/optimizer.py b/keras/src/backend/jax/optimizer.py similarity index 98% rename from keras/backend/jax/optimizer.py rename to keras/src/backend/jax/optimizer.py index 2a4d6844b9d9..cc461ce113a7 100644 --- a/keras/backend/jax/optimizer.py +++ b/keras/src/backend/jax/optimizer.py @@ -1,7 +1,7 @@ import jax from jax import numpy as jnp -from keras.optimizers import base_optimizer +from keras.src.optimizers import base_optimizer class JaxOptimizer(base_optimizer.BaseOptimizer): diff --git a/keras/backend/jax/random.py b/keras/src/backend/jax/random.py similarity index 93% rename from keras/backend/jax/random.py rename to keras/src/backend/jax/random.py 
index 6b2c7afba1b6..79901696339f 100644 --- a/keras/backend/jax/random.py +++ b/keras/src/backend/jax/random.py @@ -1,9 +1,9 @@ import jax -from keras.backend.config import floatx -from keras.random.seed_generator import SeedGenerator -from keras.random.seed_generator import draw_seed -from keras.random.seed_generator import make_default_seed +from keras.src.backend.config import floatx +from keras.src.random.seed_generator import SeedGenerator +from keras.src.random.seed_generator import draw_seed +from keras.src.random.seed_generator import make_default_seed def jax_draw_seed(seed): diff --git a/keras/backend/jax/rnn.py b/keras/src/backend/jax/rnn.py similarity index 98% rename from keras/backend/jax/rnn.py rename to keras/src/backend/jax/rnn.py index b6676fc8d3a9..688211b31f0d 100644 --- a/keras/backend/jax/rnn.py +++ b/keras/src/backend/jax/rnn.py @@ -3,8 +3,8 @@ from jax import lax from jax import numpy as jnp -from keras import tree -from keras.backend.common import stateless_scope +from keras.src import tree +from keras.src.backend.common import stateless_scope def rnn( diff --git a/keras/backend/jax/sparse.py b/keras/src/backend/jax/sparse.py similarity index 99% rename from keras/backend/jax/sparse.py rename to keras/src/backend/jax/sparse.py index 48359016cf1e..f2d7f19d7d16 100644 --- a/keras/backend/jax/sparse.py +++ b/keras/src/backend/jax/sparse.py @@ -3,7 +3,7 @@ import jax.experimental.sparse as jax_sparse import jax.numpy as jnp -from keras.utils import jax_utils +from keras.src.utils import jax_utils def axis_shape_dims_for_broadcast_in_dim(axis, input_shape, insert_dims): diff --git a/keras/backend/jax/trainer.py b/keras/src/backend/jax/trainer.py similarity index 98% rename from keras/backend/jax/trainer.py rename to keras/src/backend/jax/trainer.py index 85308f34175a..89ff3b0d1b7f 100644 --- a/keras/backend/jax/trainer.py +++ b/keras/src/backend/jax/trainer.py @@ -5,17 +5,17 @@ import jax import numpy as np -from keras import backend -from keras import callbacks as callbacks_module -from keras import optimizers as optimizers_module -from keras import tree -from keras.backend import distribution_lib as jax_distribution_lib -from keras.distribution import distribution_lib -from keras.trainers import trainer as base_trainer -from keras.trainers.data_adapters import array_slicing -from keras.trainers.data_adapters import data_adapter_utils -from keras.trainers.epoch_iterator import EpochIterator -from keras.utils import traceback_utils +from keras.src import backend +from keras.src import callbacks as callbacks_module +from keras.src import optimizers as optimizers_module +from keras.src import tree +from keras.src.backend import distribution_lib as jax_distribution_lib +from keras.src.distribution import distribution_lib +from keras.src.trainers import trainer as base_trainer +from keras.src.trainers.data_adapters import array_slicing +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.epoch_iterator import EpochIterator +from keras.src.utils import traceback_utils class JAXTrainer(base_trainer.Trainer): diff --git a/keras/src/backend/numpy/__init__.py b/keras/src/backend/numpy/__init__.py new file mode 100644 index 000000000000..ce1277b8ddc1 --- /dev/null +++ b/keras/src/backend/numpy/__init__.py @@ -0,0 +1,21 @@ +from keras.src.backend.numpy import core +from keras.src.backend.numpy import image +from keras.src.backend.numpy import linalg +from keras.src.backend.numpy import math +from keras.src.backend.numpy import nn +from 
keras.src.backend.numpy import numpy +from keras.src.backend.numpy import random +from keras.src.backend.numpy.core import SUPPORTS_SPARSE_TENSORS +from keras.src.backend.numpy.core import Variable +from keras.src.backend.numpy.core import cast +from keras.src.backend.numpy.core import compute_output_spec +from keras.src.backend.numpy.core import cond +from keras.src.backend.numpy.core import convert_to_numpy +from keras.src.backend.numpy.core import convert_to_tensor +from keras.src.backend.numpy.core import is_tensor +from keras.src.backend.numpy.core import shape +from keras.src.backend.numpy.core import vectorized_map +from keras.src.backend.numpy.rnn import cudnn_ok +from keras.src.backend.numpy.rnn import gru +from keras.src.backend.numpy.rnn import lstm +from keras.src.backend.numpy.rnn import rnn diff --git a/keras/backend/numpy/core.py b/keras/src/backend/numpy/core.py similarity index 95% rename from keras/backend/numpy/core.py rename to keras/src/backend/numpy/core.py index dcd01554f2cb..c00b2598dfde 100644 --- a/keras/backend/numpy/core.py +++ b/keras/src/backend/numpy/core.py @@ -1,11 +1,11 @@ import numpy as np -from keras import tree -from keras.backend.common import KerasVariable -from keras.backend.common import standardize_dtype -from keras.backend.common.dtypes import result_type -from keras.backend.common.keras_tensor import KerasTensor -from keras.backend.common.stateless_scope import StatelessScope +from keras.src import tree +from keras.src.backend.common import KerasVariable +from keras.src.backend.common import standardize_dtype +from keras.src.backend.common.dtypes import result_type +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.common.stateless_scope import StatelessScope SUPPORTS_SPARSE_TENSORS = False diff --git a/keras/backend/numpy/image.py b/keras/src/backend/numpy/image.py similarity index 98% rename from keras/backend/numpy/image.py rename to keras/src/backend/numpy/image.py index efa42e42f137..2281c422d496 100644 --- a/keras/backend/numpy/image.py +++ b/keras/src/backend/numpy/image.py @@ -1,8 +1,8 @@ import jax import numpy as np -from keras.backend.numpy.core import convert_to_tensor -from keras.utils.module_utils import scipy +from keras.src.backend.numpy.core import convert_to_tensor +from keras.src.utils.module_utils import scipy RESIZE_INTERPOLATIONS = ( "bilinear", diff --git a/keras/backend/numpy/layer.py b/keras/src/backend/numpy/layer.py similarity index 100% rename from keras/backend/numpy/layer.py rename to keras/src/backend/numpy/layer.py diff --git a/keras/backend/numpy/linalg.py b/keras/src/backend/numpy/linalg.py similarity index 92% rename from keras/backend/numpy/linalg.py rename to keras/src/backend/numpy/linalg.py index 1b651a59aa05..32d0f762fd76 100644 --- a/keras/backend/numpy/linalg.py +++ b/keras/src/backend/numpy/linalg.py @@ -1,9 +1,9 @@ import numpy as np import scipy.linalg as sl -from keras.backend import standardize_dtype -from keras.backend.common import dtypes -from keras.backend.numpy.core import convert_to_tensor +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.numpy.core import convert_to_tensor def cholesky(a): diff --git a/keras/backend/numpy/math.py b/keras/src/backend/numpy/math.py similarity index 97% rename from keras/backend/numpy/math.py rename to keras/src/backend/numpy/math.py index bb9c3d499e5a..f14e43d863b8 100644 --- a/keras/backend/numpy/math.py +++ b/keras/src/backend/numpy/math.py @@ -1,11 +1,11 
@@ import numpy as np -from keras.backend import standardize_dtype -from keras.backend.common import dtypes -from keras.backend.jax.math import fft as jax_fft -from keras.backend.jax.math import fft2 as jax_fft2 -from keras.backend.numpy.core import convert_to_tensor -from keras.utils.module_utils import scipy +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.jax.math import fft as jax_fft +from keras.src.backend.jax.math import fft2 as jax_fft2 +from keras.src.backend.numpy.core import convert_to_tensor +from keras.src.utils.module_utils import scipy def segment_sum(data, segment_ids, num_segments=None, sorted=False): diff --git a/keras/backend/numpy/nn.py b/keras/src/backend/numpy/nn.py similarity index 97% rename from keras/backend/numpy/nn.py rename to keras/src/backend/numpy/nn.py index 45ed3f05ac9c..7dee370ef005 100644 --- a/keras/backend/numpy/nn.py +++ b/keras/src/backend/numpy/nn.py @@ -3,16 +3,16 @@ from jax import lax from jax import numpy as jnp -from keras.backend import standardize_data_format -from keras.backend import standardize_dtype -from keras.backend.common.backend_utils import ( +from keras.src.backend import standardize_data_format +from keras.src.backend import standardize_dtype +from keras.src.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_jax, ) -from keras.backend.config import epsilon -from keras.backend.numpy.core import cast -from keras.backend.numpy.core import convert_to_tensor -from keras.backend.numpy.core import is_tensor -from keras.utils.module_utils import scipy +from keras.src.backend.config import epsilon +from keras.src.backend.numpy.core import cast +from keras.src.backend.numpy.core import convert_to_tensor +from keras.src.backend.numpy.core import is_tensor +from keras.src.utils.module_utils import scipy def relu(x): diff --git a/keras/backend/numpy/numpy.py b/keras/src/backend/numpy/numpy.py similarity index 98% rename from keras/backend/numpy/numpy.py rename to keras/src/backend/numpy/numpy.py index 5965ce380218..fa098a029978 100644 --- a/keras/backend/numpy/numpy.py +++ b/keras/src/backend/numpy/numpy.py @@ -1,11 +1,11 @@ import numpy as np -from keras import tree -from keras.backend import config -from keras.backend import standardize_dtype -from keras.backend.common import dtypes -from keras.backend.common.backend_utils import standardize_axis_for_numpy -from keras.backend.numpy.core import convert_to_tensor +from keras.src import tree +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.common.backend_utils import standardize_axis_for_numpy +from keras.src.backend.numpy.core import convert_to_tensor def add(x1, x2): diff --git a/keras/backend/numpy/random.py b/keras/src/backend/numpy/random.py similarity index 93% rename from keras/backend/numpy/random.py rename to keras/src/backend/numpy/random.py index b3204b430292..f8fd65aa38ba 100644 --- a/keras/backend/numpy/random.py +++ b/keras/src/backend/numpy/random.py @@ -1,10 +1,10 @@ import numpy as np -from keras.backend.config import floatx -from keras.backend.numpy.nn import softmax -from keras.random.seed_generator import SeedGenerator -from keras.random.seed_generator import draw_seed -from keras.random.seed_generator import make_default_seed +from keras.src.backend.config import floatx +from keras.src.backend.numpy.nn import softmax +from keras.src.random.seed_generator import SeedGenerator 
+from keras.src.random.seed_generator import draw_seed +from keras.src.random.seed_generator import make_default_seed def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): diff --git a/keras/backend/numpy/rnn.py b/keras/src/backend/numpy/rnn.py similarity index 99% rename from keras/backend/numpy/rnn.py rename to keras/src/backend/numpy/rnn.py index 6d760f0f6ebe..07f657525144 100644 --- a/keras/backend/numpy/rnn.py +++ b/keras/src/backend/numpy/rnn.py @@ -1,6 +1,6 @@ import numpy as np -from keras import tree +from keras.src import tree def rnn( diff --git a/keras/backend/numpy/trainer.py b/keras/src/backend/numpy/trainer.py similarity index 95% rename from keras/backend/numpy/trainer.py rename to keras/src/backend/numpy/trainer.py index 700e4b856dce..c92465874673 100644 --- a/keras/backend/numpy/trainer.py +++ b/keras/src/backend/numpy/trainer.py @@ -1,15 +1,15 @@ import numpy as np -from keras import backend -from keras import callbacks as callbacks_module -from keras import tree -from keras.backend.common import standardize_dtype -from keras.backend.common.keras_tensor import KerasTensor -from keras.backend.numpy.core import is_tensor -from keras.trainers import trainer as base_trainer -from keras.trainers.data_adapters import data_adapter_utils -from keras.trainers.epoch_iterator import EpochIterator -from keras.utils import traceback_utils +from keras.src import backend +from keras.src import callbacks as callbacks_module +from keras.src import tree +from keras.src.backend.common import standardize_dtype +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.numpy.core import is_tensor +from keras.src.trainers import trainer as base_trainer +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.epoch_iterator import EpochIterator +from keras.src.utils import traceback_utils class NumpyTrainer(base_trainer.Trainer): diff --git a/keras/src/backend/tensorflow/__init__.py b/keras/src/backend/tensorflow/__init__.py new file mode 100644 index 000000000000..c70f22ee4668 --- /dev/null +++ b/keras/src/backend/tensorflow/__init__.py @@ -0,0 +1,27 @@ +from keras.src.backend.tensorflow import core +from keras.src.backend.tensorflow import distribution_lib +from keras.src.backend.tensorflow import image +from keras.src.backend.tensorflow import linalg +from keras.src.backend.tensorflow import math +from keras.src.backend.tensorflow import nn +from keras.src.backend.tensorflow import numpy +from keras.src.backend.tensorflow import random +from keras.src.backend.tensorflow import tensorboard +from keras.src.backend.tensorflow.core import SUPPORTS_SPARSE_TENSORS +from keras.src.backend.tensorflow.core import Variable +from keras.src.backend.tensorflow.core import cast +from keras.src.backend.tensorflow.core import compute_output_spec +from keras.src.backend.tensorflow.core import cond +from keras.src.backend.tensorflow.core import convert_to_numpy +from keras.src.backend.tensorflow.core import convert_to_tensor +from keras.src.backend.tensorflow.core import device_scope +from keras.src.backend.tensorflow.core import is_tensor +from keras.src.backend.tensorflow.core import name_scope +from keras.src.backend.tensorflow.core import scatter +from keras.src.backend.tensorflow.core import shape +from keras.src.backend.tensorflow.core import stop_gradient +from keras.src.backend.tensorflow.core import vectorized_map +from keras.src.backend.tensorflow.rnn import cudnn_ok +from keras.src.backend.tensorflow.rnn import gru +from 
keras.src.backend.tensorflow.rnn import lstm +from keras.src.backend.tensorflow.rnn import rnn diff --git a/keras/backend/tensorflow/core.py b/keras/src/backend/tensorflow/core.py similarity index 93% rename from keras/backend/tensorflow/core.py rename to keras/src/backend/tensorflow/core.py index 6e72539d11be..575e6dc6da26 100644 --- a/keras/backend/tensorflow/core.py +++ b/keras/src/backend/tensorflow/core.py @@ -2,16 +2,16 @@ import tensorflow as tf from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice -from keras import tree -from keras.backend.common import KerasVariable -from keras.backend.common import global_state -from keras.backend.common import standardize_dtype -from keras.backend.common.keras_tensor import KerasTensor -from keras.backend.common.name_scope import name_scope as base_name_scope -from keras.backend.common.stateless_scope import StatelessScope -from keras.backend.common.stateless_scope import in_stateless_scope -from keras.backend.tensorflow.sparse import sparse_to_dense -from keras.utils.naming import auto_name +from keras.src import tree +from keras.src.backend.common import KerasVariable +from keras.src.backend.common import global_state +from keras.src.backend.common import standardize_dtype +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.common.name_scope import name_scope as base_name_scope +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.backend.common.stateless_scope import in_stateless_scope +from keras.src.backend.tensorflow.sparse import sparse_to_dense +from keras.src.utils.naming import auto_name SUPPORTS_SPARSE_TENSORS = True diff --git a/keras/backend/tensorflow/distribute_test.py b/keras/src/backend/tensorflow/distribute_test.py similarity index 95% rename from keras/backend/tensorflow/distribute_test.py rename to keras/src/backend/tensorflow/distribute_test.py index 413bac6f0fc4..ae07d08e6bf7 100644 --- a/keras/backend/tensorflow/distribute_test.py +++ b/keras/src/backend/tensorflow/distribute_test.py @@ -5,11 +5,11 @@ import tensorflow as tf from tensorflow.python.eager import context -from keras import backend -from keras import layers -from keras import models -from keras import testing -from keras.backend.tensorflow import trainer as tf_trainer +from keras.src import backend +from keras.src import layers +from keras.src import models +from keras.src import testing +from keras.src.backend.tensorflow import trainer as tf_trainer @pytest.mark.skipif( diff --git a/keras/backend/tensorflow/distribution_lib.py b/keras/src/backend/tensorflow/distribution_lib.py similarity index 100% rename from keras/backend/tensorflow/distribution_lib.py rename to keras/src/backend/tensorflow/distribution_lib.py diff --git a/keras/backend/tensorflow/image.py b/keras/src/backend/tensorflow/image.py similarity index 99% rename from keras/backend/tensorflow/image.py rename to keras/src/backend/tensorflow/image.py index b51b738c1732..c03825dd7d16 100644 --- a/keras/backend/tensorflow/image.py +++ b/keras/src/backend/tensorflow/image.py @@ -4,7 +4,7 @@ import tensorflow as tf -from keras.backend.tensorflow.core import convert_to_tensor +from keras.src.backend.tensorflow.core import convert_to_tensor RESIZE_INTERPOLATIONS = ( "bilinear", diff --git a/keras/backend/tensorflow/layer.py b/keras/src/backend/tensorflow/layer.py similarity index 92% rename from keras/backend/tensorflow/layer.py rename to keras/src/backend/tensorflow/layer.py index 7c871ba67cf8..fd1f33020132 100644 --- 
a/keras/backend/tensorflow/layer.py +++ b/keras/src/backend/tensorflow/layer.py @@ -1,9 +1,9 @@ import tensorflow as tf -from keras import tree -from keras.backend.tensorflow.trackable import KerasAutoTrackable -from keras.utils import tf_utils -from keras.utils import tracking +from keras.src import tree +from keras.src.backend.tensorflow.trackable import KerasAutoTrackable +from keras.src.utils import tf_utils +from keras.src.utils import tracking class TFLayer(KerasAutoTrackable): @@ -77,9 +77,9 @@ def _trackable_children(self, save_type="checkpoint", **kwargs): def _default_save_signature(self): """For SavedModel support: returns the default serving signature.""" - from keras.models.functional import Functional - from keras.models.model import Model - from keras.models.sequential import Sequential + from keras.src.models.functional import Functional + from keras.src.models.model import Model + from keras.src.models.sequential import Sequential if not isinstance(self, Model): return None diff --git a/keras/backend/tensorflow/linalg.py b/keras/src/backend/tensorflow/linalg.py similarity index 96% rename from keras/backend/tensorflow/linalg.py rename to keras/src/backend/tensorflow/linalg.py index 20b8fe5b23e7..b50a8b44e875 100644 --- a/keras/backend/tensorflow/linalg.py +++ b/keras/src/backend/tensorflow/linalg.py @@ -1,11 +1,11 @@ import tensorflow as tf from tensorflow.experimental import numpy as tfnp -from keras.backend import config -from keras.backend import standardize_dtype -from keras.backend.common import dtypes -from keras.backend.tensorflow.core import cast -from keras.backend.tensorflow.core import convert_to_tensor +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.tensorflow.core import cast +from keras.src.backend.tensorflow.core import convert_to_tensor def cholesky(a): diff --git a/keras/backend/tensorflow/math.py b/keras/src/backend/tensorflow/math.py similarity index 97% rename from keras/backend/tensorflow/math.py rename to keras/src/backend/tensorflow/math.py index 9b4d4d151f83..ffc7f99f0da0 100644 --- a/keras/backend/tensorflow/math.py +++ b/keras/src/backend/tensorflow/math.py @@ -1,11 +1,11 @@ import tensorflow as tf from tensorflow.experimental import numpy as tfnp -from keras.backend import config -from keras.backend import standardize_dtype -from keras.backend.common import dtypes -from keras.backend.tensorflow.core import cast -from keras.backend.tensorflow.core import convert_to_tensor +from keras.src.backend import config +from keras.src.backend import standardize_dtype +from keras.src.backend.common import dtypes +from keras.src.backend.tensorflow.core import cast +from keras.src.backend.tensorflow.core import convert_to_tensor def segment_sum(data, segment_ids, num_segments=None, sorted=False): diff --git a/keras/backend/tensorflow/name_scope_test.py b/keras/src/backend/tensorflow/name_scope_test.py similarity index 94% rename from keras/backend/tensorflow/name_scope_test.py rename to keras/src/backend/tensorflow/name_scope_test.py index 8ec1aecb790e..f9d8eb7b8499 100644 --- a/keras/backend/tensorflow/name_scope_test.py +++ b/keras/src/backend/tensorflow/name_scope_test.py @@ -1,7 +1,7 @@ import tensorflow as tf -from keras.backend.tensorflow.core import name_scope -from keras.testing import TestCase +from keras.src.backend.tensorflow.core import name_scope +from keras.src.testing import TestCase class TFNameScopeTest(TestCase): diff --git 
a/keras/backend/tensorflow/nn.py b/keras/src/backend/tensorflow/nn.py
similarity index 98%
rename from keras/backend/tensorflow/nn.py
rename to keras/src/backend/tensorflow/nn.py
index 807f0206439a..2167087198f0 100644
--- a/keras/backend/tensorflow/nn.py
+++ b/keras/src/backend/tensorflow/nn.py
@@ -3,14 +3,14 @@
 
 import tensorflow as tf
 
-from keras.backend import standardize_data_format
-from keras.backend import standardize_dtype
-from keras.backend.common.backend_utils import (
-    compute_conv_transpose_output_shape,
-)
-from keras.backend.config import epsilon
-from keras.backend.tensorflow.core import cast
-from keras.backend.tensorflow.core import convert_to_tensor
+from keras.src.backend import standardize_data_format
+from keras.src.backend import standardize_dtype
+from keras.src.backend.common.backend_utils import (
+    compute_conv_transpose_output_shape,
+)
+from keras.src.backend.config import epsilon
+from keras.src.backend.tensorflow.core import cast
+from keras.src.backend.tensorflow.core import convert_to_tensor
 
 
 def relu(x):
diff --git a/keras/backend/tensorflow/numpy.py b/keras/src/backend/tensorflow/numpy.py
similarity index 98%
rename from keras/backend/tensorflow/numpy.py
rename to keras/src/backend/tensorflow/numpy.py
index 1b6b754c9d71..d1a12e53a780 100644
--- a/keras/backend/tensorflow/numpy.py
+++ b/keras/src/backend/tensorflow/numpy.py
@@ -10,15 +10,15 @@
 from tensorflow.experimental import numpy as tfnp
 from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
 
-from keras import tree
-from keras.backend import config
-from keras.backend import standardize_dtype
-from keras.backend.common import dtypes
-from keras.backend.common.backend_utils import canonicalize_axis
-from keras.backend.common.backend_utils import to_tuple_or_list
-from keras.backend.tensorflow import sparse
-from keras.backend.tensorflow.core import cast
-from keras.backend.tensorflow.core import convert_to_tensor
+from keras.src import tree
+from keras.src.backend import config
+from keras.src.backend import standardize_dtype
+from keras.src.backend.common import dtypes
+from keras.src.backend.common.backend_utils import canonicalize_axis
+from keras.src.backend.common.backend_utils import to_tuple_or_list
+from keras.src.backend.tensorflow import sparse
+from keras.src.backend.tensorflow.core import cast
+from keras.src.backend.tensorflow.core import convert_to_tensor
 
 
 @sparse.elementwise_binary_union(tf.sparse.add)
@@ -442,7 +442,7 @@ def sparse_dense_matmul_3d(a, b):
         )
     if x1_sparse or x2_sparse:
-        from keras.ops.operation_utils import compute_matmul_output_shape
+        from keras.src.ops.operation_utils import compute_matmul_output_shape
 
         output_shape = compute_matmul_output_shape(x1_shape, x2_shape)
         if x1_sparse and x2_sparse:
@@ -1022,7 +1022,9 @@ def expand_dims(x, axis):
     out_ndim = len(x.shape) + len(axis)
     axis = sorted([canonicalize_axis(a, out_ndim) for a in axis])
     if isinstance(x, tf.SparseTensor):
-        from keras.ops.operation_utils import compute_expand_dims_output_shape
+        from keras.src.ops.operation_utils import (
+            compute_expand_dims_output_shape,
+        )
 
         output_shape = compute_expand_dims_output_shape(x.shape, axis)
         for a in axis:
@@ -1607,7 +1609,7 @@ def repeat(x, repeats, axis=None):
 def reshape(x, newshape):
     x = convert_to_tensor(x)
     if isinstance(x, tf.SparseTensor):
-        from keras.ops.operation_utils import compute_reshape_output_shape
+        from keras.src.ops.operation_utils import compute_reshape_output_shape
 
         output_shape = compute_reshape_output_shape(
             x.shape, newshape, "newshape"
         )
@@ -2011,7 +2013,7 @@ def squeeze(x, axis=None):
 
 def transpose(x, axes=None):
     if isinstance(x, tf.SparseTensor):
-        from keras.ops.operation_utils import compute_transpose_output_shape
+        from keras.src.ops.operation_utils import compute_transpose_output_shape
 
         output = tf.sparse.transpose(x, perm=axes)
         output.set_shape(compute_transpose_output_shape(x.shape, axes))
diff --git a/keras/backend/tensorflow/optimizer.py b/keras/src/backend/tensorflow/optimizer.py
similarity index 97%
rename from keras/backend/tensorflow/optimizer.py
rename to keras/src/backend/tensorflow/optimizer.py
index 9524149b7535..1887ac52cf39 100644
--- a/keras/backend/tensorflow/optimizer.py
+++ b/keras/src/backend/tensorflow/optimizer.py
@@ -2,10 +2,10 @@
 
 import tensorflow as tf
 
-from keras import backend
-from keras.backend.common import KerasVariable
-from keras.backend.tensorflow.trackable import KerasAutoTrackable
-from keras.optimizers import base_optimizer
+from keras.src import backend
+from keras.src.backend.common import KerasVariable
+from keras.src.backend.tensorflow.trackable import KerasAutoTrackable
+from keras.src.optimizers import base_optimizer
 
 
 class TFOptimizer(KerasAutoTrackable, base_optimizer.BaseOptimizer):
diff --git a/keras/backend/tensorflow/optimizer_distribute_test.py b/keras/src/backend/tensorflow/optimizer_distribute_test.py
similarity index 98%
rename from keras/backend/tensorflow/optimizer_distribute_test.py
rename to keras/src/backend/tensorflow/optimizer_distribute_test.py
index 27f46ae54f32..a4d61da6b23d 100644
--- a/keras/backend/tensorflow/optimizer_distribute_test.py
+++ b/keras/src/backend/tensorflow/optimizer_distribute_test.py
@@ -5,9 +5,9 @@
 import tensorflow as tf
 from tensorflow.python.eager import context
 
-from keras import backend
-from keras import testing
-from keras.optimizers.sgd import SGD
+from keras.src import backend
+from keras.src import testing
+from keras.src.optimizers.sgd import SGD
 
 
 @pytest.mark.skipif(
diff --git a/keras/backend/tensorflow/random.py b/keras/src/backend/tensorflow/random.py
similarity index 95%
rename from keras/backend/tensorflow/random.py
rename to keras/src/backend/tensorflow/random.py
index b366c61be8c7..eeb38a6aa523 100644
--- a/keras/backend/tensorflow/random.py
+++ b/keras/src/backend/tensorflow/random.py
@@ -1,11 +1,11 @@
 import tensorflow as tf
 from tensorflow.experimental import numpy as tfnp
 
-from keras.backend.common import standardize_dtype
-from keras.backend.config import floatx
-from keras.random.seed_generator import SeedGenerator
-from keras.random.seed_generator import draw_seed
-from keras.random.seed_generator import make_default_seed
+from keras.src.backend.common import standardize_dtype
+from keras.src.backend.config import floatx
+from keras.src.random.seed_generator import SeedGenerator
+from keras.src.random.seed_generator import draw_seed
+from keras.src.random.seed_generator import make_default_seed
 
 
 def tf_draw_seed(seed):
diff --git a/keras/backend/tensorflow/rnn.py b/keras/src/backend/tensorflow/rnn.py
similarity index 99%
rename from keras/backend/tensorflow/rnn.py
rename to keras/src/backend/tensorflow/rnn.py
index c9de498a6cf6..1911deec897e 100644
--- a/keras/backend/tensorflow/rnn.py
+++ b/keras/src/backend/tensorflow/rnn.py
@@ -1,6 +1,6 @@
 import tensorflow as tf
 
-from keras import tree
+from keras.src import tree
 
 
 def rnn(
@@ -471,7 +471,7 @@ def gru(
     if not cudnn_supported:
         raise NotImplementedError
 
-    from keras.backend.tensorflow import Variable
+    from keras.src.backend.tensorflow import Variable
 
     if isinstance(kernel, Variable):
         kernel = kernel.value
@@ -507,8 +507,8 @@ def _do_gru_arguments_support_cudnn(
     use_bias,
     reset_after,
 ):
-    from keras import activations
-    from keras import ops
+    from keras.src import activations
+    from keras.src import ops
 
     return (
         activation in (activations.tanh, tf.tanh, ops.tanh)
@@ -526,8 +526,8 @@ def _do_lstm_arguments_support_cudnn(
     unroll,
     use_bias,
 ):
-    from keras import activations
-    from keras import ops
+    from keras.src import activations
+    from keras.src import ops
 
     return (
         activation in (activations.tanh, tf.tanh, ops.tanh)
@@ -828,7 +828,7 @@ def lstm(
     if not cudnn_supported:
         raise NotImplementedError
 
-    from keras.backend.tensorflow import Variable
+    from keras.src.backend.tensorflow import Variable
 
     if isinstance(kernel, Variable):
         kernel = kernel.value
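Note: the `_do_gru_arguments_support_cudnn` / `_do_lstm_arguments_support_cudnn`
helpers in the hunk above gate the fused cuDNN path on the exact default layer
arguments (tanh/sigmoid activations, `use_bias=True`, no unrolling, and
`reset_after=True` for GRU). A minimal sketch of the consequence, assuming the
TensorFlow backend and a visible GPU (the variable names are illustrative):

    import keras

    fast = keras.layers.GRU(32)                     # defaults: cuDNN-eligible
    slow = keras.layers.GRU(32, activation="relu")  # falls back to generic rnn()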
diff --git a/keras/backend/tensorflow/saved_model_test.py b/keras/src/backend/tensorflow/saved_model_test.py
similarity index 98%
rename from keras/backend/tensorflow/saved_model_test.py
rename to keras/src/backend/tensorflow/saved_model_test.py
index 02be85362fd6..2e81818be176 100644
--- a/keras/backend/tensorflow/saved_model_test.py
+++ b/keras/src/backend/tensorflow/saved_model_test.py
@@ -6,13 +6,13 @@
 import pytest
 import tensorflow as tf
 
-from keras import backend
-from keras import layers
-from keras import metrics
-from keras import models
-from keras import optimizers
-from keras import testing
-from keras.saving import object_registration
+from keras.src import backend
+from keras.src import layers
+from keras.src import metrics
+from keras.src import models
+from keras.src import optimizers
+from keras.src import testing
+from keras.src.saving import object_registration
 
 
 @object_registration.register_keras_serializable(package="my_package")
diff --git a/keras/backend/tensorflow/sparse.py b/keras/src/backend/tensorflow/sparse.py
similarity index 100%
rename from keras/backend/tensorflow/sparse.py
rename to keras/src/backend/tensorflow/sparse.py
diff --git a/keras/backend/tensorflow/tensorboard.py b/keras/src/backend/tensorflow/tensorboard.py
similarity index 100%
rename from keras/backend/tensorflow/tensorboard.py
rename to keras/src/backend/tensorflow/tensorboard.py
diff --git a/keras/backend/tensorflow/trackable.py b/keras/src/backend/tensorflow/trackable.py
similarity index 98%
rename from keras/backend/tensorflow/trackable.py
rename to keras/src/backend/tensorflow/trackable.py
index 29ed4dd48017..e14b2996af34 100644
--- a/keras/backend/tensorflow/trackable.py
+++ b/keras/src/backend/tensorflow/trackable.py
@@ -1,6 +1,6 @@
 import tensorflow as tf
 
-from keras.utils import tracking
+from keras.src.utils import tracking
 
 
 class KerasAutoTrackable(tf.__internal__.tracking.AutoTrackable):
diff --git a/keras/backend/tensorflow/trainer.py b/keras/src/backend/tensorflow/trainer.py
similarity index 98%
rename from keras/backend/tensorflow/trainer.py
rename to keras/src/backend/tensorflow/trainer.py
index e1f0afaa1cf3..5a6b4aed945d 100644
--- a/keras/backend/tensorflow/trainer.py
+++ b/keras/src/backend/tensorflow/trainer.py
@@ -5,15 +5,15 @@
 import tensorflow as tf
 from tensorflow.python.eager import context as tf_context
 
-from keras import callbacks as callbacks_module
-from keras import metrics as metrics_module
-from keras import optimizers as optimizers_module
-from keras import tree
-from keras.trainers import trainer as base_trainer
-from keras.trainers.data_adapters import array_slicing
-from keras.trainers.data_adapters import data_adapter_utils
-from keras.trainers.epoch_iterator import EpochIterator
-from keras.utils import traceback_utils
+from keras.src import callbacks as callbacks_module
+from keras.src import metrics as metrics_module
+from keras.src import optimizers as optimizers_module
+from keras.src import tree
+from keras.src.trainers import trainer as base_trainer
+from keras.src.trainers.data_adapters import array_slicing
+from keras.src.trainers.data_adapters import data_adapter_utils
+from keras.src.trainers.epoch_iterator import EpochIterator
+from keras.src.utils import traceback_utils
 
 
 class TensorFlowTrainer(base_trainer.Trainer):
diff --git a/keras/backend/tests/compute_output_spec_test.py b/keras/src/backend/tests/compute_output_spec_test.py
similarity index 97%
rename from keras/backend/tests/compute_output_spec_test.py
rename to keras/src/backend/tests/compute_output_spec_test.py
index 5982086124ed..b3458bcc876f 100644
--- a/keras/backend/tests/compute_output_spec_test.py
+++ b/keras/src/backend/tests/compute_output_spec_test.py
@@ -2,9 +2,9 @@
 
 import pytest
 
-from keras import backend
-from keras import ops
-from keras.backend.common.keras_tensor import KerasTensor
+from keras.src import backend
+from keras.src import ops
+from keras.src.backend.common.keras_tensor import KerasTensor
 
 
 def single_arg_test_fn(x):
diff --git a/keras/backend/tests/device_scope_test.py b/keras/src/backend/tests/device_scope_test.py
similarity index 98%
rename from keras/backend/tests/device_scope_test.py
rename to keras/src/backend/tests/device_scope_test.py
index 1a7213213303..caee6742f61d 100644
--- a/keras/backend/tests/device_scope_test.py
+++ b/keras/src/backend/tests/device_scope_test.py
@@ -1,7 +1,7 @@
 import pytest
 
-from keras import backend
-from keras import testing
+from keras.src import backend
+from keras.src import testing
 
 
 class DeviceTest(testing.TestCase):
diff --git a/keras/src/backend/torch/__init__.py b/keras/src/backend/torch/__init__.py
new file mode 100644
index 000000000000..d980ec87cfec
--- /dev/null
+++ b/keras/src/backend/torch/__init__.py
@@ -0,0 +1,41 @@
+"""Torch backend APIs.
+
+# Note on device placement
+
+Torch has a different device placement style compared to TF and JAX.
+In short, variables/tensors are not created on GPU by default,
+and the GPU cannot directly communicate with the CPU.
+To bring Torch behavior in line with TF and JAX automated device placement,
+we are doing the following to automate device placement if a GPU is available:
+
+- Variables are created on GPU.
+- Input data will be placed on GPU at the first `keras.layers.Layer` call.
+- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
+- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
+"""
+
+from keras.src.backend.torch import core
+from keras.src.backend.torch import image
+from keras.src.backend.torch import linalg
+from keras.src.backend.torch import math
+from keras.src.backend.torch import nn
+from keras.src.backend.torch import numpy
+from keras.src.backend.torch import random
+from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS
+from keras.src.backend.torch.core import Variable
+from keras.src.backend.torch.core import cast
+from keras.src.backend.torch.core import compute_output_spec
+from keras.src.backend.torch.core import cond
+from keras.src.backend.torch.core import convert_to_numpy
+from keras.src.backend.torch.core import convert_to_tensor
+from keras.src.backend.torch.core import device_scope
+from keras.src.backend.torch.core import is_tensor
+from keras.src.backend.torch.core import scatter
+from keras.src.backend.torch.core import shape
+from keras.src.backend.torch.core import stop_gradient
+from keras.src.backend.torch.core import to_torch_dtype
+from keras.src.backend.torch.core import vectorized_map
+from keras.src.backend.torch.rnn import cudnn_ok
+from keras.src.backend.torch.rnn import gru
+from keras.src.backend.torch.rnn import lstm
+from keras.src.backend.torch.rnn import rnn
diff --git a/keras/backend/torch/core.py b/keras/src/backend/torch/core.py
similarity index 97%
rename from keras/backend/torch/core.py
rename to keras/src/backend/torch/core.py
index bb9bd98ff401..257afeeec699 100644
--- a/keras/backend/torch/core.py
+++ b/keras/src/backend/torch/core.py
@@ -5,14 +5,14 @@
 import numpy as np
 import torch
 
-from keras import tree
-from keras.backend.common import KerasVariable
-from keras.backend.common import global_state
-from keras.backend.common import standardize_dtype
-from keras.backend.common.dtypes import result_type
-from keras.backend.common.keras_tensor import KerasTensor
-from keras.backend.common.stateless_scope import StatelessScope
-from keras.backend.config import floatx
+from keras.src import tree
+from keras.src.backend.common import KerasVariable
+from keras.src.backend.common import global_state
+from keras.src.backend.common import standardize_dtype
+from keras.src.backend.common.dtypes import result_type
+from keras.src.backend.common.keras_tensor import KerasTensor
+from keras.src.backend.common.stateless_scope import StatelessScope
+from keras.src.backend.config import floatx
 
 SUPPORTS_SPARSE_TENSORS = False
diff --git a/keras/backend/torch/image.py b/keras/src/backend/torch/image.py
similarity index 99%
rename from keras/backend/torch/image.py
rename to keras/src/backend/torch/image.py
index e62162f8ed67..82f609612a78 100644
--- a/keras/backend/torch/image.py
+++ b/keras/src/backend/torch/image.py
@@ -4,7 +4,7 @@
 
 import torch
 
-from keras.backend.torch.core import convert_to_tensor
+from keras.src.backend.torch.core import convert_to_tensor
 
 RESIZE_INTERPOLATIONS = {}  # populated after torchvision import
diff --git a/keras/backend/torch/layer.py b/keras/src/backend/torch/layer.py
similarity index 87%
rename from keras/backend/torch/layer.py
rename to keras/src/backend/torch/layer.py
index 001925f276a1..dfb31a552ab1 100644
--- a/keras/backend/torch/layer.py
+++ b/keras/src/backend/torch/layer.py
@@ -1,7 +1,7 @@
 import torch
 
-from keras.backend.common.stateless_scope import in_stateless_scope
-from keras.ops.operation import Operation
+from keras.src.backend.common.stateless_scope import in_stateless_scope
+from keras.src.ops.operation import Operation
 
 
 class TorchLayer(torch.nn.Module):
@@ -27,14 +27,14 @@ def forward(self, *args, **kwargs):
         return Operation.__call__(self, *args, **kwargs)
 
     def _setattr_hook(self, name, value):
-        from keras.layers import Layer
+        from keras.src.layers import Layer
 
         if (
            isinstance(value, torch.nn.Module)
            and not isinstance(value, Layer)
            and not name == "torch_params"
        ):
-            from keras.utils.torch_utils import TorchModuleWrapper
+            from keras.src.utils.torch_utils import TorchModuleWrapper
 
             if not isinstance(self, TorchModuleWrapper):
                 value = TorchModuleWrapper(value)
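The `_setattr_hook` in the `torch/layer.py` hunk above is what keeps plain
`torch.nn.Module` attributes trackable: any assigned module that is not already
a Keras `Layer` is wrapped in `TorchModuleWrapper`. A small sketch of the
effect, assuming the torch backend (the layer and attribute names are
hypothetical):

    import torch
    from keras import layers
    from keras.src.utils.torch_utils import TorchModuleWrapper

    class MyLayer(layers.Layer):
        def __init__(self):
            super().__init__()
            # A raw torch module; `_setattr_hook` wraps it on assignment.
            self.fc = torch.nn.Linear(4, 2)

    assert isinstance(MyLayer().fc, TorchModuleWrapper)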
diff --git a/keras/backend/torch/linalg.py b/keras/src/backend/torch/linalg.py
similarity index 86%
rename from keras/backend/torch/linalg.py
rename to keras/src/backend/torch/linalg.py
index 3deeaefdbe79..a9158cc64a62 100644
--- a/keras/backend/torch/linalg.py
+++ b/keras/src/backend/torch/linalg.py
@@ -1,10 +1,10 @@
 import torch
 
-from keras.backend import config
-from keras.backend import standardize_dtype
-from keras.backend.common import dtypes
-from keras.backend.torch.core import cast
-from keras.backend.torch.core import convert_to_tensor
+from keras.src.backend import config
+from keras.src.backend import standardize_dtype
+from keras.src.backend.common import dtypes
+from keras.src.backend.torch.core import cast
+from keras.src.backend.torch.core import convert_to_tensor
 
 
 def cholesky(x):
diff --git a/keras/backend/torch/math.py b/keras/src/backend/torch/math.py
similarity index 97%
rename from keras/backend/torch/math.py
rename to keras/src/backend/torch/math.py
index abcabae98c33..2d9de436d3fb 100644
--- a/keras/backend/torch/math.py
+++ b/keras/src/backend/torch/math.py
@@ -2,13 +2,13 @@
 
 import torch
 
-from keras.backend import config
-from keras.backend import standardize_dtype
-from keras.backend.common import dtypes
-from keras.backend.torch.core import cast
-from keras.backend.torch.core import convert_to_tensor
-from keras.backend.torch.core import get_device
-from keras.backend.torch.numpy import pad
+from keras.src.backend import config
+from keras.src.backend import standardize_dtype
+from keras.src.backend.common import dtypes
+from keras.src.backend.torch.core import cast
+from keras.src.backend.torch.core import convert_to_tensor
+from keras.src.backend.torch.core import get_device
+from keras.src.backend.torch.numpy import pad
 
 
 def segment_sum(data, segment_ids, num_segments=None, **kwargs):
diff --git a/keras/backend/torch/nn.py b/keras/src/backend/torch/nn.py
similarity index 97%
rename from keras/backend/torch/nn.py
rename to keras/src/backend/torch/nn.py
index f2105b5d5e12..a5cbaab3ea47 100644
--- a/keras/backend/torch/nn.py
+++ b/keras/src/backend/torch/nn.py
@@ -1,20 +1,20 @@
 import torch
 import torch.nn.functional as tnn
 
-from keras import tree
-from keras.backend import standardize_data_format
-from keras.backend import standardize_dtype
-from keras.backend.common.backend_utils import (
-    compute_conv_transpose_padding_args_for_torch,
-)
-from keras.backend.config import epsilon
-from keras.backend.torch.core import cast
-from keras.backend.torch.core import convert_to_tensor
-from keras.backend.torch.core import get_device
-from keras.backend.torch.numpy import expand_dims
-from keras.backend.torch.numpy import maximum
-from keras.backend.torch.numpy import where
-from keras.utils.argument_validation import standardize_tuple
+from keras.src import tree
+from keras.src.backend import standardize_data_format
+from keras.src.backend import standardize_dtype
+from keras.src.backend.common.backend_utils import (
+    compute_conv_transpose_padding_args_for_torch,
+)
+from keras.src.backend.config import epsilon
+from keras.src.backend.torch.core import cast
+from keras.src.backend.torch.core import convert_to_tensor
+from keras.src.backend.torch.core import get_device
+from keras.src.backend.torch.numpy import expand_dims
+from keras.src.backend.torch.numpy import maximum
+from keras.src.backend.torch.numpy import where
+from keras.src.utils.argument_validation import standardize_tuple
 
 
 def relu(x):
diff --git a/keras/backend/torch/numpy.py b/keras/src/backend/torch/numpy.py
similarity index 98%
rename from keras/backend/torch/numpy.py
rename to keras/src/backend/torch/numpy.py
index bcf9e090f88a..13f6a47c77ac 100644
--- a/keras/backend/torch/numpy.py
+++ b/keras/src/backend/torch/numpy.py
@@ -3,17 +3,17 @@
 
 import torch
 
-from keras.backend import KerasTensor
-from keras.backend import config
-from keras.backend.common import dtypes
-from keras.backend.common.backend_utils import canonicalize_axis
-from keras.backend.common.backend_utils import to_tuple_or_list
-from keras.backend.common.variables import standardize_dtype
-from keras.backend.torch.core import cast
-from keras.backend.torch.core import convert_to_tensor
-from keras.backend.torch.core import get_device
-from keras.backend.torch.core import is_tensor
-from keras.backend.torch.core import to_torch_dtype
+from keras.src.backend import KerasTensor
+from keras.src.backend import config
+from keras.src.backend.common import dtypes
+from keras.src.backend.common.backend_utils import canonicalize_axis
+from keras.src.backend.common.backend_utils import to_tuple_or_list
+from keras.src.backend.common.variables import standardize_dtype
+from keras.src.backend.torch.core import cast
+from keras.src.backend.torch.core import convert_to_tensor
+from keras.src.backend.torch.core import get_device
+from keras.src.backend.torch.core import is_tensor
+from keras.src.backend.torch.core import to_torch_dtype
 
 TORCH_INT_TYPES = (
     torch.int8,
diff --git a/keras/src/backend/torch/optimizers/__init__.py b/keras/src/backend/torch/optimizers/__init__.py
new file mode 100644
index 000000000000..008312b04b63
--- /dev/null
+++ b/keras/src/backend/torch/optimizers/__init__.py
@@ -0,0 +1 @@
+from keras.src.backend.torch.optimizers.torch_optimizer import TorchOptimizer
diff --git a/keras/backend/torch/optimizers/torch_adadelta.py b/keras/src/backend/torch/optimizers/torch_adadelta.py
similarity index 92%
rename from keras/backend/torch/optimizers/torch_adadelta.py
rename to keras/src/backend/torch/optimizers/torch_adadelta.py
index c8a3607f9514..9e6038e7b6eb 100644
--- a/keras/backend/torch/optimizers/torch_adadelta.py
+++ b/keras/src/backend/torch/optimizers/torch_adadelta.py
@@ -1,8 +1,8 @@
 import torch
 
-from keras import ops
-from keras import optimizers
-from keras.backend.torch.optimizers import torch_parallel_optimizer
+from keras.src import ops
+from keras.src import optimizers
+from keras.src.backend.torch.optimizers import torch_parallel_optimizer
 
 
 class Adadelta(
diff --git a/keras/backend/torch/optimizers/torch_adagrad.py b/keras/src/backend/torch/optimizers/torch_adagrad.py
similarity index 87%
rename from keras/backend/torch/optimizers/torch_adagrad.py
rename to keras/src/backend/torch/optimizers/torch_adagrad.py
index 2cdaa87c1831..2a1e19f70fd6 100644
--- a/keras/backend/torch/optimizers/torch_adagrad.py
+++ b/keras/src/backend/torch/optimizers/torch_adagrad.py
@@ -1,8 +1,8 @@
 import torch
 
-from keras import ops
-from keras import optimizers
-from keras.backend.torch.optimizers import torch_parallel_optimizer
+from keras.src import ops
+from keras.src import optimizers
+from keras.src.backend.torch.optimizers import torch_parallel_optimizer
 
 
 class Adagrad(
diff --git a/keras/backend/torch/optimizers/torch_adam.py b/keras/src/backend/torch/optimizers/torch_adam.py
similarity index 93%
rename from keras/backend/torch/optimizers/torch_adam.py
rename to keras/src/backend/torch/optimizers/torch_adam.py
index 7819a0396943..3bb7db7c341c 100644
--- a/keras/backend/torch/optimizers/torch_adam.py
+++ b/keras/src/backend/torch/optimizers/torch_adam.py
@@ -1,8 +1,8 @@
 import torch
 
-from keras import ops
-from keras import optimizers
-from keras.backend.torch.optimizers import torch_parallel_optimizer
+from keras.src import ops
+from keras.src import optimizers
+from keras.src.backend.torch.optimizers import torch_parallel_optimizer
 
 
 class Adam(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adam):
diff --git a/keras/backend/torch/optimizers/torch_adamax.py b/keras/src/backend/torch/optimizers/torch_adamax.py
similarity index 91%
rename from keras/backend/torch/optimizers/torch_adamax.py
rename to keras/src/backend/torch/optimizers/torch_adamax.py
index b9463ca0745d..9cb3c0184499 100644
--- a/keras/backend/torch/optimizers/torch_adamax.py
+++ b/keras/src/backend/torch/optimizers/torch_adamax.py
@@ -1,8 +1,8 @@
 import torch
 
-from keras import ops
-from keras import optimizers
-from keras.backend.torch.optimizers import torch_parallel_optimizer
+from keras.src import ops
+from keras.src import optimizers
+from keras.src.backend.torch.optimizers import torch_parallel_optimizer
 
 
 class Adamax(
diff --git a/keras/src/backend/torch/optimizers/torch_adamw.py b/keras/src/backend/torch/optimizers/torch_adamw.py
new file mode 100644
index 000000000000..394727cd9b59
--- /dev/null
+++ b/keras/src/backend/torch/optimizers/torch_adamw.py
@@ -0,0 +1,6 @@
+from keras.src import optimizers
+from keras.src.backend.torch.optimizers import torch_adam
+
+
+class AdamW(torch_adam.Adam, optimizers.AdamW):
+    pass
diff --git a/keras/backend/torch/optimizers/torch_lion.py b/keras/src/backend/torch/optimizers/torch_lion.py
similarity index 87%
rename from keras/backend/torch/optimizers/torch_lion.py
rename to keras/src/backend/torch/optimizers/torch_lion.py
index 9bb58d6fe328..f2022ad6e53e 100644
--- a/keras/backend/torch/optimizers/torch_lion.py
+++ b/keras/src/backend/torch/optimizers/torch_lion.py
@@ -1,8 +1,8 @@
 import torch
 
-from keras import ops
-from keras import optimizers
-from keras.backend.torch.optimizers import torch_parallel_optimizer
+from keras.src import ops
+from keras.src import optimizers
+from keras.src.backend.torch.optimizers import torch_parallel_optimizer
 
 
 class Lion(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Lion):
diff --git a/keras/backend/torch/optimizers/torch_nadam.py b/keras/src/backend/torch/optimizers/torch_nadam.py
similarity index 92%
rename from keras/backend/torch/optimizers/torch_nadam.py
rename to keras/src/backend/torch/optimizers/torch_nadam.py
index 08b73bb0c438..df82bd2c473b 100644
--- a/keras/backend/torch/optimizers/torch_nadam.py
+++ b/keras/src/backend/torch/optimizers/torch_nadam.py
@@ -1,9 +1,9 @@
 import torch
 
-from keras import ops
-from keras import optimizers
-from keras.backend.torch import core
-from keras.backend.torch.optimizers import torch_parallel_optimizer
+from keras.src import ops
+from keras.src import optimizers
+from keras.src.backend.torch import core
+from keras.src.backend.torch.optimizers import torch_parallel_optimizer
 
 
 class Nadam(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Nadam):
diff --git a/keras/backend/torch/optimizers/torch_optimizer.py b/keras/src/backend/torch/optimizers/torch_optimizer.py
similarity index 58%
rename from keras/backend/torch/optimizers/torch_optimizer.py
rename to keras/src/backend/torch/optimizers/torch_optimizer.py
index 880b6f3ee04b..85fc274c574f 100644
--- a/keras/backend/torch/optimizers/torch_optimizer.py
+++ b/keras/src/backend/torch/optimizers/torch_optimizer.py
@@ -1,22 +1,22 @@
 import torch
 
-from keras import optimizers
-from keras.optimizers.base_optimizer import BaseOptimizer
-from keras.utils import torch_utils
+from keras.src import optimizers
+from keras.src.optimizers.base_optimizer import BaseOptimizer
+from keras.src.utils import torch_utils
 
 
 class TorchOptimizer(BaseOptimizer):
     def __new__(cls, *args, **kwargs):
         # Import locally to avoid circular imports.
-        from keras.backend.torch.optimizers import torch_adadelta
-        from keras.backend.torch.optimizers import torch_adagrad
-        from keras.backend.torch.optimizers import torch_adam
-        from keras.backend.torch.optimizers import torch_adamax
-        from keras.backend.torch.optimizers import torch_adamw
-        from keras.backend.torch.optimizers import torch_lion
-        from keras.backend.torch.optimizers import torch_nadam
-        from keras.backend.torch.optimizers import torch_rmsprop
-        from keras.backend.torch.optimizers import torch_sgd
+        from keras.src.backend.torch.optimizers import torch_adadelta
+        from keras.src.backend.torch.optimizers import torch_adagrad
+        from keras.src.backend.torch.optimizers import torch_adam
+        from keras.src.backend.torch.optimizers import torch_adamax
+        from keras.src.backend.torch.optimizers import torch_adamw
+        from keras.src.backend.torch.optimizers import torch_lion
+        from keras.src.backend.torch.optimizers import torch_nadam
+        from keras.src.backend.torch.optimizers import torch_rmsprop
+        from keras.src.backend.torch.optimizers import torch_sgd
 
         OPTIMIZERS = {
             optimizers.Adadelta: torch_adadelta.Adadelta,
diff --git a/keras/backend/torch/optimizers/torch_parallel_optimizer.py b/keras/src/backend/torch/optimizers/torch_parallel_optimizer.py
similarity index 87%
rename from keras/backend/torch/optimizers/torch_parallel_optimizer.py
rename to keras/src/backend/torch/optimizers/torch_parallel_optimizer.py
index 3aa8a072a087..4fe3802af226 100644
--- a/keras/backend/torch/optimizers/torch_parallel_optimizer.py
+++ b/keras/src/backend/torch/optimizers/torch_parallel_optimizer.py
@@ -1,7 +1,7 @@
 import torch
 
-from keras.optimizers.base_optimizer import BaseOptimizer
-from keras.utils import torch_utils
+from keras.src.optimizers.base_optimizer import BaseOptimizer
+from keras.src.utils import torch_utils
 
 
 class TorchParallelOptimizer(BaseOptimizer):
diff --git a/keras/backend/torch/optimizers/torch_rmsprop.py b/keras/src/backend/torch/optimizers/torch_rmsprop.py
similarity index 93%
rename from keras/backend/torch/optimizers/torch_rmsprop.py
rename to keras/src/backend/torch/optimizers/torch_rmsprop.py
index 100c72c25cee..49c4c3916bc1 100644
--- a/keras/backend/torch/optimizers/torch_rmsprop.py
+++ b/keras/src/backend/torch/optimizers/torch_rmsprop.py
@@ -1,8 +1,8 @@
 import torch
 
-from keras import ops
-from keras import optimizers
-from keras.backend.torch.optimizers import torch_parallel_optimizer
+from keras.src import ops
+from keras.src import optimizers
+from keras.src.backend.torch.optimizers import torch_parallel_optimizer
 
 
 class RMSprop(
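The `TorchOptimizer.__new__` hunk above swaps in a torch-specific parallel
implementation when one is registered for the requested optimizer class. The
`OPTIMIZERS` dict is truncated in the hunk; a hypothetical condensed form of
the dispatch (the control flow after the dict is an assumption, not the
verbatim source):

    class TorchOptimizer(BaseOptimizer):
        def __new__(cls, *args, **kwargs):
            OPTIMIZERS = {
                optimizers.Adadelta: torch_adadelta.Adadelta,
                # ... one entry per optimizer imported above ...
            }
            if cls in OPTIMIZERS:
                # Instantiate the parallel torch implementation instead.
                return OPTIMIZERS[cls](*args, **kwargs)
            return super().__new__(cls)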
diff --git a/keras/backend/torch/optimizers/torch_sgd.py b/keras/src/backend/torch/optimizers/torch_sgd.py
similarity index 91%
rename from keras/backend/torch/optimizers/torch_sgd.py
rename to keras/src/backend/torch/optimizers/torch_sgd.py
index 726f102170e1..f16220d85ac3 100644
--- a/keras/backend/torch/optimizers/torch_sgd.py
+++ b/keras/src/backend/torch/optimizers/torch_sgd.py
@@ -1,7 +1,7 @@
 import torch
 
-from keras import optimizers
-from keras.backend.torch.optimizers import torch_parallel_optimizer
+from keras.src import optimizers
+from keras.src.backend.torch.optimizers import torch_parallel_optimizer
 
 
 class SGD(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.SGD):
diff --git a/keras/backend/torch/random.py b/keras/src/backend/torch/random.py
similarity index 95%
rename from keras/backend/torch/random.py
rename to keras/src/backend/torch/random.py
index ab81713d1f68..e080731952e6 100644
--- a/keras/backend/torch/random.py
+++ b/keras/src/backend/torch/random.py
@@ -2,13 +2,13 @@
 import torch._dynamo as dynamo
 import torch.nn.functional as tnn
 
-from keras.backend.config import floatx
-from keras.backend.torch.core import convert_to_tensor
-from keras.backend.torch.core import get_device
-from keras.backend.torch.core import to_torch_dtype
-from keras.random.seed_generator import SeedGenerator
-from keras.random.seed_generator import draw_seed
-from keras.random.seed_generator import make_default_seed
+from keras.src.backend.config import floatx
+from keras.src.backend.torch.core import convert_to_tensor
+from keras.src.backend.torch.core import get_device
+from keras.src.backend.torch.core import to_torch_dtype
+from keras.src.random.seed_generator import SeedGenerator
+from keras.src.random.seed_generator import draw_seed
+from keras.src.random.seed_generator import make_default_seed
 
 
 # torch.Generator not supported with dynamo
diff --git a/keras/backend/torch/rnn.py b/keras/src/backend/torch/rnn.py
similarity index 99%
rename from keras/backend/torch/rnn.py
rename to keras/src/backend/torch/rnn.py
index 163f2bc26227..55604b4c77e5 100644
--- a/keras/backend/torch/rnn.py
+++ b/keras/src/backend/torch/rnn.py
@@ -1,7 +1,7 @@
 import torch
 
-from keras import tree
-from keras.backend.torch.core import convert_to_tensor
+from keras.src import tree
+from keras.src.backend.torch.core import convert_to_tensor
 
 
 def rnn(
diff --git a/keras/backend/torch/trainer.py b/keras/src/backend/torch/trainer.py
similarity index 97%
rename from keras/backend/torch/trainer.py
rename to keras/src/backend/torch/trainer.py
index f4cca60f84a4..0fb3ff529a47 100644
--- a/keras/backend/torch/trainer.py
+++ b/keras/src/backend/torch/trainer.py
@@ -4,15 +4,15 @@
 import torch
 from packaging.version import parse
 
-from keras import backend
-from keras import callbacks as callbacks_module
-from keras import optimizers as optimizers_module
-from keras import tree
-from keras.trainers import trainer as base_trainer
-from keras.trainers.data_adapters import array_slicing
-from keras.trainers.data_adapters import data_adapter_utils
-from keras.trainers.epoch_iterator import EpochIterator
-from keras.utils import traceback_utils
+from keras.src import backend
+from keras.src import callbacks as callbacks_module
+from keras.src import optimizers as optimizers_module
+from keras.src import tree
+from keras.src.trainers import trainer as base_trainer
+from keras.src.trainers.data_adapters import array_slicing
+from keras.src.trainers.data_adapters import data_adapter_utils
+from keras.src.trainers.epoch_iterator import EpochIterator
+from keras.src.utils import traceback_utils
 
 
 class TorchTrainer(base_trainer.Trainer):
diff --git a/keras/src/callbacks/__init__.py b/keras/src/callbacks/__init__.py
new file mode 100644
index 000000000000..d8c835a418d4
--- /dev/null
+++ b/keras/src/callbacks/__init__.py
@@ -0,0 +1,15 @@
+from keras.src.callbacks.backup_and_restore import BackupAndRestore
+from keras.src.callbacks.callback import Callback
+from keras.src.callbacks.callback_list import CallbackList
+from keras.src.callbacks.csv_logger import CSVLogger
+from keras.src.callbacks.early_stopping import EarlyStopping
+from keras.src.callbacks.history import History
+from keras.src.callbacks.lambda_callback import LambdaCallback
+from keras.src.callbacks.learning_rate_scheduler import LearningRateScheduler
+from keras.src.callbacks.model_checkpoint import ModelCheckpoint
+from keras.src.callbacks.progbar_logger import ProgbarLogger
+from keras.src.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau
+from keras.src.callbacks.remote_monitor import RemoteMonitor
+from keras.src.callbacks.swap_ema_weights import SwapEMAWeights
+from keras.src.callbacks.tensorboard import TensorBoard
+from keras.src.callbacks.terminate_on_nan import TerminateOnNaN
diff --git a/keras/callbacks/backup_and_restore.py b/keras/src/callbacks/backup_and_restore.py
similarity index 98%
rename from keras/callbacks/backup_and_restore.py
rename to keras/src/callbacks/backup_and_restore.py
index 4798e8a5c5d2..36e52c15df42 100644
--- a/keras/callbacks/backup_and_restore.py
+++ b/keras/src/callbacks/backup_and_restore.py
@@ -1,8 +1,8 @@
 import json
 
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
-from keras.utils import file_utils
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
+from keras.src.utils import file_utils
 
 
 @keras_export("keras.callbacks.BackupAndRestore")
diff --git a/keras/callbacks/backup_and_restore_test.py b/keras/src/callbacks/backup_and_restore_test.py
similarity index 97%
rename from keras/callbacks/backup_and_restore_test.py
rename to keras/src/callbacks/backup_and_restore_test.py
index 814348307621..7ae5764bc5a8 100644
--- a/keras/callbacks/backup_and_restore_test.py
+++ b/keras/src/callbacks/backup_and_restore_test.py
@@ -1,11 +1,11 @@
 import numpy as np
 import pytest
 
-from keras import callbacks
-from keras import layers
-from keras import testing
-from keras.models import Sequential
-from keras.utils import file_utils
+from keras.src import callbacks
+from keras.src import layers
+from keras.src import testing
+from keras.src.models import Sequential
+from keras.src.utils import file_utils
 
 
 class InterruptingCallback(callbacks.Callback):
diff --git a/keras/callbacks/callback.py b/keras/src/callbacks/callback.py
similarity index 99%
rename from keras/callbacks/callback.py
rename to keras/src/callbacks/callback.py
index 764118655d7e..a232bd7a583f 100644
--- a/keras/callbacks/callback.py
+++ b/keras/src/callbacks/callback.py
@@ -1,5 +1,5 @@
-from keras import backend
-from keras.api_export import keras_export
+from keras.src import backend
+from keras.src.api_export import keras_export
 
 
 @keras_export("keras.callbacks.Callback")
diff --git a/keras/callbacks/callback_list.py b/keras/src/callbacks/callback_list.py
similarity index 95%
rename from keras/callbacks/callback_list.py
rename to keras/src/callbacks/callback_list.py
index 55d2567dbf35..04ea00a131f7 100644
--- a/keras/callbacks/callback_list.py
+++ b/keras/src/callbacks/callback_list.py
@@ -1,8 +1,8 @@
-from keras import tree
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
-from keras.callbacks.history import History
-from keras.callbacks.progbar_logger import ProgbarLogger
+from keras.src import tree
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
+from keras.src.callbacks.history import History
+from keras.src.callbacks.progbar_logger import ProgbarLogger
 
 
 @keras_export("keras.callbacks.CallbackList")
diff --git a/keras/callbacks/callback_test.py b/keras/src/callbacks/callback_test.py
similarity index 89%
rename from keras/callbacks/callback_test.py
rename to keras/src/callbacks/callback_test.py
index fcacf7eaa1da..31c77c904ceb 100644
--- a/keras/callbacks/callback_test.py
+++ b/keras/src/callbacks/callback_test.py
@@ -1,9 +1,9 @@
 import numpy as np
 import pytest
 
-from keras import models
-from keras import testing
-from keras.callbacks.callback import Callback
+from keras.src import models
+from keras.src import testing
+from keras.src.callbacks.callback import Callback
 
 
 class CallbackTest(testing.TestCase):
diff --git a/keras/callbacks/csv_logger.py b/keras/src/callbacks/csv_logger.py
similarity index 95%
rename from keras/callbacks/csv_logger.py
rename to keras/src/callbacks/csv_logger.py
index baab82adeb51..69665eacf004 100644
--- a/keras/callbacks/csv_logger.py
+++ b/keras/src/callbacks/csv_logger.py
@@ -3,9 +3,9 @@
 
 import numpy as np
 
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
-from keras.utils import file_utils
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
+from keras.src.utils import file_utils
 
 
 @keras_export("keras.callbacks.CSVLogger")
diff --git a/keras/callbacks/csv_logger_test.py b/keras/src/callbacks/csv_logger_test.py
similarity index 96%
rename from keras/callbacks/csv_logger_test.py
rename to keras/src/callbacks/csv_logger_test.py
index e2361b3711de..9da3be6aaa53 100644
--- a/keras/callbacks/csv_logger_test.py
+++ b/keras/src/callbacks/csv_logger_test.py
@@ -6,12 +6,12 @@
 import numpy as np
 import pytest
 
-from keras import callbacks
-from keras import initializers
-from keras import layers
-from keras import testing
-from keras.models import Sequential
-from keras.utils import numerical_utils
+from keras.src import callbacks
+from keras.src import initializers
+from keras.src import layers
+from keras.src import testing
+from keras.src.models import Sequential
+from keras.src.utils import numerical_utils
 
 TRAIN_SAMPLES = 10
 TEST_SAMPLES = 10
diff --git a/keras/callbacks/early_stopping.py b/keras/src/callbacks/early_stopping.py
similarity index 97%
rename from keras/callbacks/early_stopping.py
rename to keras/src/callbacks/early_stopping.py
index 1263357f4a3a..e7c1fe9c0dc0 100644
--- a/keras/callbacks/early_stopping.py
+++ b/keras/src/callbacks/early_stopping.py
@@ -1,10 +1,10 @@
 import warnings
 
-from keras import ops
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
-from keras.trainers import compile_utils
-from keras.utils import io_utils
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
+from keras.src.trainers import compile_utils
+from keras.src.utils import io_utils
 
 
 @keras_export("keras.callbacks.EarlyStopping")
diff --git a/keras/callbacks/early_stopping_test.py b/keras/src/callbacks/early_stopping_test.py
similarity index 98%
rename from keras/callbacks/early_stopping_test.py
rename to keras/src/callbacks/early_stopping_test.py
index 176e86ef6da3..e120fe8a2b2e 100644
--- a/keras/callbacks/early_stopping_test.py
+++ b/keras/src/callbacks/early_stopping_test.py
@@ -1,12 +1,12 @@
 import numpy as np
 import pytest
 
-from keras import callbacks
-from keras import layers
-from keras import metrics
-from keras import models
-from keras import ops
-from keras import testing
+from keras.src import callbacks
+from keras.src import layers
+from keras.src import metrics
+from keras.src import models
+from keras.src import ops
+from keras.src import testing
 
 
 class EarlyStoppingTest(testing.TestCase):
diff --git a/keras/callbacks/history.py b/keras/src/callbacks/history.py
similarity index 92%
rename from keras/callbacks/history.py
rename to keras/src/callbacks/history.py
index dd2a7d488c2e..6fb3c3c86171 100644
--- a/keras/callbacks/history.py
+++ b/keras/src/callbacks/history.py
@@ -1,5 +1,5 @@
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
 
 
 @keras_export("keras.callbacks.History")
diff --git a/keras/callbacks/lambda_callback.py b/keras/src/callbacks/lambda_callback.py
similarity index 97%
rename from keras/callbacks/lambda_callback.py
rename to keras/src/callbacks/lambda_callback.py
index 59756aebdb78..46dfd46e560c 100644
--- a/keras/callbacks/lambda_callback.py
+++ b/keras/src/callbacks/lambda_callback.py
@@ -1,5 +1,5 @@
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
 
 
 @keras_export("keras.callbacks.LambdaCallback")
diff --git a/keras/callbacks/lambda_callback_test.py b/keras/src/callbacks/lambda_callback_test.py
similarity index 96%
rename from keras/callbacks/lambda_callback_test.py
rename to keras/src/callbacks/lambda_callback_test.py
index bdecdac95af2..4c8a6add2146 100644
--- a/keras/callbacks/lambda_callback_test.py
+++ b/keras/src/callbacks/lambda_callback_test.py
@@ -2,12 +2,12 @@
 import pytest
 from absl import logging
 
-from keras import callbacks
-from keras import layers
-from keras import losses
-from keras import optimizers
-from keras import testing
-from keras.models.sequential import Sequential
+from keras.src import callbacks
+from keras.src import layers
+from keras.src import losses
+from keras.src import optimizers
+from keras.src import testing
+from keras.src.models.sequential import Sequential
 
 
 class LambdaCallbackTest(testing.TestCase):
diff --git a/keras/callbacks/learning_rate_scheduler.py b/keras/src/callbacks/learning_rate_scheduler.py
similarity index 94%
rename from keras/callbacks/learning_rate_scheduler.py
rename to keras/src/callbacks/learning_rate_scheduler.py
index dfedac312a30..6ac1486e8797 100644
--- a/keras/callbacks/learning_rate_scheduler.py
+++ b/keras/src/callbacks/learning_rate_scheduler.py
@@ -1,9 +1,9 @@
 import numpy as np
 
-from keras import backend
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
-from keras.utils import io_utils
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
+from keras.src.utils import io_utils
 
 
 @keras_export("keras.callbacks.LearningRateScheduler")
diff --git a/keras/callbacks/learning_rate_scheduler_test.py b/keras/src/callbacks/learning_rate_scheduler_test.py
similarity index 92%
rename from keras/callbacks/learning_rate_scheduler_test.py
rename to keras/src/callbacks/learning_rate_scheduler_test.py
index 7276825615b5..b76bcf8cf3cf 100644
--- a/keras/callbacks/learning_rate_scheduler_test.py
+++ b/keras/src/callbacks/learning_rate_scheduler_test.py
@@ -1,13 +1,13 @@
 import pytest
 
-from keras import callbacks
-from keras import layers
-from keras import optimizers
-from keras import testing
-from keras.models import Sequential
-from keras.testing import test_utils
-from keras.utils import io_utils
-from keras.utils import numerical_utils
+from keras.src import callbacks
+from keras.src import layers
+from keras.src import optimizers
+from keras.src import testing
+from keras.src.models import Sequential
+from keras.src.testing import test_utils
+from keras.src.utils import io_utils
+from keras.src.utils import numerical_utils
 
 
 class LearningRateSchedulerTest(testing.TestCase):
diff --git a/keras/callbacks/model_checkpoint.py b/keras/src/callbacks/model_checkpoint.py
similarity index 98%
rename from keras/callbacks/model_checkpoint.py
rename to keras/src/callbacks/model_checkpoint.py
index e9d329159194..b602fe4403d3 100644
--- a/keras/callbacks/model_checkpoint.py
+++ b/keras/src/callbacks/model_checkpoint.py
@@ -4,11 +4,11 @@
 
 import numpy as np
 
-from keras import backend
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
-from keras.utils import file_utils
-from keras.utils import io_utils
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
+from keras.src.utils import file_utils
+from keras.src.utils import io_utils
 
 
 @keras_export("keras.callbacks.ModelCheckpoint")
diff --git a/keras/callbacks/model_checkpoint_test.py b/keras/src/callbacks/model_checkpoint_test.py
similarity index 98%
rename from keras/callbacks/model_checkpoint_test.py
rename to keras/src/callbacks/model_checkpoint_test.py
index 680bb9501d74..38092e70a83f 100644
--- a/keras/callbacks/model_checkpoint_test.py
+++ b/keras/src/callbacks/model_checkpoint_test.py
@@ -3,15 +3,15 @@
 
 import pytest
 
-from keras import callbacks
-from keras import layers
-from keras import metrics
-from keras import models
-from keras import saving
-from keras import testing
-from keras.models import Sequential
-from keras.testing import test_utils
-from keras.utils import numerical_utils
+from keras.src import callbacks
+from keras.src import layers
+from keras.src import metrics
+from keras.src import models
+from keras.src import saving
+from keras.src import testing
+from keras.src.models import Sequential
+from keras.src.testing import test_utils
+from keras.src.utils import numerical_utils
 
 try:
     import h5py
diff --git a/keras/callbacks/progbar_logger.py b/keras/src/callbacks/progbar_logger.py
similarity index 94%
rename from keras/callbacks/progbar_logger.py
rename to keras/src/callbacks/progbar_logger.py
index be82f8a4f7a6..ac10d655a97c 100644
--- a/keras/callbacks/progbar_logger.py
+++ b/keras/src/callbacks/progbar_logger.py
@@ -1,7 +1,7 @@
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
-from keras.utils import io_utils
-from keras.utils.progbar import Progbar
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
+from keras.src.utils import io_utils
+from keras.src.utils.progbar import Progbar
 
 
 @keras_export("keras.callbacks.ProgbarLogger")
diff --git a/keras/callbacks/reduce_lr_on_plateau.py b/keras/src/callbacks/reduce_lr_on_plateau.py
similarity index 96%
rename from keras/callbacks/reduce_lr_on_plateau.py
rename to keras/src/callbacks/reduce_lr_on_plateau.py
index ccffb77206a5..d1ee33e34e1d 100644
--- a/keras/callbacks/reduce_lr_on_plateau.py
+++ b/keras/src/callbacks/reduce_lr_on_plateau.py
@@ -2,10 +2,10 @@
 
 import numpy as np
 
-from keras import backend
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
-from keras.utils import io_utils
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
+from keras.src.utils import io_utils
 
 
 @keras_export("keras.callbacks.ReduceLROnPlateau")
diff --git a/keras/callbacks/reduce_lr_on_plateau_test.py b/keras/src/callbacks/reduce_lr_on_plateau_test.py
similarity index 93%
rename from keras/callbacks/reduce_lr_on_plateau_test.py
rename to keras/src/callbacks/reduce_lr_on_plateau_test.py
index 10a8b0f47aec..96ebbaab2cf2 100644
--- a/keras/callbacks/reduce_lr_on_plateau_test.py
+++ b/keras/src/callbacks/reduce_lr_on_plateau_test.py
@@ -1,13 +1,13 @@
 import pytest
 
-from keras import callbacks
-from keras import layers
-from keras import optimizers
-from keras import testing
-from keras.models import Sequential
-from keras.testing import test_utils
-from keras.utils import io_utils
-from keras.utils import numerical_utils
+from keras.src import callbacks
+from keras.src import layers
+from keras.src import optimizers
+from keras.src import testing
+from keras.src.models import Sequential
+from keras.src.testing import test_utils
+from keras.src.utils import io_utils
+from keras.src.utils import numerical_utils
 
 
 class ReduceLROnPlateauTest(testing.TestCase):
diff --git a/keras/callbacks/remote_monitor.py b/keras/src/callbacks/remote_monitor.py
similarity index 96%
rename from keras/callbacks/remote_monitor.py
rename to keras/src/callbacks/remote_monitor.py
index 8ddca56aa95f..f8605a5c1726 100644
--- a/keras/callbacks/remote_monitor.py
+++ b/keras/src/callbacks/remote_monitor.py
@@ -3,8 +3,8 @@
 
 import numpy as np
 
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
 
 try:
     import requests
diff --git a/keras/callbacks/remote_monitor_test.py b/keras/src/callbacks/remote_monitor_test.py
similarity index 94%
rename from keras/callbacks/remote_monitor_test.py
rename to keras/src/callbacks/remote_monitor_test.py
index 3103580428e5..0660b5850975 100644
--- a/keras/callbacks/remote_monitor_test.py
+++ b/keras/src/callbacks/remote_monitor_test.py
@@ -3,12 +3,12 @@
 
 import numpy as np
 
-from keras import backend
-from keras import callbacks
-from keras import layers
-from keras import testing
-from keras.models import Sequential
-from keras.utils import numerical_utils
+from keras.src import backend
+from keras.src import callbacks
+from keras.src import layers
+from keras.src import testing
+from keras.src.models import Sequential
+from keras.src.utils import numerical_utils
 
 try:
     import requests
diff --git a/keras/callbacks/swap_ema_weights.py b/keras/src/callbacks/swap_ema_weights.py
similarity index 97%
rename from keras/callbacks/swap_ema_weights.py
rename to keras/src/callbacks/swap_ema_weights.py
index d7077c037886..9c13a90fff53 100644
--- a/keras/callbacks/swap_ema_weights.py
+++ b/keras/src/callbacks/swap_ema_weights.py
@@ -1,7 +1,7 @@
-from keras import backend
-from keras import ops
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
+from keras.src import backend
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
 
 
 @keras_export("keras.callbacks.SwapEMAWeights")
diff --git a/keras/callbacks/swap_ema_weights_test.py b/keras/src/callbacks/swap_ema_weights_test.py
similarity index 94%
rename from keras/callbacks/swap_ema_weights_test.py
rename to keras/src/callbacks/swap_ema_weights_test.py
index c24895d15a25..004544a27b30 100644
--- a/keras/callbacks/swap_ema_weights_test.py
+++ b/keras/src/callbacks/swap_ema_weights_test.py
@@ -4,17 +4,17 @@
 import tensorflow as tf
 from tensorflow.python.eager import context
 
-from keras import backend
-from keras import callbacks
-from keras import layers
-from keras import losses
-from keras import metrics
-from keras import optimizers
-from keras import saving
-from keras import testing
-from keras.models import Sequential
-from keras.testing import test_utils
-from keras.utils import numerical_utils
+from keras.src import backend
+from keras.src import callbacks
+from keras.src import layers
+from keras.src import losses
+from keras.src import metrics
+from keras.src import optimizers
+from keras.src import saving
+from keras.src import testing
+from keras.src.models import Sequential
+from keras.src.testing import test_utils
+from keras.src.utils import numerical_utils
 
 
 class SwapEMAWeightsTest(testing.TestCase):
diff --git a/keras/callbacks/tensorboard.py b/keras/src/callbacks/tensorboard.py
similarity index 98%
rename from keras/callbacks/tensorboard.py
rename to keras/src/callbacks/tensorboard.py
index 0a41c2b19e04..a9be2b29b816 100644
--- a/keras/callbacks/tensorboard.py
+++ b/keras/src/callbacks/tensorboard.py
@@ -4,14 +4,14 @@
 import time
 import warnings
 
-from keras import backend
-from keras import ops
-from keras import tree
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
-from keras.layers import Embedding
-from keras.optimizers import Optimizer
-from keras.utils import file_utils
+from keras.src import backend
+from keras.src import ops
+from keras.src import tree
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
+from keras.src.layers import Embedding
+from keras.src.optimizers import Optimizer
+from keras.src.utils import file_utils
 
 
 @keras_export("keras.callbacks.TensorBoard")
diff --git a/keras/callbacks/tensorboard_test.py b/keras/src/callbacks/tensorboard_test.py
similarity index 98%
rename from keras/callbacks/tensorboard_test.py
rename to keras/src/callbacks/tensorboard_test.py
index 598d6e26dee1..3f67532a2d08 100644
--- a/keras/callbacks/tensorboard_test.py
+++ b/keras/src/callbacks/tensorboard_test.py
@@ -9,15 +9,15 @@
 from tensorflow.core.util import event_pb2
 from tensorflow.python.lib.io import tf_record
 
-from keras import backend
-from keras import callbacks
-from keras import layers
-from keras import losses
-from keras import models
-from keras import ops
-from keras import optimizers
-from keras import testing
-from keras.optimizers import schedules
+from keras.src import backend
+from keras.src import callbacks
+from keras.src import layers
+from keras.src import losses
+from keras.src import models
+from keras.src import ops
+from keras.src import optimizers
+from keras.src import testing
+from keras.src.optimizers import schedules
 
 
 # Note: this file and tensorboard in general has a dependency on tensorflow
diff --git a/keras/callbacks/terminate_on_nan.py b/keras/src/callbacks/terminate_on_nan.py
similarity index 80%
rename from keras/callbacks/terminate_on_nan.py
rename to keras/src/callbacks/terminate_on_nan.py
index 20eb5f2b3d70..55f7e4c06ab8 100644
--- a/keras/callbacks/terminate_on_nan.py
+++ b/keras/src/callbacks/terminate_on_nan.py
@@ -1,8 +1,8 @@
 import numpy as np
 
-from keras.api_export import keras_export
-from keras.callbacks.callback import Callback
-from keras.utils import io_utils
+from keras.src.api_export import keras_export
+from keras.src.callbacks.callback import Callback
+from keras.src.utils import io_utils
 
 
 @keras_export("keras.callbacks.TerminateOnNaN")
diff --git a/keras/callbacks/terminate_on_nan_test.py b/keras/src/callbacks/terminate_on_nan_test.py
similarity index 87%
rename from keras/callbacks/terminate_on_nan_test.py
rename to keras/src/callbacks/terminate_on_nan_test.py
index 39b6cba5130b..f84b1b89b6bc 100644
--- a/keras/callbacks/terminate_on_nan_test.py
+++ b/keras/src/callbacks/terminate_on_nan_test.py
@@ -1,12 +1,12 @@
 import numpy as np
 import pytest
 
-from keras import callbacks
-from keras import initializers
-from keras import layers
-from keras import testing
-from keras.models import Sequential
-from keras.utils import numerical_utils
+from keras.src import callbacks
+from keras.src import initializers
+from keras.src import layers
+from keras.src import testing
+from keras.src.models import Sequential
+from keras.src.utils import numerical_utils
 
 
 class TerminateOnNaNTest(testing.TestCase):
diff --git a/keras/constraints/__init__.py b/keras/src/constraints/__init__.py
similarity index 75%
rename from keras/constraints/__init__.py
rename to keras/src/constraints/__init__.py
index 08824e716344..cfafab080cd6 100644
--- a/keras/constraints/__init__.py
+++ b/keras/src/constraints/__init__.py
@@ -1,13 +1,13 @@
 import inspect
 
-from keras.api_export import keras_export
-from keras.constraints.constraints import Constraint
-from keras.constraints.constraints import MaxNorm
-from keras.constraints.constraints import MinMaxNorm
-from keras.constraints.constraints import NonNeg
-from keras.constraints.constraints import UnitNorm
-from keras.saving import serialization_lib
-from keras.utils.naming import to_snake_case
+from keras.src.api_export import keras_export
+from keras.src.constraints.constraints import Constraint
+from keras.src.constraints.constraints import MaxNorm
+from keras.src.constraints.constraints import MinMaxNorm
+from keras.src.constraints.constraints import NonNeg
+from keras.src.constraints.constraints import UnitNorm
+from keras.src.saving import serialization_lib
+from keras.src.utils.naming import to_snake_case
 
 ALL_OBJECTS = {
     Constraint,
diff --git a/keras/constraints/constraints.py b/keras/src/constraints/constraints.py
similarity index 98%
rename from keras/constraints/constraints.py
rename to keras/src/constraints/constraints.py
index a897e900aa0b..ecba6c69c45d 100644
--- a/keras/constraints/constraints.py
+++ b/keras/src/constraints/constraints.py
@@ -1,6 +1,6 @@
-from keras import backend
-from keras import ops
-from keras.api_export import keras_export
+from keras.src import backend
+from keras.src import ops
+from keras.src.api_export import keras_export
 
 
 @keras_export("keras.constraints.Constraint")
diff --git a/keras/constraints/constraints_test.py b/keras/src/constraints/constraints_test.py
similarity index 97%
rename from keras/constraints/constraints_test.py
rename to keras/src/constraints/constraints_test.py
index fb2168b161fa..0ebf6426e8f1 100644
--- a/keras/constraints/constraints_test.py
+++ b/keras/src/constraints/constraints_test.py
@@ -1,8 +1,8 @@
 import numpy as np
 
-from keras import backend
-from keras import constraints
-from keras import testing
+from keras.src import backend
+from keras.src import constraints
+from keras.src import testing
 
 
 def get_example_array():
diff --git a/keras/src/datasets/__init__.py b/keras/src/datasets/__init__.py
new file mode 100644
index 000000000000..b62b41c4e61b
--- /dev/null
+++ b/keras/src/datasets/__init__.py
@@ -0,0 +1,10 @@
+"""Small NumPy datasets for debugging/testing."""
+
+from keras.src.datasets import boston_housing
+from keras.src.datasets import california_housing
+from keras.src.datasets import cifar10
+from keras.src.datasets import cifar100
+from keras.src.datasets import fashion_mnist
+from keras.src.datasets import imdb
+from keras.src.datasets import mnist
+from keras.src.datasets import reuters
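As the new `datasets/__init__.py` docstring above says, these are small NumPy
datasets intended for debugging and testing. Typical usage via the standard
Keras API (the shape comment reflects the well-known MNIST layout):

    from keras.src.datasets import mnist

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    print(x_train.shape)  # (60000, 28, 28)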
import load_batch +from keras.src.utils.file_utils import get_file @keras_export("keras.datasets.cifar100.load_data") diff --git a/keras/datasets/fashion_mnist.py b/keras/src/datasets/fashion_mnist.py similarity index 96% rename from keras/datasets/fashion_mnist.py rename to keras/src/datasets/fashion_mnist.py index 286a7b18ecd5..6700490e058d 100644 --- a/keras/datasets/fashion_mnist.py +++ b/keras/src/datasets/fashion_mnist.py @@ -5,8 +5,8 @@ import numpy as np -from keras.api_export import keras_export -from keras.utils.file_utils import get_file +from keras.src.api_export import keras_export +from keras.src.utils.file_utils import get_file @keras_export("keras.datasets.fashion_mnist.load_data") diff --git a/keras/datasets/imdb.py b/keras/src/datasets/imdb.py similarity index 97% rename from keras/datasets/imdb.py rename to keras/src/datasets/imdb.py index 4b4d79b718ad..a8b5537b111f 100644 --- a/keras/datasets/imdb.py +++ b/keras/src/datasets/imdb.py @@ -4,9 +4,9 @@ import numpy as np -from keras.api_export import keras_export -from keras.utils.file_utils import get_file -from keras.utils.python_utils import remove_long_seq +from keras.src.api_export import keras_export +from keras.src.utils.file_utils import get_file +from keras.src.utils.python_utils import remove_long_seq @keras_export("keras.datasets.imdb.load_data") diff --git a/keras/datasets/mnist.py b/keras/src/datasets/mnist.py similarity index 96% rename from keras/datasets/mnist.py rename to keras/src/datasets/mnist.py index 74d6b0d4fac1..b7e41cb78136 100644 --- a/keras/datasets/mnist.py +++ b/keras/src/datasets/mnist.py @@ -2,8 +2,8 @@ import numpy as np -from keras.api_export import keras_export -from keras.utils.file_utils import get_file +from keras.src.api_export import keras_export +from keras.src.utils.file_utils import get_file @keras_export("keras.datasets.mnist.load_data") diff --git a/keras/datasets/reuters.py b/keras/src/datasets/reuters.py similarity index 97% rename from keras/datasets/reuters.py rename to keras/src/datasets/reuters.py index 86aa7591673c..998754d1c282 100644 --- a/keras/datasets/reuters.py +++ b/keras/src/datasets/reuters.py @@ -4,9 +4,9 @@ import numpy as np -from keras.api_export import keras_export -from keras.utils.file_utils import get_file -from keras.utils.python_utils import remove_long_seq +from keras.src.api_export import keras_export +from keras.src.utils.file_utils import get_file +from keras.src.utils.python_utils import remove_long_seq @keras_export("keras.datasets.reuters.load_data") diff --git a/keras/src/distribution/__init__.py b/keras/src/distribution/__init__.py new file mode 100644 index 000000000000..04d907f35697 --- /dev/null +++ b/keras/src/distribution/__init__.py @@ -0,0 +1,11 @@ +from keras.src.distribution.distribution_lib import DataParallel +from keras.src.distribution.distribution_lib import DeviceMesh +from keras.src.distribution.distribution_lib import Distribution +from keras.src.distribution.distribution_lib import LayoutMap +from keras.src.distribution.distribution_lib import ModelParallel +from keras.src.distribution.distribution_lib import TensorLayout +from keras.src.distribution.distribution_lib import distribute_tensor +from keras.src.distribution.distribution_lib import distribution +from keras.src.distribution.distribution_lib import initialize +from keras.src.distribution.distribution_lib import list_devices +from keras.src.distribution.distribution_lib import set_distribution diff --git a/keras/distribution/distribution_lib.py 
b/keras/src/distribution/distribution_lib.py similarity index 98% rename from keras/distribution/distribution_lib.py rename to keras/src/distribution/distribution_lib.py index fa2828f1f87e..817355bba7b6 100644 --- a/keras/distribution/distribution_lib.py +++ b/keras/src/distribution/distribution_lib.py @@ -14,10 +14,10 @@ import numpy as np -from keras.api_export import keras_export -from keras.backend import KerasTensor -from keras.backend import distribution_lib -from keras.backend.common import global_state +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import distribution_lib +from keras.src.backend.common import global_state DEFAULT_BATCH_DIM_NAME = "batch" GLOBAL_ATTRIBUTE_NAME = "distribution" @@ -452,7 +452,7 @@ def distribute_dataset(self, dataset): distribute as tf_data_distribute, ) - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf if not isinstance(dataset, tf.data.Dataset): raise ValueError( @@ -594,7 +594,7 @@ def distribute_dataset(self, dataset): distribute as tf_data_distribute, ) - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf if not isinstance(dataset, tf.data.Dataset): raise ValueError( diff --git a/keras/distribution/distribution_lib_test.py b/keras/src/distribution/distribution_lib_test.py similarity index 99% rename from keras/distribution/distribution_lib_test.py rename to keras/src/distribution/distribution_lib_test.py index 7743f91f3944..93cadcd70f2d 100644 --- a/keras/distribution/distribution_lib_test.py +++ b/keras/src/distribution/distribution_lib_test.py @@ -7,10 +7,10 @@ import pytest import tensorflow as tf -from keras import backend -from keras import testing -from keras.backend import distribution_lib as backend_dlib -from keras.distribution import distribution_lib +from keras.src import backend +from keras.src import testing +from keras.src.backend import distribution_lib as backend_dlib +from keras.src.distribution import distribution_lib @pytest.mark.skipif( diff --git a/keras/dtype_policies/__init__.py b/keras/src/dtype_policies/__init__.py similarity index 64% rename from keras/dtype_policies/__init__.py rename to keras/src/dtype_policies/__init__.py index 39ccb8497ed5..ec84c2660417 100644 --- a/keras/dtype_policies/__init__.py +++ b/keras/src/dtype_policies/__init__.py @@ -1,16 +1,16 @@ -from keras import backend -from keras.dtype_policies import dtype_policy -from keras.dtype_policies.dtype_policy import QUANTIZATION_MODES -from keras.dtype_policies.dtype_policy import FloatDTypePolicy -from keras.dtype_policies.dtype_policy import QuantizedDTypePolicy -from keras.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy +from keras.src import backend +from keras.src.dtype_policies import dtype_policy +from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES +from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy +from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy +from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy def get(identifier): - from keras.dtype_policies.dtype_policy import ( + from keras.src.dtype_policies.dtype_policy import ( _get_quantized_dtype_policy_by_str, ) - from keras.saving import serialization_lib + from keras.src.saving import serialization_lib if identifier is None: return dtype_policy.dtype_policy() diff --git a/keras/dtype_policies/dtype_policy.py 
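
For context, the `keras.src.distribution` package re-exported above is the multi-device API. A minimal data-parallel setup sketch (hedged: this API is primarily exercised on the JAX backend, and device enumeration depends on the available hardware):

    from keras import distribution

    # Enumerate the accelerators visible to the current backend.
    devices = distribution.list_devices()

    # Replicate variables and shard each batch across all devices,
    # then install the distribution globally.
    data_parallel = distribution.DataParallel(devices=devices)
    distribution.set_distribution(data_parallel)
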
b/keras/src/dtype_policies/dtype_policy.py similarity index 98% rename from keras/dtype_policies/dtype_policy.py rename to keras/src/dtype_policies/dtype_policy.py index 0499a17bd3e7..75b39f075a26 100644 --- a/keras/dtype_policies/dtype_policy.py +++ b/keras/src/dtype_policies/dtype_policy.py @@ -1,7 +1,7 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.backend.common import global_state +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state QUANTIZATION_MODES = ("int8", "float8") diff --git a/keras/dtype_policies/dtype_policy_test.py b/keras/src/dtype_policies/dtype_policy_test.py similarity index 96% rename from keras/dtype_policies/dtype_policy_test.py rename to keras/src/dtype_policies/dtype_policy_test.py index 3e4fb8f75f5b..b040663781a8 100644 --- a/keras/dtype_policies/dtype_policy_test.py +++ b/keras/src/dtype_policies/dtype_policy_test.py @@ -1,12 +1,12 @@ from absl.testing import parameterized -from keras.dtype_policies.dtype_policy import DTypePolicy -from keras.dtype_policies.dtype_policy import FloatDTypePolicy -from keras.dtype_policies.dtype_policy import QuantizedDTypePolicy -from keras.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy -from keras.dtype_policies.dtype_policy import dtype_policy -from keras.dtype_policies.dtype_policy import set_dtype_policy -from keras.testing import test_case +from keras.src.dtype_policies.dtype_policy import DTypePolicy +from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy +from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy +from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy +from keras.src.dtype_policies.dtype_policy import dtype_policy +from keras.src.dtype_policies.dtype_policy import set_dtype_policy +from keras.src.testing import test_case class DTypePolicyTest(test_case.TestCase): @@ -293,7 +293,7 @@ def test_serialization_for_float8(self): ("float8_from_mixed_bfloat16", "float8_from_mixed_bfloat16"), ) def test_get_quantized_dtype_policy_by_str(self, name): - from keras.dtype_policies.dtype_policy import ( + from keras.src.dtype_policies.dtype_policy import ( _get_quantized_dtype_policy_by_str, ) @@ -301,7 +301,7 @@ def test_get_quantized_dtype_policy_by_str(self, name): self.assertEqual(policy.name, name) def test_invalid_get_quantized_dtype_policy_by_str(self): - from keras.dtype_policies.dtype_policy import ( + from keras.src.dtype_policies.dtype_policy import ( _get_quantized_dtype_policy_by_str, ) diff --git a/keras/src/export/__init__.py b/keras/src/export/__init__.py new file mode 100644 index 000000000000..d9de43f685a0 --- /dev/null +++ b/keras/src/export/__init__.py @@ -0,0 +1 @@ +from keras.src.export.export_lib import ExportArchive diff --git a/keras/export/export_lib.py b/keras/src/export/export_lib.py similarity index 98% rename from keras/export/export_lib.py rename to keras/src/export/export_lib.py index a06901a3cae0..09ec145699cf 100644 --- a/keras/export/export_lib.py +++ b/keras/src/export/export_lib.py @@ -6,15 +6,15 @@ from absl import logging -from keras import backend -from keras import tree -from keras.api_export import keras_export -from keras.backend.common.stateless_scope import StatelessScope -from keras.layers import Layer -from keras.models import Functional -from keras.models import Sequential -from keras.utils import io_utils -from keras.utils.module_utils import tensorflow 
as tf +from keras.src import backend +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.layers import Layer +from keras.src.models import Functional +from keras.src.models import Sequential +from keras.src.utils import io_utils +from keras.src.utils.module_utils import tensorflow as tf @keras_export("keras.export.ExportArchive") @@ -524,9 +524,9 @@ def _filter_and_track_resources(self): # Next, track lookup tables. # Hopefully, one day this will be automated at the tf.function level. self._tf_trackable._misc_assets = [] - from keras.layers import IntegerLookup - from keras.layers import StringLookup - from keras.layers import TextVectorization + from keras.src.layers import IntegerLookup + from keras.src.layers import StringLookup + from keras.src.layers import TextVectorization if hasattr(self, "_tracked"): for root in self._tracked: diff --git a/keras/export/export_lib_test.py b/keras/src/export/export_lib_test.py similarity index 98% rename from keras/export/export_lib_test.py rename to keras/src/export/export_lib_test.py index e53a84ce9a4b..dd23857df40f 100644 --- a/keras/export/export_lib_test.py +++ b/keras/src/export/export_lib_test.py @@ -7,17 +7,17 @@ import tensorflow as tf from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import models -from keras import ops -from keras import random -from keras import testing -from keras import tree -from keras import utils -from keras.export import export_lib -from keras.saving import saving_lib -from keras.testing.test_utils import named_product +from keras.src import backend +from keras.src import layers +from keras.src import models +from keras.src import ops +from keras.src import random +from keras.src import testing +from keras.src import tree +from keras.src import utils +from keras.src.export import export_lib +from keras.src.saving import saving_lib +from keras.src.testing.test_utils import named_product class CustomModel(models.Model): diff --git a/keras/initializers/__init__.py b/keras/src/initializers/__init__.py similarity index 69% rename from keras/initializers/__init__.py rename to keras/src/initializers/__init__.py index b428e259fdf1..af46b7ff9b8a 100644 --- a/keras/initializers/__init__.py +++ b/keras/src/initializers/__init__.py @@ -1,24 +1,24 @@ import inspect -from keras.api_export import keras_export -from keras.initializers.constant_initializers import Constant -from keras.initializers.constant_initializers import Identity -from keras.initializers.constant_initializers import Ones -from keras.initializers.constant_initializers import Zeros -from keras.initializers.initializer import Initializer -from keras.initializers.random_initializers import GlorotNormal -from keras.initializers.random_initializers import GlorotUniform -from keras.initializers.random_initializers import HeNormal -from keras.initializers.random_initializers import HeUniform -from keras.initializers.random_initializers import LecunNormal -from keras.initializers.random_initializers import LecunUniform -from keras.initializers.random_initializers import OrthogonalInitializer -from keras.initializers.random_initializers import RandomNormal -from keras.initializers.random_initializers import RandomUniform -from keras.initializers.random_initializers import TruncatedNormal -from keras.initializers.random_initializers import VarianceScaling -from keras.saving import serialization_lib -from 
keras.utils.naming import to_snake_case +from keras.src.api_export import keras_export +from keras.src.initializers.constant_initializers import Constant +from keras.src.initializers.constant_initializers import Identity +from keras.src.initializers.constant_initializers import Ones +from keras.src.initializers.constant_initializers import Zeros +from keras.src.initializers.initializer import Initializer +from keras.src.initializers.random_initializers import GlorotNormal +from keras.src.initializers.random_initializers import GlorotUniform +from keras.src.initializers.random_initializers import HeNormal +from keras.src.initializers.random_initializers import HeUniform +from keras.src.initializers.random_initializers import LecunNormal +from keras.src.initializers.random_initializers import LecunUniform +from keras.src.initializers.random_initializers import OrthogonalInitializer +from keras.src.initializers.random_initializers import RandomNormal +from keras.src.initializers.random_initializers import RandomUniform +from keras.src.initializers.random_initializers import TruncatedNormal +from keras.src.initializers.random_initializers import VarianceScaling +from keras.src.saving import serialization_lib +from keras.src.utils.naming import to_snake_case ALL_OBJECTS = { Initializer, diff --git a/keras/initializers/constant_initializers.py b/keras/src/initializers/constant_initializers.py similarity index 95% rename from keras/initializers/constant_initializers.py rename to keras/src/initializers/constant_initializers.py index f8d34f29d10a..c5ab6a42d6b2 100644 --- a/keras/initializers/constant_initializers.py +++ b/keras/src/initializers/constant_initializers.py @@ -1,8 +1,8 @@ -from keras import ops -from keras.api_export import keras_export -from keras.backend import standardize_dtype -from keras.initializers.initializer import Initializer -from keras.saving import serialization_lib +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend import standardize_dtype +from keras.src.initializers.initializer import Initializer +from keras.src.saving import serialization_lib @keras_export(["keras.initializers.Constant", "keras.initializers.constant"]) diff --git a/keras/initializers/constant_initializers_test.py b/keras/src/initializers/constant_initializers_test.py similarity index 95% rename from keras/initializers/constant_initializers_test.py rename to keras/src/initializers/constant_initializers_test.py index fca8b620dc99..ace475b499e1 100644 --- a/keras/initializers/constant_initializers_test.py +++ b/keras/src/initializers/constant_initializers_test.py @@ -1,8 +1,8 @@ import numpy as np -from keras import backend -from keras import initializers -from keras import testing +from keras.src import backend +from keras.src import initializers +from keras.src import testing class ConstantInitializersTest(testing.TestCase): diff --git a/keras/initializers/initializer.py b/keras/src/initializers/initializer.py similarity index 98% rename from keras/initializers/initializer.py rename to keras/src/initializers/initializer.py index 6ea8ca123427..6d870488c3f4 100644 --- a/keras/initializers/initializer.py +++ b/keras/src/initializers/initializer.py @@ -1,4 +1,4 @@ -from keras.api_export import keras_export +from keras.src.api_export import keras_export @keras_export(["keras.Initializer", "keras.initializers.Initializer"]) diff --git a/keras/initializers/random_initializers.py b/keras/src/initializers/random_initializers.py similarity index 99% rename from 
keras/initializers/random_initializers.py rename to keras/src/initializers/random_initializers.py index 8c43df739aba..a25a654aa4bf 100644 --- a/keras/initializers/random_initializers.py +++ b/keras/src/initializers/random_initializers.py @@ -1,10 +1,10 @@ import math -from keras import ops -from keras.api_export import keras_export -from keras.backend import random -from keras.initializers.initializer import Initializer -from keras.saving import serialization_lib +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend import random +from keras.src.initializers.initializer import Initializer +from keras.src.saving import serialization_lib @keras_export( diff --git a/keras/initializers/random_initializers_test.py b/keras/src/initializers/random_initializers_test.py similarity index 98% rename from keras/initializers/random_initializers_test.py rename to keras/src/initializers/random_initializers_test.py index d47d412a7515..b58cf64ba610 100644 --- a/keras/initializers/random_initializers_test.py +++ b/keras/src/initializers/random_initializers_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras import backend -from keras import initializers -from keras import testing -from keras import utils +from keras.src import backend +from keras.src import initializers +from keras.src import testing +from keras.src import utils class InitializersTest(testing.TestCase): diff --git a/keras/src/layers/__init__.py b/keras/src/layers/__init__.py new file mode 100644 index 000000000000..567e1c7526e1 --- /dev/null +++ b/keras/src/layers/__init__.py @@ -0,0 +1,175 @@ +from keras.src.api_export import keras_export +from keras.src.layers.activations.activation import Activation +from keras.src.layers.activations.elu import ELU +from keras.src.layers.activations.leaky_relu import LeakyReLU +from keras.src.layers.activations.prelu import PReLU +from keras.src.layers.activations.relu import ReLU +from keras.src.layers.activations.softmax import Softmax +from keras.src.layers.attention.additive_attention import AdditiveAttention +from keras.src.layers.attention.attention import Attention +from keras.src.layers.attention.grouped_query_attention import ( + GroupedQueryAttention, +) +from keras.src.layers.attention.multi_head_attention import MultiHeadAttention +from keras.src.layers.convolutional.conv1d import Conv1D +from keras.src.layers.convolutional.conv1d_transpose import Conv1DTranspose +from keras.src.layers.convolutional.conv2d import Conv2D +from keras.src.layers.convolutional.conv2d_transpose import Conv2DTranspose +from keras.src.layers.convolutional.conv3d import Conv3D +from keras.src.layers.convolutional.conv3d_transpose import Conv3DTranspose +from keras.src.layers.convolutional.depthwise_conv1d import DepthwiseConv1D +from keras.src.layers.convolutional.depthwise_conv2d import DepthwiseConv2D +from keras.src.layers.convolutional.separable_conv1d import SeparableConv1D +from keras.src.layers.convolutional.separable_conv2d import SeparableConv2D +from keras.src.layers.core.dense import Dense +from keras.src.layers.core.einsum_dense import EinsumDense +from keras.src.layers.core.embedding import Embedding +from keras.src.layers.core.identity import Identity +from keras.src.layers.core.input_layer import Input +from keras.src.layers.core.input_layer import InputLayer +from keras.src.layers.core.lambda_layer import Lambda +from keras.src.layers.core.masking import Masking +from keras.src.layers.core.wrapper import Wrapper +from keras.src.layers.layer import Layer 
+from keras.src.layers.merging.add import Add +from keras.src.layers.merging.add import add +from keras.src.layers.merging.average import Average +from keras.src.layers.merging.average import average +from keras.src.layers.merging.concatenate import Concatenate +from keras.src.layers.merging.concatenate import concatenate +from keras.src.layers.merging.dot import Dot +from keras.src.layers.merging.dot import dot +from keras.src.layers.merging.maximum import Maximum +from keras.src.layers.merging.maximum import maximum +from keras.src.layers.merging.minimum import Minimum +from keras.src.layers.merging.minimum import minimum +from keras.src.layers.merging.multiply import Multiply +from keras.src.layers.merging.multiply import multiply +from keras.src.layers.merging.subtract import Subtract +from keras.src.layers.merging.subtract import subtract +from keras.src.layers.normalization.batch_normalization import ( + BatchNormalization, +) +from keras.src.layers.normalization.group_normalization import ( + GroupNormalization, +) +from keras.src.layers.normalization.layer_normalization import ( + LayerNormalization, +) +from keras.src.layers.normalization.spectral_normalization import ( + SpectralNormalization, +) +from keras.src.layers.normalization.unit_normalization import UnitNormalization +from keras.src.layers.pooling.average_pooling1d import AveragePooling1D +from keras.src.layers.pooling.average_pooling2d import AveragePooling2D +from keras.src.layers.pooling.average_pooling3d import AveragePooling3D +from keras.src.layers.pooling.global_average_pooling1d import ( + GlobalAveragePooling1D, +) +from keras.src.layers.pooling.global_average_pooling2d import ( + GlobalAveragePooling2D, +) +from keras.src.layers.pooling.global_average_pooling3d import ( + GlobalAveragePooling3D, +) +from keras.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D +from keras.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D +from keras.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D +from keras.src.layers.pooling.max_pooling1d import MaxPooling1D +from keras.src.layers.pooling.max_pooling2d import MaxPooling2D +from keras.src.layers.pooling.max_pooling3d import MaxPooling3D +from keras.src.layers.preprocessing.audio_preprocessing import MelSpectrogram +from keras.src.layers.preprocessing.category_encoding import CategoryEncoding +from keras.src.layers.preprocessing.center_crop import CenterCrop +from keras.src.layers.preprocessing.discretization import Discretization +from keras.src.layers.preprocessing.hashed_crossing import HashedCrossing +from keras.src.layers.preprocessing.hashing import Hashing +from keras.src.layers.preprocessing.index_lookup import IndexLookup +from keras.src.layers.preprocessing.integer_lookup import IntegerLookup +from keras.src.layers.preprocessing.normalization import Normalization +from keras.src.layers.preprocessing.random_brightness import RandomBrightness +from keras.src.layers.preprocessing.random_contrast import RandomContrast +from keras.src.layers.preprocessing.random_crop import RandomCrop +from keras.src.layers.preprocessing.random_flip import RandomFlip +from keras.src.layers.preprocessing.random_rotation import RandomRotation +from keras.src.layers.preprocessing.random_translation import RandomTranslation +from keras.src.layers.preprocessing.random_zoom import RandomZoom +from keras.src.layers.preprocessing.rescaling import Rescaling +from keras.src.layers.preprocessing.resizing import Resizing +from 
keras.src.layers.preprocessing.string_lookup import StringLookup +from keras.src.layers.preprocessing.text_vectorization import TextVectorization +from keras.src.layers.regularization.activity_regularization import ( + ActivityRegularization, +) +from keras.src.layers.regularization.alpha_dropout import AlphaDropout +from keras.src.layers.regularization.dropout import Dropout +from keras.src.layers.regularization.gaussian_dropout import GaussianDropout +from keras.src.layers.regularization.gaussian_noise import GaussianNoise +from keras.src.layers.regularization.spatial_dropout import SpatialDropout1D +from keras.src.layers.regularization.spatial_dropout import SpatialDropout2D +from keras.src.layers.regularization.spatial_dropout import SpatialDropout3D +from keras.src.layers.reshaping.cropping1d import Cropping1D +from keras.src.layers.reshaping.cropping2d import Cropping2D +from keras.src.layers.reshaping.cropping3d import Cropping3D +from keras.src.layers.reshaping.flatten import Flatten +from keras.src.layers.reshaping.permute import Permute +from keras.src.layers.reshaping.repeat_vector import RepeatVector +from keras.src.layers.reshaping.reshape import Reshape +from keras.src.layers.reshaping.up_sampling1d import UpSampling1D +from keras.src.layers.reshaping.up_sampling2d import UpSampling2D +from keras.src.layers.reshaping.up_sampling3d import UpSampling3D +from keras.src.layers.reshaping.zero_padding1d import ZeroPadding1D +from keras.src.layers.reshaping.zero_padding2d import ZeroPadding2D +from keras.src.layers.reshaping.zero_padding3d import ZeroPadding3D +from keras.src.layers.rnn.bidirectional import Bidirectional +from keras.src.layers.rnn.conv_lstm1d import ConvLSTM1D +from keras.src.layers.rnn.conv_lstm2d import ConvLSTM2D +from keras.src.layers.rnn.conv_lstm3d import ConvLSTM3D +from keras.src.layers.rnn.gru import GRU +from keras.src.layers.rnn.gru import GRUCell +from keras.src.layers.rnn.lstm import LSTM +from keras.src.layers.rnn.lstm import LSTMCell +from keras.src.layers.rnn.rnn import RNN +from keras.src.layers.rnn.simple_rnn import SimpleRNN +from keras.src.layers.rnn.simple_rnn import SimpleRNNCell +from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells +from keras.src.layers.rnn.time_distributed import TimeDistributed +from keras.src.saving import serialization_lib + + +@keras_export("keras.layers.serialize") +def serialize(layer): + """Returns the layer configuration as a Python dict. + + Args: + layer: A `keras.layers.Layer` instance to serialize. + + Returns: + Python dict which contains the configuration of the layer. + """ + return serialization_lib.serialize_keras_object(layer) + + +@keras_export("keras.layers.deserialize") +def deserialize(config, custom_objects=None): + """Returns a Keras layer object via its configuration. + + Args: + config: A python dict containing a serialized layer configuration. + custom_objects: Optional dictionary mapping names (strings) to custom + objects (classes and functions) to be considered during + deserialization. + + Returns: + A Keras layer instance. + """ + obj = serialization_lib.deserialize_keras_object( + config, + custom_objects=custom_objects, + ) + if not isinstance(obj, Layer): + raise ValueError( + "`keras.layers.deserialize` was passed a `config` object that is " + f"not a `keras.layers.Layer`. 
Received: {config}" + ) + return obj diff --git a/keras/src/layers/activations/__init__.py b/keras/src/layers/activations/__init__.py new file mode 100644 index 000000000000..009ce976c51b --- /dev/null +++ b/keras/src/layers/activations/__init__.py @@ -0,0 +1,5 @@ +from keras.src.layers.activations.elu import ELU +from keras.src.layers.activations.leaky_relu import LeakyReLU +from keras.src.layers.activations.prelu import PReLU +from keras.src.layers.activations.relu import ReLU +from keras.src.layers.activations.softmax import Softmax diff --git a/keras/layers/activations/activation.py b/keras/src/layers/activations/activation.py similarity index 90% rename from keras/layers/activations/activation.py rename to keras/src/layers/activations/activation.py index 438bd3f28c7e..271cf1e4682e 100644 --- a/keras/layers/activations/activation.py +++ b/keras/src/layers/activations/activation.py @@ -1,6 +1,6 @@ -from keras import activations -from keras.api_export import keras_export -from keras.layers.layer import Layer +from keras.src import activations +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer @keras_export("keras.layers.Activation") diff --git a/keras/layers/activations/activation_test.py b/keras/src/layers/activations/activation_test.py similarity index 91% rename from keras/layers/activations/activation_test.py rename to keras/src/layers/activations/activation_test.py index 355f0602c14a..26d77dd44b28 100644 --- a/keras/layers/activations/activation_test.py +++ b/keras/src/layers/activations/activation_test.py @@ -1,8 +1,8 @@ import pytest -from keras import activations -from keras import layers -from keras import testing +from keras.src import activations +from keras.src import layers +from keras.src import testing class ActivationTest(testing.TestCase): diff --git a/keras/layers/activations/elu.py b/keras/src/layers/activations/elu.py similarity index 85% rename from keras/layers/activations/elu.py rename to keras/src/layers/activations/elu.py index ed8abdf3369e..f15f91546cf4 100644 --- a/keras/layers/activations/elu.py +++ b/keras/src/layers/activations/elu.py @@ -1,6 +1,6 @@ -from keras import activations -from keras.api_export import keras_export -from keras.layers.layer import Layer +from keras.src import activations +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer @keras_export("keras.layers.ELU") diff --git a/keras/layers/activations/elu_test.py b/keras/src/layers/activations/elu_test.py similarity index 91% rename from keras/layers/activations/elu_test.py rename to keras/src/layers/activations/elu_test.py index 1af54ffd4fb7..7dd8b3f7b799 100644 --- a/keras/layers/activations/elu_test.py +++ b/keras/src/layers/activations/elu_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras import testing -from keras.layers.activations import elu +from keras.src import testing +from keras.src.layers.activations import elu class ELUTest(testing.TestCase): diff --git a/keras/layers/activations/leaky_relu.py b/keras/src/layers/activations/leaky_relu.py similarity index 93% rename from keras/layers/activations/leaky_relu.py rename to keras/src/layers/activations/leaky_relu.py index 4e2783fc08f8..988c383b7450 100644 --- a/keras/layers/activations/leaky_relu.py +++ b/keras/src/layers/activations/leaky_relu.py @@ -1,8 +1,8 @@ import warnings -from keras import activations -from keras.api_export import keras_export -from keras.layers.layer import Layer +from keras.src import activations +from 
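
The `serialize`/`deserialize` pair added to `keras/src/layers/__init__.py` above is the standard layer-config round trip; a small usage sketch:

    from keras import layers

    layer = layers.Dense(4, activation="relu")
    config = layers.serialize(layer)    # plain dict: class name + layer config
    clone = layers.deserialize(config)  # rebuilds an equivalent, unbuilt layer
    assert isinstance(clone, layers.Dense)
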
keras.src.api_export import keras_export +from keras.src.layers.layer import Layer @keras_export("keras.layers.LeakyReLU") diff --git a/keras/layers/activations/leaky_relu_test.py b/keras/src/layers/activations/leaky_relu_test.py similarity index 93% rename from keras/layers/activations/leaky_relu_test.py rename to keras/src/layers/activations/leaky_relu_test.py index 8665ce11926a..e42c10c9f539 100644 --- a/keras/layers/activations/leaky_relu_test.py +++ b/keras/src/layers/activations/leaky_relu_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras import testing -from keras.layers.activations import leaky_relu +from keras.src import testing +from keras.src.layers.activations import leaky_relu class LeakyReLUTest(testing.TestCase): diff --git a/keras/layers/activations/prelu.py b/keras/src/layers/activations/prelu.py similarity index 92% rename from keras/layers/activations/prelu.py rename to keras/src/layers/activations/prelu.py index 4f2a0a2e76e1..f46d974df824 100644 --- a/keras/layers/activations/prelu.py +++ b/keras/src/layers/activations/prelu.py @@ -1,10 +1,10 @@ -from keras import activations -from keras import constraints -from keras import initializers -from keras import regularizers -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer +from keras.src import activations +from keras.src import constraints +from keras.src import initializers +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer @keras_export("keras.layers.PReLU") diff --git a/keras/layers/activations/prelu_test.py b/keras/src/layers/activations/prelu_test.py similarity index 93% rename from keras/layers/activations/prelu_test.py rename to keras/src/layers/activations/prelu_test.py index 82785de104a5..63b4aee20617 100644 --- a/keras/layers/activations/prelu_test.py +++ b/keras/src/layers/activations/prelu_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras import testing -from keras.layers.activations import prelu +from keras.src import testing +from keras.src.layers.activations import prelu class PReLUTest(testing.TestCase): diff --git a/keras/layers/activations/relu.py b/keras/src/layers/activations/relu.py similarity index 95% rename from keras/layers/activations/relu.py rename to keras/src/layers/activations/relu.py index e7efa03d7950..8467d34fa0f5 100644 --- a/keras/layers/activations/relu.py +++ b/keras/src/layers/activations/relu.py @@ -1,6 +1,6 @@ -from keras import activations -from keras.api_export import keras_export -from keras.layers.layer import Layer +from keras.src import activations +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer @keras_export("keras.layers.ReLU") diff --git a/keras/layers/activations/relu_test.py b/keras/src/layers/activations/relu_test.py similarity index 97% rename from keras/layers/activations/relu_test.py rename to keras/src/layers/activations/relu_test.py index c423f76a3f64..0c1f64d73a18 100644 --- a/keras/layers/activations/relu_test.py +++ b/keras/src/layers/activations/relu_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras import testing -from keras.layers.activations import relu +from keras.src import testing +from keras.src.layers.activations import relu class ReLUTest(testing.TestCase): diff --git a/keras/layers/activations/softmax.py b/keras/src/layers/activations/softmax.py similarity 
index 93% rename from keras/layers/activations/softmax.py rename to keras/src/layers/activations/softmax.py index e29f2ab7c755..c1fee581a89d 100644 --- a/keras/layers/activations/softmax.py +++ b/keras/src/layers/activations/softmax.py @@ -1,7 +1,7 @@ -from keras import activations -from keras import backend -from keras.api_export import keras_export -from keras.layers.layer import Layer +from keras.src import activations +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer def _large_negative_number(dtype): diff --git a/keras/layers/activations/softmax_test.py b/keras/src/layers/activations/softmax_test.py similarity index 95% rename from keras/layers/activations/softmax_test.py rename to keras/src/layers/activations/softmax_test.py index fb3484799f50..e835a5f345d7 100644 --- a/keras/layers/activations/softmax_test.py +++ b/keras/src/layers/activations/softmax_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras import testing -from keras.layers.activations import softmax +from keras.src import testing +from keras.src.layers.activations import softmax class SoftmaxTest(testing.TestCase): diff --git a/keras/layers/attention/__init__.py b/keras/src/layers/attention/__init__.py similarity index 100% rename from keras/layers/attention/__init__.py rename to keras/src/layers/attention/__init__.py diff --git a/keras/layers/attention/additive_attention.py b/keras/src/layers/attention/additive_attention.py similarity index 96% rename from keras/layers/attention/additive_attention.py rename to keras/src/layers/attention/additive_attention.py index f0103673d52a..31cc7c93f296 100644 --- a/keras/layers/attention/additive_attention.py +++ b/keras/src/layers/attention/additive_attention.py @@ -1,6 +1,6 @@ -from keras import ops -from keras.api_export import keras_export -from keras.layers.attention.attention import Attention +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.attention.attention import Attention @keras_export("keras.layers.AdditiveAttention") diff --git a/keras/layers/attention/additive_attention_test.py b/keras/src/layers/attention/additive_attention_test.py similarity index 98% rename from keras/layers/attention/additive_attention_test.py rename to keras/src/layers/attention/additive_attention_test.py index 1b37b20fca4f..51092c6c4918 100644 --- a/keras/layers/attention/additive_attention_test.py +++ b/keras/src/layers/attention/additive_attention_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras import layers -from keras import testing +from keras.src import layers +from keras.src import testing class AdditiveAttentionTest(testing.TestCase): diff --git a/keras/layers/attention/attention.py b/keras/src/layers/attention/attention.py similarity index 98% rename from keras/layers/attention/attention.py rename to keras/src/layers/attention/attention.py index b42b4c05634b..48c6332ff157 100644 --- a/keras/layers/attention/attention.py +++ b/keras/src/layers/attention/attention.py @@ -1,7 +1,7 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.layers.layer import Layer +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer @keras_export("keras.layers.Attention") diff --git a/keras/layers/attention/attention_test.py b/keras/src/layers/attention/attention_test.py similarity index 99% rename from 
keras/layers/attention/attention_test.py rename to keras/src/layers/attention/attention_test.py index 102717994ea8..de8dba643405 100644 --- a/keras/layers/attention/attention_test.py +++ b/keras/src/layers/attention/attention_test.py @@ -1,8 +1,8 @@ import numpy as np -from keras import layers -from keras import ops -from keras import testing +from keras.src import layers +from keras.src import ops +from keras.src import testing class AttentionTest(testing.TestCase): diff --git a/keras/layers/attention/grouped_query_attention.py b/keras/src/layers/attention/grouped_query_attention.py similarity index 97% rename from keras/layers/attention/grouped_query_attention.py rename to keras/src/layers/attention/grouped_query_attention.py index 79a59c8d7c34..fe09f0633178 100644 --- a/keras/layers/attention/grouped_query_attention.py +++ b/keras/src/layers/attention/grouped_query_attention.py @@ -1,12 +1,12 @@ -from keras import constraints -from keras import initializers -from keras import ops -from keras import regularizers -from keras.api_export import keras_export -from keras.layers.activations.softmax import Softmax -from keras.layers.core.einsum_dense import EinsumDense -from keras.layers.layer import Layer -from keras.layers.regularization.dropout import Dropout +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.activations.softmax import Softmax +from keras.src.layers.core.einsum_dense import EinsumDense +from keras.src.layers.layer import Layer +from keras.src.layers.regularization.dropout import Dropout @keras_export("keras.layers.GroupQueryAttention") diff --git a/keras/layers/attention/grouped_query_attention_test.py b/keras/src/layers/attention/grouped_query_attention_test.py similarity index 98% rename from keras/layers/attention/grouped_query_attention_test.py rename to keras/src/layers/attention/grouped_query_attention_test.py index 8a926a93bb4f..58a00dc54df5 100644 --- a/keras/layers/attention/grouped_query_attention_test.py +++ b/keras/src/layers/attention/grouped_query_attention_test.py @@ -2,10 +2,10 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import initializers -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import initializers +from keras.src import layers +from keras.src import testing class GroupedQueryAttentionTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/attention/multi_head_attention.py b/keras/src/layers/attention/multi_head_attention.py similarity index 98% rename from keras/layers/attention/multi_head_attention.py rename to keras/src/layers/attention/multi_head_attention.py index c8a6d32589ec..5571d05683ae 100644 --- a/keras/layers/attention/multi_head_attention.py +++ b/keras/src/layers/attention/multi_head_attention.py @@ -4,16 +4,16 @@ import numpy as np -from keras import backend -from keras import constraints -from keras import initializers -from keras import ops -from keras import regularizers -from keras.api_export import keras_export -from keras.layers.activations.softmax import Softmax -from keras.layers.core.einsum_dense import EinsumDense -from keras.layers.layer import Layer -from keras.layers.regularization.dropout import Dropout +from keras.src import backend +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import 
regularizers +from keras.src.api_export import keras_export +from keras.src.layers.activations.softmax import Softmax +from keras.src.layers.core.einsum_dense import EinsumDense +from keras.src.layers.layer import Layer +from keras.src.layers.regularization.dropout import Dropout @keras_export("keras.layers.MultiHeadAttention") diff --git a/keras/layers/attention/multi_head_attention_test.py b/keras/src/layers/attention/multi_head_attention_test.py similarity index 98% rename from keras/layers/attention/multi_head_attention_test.py rename to keras/src/layers/attention/multi_head_attention_test.py index 361e015c17dc..6feac6086676 100644 --- a/keras/layers/attention/multi_head_attention_test.py +++ b/keras/src/layers/attention/multi_head_attention_test.py @@ -4,13 +4,13 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import constraints -from keras import initializers -from keras import layers -from keras import models -from keras import saving -from keras import testing +from keras.src import backend +from keras.src import constraints +from keras.src import initializers +from keras.src import layers +from keras.src import models +from keras.src import saving +from keras.src import testing class MultiHeadAttentionTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/convolutional/__init__.py b/keras/src/layers/convolutional/__init__.py similarity index 100% rename from keras/layers/convolutional/__init__.py rename to keras/src/layers/convolutional/__init__.py diff --git a/keras/layers/convolutional/base_conv.py b/keras/src/layers/convolutional/base_conv.py similarity index 97% rename from keras/layers/convolutional/base_conv.py rename to keras/src/layers/convolutional/base_conv.py index 689083d94003..96a66e58e40c 100644 --- a/keras/layers/convolutional/base_conv.py +++ b/keras/src/layers/convolutional/base_conv.py @@ -1,16 +1,16 @@ """Keras base class for convolution layers.""" -from keras import activations -from keras import constraints -from keras import initializers -from keras import ops -from keras import regularizers -from keras.backend import standardize_data_format -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.ops.operation_utils import compute_conv_output_shape -from keras.utils.argument_validation import standardize_padding -from keras.utils.argument_validation import standardize_tuple +from keras.src import activations +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.backend import standardize_data_format +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.ops.operation_utils import compute_conv_output_shape +from keras.src.utils.argument_validation import standardize_padding +from keras.src.utils.argument_validation import standardize_tuple class BaseConv(Layer): diff --git a/keras/layers/convolutional/base_conv_transpose.py b/keras/src/layers/convolutional/base_conv_transpose.py similarity index 95% rename from keras/layers/convolutional/base_conv_transpose.py rename to keras/src/layers/convolutional/base_conv_transpose.py index ec5ab1e2396b..af0a68e3aded 100644 --- a/keras/layers/convolutional/base_conv_transpose.py +++ b/keras/src/layers/convolutional/base_conv_transpose.py @@ -1,18 +1,18 @@ """Keras base class for transpose convolution layers.""" -from keras import activations -from keras import 
constraints -from keras import initializers -from keras import ops -from keras import regularizers -from keras.backend import standardize_data_format -from keras.backend.common.backend_utils import ( +from keras.src import activations +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.backend import standardize_data_format +from keras.src.backend.common.backend_utils import ( compute_conv_transpose_output_shape, ) -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.utils.argument_validation import standardize_padding -from keras.utils.argument_validation import standardize_tuple +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils.argument_validation import standardize_padding +from keras.src.utils.argument_validation import standardize_tuple class BaseConvTranspose(Layer): diff --git a/keras/layers/convolutional/base_depthwise_conv.py b/keras/src/layers/convolutional/base_depthwise_conv.py similarity index 95% rename from keras/layers/convolutional/base_depthwise_conv.py rename to keras/src/layers/convolutional/base_depthwise_conv.py index 123acb920166..b9f5d442d22a 100644 --- a/keras/layers/convolutional/base_depthwise_conv.py +++ b/keras/src/layers/convolutional/base_depthwise_conv.py @@ -1,16 +1,16 @@ """Keras base class for depthwise convolution layers.""" -from keras import activations -from keras import constraints -from keras import initializers -from keras import ops -from keras import regularizers -from keras.backend import standardize_data_format -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.ops.operation_utils import compute_conv_output_shape -from keras.utils.argument_validation import standardize_padding -from keras.utils.argument_validation import standardize_tuple +from keras.src import activations +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.backend import standardize_data_format +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.ops.operation_utils import compute_conv_output_shape +from keras.src.utils.argument_validation import standardize_padding +from keras.src.utils.argument_validation import standardize_tuple class BaseDepthwiseConv(Layer): diff --git a/keras/layers/convolutional/base_separable_conv.py b/keras/src/layers/convolutional/base_separable_conv.py similarity index 95% rename from keras/layers/convolutional/base_separable_conv.py rename to keras/src/layers/convolutional/base_separable_conv.py index 99c45afaa1d1..5073b1813dea 100644 --- a/keras/layers/convolutional/base_separable_conv.py +++ b/keras/src/layers/convolutional/base_separable_conv.py @@ -1,16 +1,16 @@ """Keras abstract base layer for separable convolution.""" -from keras import activations -from keras import constraints -from keras import initializers -from keras import ops -from keras import regularizers -from keras.backend import standardize_data_format -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.ops.operation_utils import compute_conv_output_shape -from keras.utils.argument_validation import standardize_padding -from keras.utils.argument_validation import standardize_tuple +from keras.src import activations +from keras.src import 
constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.backend import standardize_data_format +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.ops.operation_utils import compute_conv_output_shape +from keras.src.utils.argument_validation import standardize_padding +from keras.src.utils.argument_validation import standardize_tuple class BaseSeparableConv(Layer): diff --git a/keras/layers/convolutional/conv1d.py b/keras/src/layers/convolutional/conv1d.py similarity index 98% rename from keras/layers/convolutional/conv1d.py rename to keras/src/layers/convolutional/conv1d.py index 37509452b6ef..f88ac59e5d68 100644 --- a/keras/layers/convolutional/conv1d.py +++ b/keras/src/layers/convolutional/conv1d.py @@ -1,6 +1,6 @@ -from keras import ops -from keras.api_export import keras_export -from keras.layers.convolutional.base_conv import BaseConv +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_conv import BaseConv @keras_export(["keras.layers.Conv1D", "keras.layers.Convolution1D"]) diff --git a/keras/layers/convolutional/conv1d_transpose.py b/keras/src/layers/convolutional/conv1d_transpose.py similarity index 97% rename from keras/layers/convolutional/conv1d_transpose.py rename to keras/src/layers/convolutional/conv1d_transpose.py index f97482af0e04..6f79ca3dafef 100644 --- a/keras/layers/convolutional/conv1d_transpose.py +++ b/keras/src/layers/convolutional/conv1d_transpose.py @@ -1,5 +1,5 @@ -from keras.api_export import keras_export -from keras.layers.convolutional.base_conv_transpose import BaseConvTranspose +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_conv_transpose import BaseConvTranspose @keras_export( diff --git a/keras/layers/convolutional/conv2d.py b/keras/src/layers/convolutional/conv2d.py similarity index 98% rename from keras/layers/convolutional/conv2d.py rename to keras/src/layers/convolutional/conv2d.py index 77830c9225c0..791cb041fb4e 100644 --- a/keras/layers/convolutional/conv2d.py +++ b/keras/src/layers/convolutional/conv2d.py @@ -1,5 +1,5 @@ -from keras.api_export import keras_export -from keras.layers.convolutional.base_conv import BaseConv +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_conv import BaseConv @keras_export(["keras.layers.Conv2D", "keras.layers.Convolution2D"]) diff --git a/keras/layers/convolutional/conv2d_transpose.py b/keras/src/layers/convolutional/conv2d_transpose.py similarity index 97% rename from keras/layers/convolutional/conv2d_transpose.py rename to keras/src/layers/convolutional/conv2d_transpose.py index a690e85f237b..cc4293c8db4e 100644 --- a/keras/layers/convolutional/conv2d_transpose.py +++ b/keras/src/layers/convolutional/conv2d_transpose.py @@ -1,5 +1,5 @@ -from keras.api_export import keras_export -from keras.layers.convolutional.base_conv_transpose import BaseConvTranspose +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_conv_transpose import BaseConvTranspose @keras_export( diff --git a/keras/layers/convolutional/conv3d.py b/keras/src/layers/convolutional/conv3d.py similarity index 98% rename from keras/layers/convolutional/conv3d.py rename to keras/src/layers/convolutional/conv3d.py index 63171a1eb2b2..516342ddbc04 100644 --- a/keras/layers/convolutional/conv3d.py +++ b/keras/src/layers/convolutional/conv3d.py @@ -1,5 +1,5 
@@ -from keras.api_export import keras_export -from keras.layers.convolutional.base_conv import BaseConv +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_conv import BaseConv @keras_export(["keras.layers.Conv3D", "keras.layers.Convolution3D"]) diff --git a/keras/layers/convolutional/conv3d_transpose.py b/keras/src/layers/convolutional/conv3d_transpose.py similarity index 97% rename from keras/layers/convolutional/conv3d_transpose.py rename to keras/src/layers/convolutional/conv3d_transpose.py index f514f3322382..c0f651147fcf 100644 --- a/keras/layers/convolutional/conv3d_transpose.py +++ b/keras/src/layers/convolutional/conv3d_transpose.py @@ -1,5 +1,5 @@ -from keras.api_export import keras_export -from keras.layers.convolutional.base_conv_transpose import BaseConvTranspose +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_conv_transpose import BaseConvTranspose @keras_export( diff --git a/keras/layers/convolutional/conv_test.py b/keras/src/layers/convolutional/conv_test.py similarity index 99% rename from keras/layers/convolutional/conv_test.py rename to keras/src/layers/convolutional/conv_test.py index 281e35a960e5..6bf9c6776dc2 100644 --- a/keras/layers/convolutional/conv_test.py +++ b/keras/src/layers/convolutional/conv_test.py @@ -5,12 +5,12 @@ from absl.testing import parameterized from numpy.lib.stride_tricks import as_strided -from keras import backend -from keras import constraints -from keras import layers -from keras import models -from keras import saving -from keras import testing +from keras.src import backend +from keras.src import constraints +from keras.src import layers +from keras.src import models +from keras.src import saving +from keras.src import testing def _same_padding(input_size, kernel_size, stride): diff --git a/keras/layers/convolutional/conv_transpose_test.py b/keras/src/layers/convolutional/conv_transpose_test.py similarity index 99% rename from keras/layers/convolutional/conv_transpose_test.py rename to keras/src/layers/convolutional/conv_transpose_test.py index 57252ae2fd73..1a3eb8f5f07e 100644 --- a/keras/layers/convolutional/conv_transpose_test.py +++ b/keras/src/layers/convolutional/conv_transpose_test.py @@ -2,16 +2,16 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import testing -from keras.backend.common.backend_utils import ( +from keras.src import backend +from keras.src import layers +from keras.src import testing +from keras.src.backend.common.backend_utils import ( _convert_conv_tranpose_padding_args_from_keras_to_torch, ) -from keras.backend.common.backend_utils import ( +from keras.src.backend.common.backend_utils import ( compute_conv_transpose_output_shape, ) -from keras.backend.common.backend_utils import ( +from keras.src.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_jax, ) diff --git a/keras/layers/convolutional/depthwise_conv1d.py b/keras/src/layers/convolutional/depthwise_conv1d.py similarity index 97% rename from keras/layers/convolutional/depthwise_conv1d.py rename to keras/src/layers/convolutional/depthwise_conv1d.py index 2d9db92d4160..02e5cc26e366 100644 --- a/keras/layers/convolutional/depthwise_conv1d.py +++ b/keras/src/layers/convolutional/depthwise_conv1d.py @@ -1,5 +1,5 @@ -from keras.api_export import keras_export -from keras.layers.convolutional.base_depthwise_conv import BaseDepthwiseConv +from keras.src.api_export import keras_export +from 
keras.src.layers.convolutional.base_depthwise_conv import BaseDepthwiseConv @keras_export("keras.layers.DepthwiseConv1D") diff --git a/keras/layers/convolutional/depthwise_conv2d.py b/keras/src/layers/convolutional/depthwise_conv2d.py similarity index 97% rename from keras/layers/convolutional/depthwise_conv2d.py rename to keras/src/layers/convolutional/depthwise_conv2d.py index 0fb14337c2ad..9a169af0a8ec 100644 --- a/keras/layers/convolutional/depthwise_conv2d.py +++ b/keras/src/layers/convolutional/depthwise_conv2d.py @@ -1,5 +1,5 @@ -from keras.api_export import keras_export -from keras.layers.convolutional.base_depthwise_conv import BaseDepthwiseConv +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_depthwise_conv import BaseDepthwiseConv @keras_export("keras.layers.DepthwiseConv2D") diff --git a/keras/layers/convolutional/depthwise_conv_test.py b/keras/src/layers/convolutional/depthwise_conv_test.py similarity index 99% rename from keras/layers/convolutional/depthwise_conv_test.py rename to keras/src/layers/convolutional/depthwise_conv_test.py index 6aa0b8098287..a22967141aaa 100644 --- a/keras/layers/convolutional/depthwise_conv_test.py +++ b/keras/src/layers/convolutional/depthwise_conv_test.py @@ -3,8 +3,8 @@ from absl.testing import parameterized from numpy.lib.stride_tricks import as_strided -from keras import layers -from keras import testing +from keras.src import layers +from keras.src import testing def _same_padding(input_size, kernel_size, stride): diff --git a/keras/layers/convolutional/separable_conv1d.py b/keras/src/layers/convolutional/separable_conv1d.py similarity index 98% rename from keras/layers/convolutional/separable_conv1d.py rename to keras/src/layers/convolutional/separable_conv1d.py index 0c8cb101a306..2f71556750be 100644 --- a/keras/layers/convolutional/separable_conv1d.py +++ b/keras/src/layers/convolutional/separable_conv1d.py @@ -1,5 +1,5 @@ -from keras.api_export import keras_export -from keras.layers.convolutional.base_separable_conv import BaseSeparableConv +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_separable_conv import BaseSeparableConv @keras_export( diff --git a/keras/layers/convolutional/separable_conv2d.py b/keras/src/layers/convolutional/separable_conv2d.py similarity index 98% rename from keras/layers/convolutional/separable_conv2d.py rename to keras/src/layers/convolutional/separable_conv2d.py index 010fe6c29e58..503d2a22b7d9 100644 --- a/keras/layers/convolutional/separable_conv2d.py +++ b/keras/src/layers/convolutional/separable_conv2d.py @@ -1,5 +1,5 @@ -from keras.api_export import keras_export -from keras.layers.convolutional.base_separable_conv import BaseSeparableConv +from keras.src.api_export import keras_export +from keras.src.layers.convolutional.base_separable_conv import BaseSeparableConv @keras_export( diff --git a/keras/layers/convolutional/separable_conv_test.py b/keras/src/layers/convolutional/separable_conv_test.py similarity index 96% rename from keras/layers/convolutional/separable_conv_test.py rename to keras/src/layers/convolutional/separable_conv_test.py index 9d5f3e02925b..4d6fda73516d 100644 --- a/keras/layers/convolutional/separable_conv_test.py +++ b/keras/src/layers/convolutional/separable_conv_test.py @@ -2,12 +2,16 @@ import pytest from absl.testing import parameterized -from keras import layers -from keras import testing -from keras.layers.convolutional.conv_test import np_conv1d -from keras.layers.convolutional.conv_test import 
np_conv2d -from keras.layers.convolutional.depthwise_conv_test import np_depthwise_conv1d -from keras.layers.convolutional.depthwise_conv_test import np_depthwise_conv2d +from keras.src import layers +from keras.src import testing +from keras.src.layers.convolutional.conv_test import np_conv1d +from keras.src.layers.convolutional.conv_test import np_conv2d +from keras.src.layers.convolutional.depthwise_conv_test import ( + np_depthwise_conv1d, +) +from keras.src.layers.convolutional.depthwise_conv_test import ( + np_depthwise_conv2d, +) class SeparableConvBasicTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/core/__init__.py b/keras/src/layers/core/__init__.py similarity index 100% rename from keras/layers/core/__init__.py rename to keras/src/layers/core/__init__.py diff --git a/keras/layers/core/dense.py b/keras/src/layers/core/dense.py similarity index 98% rename from keras/layers/core/dense.py rename to keras/src/layers/core/dense.py index d7dffadc0548..fad22e6b755e 100644 --- a/keras/layers/core/dense.py +++ b/keras/src/layers/core/dense.py @@ -1,16 +1,16 @@ import ml_dtypes -from keras import activations -from keras import backend -from keras import constraints -from keras import dtype_policies -from keras import initializers -from keras import ops -from keras import quantizers -from keras import regularizers -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer +from keras.src import activations +from keras.src import backend +from keras.src import constraints +from keras.src import dtype_policies +from keras.src import initializers +from keras.src import ops +from keras.src import quantizers +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer @keras_export("keras.layers.Dense") diff --git a/keras/layers/core/dense_test.py b/keras/src/layers/core/dense_test.py similarity index 98% rename from keras/layers/core/dense_test.py rename to keras/src/layers/core/dense_test.py index e7161f7a3921..8a959543dc0f 100644 --- a/keras/layers/core/dense_test.py +++ b/keras/src/layers/core/dense_test.py @@ -4,17 +4,17 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import constraints -from keras import layers -from keras import models -from keras import ops -from keras import optimizers -from keras import random -from keras import saving -from keras import testing -from keras.backend.common import keras_tensor -from keras.export import export_lib +from keras.src import backend +from keras.src import constraints +from keras.src import layers +from keras.src import models +from keras.src import ops +from keras.src import optimizers +from keras.src import random +from keras.src import saving +from keras.src import testing +from keras.src.backend.common import keras_tensor +from keras.src.export import export_lib class DenseTest(testing.TestCase, parameterized.TestCase): @@ -556,7 +556,7 @@ def test_quantize_float8_dtype_argument(self): def test_quantize_float8(self): import ml_dtypes - from keras import quantizers + from keras.src import quantizers layer = layers.Dense(units=32) layer.build((None, 16)) diff --git a/keras/layers/core/einsum_dense.py b/keras/src/layers/core/einsum_dense.py similarity index 99% rename from keras/layers/core/einsum_dense.py rename to keras/src/layers/core/einsum_dense.py index 95f7b7274215..8fcecc17c9c4 
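
The `test_quantize_float8` hunk above exercises post-build quantization of `Dense`; a sketch of the call pattern it tests, assuming the `Layer.quantize` API from this PR series supports the "float8" mode:

    from keras import layers

    layer = layers.Dense(units=32)
    layer.build((None, 16))
    # Assumed behavior: swaps the layer to a float8 dtype policy
    # (e.g. "float8_from_float32" for a float32-built layer).
    layer.quantize("float8")
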
--- a/keras/layers/core/einsum_dense.py
+++ b/keras/src/layers/core/einsum_dense.py
@@ -4,17 +4,17 @@
 import ml_dtypes
 import numpy as np

-from keras import activations
-from keras import backend
-from keras import constraints
-from keras import dtype_policies
-from keras import initializers
-from keras import ops
-from keras import quantizers
-from keras import regularizers
-from keras.api_export import keras_export
-from keras.layers.input_spec import InputSpec
-from keras.layers.layer import Layer
+from keras.src import activations
+from keras.src import backend
+from keras.src import constraints
+from keras.src import dtype_policies
+from keras.src import initializers
+from keras.src import ops
+from keras.src import quantizers
+from keras.src import regularizers
+from keras.src.api_export import keras_export
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer


 @keras_export("keras.layers.EinsumDense")
diff --git a/keras/layers/core/einsum_dense_test.py b/keras/src/layers/core/einsum_dense_test.py
similarity index 98%
rename from keras/layers/core/einsum_dense_test.py
rename to keras/src/layers/core/einsum_dense_test.py
index dbf782e1e607..7ce83dd75f84 100644
--- a/keras/layers/core/einsum_dense_test.py
+++ b/keras/src/layers/core/einsum_dense_test.py
@@ -4,16 +4,16 @@
 import pytest
 from absl.testing import parameterized

-from keras import backend
-from keras import constraints
-from keras import layers
-from keras import models
-from keras import ops
-from keras import optimizers
-from keras import random
-from keras import saving
-from keras import testing
-from keras.export import export_lib
+from keras.src import backend
+from keras.src import constraints
+from keras.src import layers
+from keras.src import models
+from keras.src import ops
+from keras.src import optimizers
+from keras.src import random
+from keras.src import saving
+from keras.src import testing
+from keras.src.export import export_lib


 class EinsumDenseTest(testing.TestCase, parameterized.TestCase):
@@ -659,7 +659,7 @@ def test_quantize_float8_dtype_argument(self):
     def test_quantize_float8(self):
         import ml_dtypes

-        from keras import quantizers
+        from keras.src import quantizers

         layer = layers.EinsumDense(
             "ab,bc->ac",
diff --git a/keras/layers/core/embedding.py b/keras/src/layers/core/embedding.py
similarity index 98%
rename from keras/layers/core/embedding.py
rename to keras/src/layers/core/embedding.py
index d7afb529bb9e..03a9a61ee15b 100644
--- a/keras/layers/core/embedding.py
+++ b/keras/src/layers/core/embedding.py
@@ -1,14 +1,14 @@
 import warnings

-from keras import backend
-from keras import constraints
-from keras import dtype_policies
-from keras import initializers
-from keras import ops
-from keras import quantizers
-from keras import regularizers
-from keras.api_export import keras_export
-from keras.layers.layer import Layer
+from keras.src import backend
+from keras.src import constraints
+from keras.src import dtype_policies
+from keras.src import initializers
+from keras.src import ops
+from keras.src import quantizers
+from keras.src import regularizers
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer


 @keras_export("keras.layers.Embedding")
diff --git a/keras/layers/core/embedding_test.py b/keras/src/layers/core/embedding_test.py
similarity index 98%
rename from keras/layers/core/embedding_test.py
rename to keras/src/layers/core/embedding_test.py
index 2663ee1fce13..6c0af85a3095 100644
--- a/keras/layers/core/embedding_test.py
+++ b/keras/src/layers/core/embedding_test.py
@@ -4,14 +4,14 @@
 import pytest
 from absl.testing import parameterized

-from keras import backend
-from keras import constraints
-from keras import layers
-from keras import models
-from keras import ops
-from keras import saving
-from keras.export import export_lib
-from keras.testing import test_case
+from keras.src import backend
+from keras.src import constraints
+from keras.src import layers
+from keras.src import models
+from keras.src import ops
+from keras.src import saving
+from keras.src.export import export_lib
+from keras.src.testing import test_case


 class EmbeddingTest(test_case.TestCase, parameterized.TestCase):
diff --git a/keras/layers/core/identity.py b/keras/src/layers/core/identity.py
similarity index 80%
rename from keras/layers/core/identity.py
rename to keras/src/layers/core/identity.py
index b1e1cd50912f..1fd329c3703f 100644
--- a/keras/layers/core/identity.py
+++ b/keras/src/layers/core/identity.py
@@ -1,7 +1,7 @@
-from keras import tree
-from keras.api_export import keras_export
-from keras.backend import KerasTensor
-from keras.layers.layer import Layer
+from keras.src import tree
+from keras.src.api_export import keras_export
+from keras.src.backend import KerasTensor
+from keras.src.layers.layer import Layer


 @keras_export("keras.layers.Identity")
diff --git a/keras/layers/core/identity_test.py b/keras/src/layers/core/identity_test.py
similarity index 91%
rename from keras/layers/core/identity_test.py
rename to keras/src/layers/core/identity_test.py
index 456a292e1b01..1be760f531d6 100644
--- a/keras/layers/core/identity_test.py
+++ b/keras/src/layers/core/identity_test.py
@@ -1,9 +1,9 @@
 import pytest
 from absl.testing import parameterized

-from keras import backend
-from keras import layers
-from keras import testing
+from keras.src import backend
+from keras.src import layers
+from keras.src import testing


 class IdentityTest(testing.TestCase, parameterized.TestCase):
diff --git a/keras/layers/core/input_layer.py b/keras/src/layers/core/input_layer.py
similarity index 97%
rename from keras/layers/core/input_layer.py
rename to keras/src/layers/core/input_layer.py
index 974681d57a44..6b4968c0d9b2 100644
--- a/keras/layers/core/input_layer.py
+++ b/keras/src/layers/core/input_layer.py
@@ -1,9 +1,9 @@
 import warnings

-from keras import backend
-from keras.api_export import keras_export
-from keras.layers.layer import Layer
-from keras.ops.node import Node
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer
+from keras.src.ops.node import Node


 @keras_export("keras.layers.InputLayer")
diff --git a/keras/layers/core/input_layer_test.py b/keras/src/layers/core/input_layer_test.py
similarity index 96%
rename from keras/layers/core/input_layer_test.py
rename to keras/src/layers/core/input_layer_test.py
index 2823486b155f..c75ec4ac9463 100644
--- a/keras/layers/core/input_layer_test.py
+++ b/keras/src/layers/core/input_layer_test.py
@@ -1,10 +1,10 @@
 import numpy as np
 from absl.testing import parameterized

-from keras import backend
-from keras import testing
-from keras.backend import KerasTensor
-from keras.layers import InputLayer
+from keras.src import backend
+from keras.src import testing
+from keras.src.backend import KerasTensor
+from keras.src.layers import InputLayer


 class InputLayerTest(testing.TestCase, parameterized.TestCase):
diff --git a/keras/layers/core/lambda_layer.py b/keras/src/layers/core/lambda_layer.py
similarity index 97%
rename from keras/layers/core/lambda_layer.py
rename to keras/src/layers/core/lambda_layer.py
index 6ffb16ca7782..9980c6c35799 100644
--- a/keras/layers/core/lambda_layer.py
+++ b/keras/src/layers/core/lambda_layer.py
@@ -1,12 +1,12 @@
 import inspect
 import types

-from keras import backend
-from keras import tree
-from keras.api_export import keras_export
-from keras.layers.layer import Layer
-from keras.saving import serialization_lib
-from keras.utils import python_utils
+from keras.src import backend
+from keras.src import tree
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer
+from keras.src.saving import serialization_lib
+from keras.src.utils import python_utils


 @keras_export("keras.layers.Lambda")
diff --git a/keras/layers/core/lambda_layer_test.py b/keras/src/layers/core/lambda_layer_test.py
similarity index 97%
rename from keras/layers/core/lambda_layer_test.py
rename to keras/src/layers/core/lambda_layer_test.py
index d65cf8bbaf65..1f80bcb0206b 100644
--- a/keras/layers/core/lambda_layer_test.py
+++ b/keras/src/layers/core/lambda_layer_test.py
@@ -1,9 +1,9 @@
 import numpy as np
 import pytest

-from keras import layers
-from keras import ops
-from keras import testing
+from keras.src import layers
+from keras.src import ops
+from keras.src import testing


 class LambdaTest(testing.TestCase):
diff --git a/keras/layers/core/masking.py b/keras/src/layers/core/masking.py
similarity index 94%
rename from keras/layers/core/masking.py
rename to keras/src/layers/core/masking.py
index f7a05eb48b54..8658cdb2896b 100644
--- a/keras/layers/core/masking.py
+++ b/keras/src/layers/core/masking.py
@@ -1,7 +1,7 @@
-from keras import backend
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.layer import Layer
+from keras.src import backend
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer


 @keras_export("keras.layers.Masking")
diff --git a/keras/layers/core/masking_test.py b/keras/src/layers/core/masking_test.py
similarity index 94%
rename from keras/layers/core/masking_test.py
rename to keras/src/layers/core/masking_test.py
index 115783dbce24..2e17f047c78b 100644
--- a/keras/layers/core/masking_test.py
+++ b/keras/src/layers/core/masking_test.py
@@ -1,9 +1,9 @@
 import numpy as np
 import pytest

-from keras import layers
-from keras import models
-from keras import testing
+from keras.src import layers
+from keras.src import models
+from keras.src import testing


 class MaskingTest(testing.TestCase):
diff --git a/keras/layers/core/wrapper.py b/keras/src/layers/core/wrapper.py
similarity index 91%
rename from keras/layers/core/wrapper.py
rename to keras/src/layers/core/wrapper.py
index ee7f1347791c..ee98a70a0291 100644
--- a/keras/layers/core/wrapper.py
+++ b/keras/src/layers/core/wrapper.py
@@ -1,6 +1,6 @@
-from keras.api_export import keras_export
-from keras.layers.layer import Layer
-from keras.saving import serialization_lib
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer
+from keras.src.saving import serialization_lib


 @keras_export("keras.layers.Wrapper")
diff --git a/keras/layers/core/wrapper_test.py b/keras/src/layers/core/wrapper_test.py
similarity index 96%
rename from keras/layers/core/wrapper_test.py
rename to keras/src/layers/core/wrapper_test.py
index a01194591eec..9302ca784240 100644
--- a/keras/layers/core/wrapper_test.py
+++ b/keras/src/layers/core/wrapper_test.py
@@ -1,8 +1,8 @@
 import pytest

-from keras import layers
-from keras import ops
-from keras import testing
+from keras.src import layers
+from keras.src import ops
+from keras.src import testing


 class ExampleWrapper(layers.Wrapper):
diff --git a/keras/layers/input_spec.py b/keras/src/layers/input_spec.py
similarity index 98%
rename from keras/layers/input_spec.py
rename to keras/src/layers/input_spec.py
index 6f47bd36aa80..72084d5cbdec 100644
--- a/keras/layers/input_spec.py
+++ b/keras/src/layers/input_spec.py
@@ -1,6 +1,6 @@
-from keras import backend
-from keras import tree
-from keras.api_export import keras_export
+from keras.src import backend
+from keras.src import tree
+from keras.src.api_export import keras_export


 @keras_export(["keras.InputSpec", "keras.layers.InputSpec"])
diff --git a/keras/layers/layer.py b/keras/src/layers/layer.py
similarity index 98%
rename from keras/layers/layer.py
rename to keras/src/layers/layer.py
index db91e349b987..eb251122b930 100644
--- a/keras/layers/layer.py
+++ b/keras/src/layers/layer.py
@@ -21,34 +21,34 @@
 import warnings
 from functools import wraps

-from keras import backend
-from keras import constraints
-from keras import dtype_policies
-from keras import initializers
-from keras import regularizers
-from keras import tree
-from keras import utils
-from keras.api_export import keras_export
-from keras.backend import KerasTensor
-from keras.backend.common import global_state
-from keras.backend.common.name_scope import current_path
-from keras.distribution import distribution_lib
-from keras.layers import input_spec
-from keras.metrics.metric import Metric
-from keras.ops.operation import Operation
-from keras.utils import python_utils
-from keras.utils import summary_utils
-from keras.utils import traceback_utils
-from keras.utils import tracking
+from keras.src import backend
+from keras.src import constraints
+from keras.src import dtype_policies
+from keras.src import initializers
+from keras.src import regularizers
+from keras.src import tree
+from keras.src import utils
+from keras.src.api_export import keras_export
+from keras.src.backend import KerasTensor
+from keras.src.backend.common import global_state
+from keras.src.backend.common.name_scope import current_path
+from keras.src.distribution import distribution_lib
+from keras.src.layers import input_spec
+from keras.src.metrics.metric import Metric
+from keras.src.ops.operation import Operation
+from keras.src.utils import python_utils
+from keras.src.utils import summary_utils
+from keras.src.utils import traceback_utils
+from keras.src.utils import tracking

 if backend.backend() == "tensorflow":
-    from keras.backend.tensorflow.layer import TFLayer as BackendLayer
+    from keras.src.backend.tensorflow.layer import TFLayer as BackendLayer
 elif backend.backend() == "jax":
-    from keras.backend.jax.layer import JaxLayer as BackendLayer
+    from keras.src.backend.jax.layer import JaxLayer as BackendLayer
 elif backend.backend() == "torch":
-    from keras.backend.torch.layer import TorchLayer as BackendLayer
+    from keras.src.backend.torch.layer import TorchLayer as BackendLayer
 elif backend.backend() == "numpy":
-    from keras.backend.numpy.layer import NumpyLayer as BackendLayer
+    from keras.src.backend.numpy.layer import NumpyLayer as BackendLayer
 else:
     raise RuntimeError(
         f"Backend '{backend.backend()}' must implement a layer mixin class."
diff --git a/keras/layers/layer_test.py b/keras/src/layers/layer_test.py
similarity index 99%
rename from keras/layers/layer_test.py
rename to keras/src/layers/layer_test.py
index 0e8ca4548df5..ad274da84af2 100644
--- a/keras/layers/layer_test.py
+++ b/keras/src/layers/layer_test.py
@@ -1,13 +1,13 @@
 import numpy as np
 import pytest

-from keras import backend
-from keras import dtype_policies
-from keras import layers
-from keras import metrics
-from keras import models
-from keras import ops
-from keras import testing
+from keras.src import backend
+from keras.src import dtype_policies
+from keras.src import layers
+from keras.src import metrics
+from keras.src import models
+from keras.src import ops
+from keras.src import testing


 class LayerTest(testing.TestCase):
diff --git a/keras/layers/merging/__init__.py b/keras/src/layers/merging/__init__.py
similarity index 100%
rename from keras/layers/merging/__init__.py
rename to keras/src/layers/merging/__init__.py
diff --git a/keras/layers/merging/add.py b/keras/src/layers/merging/add.py
similarity index 94%
rename from keras/layers/merging/add.py
rename to keras/src/layers/merging/add.py
index 968f7f374017..bf5f1b2a6aac 100644
--- a/keras/layers/merging/add.py
+++ b/keras/src/layers/merging/add.py
@@ -1,6 +1,6 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.merging.base_merge import Merge
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.merging.base_merge import Merge


 @keras_export("keras.layers.Add")
diff --git a/keras/layers/merging/average.py b/keras/src/layers/merging/average.py
similarity index 94%
rename from keras/layers/merging/average.py
rename to keras/src/layers/merging/average.py
index 5763c99a6dd1..f90f75beead0 100644
--- a/keras/layers/merging/average.py
+++ b/keras/src/layers/merging/average.py
@@ -1,6 +1,6 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.merging.base_merge import Merge
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.merging.base_merge import Merge


 @keras_export("keras.layers.Average")
diff --git a/keras/layers/merging/base_merge.py b/keras/src/layers/merging/base_merge.py
similarity index 98%
rename from keras/layers/merging/base_merge.py
rename to keras/src/layers/merging/base_merge.py
index ae00f8121f62..69591e8cd074 100644
--- a/keras/layers/merging/base_merge.py
+++ b/keras/src/layers/merging/base_merge.py
@@ -1,7 +1,7 @@
-from keras import backend
-from keras import ops
-from keras.backend.common.keras_tensor import KerasTensor
-from keras.layers.layer import Layer
+from keras.src import backend
+from keras.src import ops
+from keras.src.backend.common.keras_tensor import KerasTensor
+from keras.src.layers.layer import Layer


 class Merge(Layer):
diff --git a/keras/layers/merging/concatenate.py b/keras/src/layers/merging/concatenate.py
similarity index 98%
rename from keras/layers/merging/concatenate.py
rename to keras/src/layers/merging/concatenate.py
index 37f9536d8d57..9c1d26ae8d5f 100644
--- a/keras/layers/merging/concatenate.py
+++ b/keras/src/layers/merging/concatenate.py
@@ -1,6 +1,6 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.merging.base_merge import Merge
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.merging.base_merge import Merge


 @keras_export("keras.layers.Concatenate")
diff --git a/keras/layers/merging/dot.py b/keras/src/layers/merging/dot.py
similarity index 98%
rename from keras/layers/merging/dot.py
rename to keras/src/layers/merging/dot.py
index 944ebac9ad65..e580269bef67 100644
--- a/keras/layers/merging/dot.py
+++ b/keras/src/layers/merging/dot.py
@@ -1,7 +1,7 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.merging.base_merge import Merge
-from keras.utils.numerical_utils import normalize
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.merging.base_merge import Merge
+from keras.src.utils.numerical_utils import normalize


 def batch_dot(x, y, axes=None):
diff --git a/keras/layers/merging/maximum.py b/keras/src/layers/merging/maximum.py
similarity index 94%
rename from keras/layers/merging/maximum.py
rename to keras/src/layers/merging/maximum.py
index fa71c1314ce1..47734a8470d5 100644
--- a/keras/layers/merging/maximum.py
+++ b/keras/src/layers/merging/maximum.py
@@ -1,6 +1,6 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.merging.base_merge import Merge
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.merging.base_merge import Merge


 @keras_export("keras.layers.Maximum")
diff --git a/keras/layers/merging/merging_test.py b/keras/src/layers/merging/merging_test.py
similarity index 98%
rename from keras/layers/merging/merging_test.py
rename to keras/src/layers/merging/merging_test.py
index 1518ae23d3d7..1419dc855b83 100644
--- a/keras/layers/merging/merging_test.py
+++ b/keras/src/layers/merging/merging_test.py
@@ -2,10 +2,10 @@
 import pytest
 from absl.testing import parameterized

-from keras import backend
-from keras import layers
-from keras import models
-from keras import testing
+from keras.src import backend
+from keras.src import layers
+from keras.src import models
+from keras.src import testing


 def np_dot(a, b, axes):
diff --git a/keras/layers/merging/minimum.py b/keras/src/layers/merging/minimum.py
similarity index 94%
rename from keras/layers/merging/minimum.py
rename to keras/src/layers/merging/minimum.py
index bca89abd609f..19137f08a5b3 100644
--- a/keras/layers/merging/minimum.py
+++ b/keras/src/layers/merging/minimum.py
@@ -1,6 +1,6 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.merging.base_merge import Merge
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.merging.base_merge import Merge


 @keras_export("keras.layers.Minimum")
diff --git a/keras/layers/merging/multiply.py b/keras/src/layers/merging/multiply.py
similarity index 94%
rename from keras/layers/merging/multiply.py
rename to keras/src/layers/merging/multiply.py
index 929b51191603..d908429d1c5b 100644
--- a/keras/layers/merging/multiply.py
+++ b/keras/src/layers/merging/multiply.py
@@ -1,6 +1,6 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.merging.base_merge import Merge
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.merging.base_merge import Merge


 @keras_export("keras.layers.Multiply")
diff --git a/keras/layers/merging/subtract.py b/keras/src/layers/merging/subtract.py
similarity index 95%
rename from keras/layers/merging/subtract.py
rename to keras/src/layers/merging/subtract.py
index 4dc7839172b0..78036adaf233 100644
--- a/keras/layers/merging/subtract.py
+++ b/keras/src/layers/merging/subtract.py
@@ -1,6 +1,6 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.merging.base_merge import Merge
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.merging.base_merge import Merge


 @keras_export("keras.layers.Subtract")
diff --git a/keras/layers/normalization/__init__.py b/keras/src/layers/normalization/__init__.py
similarity index 100%
rename from keras/layers/normalization/__init__.py
rename to keras/src/layers/normalization/__init__.py
diff --git a/keras/layers/normalization/batch_normalization.py b/keras/src/layers/normalization/batch_normalization.py
similarity index 97%
rename from keras/layers/normalization/batch_normalization.py
rename to keras/src/layers/normalization/batch_normalization.py
index d752e8d1a64f..ecbd1a453926 100644
--- a/keras/layers/normalization/batch_normalization.py
+++ b/keras/src/layers/normalization/batch_normalization.py
@@ -1,12 +1,12 @@
-from keras import backend
-from keras import constraints
-from keras import initializers
-from keras import ops
-from keras import regularizers
-from keras.api_export import keras_export
-from keras.backend import standardize_dtype
-from keras.layers.input_spec import InputSpec
-from keras.layers.layer import Layer
+from keras.src import backend
+from keras.src import constraints
+from keras.src import initializers
+from keras.src import ops
+from keras.src import regularizers
+from keras.src.api_export import keras_export
+from keras.src.backend import standardize_dtype
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer


 @keras_export("keras.layers.BatchNormalization")
diff --git a/keras/layers/normalization/batch_normalization_test.py b/keras/src/layers/normalization/batch_normalization_test.py
similarity index 97%
rename from keras/layers/normalization/batch_normalization_test.py
rename to keras/src/layers/normalization/batch_normalization_test.py
index 1a1d1b10c96d..b0ccdfd0288f 100644
--- a/keras/layers/normalization/batch_normalization_test.py
+++ b/keras/src/layers/normalization/batch_normalization_test.py
@@ -2,12 +2,12 @@
 import pytest
 from absl.testing import parameterized

-from keras import backend
-from keras import layers
-from keras import ops
-from keras import testing
-from keras.losses import MeanSquaredError
-from keras.models import Model
+from keras.src import backend
+from keras.src import layers
+from keras.src import ops
+from keras.src import testing
+from keras.src.losses import MeanSquaredError
+from keras.src.models import Model


 class BatchNormalizationTest(testing.TestCase, parameterized.TestCase):
diff --git a/keras/layers/normalization/group_normalization.py b/keras/src/layers/normalization/group_normalization.py
similarity index 96%
rename from keras/layers/normalization/group_normalization.py
rename to keras/src/layers/normalization/group_normalization.py
index a04b672c358a..f70fb69f3ed7 100644
--- a/keras/layers/normalization/group_normalization.py
+++ b/keras/src/layers/normalization/group_normalization.py
@@ -1,10 +1,10 @@
-from keras import constraints
-from keras import initializers
-from keras import ops
-from keras import regularizers
-from keras.api_export import keras_export
-from keras.layers.input_spec import InputSpec
-from keras.layers.layer import Layer
+from keras.src import constraints
+from keras.src import initializers
+from keras.src import ops
+from keras.src import regularizers
+from keras.src.api_export import keras_export
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer

@keras_export("keras.layers.GroupNormalization") diff --git a/keras/layers/normalization/group_normalization_test.py b/keras/src/layers/normalization/group_normalization_test.py similarity index 97% rename from keras/layers/normalization/group_normalization_test.py rename to keras/src/layers/normalization/group_normalization_test.py index c836b8930643..76e4eae280a8 100644 --- a/keras/layers/normalization/group_normalization_test.py +++ b/keras/src/layers/normalization/group_normalization_test.py @@ -1,10 +1,10 @@ import numpy as np import pytest -from keras import constraints -from keras import layers -from keras import regularizers -from keras import testing +from keras.src import constraints +from keras.src import layers +from keras.src import regularizers +from keras.src import testing class GroupNormalizationTest(testing.TestCase): diff --git a/keras/layers/normalization/layer_normalization.py b/keras/src/layers/normalization/layer_normalization.py similarity index 97% rename from keras/layers/normalization/layer_normalization.py rename to keras/src/layers/normalization/layer_normalization.py index c26d008b0f69..854a071932b1 100644 --- a/keras/layers/normalization/layer_normalization.py +++ b/keras/src/layers/normalization/layer_normalization.py @@ -1,9 +1,9 @@ -from keras import constraints -from keras import initializers -from keras import ops -from keras import regularizers -from keras.api_export import keras_export -from keras.layers.layer import Layer +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer @keras_export("keras.layers.LayerNormalization") diff --git a/keras/layers/normalization/layer_normalization_test.py b/keras/src/layers/normalization/layer_normalization_test.py similarity index 96% rename from keras/layers/normalization/layer_normalization_test.py rename to keras/src/layers/normalization/layer_normalization_test.py index 87550ff7aa3a..6afbd5435618 100644 --- a/keras/layers/normalization/layer_normalization_test.py +++ b/keras/src/layers/normalization/layer_normalization_test.py @@ -1,11 +1,11 @@ import numpy as np import pytest -from keras import backend -from keras import layers -from keras import ops -from keras import regularizers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import ops +from keras.src import regularizers +from keras.src import testing class LayerNormalizationTest(testing.TestCase): diff --git a/keras/layers/normalization/spectral_normalization.py b/keras/src/layers/normalization/spectral_normalization.py similarity index 94% rename from keras/layers/normalization/spectral_normalization.py rename to keras/src/layers/normalization/spectral_normalization.py index 84471125b49d..727d6bb58dbd 100644 --- a/keras/layers/normalization/spectral_normalization.py +++ b/keras/src/layers/normalization/spectral_normalization.py @@ -1,9 +1,9 @@ -from keras import initializers -from keras import ops -from keras.api_export import keras_export -from keras.layers import Wrapper -from keras.layers.input_spec import InputSpec -from keras.utils.numerical_utils import normalize +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers import Wrapper +from keras.src.layers.input_spec import InputSpec +from keras.src.utils.numerical_utils import normalize 
@keras_export("keras.layers.SpectralNormalization") diff --git a/keras/layers/normalization/spectral_normalization_test.py b/keras/src/layers/normalization/spectral_normalization_test.py similarity index 94% rename from keras/layers/normalization/spectral_normalization_test.py rename to keras/src/layers/normalization/spectral_normalization_test.py index 632edd20ecbe..b3cc47d8d9f0 100644 --- a/keras/layers/normalization/spectral_normalization_test.py +++ b/keras/src/layers/normalization/spectral_normalization_test.py @@ -1,11 +1,11 @@ import numpy as np import pytest -from keras import backend -from keras import initializers -from keras import layers -from keras import models -from keras import testing +from keras.src import backend +from keras.src import initializers +from keras.src import layers +from keras.src import models +from keras.src import testing class SpectralNormalizationTest(testing.TestCase): diff --git a/keras/layers/normalization/unit_normalization.py b/keras/src/layers/normalization/unit_normalization.py similarity index 93% rename from keras/layers/normalization/unit_normalization.py rename to keras/src/layers/normalization/unit_normalization.py index 09b5a34e2721..1dbf97c74fe1 100644 --- a/keras/layers/normalization/unit_normalization.py +++ b/keras/src/layers/normalization/unit_normalization.py @@ -1,6 +1,6 @@ -from keras import ops -from keras.api_export import keras_export -from keras.layers.layer import Layer +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer @keras_export("keras.layers.UnitNormalization") diff --git a/keras/layers/normalization/unit_normalization_test.py b/keras/src/layers/normalization/unit_normalization_test.py similarity index 95% rename from keras/layers/normalization/unit_normalization_test.py rename to keras/src/layers/normalization/unit_normalization_test.py index 591e03311ad2..ea9201fb7e68 100644 --- a/keras/layers/normalization/unit_normalization_test.py +++ b/keras/src/layers/normalization/unit_normalization_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing def squared_l2_norm(x): diff --git a/keras/layers/pooling/__init__.py b/keras/src/layers/pooling/__init__.py similarity index 100% rename from keras/layers/pooling/__init__.py rename to keras/src/layers/pooling/__init__.py diff --git a/keras/layers/pooling/average_pooling1d.py b/keras/src/layers/pooling/average_pooling1d.py similarity index 96% rename from keras/layers/pooling/average_pooling1d.py rename to keras/src/layers/pooling/average_pooling1d.py index fb5ee068bfb1..43b91f0f2ace 100644 --- a/keras/layers/pooling/average_pooling1d.py +++ b/keras/src/layers/pooling/average_pooling1d.py @@ -1,5 +1,5 @@ -from keras.api_export import keras_export -from keras.layers.pooling.base_pooling import BasePooling +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling @keras_export(["keras.layers.AveragePooling1D", "keras.layers.AvgPool1D"]) diff --git a/keras/layers/pooling/average_pooling2d.py b/keras/src/layers/pooling/average_pooling2d.py similarity index 97% rename from keras/layers/pooling/average_pooling2d.py rename to keras/src/layers/pooling/average_pooling2d.py index 16bb8ee250f6..778c40191f5d 100644 --- a/keras/layers/pooling/average_pooling2d.py +++ 
@@ -1,5 +1,5 @@
-from keras.api_export import keras_export
-from keras.layers.pooling.base_pooling import BasePooling
+from keras.src.api_export import keras_export
+from keras.src.layers.pooling.base_pooling import BasePooling


 @keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
diff --git a/keras/layers/pooling/average_pooling3d.py b/keras/src/layers/pooling/average_pooling3d.py
similarity index 96%
rename from keras/layers/pooling/average_pooling3d.py
rename to keras/src/layers/pooling/average_pooling3d.py
index 9d6da60a86e1..ed4d2269459c 100644
--- a/keras/layers/pooling/average_pooling3d.py
+++ b/keras/src/layers/pooling/average_pooling3d.py
@@ -1,5 +1,5 @@
-from keras.api_export import keras_export
-from keras.layers.pooling.base_pooling import BasePooling
+from keras.src.api_export import keras_export
+from keras.src.layers.pooling.base_pooling import BasePooling


 @keras_export(["keras.layers.AveragePooling3D", "keras.layers.AvgPool3D"])
diff --git a/keras/layers/pooling/average_pooling_test.py b/keras/src/layers/pooling/average_pooling_test.py
similarity index 99%
rename from keras/layers/pooling/average_pooling_test.py
rename to keras/src/layers/pooling/average_pooling_test.py
index 6256876b6d6a..ae76d2c77f79 100644
--- a/keras/layers/pooling/average_pooling_test.py
+++ b/keras/src/layers/pooling/average_pooling_test.py
@@ -3,9 +3,9 @@
 from absl.testing import parameterized
 from numpy.lib.stride_tricks import as_strided

-from keras import backend
-from keras import layers
-from keras import testing
+from keras.src import backend
+from keras.src import layers
+from keras.src import testing


 def _same_padding(input_size, pool_size, stride):
diff --git a/keras/layers/pooling/base_global_pooling.py b/keras/src/layers/pooling/base_global_pooling.py
similarity index 91%
rename from keras/layers/pooling/base_global_pooling.py
rename to keras/src/layers/pooling/base_global_pooling.py
index da7a0fcd9d42..260e1d8eba37 100644
--- a/keras/layers/pooling/base_global_pooling.py
+++ b/keras/src/layers/pooling/base_global_pooling.py
@@ -1,6 +1,6 @@
-from keras import backend
-from keras.layers.input_spec import InputSpec
-from keras.layers.layer import Layer
+from keras.src import backend
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer


 class BaseGlobalPooling(Layer):
diff --git a/keras/layers/pooling/base_pooling.py b/keras/src/layers/pooling/base_pooling.py
similarity index 89%
rename from keras/layers/pooling/base_pooling.py
rename to keras/src/layers/pooling/base_pooling.py
index 85751086cc96..e2c85394f731 100644
--- a/keras/layers/pooling/base_pooling.py
+++ b/keras/src/layers/pooling/base_pooling.py
@@ -1,9 +1,9 @@
-from keras import backend
-from keras import ops
-from keras.layers.input_spec import InputSpec
-from keras.layers.layer import Layer
-from keras.ops.operation_utils import compute_pooling_output_shape
-from keras.utils import argument_validation
+from keras.src import backend
+from keras.src import ops
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+from keras.src.ops.operation_utils import compute_pooling_output_shape
+from keras.src.utils import argument_validation


 class BasePooling(Layer):
diff --git a/keras/layers/pooling/global_average_pooling1d.py b/keras/src/layers/pooling/global_average_pooling1d.py
similarity index 94%
rename from keras/layers/pooling/global_average_pooling1d.py
rename to keras/src/layers/pooling/global_average_pooling1d.py
index f5b4d0a0886c..6db5fb923c8c 100644
--- a/keras/layers/pooling/global_average_pooling1d.py
+++ b/keras/src/layers/pooling/global_average_pooling1d.py
@@ -1,7 +1,7 @@
-from keras import backend
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.pooling.base_global_pooling import BaseGlobalPooling
+from keras.src import backend
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling


 @keras_export(
diff --git a/keras/layers/pooling/global_average_pooling2d.py b/keras/src/layers/pooling/global_average_pooling2d.py
similarity index 94%
rename from keras/layers/pooling/global_average_pooling2d.py
rename to keras/src/layers/pooling/global_average_pooling2d.py
index d6147ea21f04..1536c3c302e8 100644
--- a/keras/layers/pooling/global_average_pooling2d.py
+++ b/keras/src/layers/pooling/global_average_pooling2d.py
@@ -1,6 +1,6 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.pooling.base_global_pooling import BaseGlobalPooling
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling


 @keras_export(
diff --git a/keras/layers/pooling/global_average_pooling3d.py b/keras/src/layers/pooling/global_average_pooling3d.py
similarity index 94%
rename from keras/layers/pooling/global_average_pooling3d.py
rename to keras/src/layers/pooling/global_average_pooling3d.py
index 8fa05eea5a87..14ffc5bfc4d0 100644
--- a/keras/layers/pooling/global_average_pooling3d.py
+++ b/keras/src/layers/pooling/global_average_pooling3d.py
@@ -1,6 +1,6 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.pooling.base_global_pooling import BaseGlobalPooling
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling


 @keras_export(
diff --git a/keras/layers/pooling/global_average_pooling_test.py b/keras/src/layers/pooling/global_average_pooling_test.py
similarity index 99%
rename from keras/layers/pooling/global_average_pooling_test.py
rename to keras/src/layers/pooling/global_average_pooling_test.py
index 4069abc75940..868601c31e65 100644
--- a/keras/layers/pooling/global_average_pooling_test.py
+++ b/keras/src/layers/pooling/global_average_pooling_test.py
@@ -2,8 +2,8 @@
 import pytest
 from absl.testing import parameterized

-from keras import layers
-from keras import testing
+from keras.src import layers
+from keras.src import testing


 @pytest.mark.requires_trainable_backend
diff --git a/keras/layers/pooling/global_max_pooling1d.py b/keras/src/layers/pooling/global_max_pooling1d.py
similarity index 93%
rename from keras/layers/pooling/global_max_pooling1d.py
rename to keras/src/layers/pooling/global_max_pooling1d.py
index 2956474cb004..7c6d9ff79692 100644
--- a/keras/layers/pooling/global_max_pooling1d.py
+++ b/keras/src/layers/pooling/global_max_pooling1d.py
@@ -1,6 +1,6 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.pooling.base_global_pooling import BaseGlobalPooling
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling


 @keras_export(
diff --git a/keras/layers/pooling/global_max_pooling2d.py b/keras/src/layers/pooling/global_max_pooling2d.py
similarity index 94%
rename from keras/layers/pooling/global_max_pooling2d.py
rename to keras/src/layers/pooling/global_max_pooling2d.py
index 66a117cd58f0..289ebe0a87d6 100644
--- a/keras/layers/pooling/global_max_pooling2d.py
+++ b/keras/src/layers/pooling/global_max_pooling2d.py
@@ -1,6 +1,6 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.pooling.base_global_pooling import BaseGlobalPooling
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling


 @keras_export(
diff --git a/keras/layers/pooling/global_max_pooling3d.py b/keras/src/layers/pooling/global_max_pooling3d.py
similarity index 94%
rename from keras/layers/pooling/global_max_pooling3d.py
rename to keras/src/layers/pooling/global_max_pooling3d.py
index fb4598356d8f..07e1eb065bc7 100644
--- a/keras/layers/pooling/global_max_pooling3d.py
+++ b/keras/src/layers/pooling/global_max_pooling3d.py
@@ -1,6 +1,6 @@
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.pooling.base_global_pooling import BaseGlobalPooling
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling


 @keras_export(
diff --git a/keras/layers/pooling/global_max_pooling_test.py b/keras/src/layers/pooling/global_max_pooling_test.py
similarity index 98%
rename from keras/layers/pooling/global_max_pooling_test.py
rename to keras/src/layers/pooling/global_max_pooling_test.py
index 6b413fd17461..b2d5cb6ada39 100644
--- a/keras/layers/pooling/global_max_pooling_test.py
+++ b/keras/src/layers/pooling/global_max_pooling_test.py
@@ -2,8 +2,8 @@
 import pytest
 from absl.testing import parameterized

-from keras import layers
-from keras import testing
+from keras.src import layers
+from keras.src import testing


 @pytest.mark.requires_trainable_backend
diff --git a/keras/layers/pooling/max_pooling1d.py b/keras/src/layers/pooling/max_pooling1d.py
similarity index 96%
rename from keras/layers/pooling/max_pooling1d.py
rename to keras/src/layers/pooling/max_pooling1d.py
index 74505b450e27..3636a984c764 100644
--- a/keras/layers/pooling/max_pooling1d.py
+++ b/keras/src/layers/pooling/max_pooling1d.py
@@ -1,5 +1,5 @@
-from keras.api_export import keras_export
-from keras.layers.pooling.base_pooling import BasePooling
+from keras.src.api_export import keras_export
+from keras.src.layers.pooling.base_pooling import BasePooling


 @keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
diff --git a/keras/layers/pooling/max_pooling2d.py b/keras/src/layers/pooling/max_pooling2d.py
similarity index 97%
rename from keras/layers/pooling/max_pooling2d.py
rename to keras/src/layers/pooling/max_pooling2d.py
index 8c0e4ee65204..d2189f9e841a 100644
--- a/keras/layers/pooling/max_pooling2d.py
+++ b/keras/src/layers/pooling/max_pooling2d.py
@@ -1,5 +1,5 @@
-from keras.api_export import keras_export
-from keras.layers.pooling.base_pooling import BasePooling
+from keras.src.api_export import keras_export
+from keras.src.layers.pooling.base_pooling import BasePooling


 @keras_export(["keras.layers.MaxPooling2D", "keras.layers.MaxPool2D"])
diff --git a/keras/layers/pooling/max_pooling3d.py b/keras/src/layers/pooling/max_pooling3d.py
similarity index 96%
rename from keras/layers/pooling/max_pooling3d.py
rename to keras/src/layers/pooling/max_pooling3d.py
index 9ddbb38c0f07..225df65fb1e2 100644
--- a/keras/layers/pooling/max_pooling3d.py
+++ b/keras/src/layers/pooling/max_pooling3d.py
@@ -1,5 +1,5 @@
-from keras.api_export import keras_export
-from keras.layers.pooling.base_pooling import BasePooling
+from keras.src.api_export import keras_export
+from keras.src.layers.pooling.base_pooling import BasePooling


 @keras_export(["keras.layers.MaxPooling3D", "keras.layers.MaxPool3D"])
diff --git a/keras/layers/pooling/max_pooling_test.py b/keras/src/layers/pooling/max_pooling_test.py
similarity index 99%
rename from keras/layers/pooling/max_pooling_test.py
rename to keras/src/layers/pooling/max_pooling_test.py
index 418a77f8327c..be1adb7ecfdb 100644
--- a/keras/layers/pooling/max_pooling_test.py
+++ b/keras/src/layers/pooling/max_pooling_test.py
@@ -3,8 +3,8 @@
 from absl.testing import parameterized
 from numpy.lib.stride_tricks import as_strided

-from keras import layers
-from keras import testing
+from keras.src import layers
+from keras.src import testing


 def _same_padding(input_size, pool_size, stride):
diff --git a/keras/layers/preprocessing/__init__.py b/keras/src/layers/preprocessing/__init__.py
similarity index 100%
rename from keras/layers/preprocessing/__init__.py
rename to keras/src/layers/preprocessing/__init__.py
diff --git a/keras/layers/preprocessing/audio_preprocessing.py b/keras/src/layers/preprocessing/audio_preprocessing.py
similarity index 99%
rename from keras/layers/preprocessing/audio_preprocessing.py
rename to keras/src/layers/preprocessing/audio_preprocessing.py
index 4c22977a8720..f91a4ccd8ceb 100644
--- a/keras/layers/preprocessing/audio_preprocessing.py
+++ b/keras/src/layers/preprocessing/audio_preprocessing.py
@@ -1,5 +1,5 @@
-from keras.api_export import keras_export
-from keras.layers.preprocessing.tf_data_layer import TFDataLayer
+from keras.src.api_export import keras_export
+from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer

 # mel spectrum constants.
 _MEL_BREAK_FREQUENCY_HERTZ = 700.0
diff --git a/keras/layers/preprocessing/audio_preprocessing_test.py b/keras/src/layers/preprocessing/audio_preprocessing_test.py
similarity index 98%
rename from keras/layers/preprocessing/audio_preprocessing_test.py
rename to keras/src/layers/preprocessing/audio_preprocessing_test.py
index 3f45fcc3f1de..745794e11cd9 100644
--- a/keras/layers/preprocessing/audio_preprocessing_test.py
+++ b/keras/src/layers/preprocessing/audio_preprocessing_test.py
@@ -3,8 +3,8 @@
 from absl.testing import parameterized
 from tensorflow import data as tf_data

-from keras import layers
-from keras import testing
+from keras.src import layers
+from keras.src import testing


 class MelSpectrogramTest(testing.TestCase, parameterized.TestCase):
diff --git a/keras/layers/preprocessing/category_encoding.py b/keras/src/layers/preprocessing/category_encoding.py
similarity index 97%
rename from keras/layers/preprocessing/category_encoding.py
rename to keras/src/layers/preprocessing/category_encoding.py
index 2feb929dee2c..bf30752eaedd 100644
--- a/keras/layers/preprocessing/category_encoding.py
+++ b/keras/src/layers/preprocessing/category_encoding.py
@@ -1,7 +1,7 @@
-from keras.api_export import keras_export
-from keras.backend import KerasTensor
-from keras.layers.preprocessing.tf_data_layer import TFDataLayer
-from keras.utils import backend_utils
+from keras.src.api_export import keras_export
+from keras.src.backend import KerasTensor
+from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
+from keras.src.utils import backend_utils


 @keras_export("keras.layers.CategoryEncoding")
diff --git a/keras/layers/preprocessing/category_encoding_test.py b/keras/src/layers/preprocessing/category_encoding_test.py
similarity index 99%
rename from keras/layers/preprocessing/category_encoding_test.py
rename to keras/src/layers/preprocessing/category_encoding_test.py
index da55f9d35297..5ac8ebff7f07 100644
--- a/keras/layers/preprocessing/category_encoding_test.py
+++ b/keras/src/layers/preprocessing/category_encoding_test.py
@@ -2,9 +2,9 @@
 from absl.testing import parameterized
 from tensorflow import data as tf_data

-from keras import backend
-from keras import layers
-from keras import testing
+from keras.src import backend
+from keras.src import layers
+from keras.src import testing

 TEST_CASES = [{"testcase_name": "dense", "sparse": False}]
 if backend.SUPPORTS_SPARSE_TENSORS:
diff --git a/keras/layers/preprocessing/center_crop.py b/keras/src/layers/preprocessing/center_crop.py
similarity index 96%
rename from keras/layers/preprocessing/center_crop.py
rename to keras/src/layers/preprocessing/center_crop.py
index 3a2dadfbeaec..e5fbbaa8a333 100644
--- a/keras/layers/preprocessing/center_crop.py
+++ b/keras/src/layers/preprocessing/center_crop.py
@@ -1,7 +1,7 @@
-from keras import backend
-from keras.api_export import keras_export
-from keras.layers.preprocessing.tf_data_layer import TFDataLayer
-from keras.utils import image_utils
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
+from keras.src.utils import image_utils


 @keras_export("keras.layers.CenterCrop")
diff --git a/keras/layers/preprocessing/center_crop_test.py b/keras/src/layers/preprocessing/center_crop_test.py
similarity index 98%
rename from keras/layers/preprocessing/center_crop_test.py
rename to keras/src/layers/preprocessing/center_crop_test.py
index 8fbb429b6f4c..4652f9abee16 100644
--- a/keras/layers/preprocessing/center_crop_test.py
+++ b/keras/src/layers/preprocessing/center_crop_test.py
@@ -3,9 +3,9 @@
 from absl.testing import parameterized
 from tensorflow import data as tf_data

-from keras import backend
-from keras import layers
-from keras import testing
+from keras.src import backend
+from keras.src import layers
+from keras.src import testing


 class CenterCropTest(testing.TestCase, parameterized.TestCase):
diff --git a/keras/layers/preprocessing/discretization.py b/keras/src/layers/preprocessing/discretization.py
similarity index 97%
rename from keras/layers/preprocessing/discretization.py
rename to keras/src/layers/preprocessing/discretization.py
index e8a3cbbe1405..7b40857d8371 100644
--- a/keras/layers/preprocessing/discretization.py
+++ b/keras/src/layers/preprocessing/discretization.py
@@ -1,11 +1,11 @@
 import numpy as np

-from keras import backend
-from keras.api_export import keras_export
-from keras.layers.preprocessing.tf_data_layer import TFDataLayer
-from keras.utils import argument_validation
-from keras.utils import numerical_utils
-from keras.utils.module_utils import tensorflow as tf
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
+from keras.src.utils import argument_validation
+from keras.src.utils import numerical_utils
+from keras.src.utils.module_utils import tensorflow as tf


 @keras_export("keras.layers.Discretization")
diff --git a/keras/layers/preprocessing/discretization_test.py b/keras/src/layers/preprocessing/discretization_test.py
similarity index 96%
rename from keras/layers/preprocessing/discretization_test.py
rename to keras/src/layers/preprocessing/discretization_test.py
index 8677282de7fc..e33dd6a706d1 100644
--- a/keras/layers/preprocessing/discretization_test.py
+++ b/keras/src/layers/preprocessing/discretization_test.py
@@ -5,11 +5,11 @@
 from absl.testing import parameterized
 from tensorflow import data as tf_data

-from keras import backend
-from keras import layers
-from keras import models
-from keras import testing
-from keras.saving import saving_api
+from keras.src import backend
+from keras.src import layers
+from keras.src import models
+from keras.src import testing
+from keras.src.saving import saving_api


 class DiscretizationTest(testing.TestCase, parameterized.TestCase):
@@ -169,7 +169,7 @@ def test_saving(self):
         reason="Sparse tensor only works in TensorFlow",
     )
     def test_sparse_output(self, output_mode, input_array, expected_output):
-        from keras.utils.module_utils import tensorflow as tf
+        from keras.src.utils.module_utils import tensorflow as tf

         x = np.array(input_array)
         layer = layers.Discretization(
diff --git a/keras/layers/preprocessing/feature_space.py b/keras/src/layers/preprocessing/feature_space.py
similarity index 98%
rename from keras/layers/preprocessing/feature_space.py
rename to keras/src/layers/preprocessing/feature_space.py
index c2b0a9b15581..f66b0328f4c1 100644
--- a/keras/layers/preprocessing/feature_space.py
+++ b/keras/src/layers/preprocessing/feature_space.py
@@ -1,13 +1,13 @@
-from keras import backend
-from keras import layers
-from keras import tree
-from keras.api_export import keras_export
-from keras.layers.layer import Layer
-from keras.saving import saving_lib
-from keras.saving import serialization_lib
-from keras.utils import backend_utils
-from keras.utils.module_utils import tensorflow as tf
-from keras.utils.naming import auto_name
+from keras.src import backend
+from keras.src import layers
+from keras.src import tree
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer
+from keras.src.saving import saving_lib
+from keras.src.saving import serialization_lib
+from keras.src.utils import backend_utils
+from keras.src.utils.module_utils import tensorflow as tf
+from keras.src.utils.naming import auto_name


 class Cross:
@@ -277,7 +277,7 @@ def feature(cls, dtype, preprocessor, output_mode):

     @classmethod
     def float(cls, name=None):
-        from keras.layers.core import identity
+        from keras.src.layers.core import identity

         name = name or auto_name("float")
         preprocessor = identity.Identity(
diff --git a/keras/layers/preprocessing/feature_space_test.py b/keras/src/layers/preprocessing/feature_space_test.py
similarity index 98%
rename from keras/layers/preprocessing/feature_space_test.py
rename to keras/src/layers/preprocessing/feature_space_test.py
index 84738520e0ab..475ad09b319d 100644
--- a/keras/layers/preprocessing/feature_space_test.py
+++ b/keras/src/layers/preprocessing/feature_space_test.py
@@ -3,13 +3,13 @@
 import pytest
 from tensorflow import data as tf_data

-from keras import backend
-from keras import layers
-from keras import models
-from keras import ops
-from keras import testing
-from keras.layers.preprocessing import feature_space
-from keras.saving import saving_api
+from keras.src import backend
+from keras.src import layers
+from keras.src import models
+from keras.src import ops
+from keras.src import testing
+from keras.src.layers.preprocessing import feature_space
+from keras.src.saving import saving_api


 class FeatureSpaceTest(testing.TestCase):
diff --git a/keras/layers/preprocessing/hashed_crossing.py b/keras/src/layers/preprocessing/hashed_crossing.py
similarity index 96%
rename from keras/layers/preprocessing/hashed_crossing.py
rename to keras/src/layers/preprocessing/hashed_crossing.py
index 1daebf8059fb..f2182bcd898b 100644
--- a/keras/layers/preprocessing/hashed_crossing.py
+++ b/keras/src/layers/preprocessing/hashed_crossing.py
@@ -1,10 +1,10 @@
-from keras import backend
-from keras.api_export import keras_export
-from keras.layers.layer import Layer
-from keras.utils import argument_validation
-from keras.utils import backend_utils
-from keras.utils import tf_utils
-from keras.utils.module_utils import tensorflow as tf
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer
+from keras.src.utils import argument_validation
+from keras.src.utils import backend_utils
+from keras.src.utils import tf_utils
+from keras.src.utils.module_utils import tensorflow as tf


 @keras_export("keras.layers.HashedCrossing")
diff --git a/keras/layers/preprocessing/hashed_crossing_test.py b/keras/src/layers/preprocessing/hashed_crossing_test.py
similarity index 98%
rename from keras/layers/preprocessing/hashed_crossing_test.py
rename to keras/src/layers/preprocessing/hashed_crossing_test.py
index 53a4c0390c29..d599e4a1c60d 100644
--- a/keras/layers/preprocessing/hashed_crossing_test.py
+++ b/keras/src/layers/preprocessing/hashed_crossing_test.py
@@ -2,9 +2,9 @@
 import pytest
 import tensorflow as tf

-from keras import backend
-from keras import layers
-from keras import testing
+from keras.src import backend
+from keras.src import layers
+from keras.src import testing


 class HashedCrossingTest(testing.TestCase):
diff --git a/keras/layers/preprocessing/hashing.py b/keras/src/layers/preprocessing/hashing.py
similarity index 97%
rename from keras/layers/preprocessing/hashing.py
rename to keras/src/layers/preprocessing/hashing.py
index 832dd234fbab..3a05b11ed418 100644
--- a/keras/layers/preprocessing/hashing.py
+++ b/keras/src/layers/preprocessing/hashing.py
@@ -1,9 +1,9 @@
-from keras import backend
-from keras.api_export import keras_export
-from keras.layers.layer import Layer
-from keras.utils import backend_utils
-from keras.utils import tf_utils
-from keras.utils.module_utils import tensorflow as tf
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer
+from keras.src.utils import backend_utils
+from keras.src.utils import tf_utils
+from keras.src.utils.module_utils import tensorflow as tf


 @keras_export("keras.layers.Hashing")
diff --git a/keras/layers/preprocessing/hashing_test.py b/keras/src/layers/preprocessing/hashing_test.py
similarity index 99%
rename from keras/layers/preprocessing/hashing_test.py
rename to keras/src/layers/preprocessing/hashing_test.py
index cabd567aa6bb..d5836d9741ac 100644
--- a/keras/layers/preprocessing/hashing_test.py
+++ b/keras/src/layers/preprocessing/hashing_test.py
@@ -5,11 +5,11 @@
 import tensorflow as tf
 from absl.testing import parameterized

-from keras import backend
-from keras import layers
-from keras import models
-from keras import testing
-from keras.saving import load_model
+from keras.src import backend
+from keras.src import layers
+from keras.src import models
+from keras.src import testing
+from keras.src.saving import load_model


 class ArrayLike:
diff --git a/keras/layers/preprocessing/index_lookup.py b/keras/src/layers/preprocessing/index_lookup.py
similarity index 99%
rename from keras/layers/preprocessing/index_lookup.py
rename to keras/src/layers/preprocessing/index_lookup.py
index a99651f62ea7..91436fce49d3 100644
--- a/keras/layers/preprocessing/index_lookup.py
+++ b/keras/src/layers/preprocessing/index_lookup.py
@@ -2,11 +2,11 @@

 import numpy as np

-from keras import backend
-from keras.layers.layer import Layer
-from keras.utils import argument_validation
-from keras.utils import tf_utils
-from keras.utils.module_utils import tensorflow as tf
+from keras.src import backend
+from keras.src.layers.layer import Layer
+from keras.src.utils import argument_validation
+from keras.src.utils import tf_utils
+from keras.src.utils.module_utils import tensorflow as tf


 class IndexLookup(Layer):
diff --git a/keras/layers/preprocessing/index_lookup_test.py b/keras/src/layers/preprocessing/index_lookup_test.py
similarity index 99%
rename from keras/layers/preprocessing/index_lookup_test.py
rename to keras/src/layers/preprocessing/index_lookup_test.py
index 1a0ef9428beb..1cdda22c8c00 100644
--- a/keras/layers/preprocessing/index_lookup_test.py
+++ b/keras/src/layers/preprocessing/index_lookup_test.py
@@ -5,11 +5,11 @@
 from absl.testing import parameterized
 from tensorflow import data as tf_data

-from keras import backend
-from keras import layers
-from keras import models
-from keras import testing
-from keras.saving import saving_api
+from keras.src import backend
+from keras.src import layers
+from keras.src import models
+from keras.src import testing
+from keras.src.saving import saving_api


 @pytest.mark.skipif(
diff --git a/keras/layers/preprocessing/integer_lookup.py b/keras/src/layers/preprocessing/integer_lookup.py
similarity index 98%
rename from keras/layers/preprocessing/integer_lookup.py
rename to keras/src/layers/preprocessing/integer_lookup.py
index 121eedf45061..bf357552e57e 100644
--- a/keras/layers/preprocessing/integer_lookup.py
b/keras/src/layers/preprocessing/integer_lookup.py @@ -1,10 +1,10 @@ import numpy as np -from keras import backend -from keras.api_export import keras_export -from keras.layers.preprocessing.index_lookup import IndexLookup -from keras.utils import backend_utils -from keras.utils.module_utils import tensorflow as tf +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.index_lookup import IndexLookup +from keras.src.utils import backend_utils +from keras.src.utils.module_utils import tensorflow as tf @keras_export("keras.layers.IntegerLookup") diff --git a/keras/layers/preprocessing/integer_lookup_test.py b/keras/src/layers/preprocessing/integer_lookup_test.py similarity index 97% rename from keras/layers/preprocessing/integer_lookup_test.py rename to keras/src/layers/preprocessing/integer_lookup_test.py index ede05bedf29a..d1c6a732cbe9 100644 --- a/keras/layers/preprocessing/integer_lookup_test.py +++ b/keras/src/layers/preprocessing/integer_lookup_test.py @@ -1,9 +1,9 @@ import numpy as np from tensorflow import data as tf_data -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class IntegerLookupTest(testing.TestCase): diff --git a/keras/layers/preprocessing/normalization.py b/keras/src/layers/preprocessing/normalization.py similarity index 98% rename from keras/layers/preprocessing/normalization.py rename to keras/src/layers/preprocessing/normalization.py index 76a4c37972d2..5ace2f5e76cd 100644 --- a/keras/layers/preprocessing/normalization.py +++ b/keras/src/layers/preprocessing/normalization.py @@ -2,11 +2,11 @@ import numpy as np -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.layers.layer import Layer -from keras.utils.module_utils import tensorflow as tf +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.utils.module_utils import tensorflow as tf @keras_export("keras.layers.Normalization") diff --git a/keras/layers/preprocessing/normalization_test.py b/keras/src/layers/preprocessing/normalization_test.py similarity index 97% rename from keras/layers/preprocessing/normalization_test.py rename to keras/src/layers/preprocessing/normalization_test.py index c2db784f95c9..2c480c701e61 100644 --- a/keras/layers/preprocessing/normalization_test.py +++ b/keras/src/layers/preprocessing/normalization_test.py @@ -3,9 +3,9 @@ from absl.testing import parameterized from tensorflow import data as tf_data -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class NormalizationTest(testing.TestCase, parameterized.TestCase): @@ -96,7 +96,7 @@ def test_normalization_adapt(self, input_type): reason="Test symbolic call for torch meta device.", ) def test_call_on_meta_device_after_built(self): - from keras.backend.torch import core + from keras.src.backend.torch import core layer = layers.Normalization() data = np.random.random((32, 4)) diff --git a/keras/layers/preprocessing/random_brightness.py b/keras/src/layers/preprocessing/random_brightness.py similarity index 97% rename from keras/layers/preprocessing/random_brightness.py rename to keras/src/layers/preprocessing/random_brightness.py index 16816c66d544..8ba25e39cd16 100644 --- 
a/keras/layers/preprocessing/random_brightness.py +++ b/keras/src/layers/preprocessing/random_brightness.py @@ -1,6 +1,6 @@ -from keras.api_export import keras_export -from keras.layers.preprocessing.tf_data_layer import TFDataLayer -from keras.random.seed_generator import SeedGenerator +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src.random.seed_generator import SeedGenerator @keras_export("keras.layers.RandomBrightness") diff --git a/keras/layers/preprocessing/random_brightness_test.py b/keras/src/layers/preprocessing/random_brightness_test.py similarity index 97% rename from keras/layers/preprocessing/random_brightness_test.py rename to keras/src/layers/preprocessing/random_brightness_test.py index 129ddd946642..547d3dc265e0 100644 --- a/keras/layers/preprocessing/random_brightness_test.py +++ b/keras/src/layers/preprocessing/random_brightness_test.py @@ -2,9 +2,9 @@ import pytest from tensorflow import data as tf_data -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class RandomBrightnessTest(testing.TestCase): diff --git a/keras/layers/preprocessing/random_contrast.py b/keras/src/layers/preprocessing/random_contrast.py similarity index 95% rename from keras/layers/preprocessing/random_contrast.py rename to keras/src/layers/preprocessing/random_contrast.py index c9d52727b6d2..d29f9fba3a80 100644 --- a/keras/layers/preprocessing/random_contrast.py +++ b/keras/src/layers/preprocessing/random_contrast.py @@ -1,6 +1,6 @@ -from keras.api_export import keras_export -from keras.layers.preprocessing.tf_data_layer import TFDataLayer -from keras.random.seed_generator import SeedGenerator +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src.random.seed_generator import SeedGenerator @keras_export("keras.layers.RandomContrast") diff --git a/keras/layers/preprocessing/random_contrast_test.py b/keras/src/layers/preprocessing/random_contrast_test.py similarity index 94% rename from keras/layers/preprocessing/random_contrast_test.py rename to keras/src/layers/preprocessing/random_contrast_test.py index 95a9d1d85ce2..48eac8ec89aa 100644 --- a/keras/layers/preprocessing/random_contrast_test.py +++ b/keras/src/layers/preprocessing/random_contrast_test.py @@ -2,9 +2,9 @@ import pytest from tensorflow import data as tf_data -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class RandomContrastTest(testing.TestCase): diff --git a/keras/layers/preprocessing/random_crop.py b/keras/src/layers/preprocessing/random_crop.py similarity index 96% rename from keras/layers/preprocessing/random_crop.py rename to keras/src/layers/preprocessing/random_crop.py index b9b6bd8a883a..4eec8ae077e3 100644 --- a/keras/layers/preprocessing/random_crop.py +++ b/keras/src/layers/preprocessing/random_crop.py @@ -1,8 +1,8 @@ -from keras import backend -from keras.api_export import keras_export -from keras.layers.preprocessing.tf_data_layer import TFDataLayer -from keras.random.seed_generator import SeedGenerator -from keras.utils import image_utils +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src.random.seed_generator 
import SeedGenerator +from keras.src.utils import image_utils @keras_export("keras.layers.RandomCrop") diff --git a/keras/layers/preprocessing/random_crop_test.py b/keras/src/layers/preprocessing/random_crop_test.py similarity index 97% rename from keras/layers/preprocessing/random_crop_test.py rename to keras/src/layers/preprocessing/random_crop_test.py index 9ba94ae2ad87..53b88265a974 100644 --- a/keras/layers/preprocessing/random_crop_test.py +++ b/keras/src/layers/preprocessing/random_crop_test.py @@ -1,9 +1,9 @@ import numpy as np from tensorflow import data as tf_data -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class RandomCropTest(testing.TestCase): diff --git a/keras/layers/preprocessing/random_flip.py b/keras/src/layers/preprocessing/random_flip.py similarity index 95% rename from keras/layers/preprocessing/random_flip.py rename to keras/src/layers/preprocessing/random_flip.py index df0127432295..040a3dcb6c63 100644 --- a/keras/layers/preprocessing/random_flip.py +++ b/keras/src/layers/preprocessing/random_flip.py @@ -1,6 +1,6 @@ -from keras.api_export import keras_export -from keras.layers.preprocessing.tf_data_layer import TFDataLayer -from keras.random.seed_generator import SeedGenerator +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src.random.seed_generator import SeedGenerator HORIZONTAL = "horizontal" VERTICAL = "vertical" diff --git a/keras/layers/preprocessing/random_flip_test.py b/keras/src/layers/preprocessing/random_flip_test.py similarity index 97% rename from keras/layers/preprocessing/random_flip_test.py rename to keras/src/layers/preprocessing/random_flip_test.py index b143979aa309..8a938c507093 100644 --- a/keras/layers/preprocessing/random_flip_test.py +++ b/keras/src/layers/preprocessing/random_flip_test.py @@ -4,10 +4,10 @@ from absl.testing import parameterized from tensorflow import data as tf_data -from keras import backend -from keras import layers -from keras import testing -from keras import utils +from keras.src import backend +from keras.src import layers +from keras.src import testing +from keras.src import utils class MockedRandomFlip(layers.RandomFlip): diff --git a/keras/layers/preprocessing/random_rotation.py b/keras/src/layers/preprocessing/random_rotation.py similarity index 97% rename from keras/layers/preprocessing/random_rotation.py rename to keras/src/layers/preprocessing/random_rotation.py index 308fa40b8ec6..c52acbcbc76d 100644 --- a/keras/layers/preprocessing/random_rotation.py +++ b/keras/src/layers/preprocessing/random_rotation.py @@ -1,9 +1,9 @@ import numpy as np -from keras import backend -from keras.api_export import keras_export -from keras.layers.preprocessing.tf_data_layer import TFDataLayer -from keras.random.seed_generator import SeedGenerator +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src.random.seed_generator import SeedGenerator @keras_export("keras.layers.RandomRotation") diff --git a/keras/layers/preprocessing/random_rotation_test.py b/keras/src/layers/preprocessing/random_rotation_test.py similarity index 96% rename from keras/layers/preprocessing/random_rotation_test.py rename to keras/src/layers/preprocessing/random_rotation_test.py index 4def62a640e8..b5c6da7fbc45 100644 --- 
a/keras/layers/preprocessing/random_rotation_test.py +++ b/keras/src/layers/preprocessing/random_rotation_test.py @@ -2,9 +2,9 @@ from absl.testing import parameterized from tensorflow import data as tf_data -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class RandomRotationTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/preprocessing/random_translation.py b/keras/src/layers/preprocessing/random_translation.py similarity index 98% rename from keras/layers/preprocessing/random_translation.py rename to keras/src/layers/preprocessing/random_translation.py index ae2db36a551a..695bc31519ef 100644 --- a/keras/layers/preprocessing/random_translation.py +++ b/keras/src/layers/preprocessing/random_translation.py @@ -1,7 +1,7 @@ -from keras import backend -from keras.api_export import keras_export -from keras.layers.preprocessing.tf_data_layer import TFDataLayer -from keras.random.seed_generator import SeedGenerator +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src.random.seed_generator import SeedGenerator @keras_export("keras.layers.RandomTranslation") diff --git a/keras/layers/preprocessing/random_translation_test.py b/keras/src/layers/preprocessing/random_translation_test.py similarity index 99% rename from keras/layers/preprocessing/random_translation_test.py rename to keras/src/layers/preprocessing/random_translation_test.py index 7545dd96ff3f..0f926ec3ede7 100644 --- a/keras/layers/preprocessing/random_translation_test.py +++ b/keras/src/layers/preprocessing/random_translation_test.py @@ -2,9 +2,9 @@ from absl.testing import parameterized from tensorflow import data as tf_data -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class RandomTranslationTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/preprocessing/random_zoom.py b/keras/src/layers/preprocessing/random_zoom.py similarity index 98% rename from keras/layers/preprocessing/random_zoom.py rename to keras/src/layers/preprocessing/random_zoom.py index db8ab5fcbb3e..332da8e0abaa 100644 --- a/keras/layers/preprocessing/random_zoom.py +++ b/keras/src/layers/preprocessing/random_zoom.py @@ -1,7 +1,7 @@ -from keras import backend -from keras.api_export import keras_export -from keras.layers.preprocessing.tf_data_layer import TFDataLayer -from keras.random.seed_generator import SeedGenerator +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src.random.seed_generator import SeedGenerator @keras_export("keras.layers.RandomZoom") diff --git a/keras/layers/preprocessing/random_zoom_test.py b/keras/src/layers/preprocessing/random_zoom_test.py similarity index 97% rename from keras/layers/preprocessing/random_zoom_test.py rename to keras/src/layers/preprocessing/random_zoom_test.py index 926f1951242e..fe5ca61710ae 100644 --- a/keras/layers/preprocessing/random_zoom_test.py +++ b/keras/src/layers/preprocessing/random_zoom_test.py @@ -3,10 +3,10 @@ from absl.testing import parameterized from tensorflow import data as tf_data -from keras import backend -from keras import layers -from keras import models -from keras import testing +from 
keras.src import backend +from keras.src import layers +from keras.src import models +from keras.src import testing class RandomZoomTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/preprocessing/rescaling.py b/keras/src/layers/preprocessing/rescaling.py similarity index 93% rename from keras/layers/preprocessing/rescaling.py rename to keras/src/layers/preprocessing/rescaling.py index 78046fde9f28..a7131eaabd56 100644 --- a/keras/layers/preprocessing/rescaling.py +++ b/keras/src/layers/preprocessing/rescaling.py @@ -1,6 +1,6 @@ -from keras import backend -from keras.api_export import keras_export -from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer @keras_export("keras.layers.Rescaling") diff --git a/keras/layers/preprocessing/rescaling_test.py b/keras/src/layers/preprocessing/rescaling_test.py similarity index 97% rename from keras/layers/preprocessing/rescaling_test.py rename to keras/src/layers/preprocessing/rescaling_test.py index 34ae51714031..83a55e1f8a15 100644 --- a/keras/layers/preprocessing/rescaling_test.py +++ b/keras/src/layers/preprocessing/rescaling_test.py @@ -2,9 +2,9 @@ import pytest from tensorflow import data as tf_data -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class RescalingTest(testing.TestCase): diff --git a/keras/layers/preprocessing/resizing.py b/keras/src/layers/preprocessing/resizing.py similarity index 96% rename from keras/layers/preprocessing/resizing.py rename to keras/src/layers/preprocessing/resizing.py index 44425c435798..6a6c5bab6a8c 100644 --- a/keras/layers/preprocessing/resizing.py +++ b/keras/src/layers/preprocessing/resizing.py @@ -1,6 +1,6 @@ -from keras import backend -from keras.api_export import keras_export -from keras.layers.preprocessing.tf_data_layer import TFDataLayer +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer @keras_export("keras.layers.Resizing") diff --git a/keras/layers/preprocessing/resizing_test.py b/keras/src/layers/preprocessing/resizing_test.py similarity index 98% rename from keras/layers/preprocessing/resizing_test.py rename to keras/src/layers/preprocessing/resizing_test.py index afb71af3f0a2..d5b9a718d376 100644 --- a/keras/layers/preprocessing/resizing_test.py +++ b/keras/src/layers/preprocessing/resizing_test.py @@ -3,10 +3,10 @@ from absl.testing import parameterized from tensorflow import data as tf_data -from keras import Sequential -from keras import backend -from keras import layers -from keras import testing +from keras.src import Sequential +from keras.src import backend +from keras.src import layers +from keras.src import testing class ResizingTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/preprocessing/string_lookup.py b/keras/src/layers/preprocessing/string_lookup.py similarity index 98% rename from keras/layers/preprocessing/string_lookup.py rename to keras/src/layers/preprocessing/string_lookup.py index 63cf54db7084..ff4fa9074806 100644 --- a/keras/layers/preprocessing/string_lookup.py +++ b/keras/src/layers/preprocessing/string_lookup.py @@ -1,10 +1,10 @@ import numpy as np -from keras import backend -from keras.api_export import keras_export -from 
keras.layers.preprocessing.index_lookup import IndexLookup -from keras.utils import backend_utils -from keras.utils.module_utils import tensorflow as tf +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.index_lookup import IndexLookup +from keras.src.utils import backend_utils +from keras.src.utils.module_utils import tensorflow as tf @keras_export("keras.layers.StringLookup") diff --git a/keras/layers/preprocessing/string_lookup_test.py b/keras/src/layers/preprocessing/string_lookup_test.py similarity index 95% rename from keras/layers/preprocessing/string_lookup_test.py rename to keras/src/layers/preprocessing/string_lookup_test.py index 0f33f3d3b1cc..be6c3e56be8a 100644 --- a/keras/layers/preprocessing/string_lookup_test.py +++ b/keras/src/layers/preprocessing/string_lookup_test.py @@ -1,9 +1,9 @@ import numpy as np from tensorflow import data as tf_data -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class StringLookupTest(testing.TestCase): diff --git a/keras/layers/preprocessing/text_vectorization.py b/keras/src/layers/preprocessing/text_vectorization.py similarity index 98% rename from keras/layers/preprocessing/text_vectorization.py rename to keras/src/layers/preprocessing/text_vectorization.py index 7ddc0e24fed8..ebe828a34ab5 100644 --- a/keras/layers/preprocessing/text_vectorization.py +++ b/keras/src/layers/preprocessing/text_vectorization.py @@ -1,15 +1,15 @@ import numpy as np -from keras import backend -from keras.api_export import keras_export -from keras.layers.layer import Layer -from keras.layers.preprocessing.index_lookup import listify_tensors -from keras.layers.preprocessing.string_lookup import StringLookup -from keras.saving import serialization_lib -from keras.utils import argument_validation -from keras.utils import backend_utils -from keras.utils import tf_utils -from keras.utils.module_utils import tensorflow as tf +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.layers.preprocessing.index_lookup import listify_tensors +from keras.src.layers.preprocessing.string_lookup import StringLookup +from keras.src.saving import serialization_lib +from keras.src.utils import argument_validation +from keras.src.utils import backend_utils +from keras.src.utils import tf_utils +from keras.src.utils.module_utils import tensorflow as tf @keras_export("keras.layers.TextVectorization") diff --git a/keras/layers/preprocessing/text_vectorization_test.py b/keras/src/layers/preprocessing/text_vectorization_test.py similarity index 96% rename from keras/layers/preprocessing/text_vectorization_test.py rename to keras/src/layers/preprocessing/text_vectorization_test.py index 633013adc6e5..1f641e5a92de 100644 --- a/keras/layers/preprocessing/text_vectorization_test.py +++ b/keras/src/layers/preprocessing/text_vectorization_test.py @@ -5,12 +5,12 @@ import tensorflow as tf from tensorflow import data as tf_data -from keras import Sequential -from keras import backend -from keras import layers -from keras import models -from keras import saving -from keras import testing +from keras.src import Sequential +from keras.src import backend +from keras.src import layers +from keras.src import models +from keras.src import saving +from keras.src import testing class TextVectorizationTest(testing.TestCase): diff --git 
a/keras/layers/preprocessing/tf_data_layer.py b/keras/src/layers/preprocessing/tf_data_layer.py similarity index 89% rename from keras/layers/preprocessing/tf_data_layer.py rename to keras/src/layers/preprocessing/tf_data_layer.py index 74cf6515ce3c..f91b84ad9049 100644 --- a/keras/layers/preprocessing/tf_data_layer.py +++ b/keras/src/layers/preprocessing/tf_data_layer.py @@ -1,9 +1,9 @@ -import keras.backend -from keras import tree -from keras.layers.layer import Layer -from keras.random.seed_generator import SeedGenerator -from keras.utils import backend_utils -from keras.utils import tracking +import keras.src.backend +from keras.src import tree +from keras.src.layers.layer import Layer +from keras.src.random.seed_generator import SeedGenerator +from keras.src.utils import backend_utils +from keras.src.utils import tracking class TFDataLayer(Layer): diff --git a/keras/layers/regularization/__init__.py b/keras/src/layers/regularization/__init__.py similarity index 100% rename from keras/layers/regularization/__init__.py rename to keras/src/layers/regularization/__init__.py diff --git a/keras/layers/regularization/activity_regularization.py b/keras/src/layers/regularization/activity_regularization.py similarity index 89% rename from keras/layers/regularization/activity_regularization.py rename to keras/src/layers/regularization/activity_regularization.py index c3908ec74f55..66e724963ca3 100644 --- a/keras/layers/regularization/activity_regularization.py +++ b/keras/src/layers/regularization/activity_regularization.py @@ -1,6 +1,6 @@ -from keras import regularizers -from keras.api_export import keras_export -from keras.layers.layer import Layer +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer @keras_export("keras.layers.ActivityRegularization") diff --git a/keras/layers/regularization/activity_regularization_test.py b/keras/src/layers/regularization/activity_regularization_test.py similarity index 92% rename from keras/layers/regularization/activity_regularization_test.py rename to keras/src/layers/regularization/activity_regularization_test.py index bc92635596de..b3334dadd42a 100644 --- a/keras/layers/regularization/activity_regularization_test.py +++ b/keras/src/layers/regularization/activity_regularization_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from keras import layers -from keras.testing import test_case +from keras.src import layers +from keras.src.testing import test_case class ActivityRegularizationTest(test_case.TestCase): diff --git a/keras/layers/regularization/alpha_dropout.py b/keras/src/layers/regularization/alpha_dropout.py similarity index 96% rename from keras/layers/regularization/alpha_dropout.py rename to keras/src/layers/regularization/alpha_dropout.py index 55b55f7d89b4..9bb7ac7afc80 100644 --- a/keras/layers/regularization/alpha_dropout.py +++ b/keras/src/layers/regularization/alpha_dropout.py @@ -1,7 +1,7 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.layers.layer import Layer +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer @keras_export("keras.layers.AlphaDropout") diff --git a/keras/layers/regularization/alpha_dropout_test.py b/keras/src/layers/regularization/alpha_dropout_test.py similarity index 95% rename from keras/layers/regularization/alpha_dropout_test.py rename to 
keras/src/layers/regularization/alpha_dropout_test.py index bcde257818c4..56d2362d8eef 100644 --- a/keras/layers/regularization/alpha_dropout_test.py +++ b/keras/src/layers/regularization/alpha_dropout_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class AlphaDropoutTest(testing.TestCase): diff --git a/keras/layers/regularization/dropout.py b/keras/src/layers/regularization/dropout.py similarity index 96% rename from keras/layers/regularization/dropout.py rename to keras/src/layers/regularization/dropout.py index 677a545a2c69..8db64b3a539c 100644 --- a/keras/layers/regularization/dropout.py +++ b/keras/src/layers/regularization/dropout.py @@ -1,6 +1,6 @@ -from keras import backend -from keras.api_export import keras_export -from keras.layers.layer import Layer +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer @keras_export("keras.layers.Dropout") diff --git a/keras/layers/regularization/dropout_test.py b/keras/src/layers/regularization/dropout_test.py similarity index 95% rename from keras/layers/regularization/dropout_test.py rename to keras/src/layers/regularization/dropout_test.py index 5cff84f7f0f4..90f5cbeaa058 100644 --- a/keras/layers/regularization/dropout_test.py +++ b/keras/src/layers/regularization/dropout_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class DropoutTest(testing.TestCase): diff --git a/keras/layers/regularization/gaussian_dropout.py b/keras/src/layers/regularization/gaussian_dropout.py similarity index 93% rename from keras/layers/regularization/gaussian_dropout.py rename to keras/src/layers/regularization/gaussian_dropout.py index 2450960e5fcf..e7e8ea3467ed 100644 --- a/keras/layers/regularization/gaussian_dropout.py +++ b/keras/src/layers/regularization/gaussian_dropout.py @@ -1,9 +1,9 @@ import math -from keras import backend -from keras import layers -from keras import ops -from keras.api_export import keras_export +from keras.src import backend +from keras.src import layers +from keras.src import ops +from keras.src.api_export import keras_export @keras_export("keras.layers.GaussianDropout") diff --git a/keras/layers/regularization/gaussian_dropout_test.py b/keras/src/layers/regularization/gaussian_dropout_test.py similarity index 91% rename from keras/layers/regularization/gaussian_dropout_test.py rename to keras/src/layers/regularization/gaussian_dropout_test.py index 1d01281ee369..33f6b3759b95 100644 --- a/keras/layers/regularization/gaussian_dropout_test.py +++ b/keras/src/layers/regularization/gaussian_dropout_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class GaussianDropoutTest(testing.TestCase): diff --git a/keras/layers/regularization/gaussian_noise.py b/keras/src/layers/regularization/gaussian_noise.py similarity index 93% rename from keras/layers/regularization/gaussian_noise.py rename to keras/src/layers/regularization/gaussian_noise.py index e5eaaba71a09..89ab962f6df6 100644 --- a/keras/layers/regularization/gaussian_noise.py +++ 
b/keras/src/layers/regularization/gaussian_noise.py @@ -1,7 +1,7 @@ -from keras import backend -from keras import layers -from keras import ops -from keras.api_export import keras_export +from keras.src import backend +from keras.src import layers +from keras.src import ops +from keras.src.api_export import keras_export @keras_export("keras.layers.GaussianNoise") diff --git a/keras/layers/regularization/gaussian_noise_test.py b/keras/src/layers/regularization/gaussian_noise_test.py similarity index 91% rename from keras/layers/regularization/gaussian_noise_test.py rename to keras/src/layers/regularization/gaussian_noise_test.py index d23a78f2483e..4aa2784dc05c 100644 --- a/keras/layers/regularization/gaussian_noise_test.py +++ b/keras/src/layers/regularization/gaussian_noise_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class GaussianNoiseTest(testing.TestCase): diff --git a/keras/layers/regularization/spatial_dropout.py b/keras/src/layers/regularization/spatial_dropout.py similarity index 97% rename from keras/layers/regularization/spatial_dropout.py rename to keras/src/layers/regularization/spatial_dropout.py index c7ac9a3d0c14..5f440164f40d 100644 --- a/keras/layers/regularization/spatial_dropout.py +++ b/keras/src/layers/regularization/spatial_dropout.py @@ -1,8 +1,8 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.regularization.dropout import Dropout +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.regularization.dropout import Dropout class BaseSpatialDropout(Dropout): diff --git a/keras/layers/regularization/spatial_dropout_test.py b/keras/src/layers/regularization/spatial_dropout_test.py similarity index 97% rename from keras/layers/regularization/spatial_dropout_test.py rename to keras/src/layers/regularization/spatial_dropout_test.py index 09ef67fca5b4..cc0581072cfd 100644 --- a/keras/layers/regularization/spatial_dropout_test.py +++ b/keras/src/layers/regularization/spatial_dropout_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras import backend -from keras import layers -from keras.testing import test_case +from keras.src import backend +from keras.src import layers +from keras.src.testing import test_case class SpatialDropoutTest(test_case.TestCase): diff --git a/keras/layers/reshaping/__init__.py b/keras/src/layers/reshaping/__init__.py similarity index 100% rename from keras/layers/reshaping/__init__.py rename to keras/src/layers/reshaping/__init__.py diff --git a/keras/layers/reshaping/cropping1d.py b/keras/src/layers/reshaping/cropping1d.py similarity index 93% rename from keras/layers/reshaping/cropping1d.py rename to keras/src/layers/reshaping/cropping1d.py index f5a99da9cf11..abce618dff65 100644 --- a/keras/layers/reshaping/cropping1d.py +++ b/keras/src/layers/reshaping/cropping1d.py @@ -1,7 +1,7 @@ -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.utils import argument_validation +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils 
import argument_validation @keras_export("keras.layers.Cropping1D") diff --git a/keras/layers/reshaping/cropping1d_test.py b/keras/src/layers/reshaping/cropping1d_test.py similarity index 97% rename from keras/layers/reshaping/cropping1d_test.py rename to keras/src/layers/reshaping/cropping1d_test.py index 085466b0412e..cceb5922d92e 100644 --- a/keras/layers/reshaping/cropping1d_test.py +++ b/keras/src/layers/reshaping/cropping1d_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras import layers -from keras import ops -from keras import testing +from keras.src import layers +from keras.src import ops +from keras.src import testing class Cropping1DTest(testing.TestCase): diff --git a/keras/layers/reshaping/cropping2d.py b/keras/src/layers/reshaping/cropping2d.py similarity index 97% rename from keras/layers/reshaping/cropping2d.py rename to keras/src/layers/reshaping/cropping2d.py index 9194f565a233..aec6813a861f 100644 --- a/keras/layers/reshaping/cropping2d.py +++ b/keras/src/layers/reshaping/cropping2d.py @@ -1,8 +1,8 @@ -from keras import backend -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.utils import argument_validation +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation @keras_export("keras.layers.Cropping2D") diff --git a/keras/layers/reshaping/cropping2d_test.py b/keras/src/layers/reshaping/cropping2d_test.py similarity index 97% rename from keras/layers/reshaping/cropping2d_test.py rename to keras/src/layers/reshaping/cropping2d_test.py index fbf88695fd8f..001f1a466f11 100644 --- a/keras/layers/reshaping/cropping2d_test.py +++ b/keras/src/layers/reshaping/cropping2d_test.py @@ -2,10 +2,10 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import ops -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import ops +from keras.src import testing class Cropping2DTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/reshaping/cropping3d.py b/keras/src/layers/reshaping/cropping3d.py similarity index 98% rename from keras/layers/reshaping/cropping3d.py rename to keras/src/layers/reshaping/cropping3d.py index 569432004087..724d0cf72635 100644 --- a/keras/layers/reshaping/cropping3d.py +++ b/keras/src/layers/reshaping/cropping3d.py @@ -1,8 +1,8 @@ -from keras import backend -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.utils import argument_validation +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation @keras_export("keras.layers.Cropping3D") diff --git a/keras/layers/reshaping/cropping3d_test.py b/keras/src/layers/reshaping/cropping3d_test.py similarity index 98% rename from keras/layers/reshaping/cropping3d_test.py rename to keras/src/layers/reshaping/cropping3d_test.py index 93aeaabe8447..90569711b412 100644 --- a/keras/layers/reshaping/cropping3d_test.py +++ b/keras/src/layers/reshaping/cropping3d_test.py @@ -2,10 +2,10 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras 
import layers -from keras import ops -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import ops +from keras.src import testing class Cropping3DTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/reshaping/flatten.py b/keras/src/layers/reshaping/flatten.py similarity index 91% rename from keras/layers/reshaping/flatten.py rename to keras/src/layers/reshaping/flatten.py index 0923f33da4b8..84aad840246c 100644 --- a/keras/layers/reshaping/flatten.py +++ b/keras/src/layers/reshaping/flatten.py @@ -1,11 +1,11 @@ import math -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.backend.common.keras_tensor import KerasTensor -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer @keras_export("keras.layers.Flatten") diff --git a/keras/layers/reshaping/flatten_test.py b/keras/src/layers/reshaping/flatten_test.py similarity index 97% rename from keras/layers/reshaping/flatten_test.py rename to keras/src/layers/reshaping/flatten_test.py index 921db5906ac7..00208bedaebf 100644 --- a/keras/layers/reshaping/flatten_test.py +++ b/keras/src/layers/reshaping/flatten_test.py @@ -2,10 +2,10 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import ops -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import ops +from keras.src import testing class FlattenTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/reshaping/permute.py b/keras/src/layers/reshaping/permute.py similarity index 89% rename from keras/layers/reshaping/permute.py rename to keras/src/layers/reshaping/permute.py index 30d52e4a7403..fce4eb2d328e 100644 --- a/keras/layers/reshaping/permute.py +++ b/keras/src/layers/reshaping/permute.py @@ -1,8 +1,8 @@ -from keras import ops -from keras.api_export import keras_export -from keras.backend.common.keras_tensor import KerasTensor -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer @keras_export("keras.layers.Permute") diff --git a/keras/layers/reshaping/permute_test.py b/keras/src/layers/reshaping/permute_test.py similarity index 95% rename from keras/layers/reshaping/permute_test.py rename to keras/src/layers/reshaping/permute_test.py index d01c63a99a2a..f165fbbf8a86 100644 --- a/keras/layers/reshaping/permute_test.py +++ b/keras/src/layers/reshaping/permute_test.py @@ -2,10 +2,10 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import ops -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import ops +from keras.src import testing class PermuteTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/reshaping/repeat_vector.py b/keras/src/layers/reshaping/repeat_vector.py similarity index 87% rename from keras/layers/reshaping/repeat_vector.py 
rename to keras/src/layers/reshaping/repeat_vector.py index 0e9507e10d38..d8914d10fce7 100644 --- a/keras/layers/reshaping/repeat_vector.py +++ b/keras/src/layers/reshaping/repeat_vector.py @@ -1,7 +1,7 @@ -from keras import ops -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer @keras_export("keras.layers.RepeatVector") diff --git a/keras/layers/reshaping/repeat_vector_test.py b/keras/src/layers/reshaping/repeat_vector_test.py similarity index 94% rename from keras/layers/reshaping/repeat_vector_test.py rename to keras/src/layers/reshaping/repeat_vector_test.py index 3baf10517211..3d1d1a59624a 100644 --- a/keras/layers/reshaping/repeat_vector_test.py +++ b/keras/src/layers/reshaping/repeat_vector_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras import layers -from keras import ops -from keras import testing +from keras.src import layers +from keras.src import ops +from keras.src import testing class FlattenTest(testing.TestCase): diff --git a/keras/layers/reshaping/reshape.py b/keras/src/layers/reshaping/reshape.py similarity index 90% rename from keras/layers/reshaping/reshape.py rename to keras/src/layers/reshaping/reshape.py index e052957534b8..c87e4bd7381b 100644 --- a/keras/layers/reshaping/reshape.py +++ b/keras/src/layers/reshaping/reshape.py @@ -1,8 +1,8 @@ -from keras import ops -from keras.api_export import keras_export -from keras.backend.common.keras_tensor import KerasTensor -from keras.layers.layer import Layer -from keras.ops import operation_utils +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.layers.layer import Layer +from keras.src.ops import operation_utils @keras_export("keras.layers.Reshape") diff --git a/keras/layers/reshaping/reshape_test.py b/keras/src/layers/reshaping/reshape_test.py similarity index 96% rename from keras/layers/reshaping/reshape_test.py rename to keras/src/layers/reshaping/reshape_test.py index 9e508f84dc96..453b13d84fb2 100644 --- a/keras/layers/reshaping/reshape_test.py +++ b/keras/src/layers/reshaping/reshape_test.py @@ -1,10 +1,10 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import testing -from keras.backend.common.keras_tensor import KerasTensor +from keras.src import backend +from keras.src import layers +from keras.src import testing +from keras.src.backend.common.keras_tensor import KerasTensor class ReshapeTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/reshaping/up_sampling1d.py b/keras/src/layers/reshaping/up_sampling1d.py similarity index 89% rename from keras/layers/reshaping/up_sampling1d.py rename to keras/src/layers/reshaping/up_sampling1d.py index 480ab7b5a336..bbb7657efe91 100644 --- a/keras/layers/reshaping/up_sampling1d.py +++ b/keras/src/layers/reshaping/up_sampling1d.py @@ -1,7 +1,7 @@ -from keras import ops -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer @keras_export("keras.layers.UpSampling1D") diff --git 
a/keras/layers/reshaping/up_sampling1d_test.py b/keras/src/layers/reshaping/up_sampling1d_test.py similarity index 94% rename from keras/layers/reshaping/up_sampling1d_test.py rename to keras/src/layers/reshaping/up_sampling1d_test.py index aa5f8c0dee43..978401fd7157 100644 --- a/keras/layers/reshaping/up_sampling1d_test.py +++ b/keras/src/layers/reshaping/up_sampling1d_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras import layers -from keras import testing -from keras.backend.common.keras_tensor import KerasTensor +from keras.src import layers +from keras.src import testing +from keras.src.backend.common.keras_tensor import KerasTensor class UpSamplingTest(testing.TestCase): diff --git a/keras/layers/reshaping/up_sampling2d.py b/keras/src/layers/reshaping/up_sampling2d.py similarity index 95% rename from keras/layers/reshaping/up_sampling2d.py rename to keras/src/layers/reshaping/up_sampling2d.py index 7ecc5c6a49e4..d9f5fa21c49c 100644 --- a/keras/layers/reshaping/up_sampling2d.py +++ b/keras/src/layers/reshaping/up_sampling2d.py @@ -1,9 +1,9 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.utils import argument_validation +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation @keras_export("keras.layers.UpSampling2D") diff --git a/keras/layers/reshaping/up_sampling2d_test.py b/keras/src/layers/reshaping/up_sampling2d_test.py similarity index 98% rename from keras/layers/reshaping/up_sampling2d_test.py rename to keras/src/layers/reshaping/up_sampling2d_test.py index feaefe3af931..680f9eac68f5 100644 --- a/keras/layers/reshaping/up_sampling2d_test.py +++ b/keras/src/layers/reshaping/up_sampling2d_test.py @@ -3,9 +3,9 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class UpSampling2dTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/reshaping/up_sampling3d.py b/keras/src/layers/reshaping/up_sampling3d.py similarity index 95% rename from keras/layers/reshaping/up_sampling3d.py rename to keras/src/layers/reshaping/up_sampling3d.py index f0f7aa6e5889..3b642e48ef6a 100644 --- a/keras/layers/reshaping/up_sampling3d.py +++ b/keras/src/layers/reshaping/up_sampling3d.py @@ -1,9 +1,9 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.utils import argument_validation +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation @keras_export("keras.layers.UpSampling3D") diff --git a/keras/layers/reshaping/up_sampling3d_test.py b/keras/src/layers/reshaping/up_sampling3d_test.py similarity index 98% rename from keras/layers/reshaping/up_sampling3d_test.py rename to keras/src/layers/reshaping/up_sampling3d_test.py index c14d2179452f..f2d6bc2d0eaf 100644 --- a/keras/layers/reshaping/up_sampling3d_test.py +++ 
b/keras/src/layers/reshaping/up_sampling3d_test.py @@ -2,9 +2,9 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class UpSampling3dTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/reshaping/zero_padding1d.py b/keras/src/layers/reshaping/zero_padding1d.py similarity index 89% rename from keras/layers/reshaping/zero_padding1d.py rename to keras/src/layers/reshaping/zero_padding1d.py index 9bc444763d69..2777423b7921 100644 --- a/keras/layers/reshaping/zero_padding1d.py +++ b/keras/src/layers/reshaping/zero_padding1d.py @@ -1,8 +1,8 @@ -from keras import ops -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.utils import argument_validation +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation @keras_export("keras.layers.ZeroPadding1D") diff --git a/keras/layers/reshaping/zero_padding1d_test.py b/keras/src/layers/reshaping/zero_padding1d_test.py similarity index 96% rename from keras/layers/reshaping/zero_padding1d_test.py rename to keras/src/layers/reshaping/zero_padding1d_test.py index bf2e3d4e5cbe..918cd133a777 100644 --- a/keras/layers/reshaping/zero_padding1d_test.py +++ b/keras/src/layers/reshaping/zero_padding1d_test.py @@ -1,8 +1,8 @@ import numpy as np from absl.testing import parameterized -from keras import layers -from keras import testing +from keras.src import layers +from keras.src import testing class ZeroPadding1DTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/reshaping/zero_padding2d.py b/keras/src/layers/reshaping/zero_padding2d.py similarity index 94% rename from keras/layers/reshaping/zero_padding2d.py rename to keras/src/layers/reshaping/zero_padding2d.py index 557b02f270e7..e5d88d16d76d 100644 --- a/keras/layers/reshaping/zero_padding2d.py +++ b/keras/src/layers/reshaping/zero_padding2d.py @@ -1,9 +1,9 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.utils import argument_validation +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation @keras_export("keras.layers.ZeroPadding2D") diff --git a/keras/layers/reshaping/zero_padding2d_test.py b/keras/src/layers/reshaping/zero_padding2d_test.py similarity index 97% rename from keras/layers/reshaping/zero_padding2d_test.py rename to keras/src/layers/reshaping/zero_padding2d_test.py index 391dd30d2325..404ee9b4b4e4 100644 --- a/keras/layers/reshaping/zero_padding2d_test.py +++ b/keras/src/layers/reshaping/zero_padding2d_test.py @@ -1,9 +1,9 @@ import numpy as np from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class ZeroPadding2DTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/reshaping/zero_padding3d.py 
b/keras/src/layers/reshaping/zero_padding3d.py similarity index 95% rename from keras/layers/reshaping/zero_padding3d.py rename to keras/src/layers/reshaping/zero_padding3d.py index 5b0191d0a102..87e39bf00060 100644 --- a/keras/layers/reshaping/zero_padding3d.py +++ b/keras/src/layers/reshaping/zero_padding3d.py @@ -1,9 +1,9 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.utils import argument_validation +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation @keras_export("keras.layers.ZeroPadding3D") diff --git a/keras/layers/reshaping/zero_padding3d_test.py b/keras/src/layers/reshaping/zero_padding3d_test.py similarity index 97% rename from keras/layers/reshaping/zero_padding3d_test.py rename to keras/src/layers/reshaping/zero_padding3d_test.py index 7a1f919586b8..bf6cd80c1153 100644 --- a/keras/layers/reshaping/zero_padding3d_test.py +++ b/keras/src/layers/reshaping/zero_padding3d_test.py @@ -1,9 +1,9 @@ import numpy as np from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import testing +from keras.src import backend +from keras.src import layers +from keras.src import testing class ZeroPadding3DTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/layers/rnn/__init__.py b/keras/src/layers/rnn/__init__.py similarity index 100% rename from keras/layers/rnn/__init__.py rename to keras/src/layers/rnn/__init__.py diff --git a/keras/layers/rnn/bidirectional.py b/keras/src/layers/rnn/bidirectional.py similarity index 98% rename from keras/layers/rnn/bidirectional.py rename to keras/src/layers/rnn/bidirectional.py index 9a2569fa1591..9d9d29d24602 100644 --- a/keras/layers/rnn/bidirectional.py +++ b/keras/src/layers/rnn/bidirectional.py @@ -1,11 +1,11 @@ import copy -from keras import ops -from keras import utils -from keras.api_export import keras_export -from keras.layers.core.wrapper import Wrapper -from keras.layers.layer import Layer -from keras.saving import serialization_lib +from keras.src import ops +from keras.src import utils +from keras.src.api_export import keras_export +from keras.src.layers.core.wrapper import Wrapper +from keras.src.layers.layer import Layer +from keras.src.saving import serialization_lib @keras_export("keras.layers.Bidirectional") diff --git a/keras/layers/rnn/bidirectional_test.py b/keras/src/layers/rnn/bidirectional_test.py similarity index 98% rename from keras/layers/rnn/bidirectional_test.py rename to keras/src/layers/rnn/bidirectional_test.py index 6391abdace2d..476965f935f6 100644 --- a/keras/layers/rnn/bidirectional_test.py +++ b/keras/src/layers/rnn/bidirectional_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras import initializers -from keras import layers -from keras import testing +from keras.src import initializers +from keras.src import layers +from keras.src import testing class SimpleRNNTest(testing.TestCase): diff --git a/keras/layers/rnn/conv_lstm.py b/keras/src/layers/rnn/conv_lstm.py similarity index 98% rename from keras/layers/rnn/conv_lstm.py rename to keras/src/layers/rnn/conv_lstm.py index e8e3ee3ffebe..d6a30b5b353e 100644 --- a/keras/layers/rnn/conv_lstm.py +++ b/keras/src/layers/rnn/conv_lstm.py @@ -1,16 +1,16 @@ -from 
keras import activations
-from keras import backend
-from keras import constraints
-from keras import initializers
-from keras import ops
-from keras import regularizers
-from keras import tree
-from keras.layers.input_spec import InputSpec
-from keras.layers.layer import Layer
-from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell
-from keras.layers.rnn.rnn import RNN
-from keras.ops import operation_utils
-from keras.utils import argument_validation
+from keras.src import activations
+from keras.src import backend
+from keras.src import constraints
+from keras.src import initializers
+from keras.src import ops
+from keras.src import regularizers
+from keras.src import tree
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
+from keras.src.layers.rnn.rnn import RNN
+from keras.src.ops import operation_utils
+from keras.src.utils import argument_validation


 class ConvLSTMCell(Layer, DropoutRNNCell):
diff --git a/keras/layers/rnn/conv_lstm1d.py b/keras/src/layers/rnn/conv_lstm1d.py
similarity index 98%
rename from keras/layers/rnn/conv_lstm1d.py
rename to keras/src/layers/rnn/conv_lstm1d.py
index a96cdbe778e6..d0ad56b5ce26 100644
--- a/keras/layers/rnn/conv_lstm1d.py
+++ b/keras/src/layers/rnn/conv_lstm1d.py
@@ -1,5 +1,5 @@
-from keras.api_export import keras_export
-from keras.layers.rnn.conv_lstm import ConvLSTM
+from keras.src.api_export import keras_export
+from keras.src.layers.rnn.conv_lstm import ConvLSTM


 @keras_export("keras.layers.ConvLSTM1D")
diff --git a/keras/layers/rnn/conv_lstm1d_test.py b/keras/src/layers/rnn/conv_lstm1d_test.py
similarity index 95%
rename from keras/layers/rnn/conv_lstm1d_test.py
rename to keras/src/layers/rnn/conv_lstm1d_test.py
index 89bec5a6b2db..b69cbf8b55aa 100644
--- a/keras/layers/rnn/conv_lstm1d_test.py
+++ b/keras/src/layers/rnn/conv_lstm1d_test.py
@@ -1,10 +1,10 @@
 import numpy as np
 import pytest

-from keras import backend
-from keras import initializers
-from keras import layers
-from keras import testing
+from keras.src import backend
+from keras.src import initializers
+from keras.src import layers
+from keras.src import testing


 class ConvLSTM1DTest(testing.TestCase):
diff --git a/keras/layers/rnn/conv_lstm2d.py b/keras/src/layers/rnn/conv_lstm2d.py
similarity index 98%
rename from keras/layers/rnn/conv_lstm2d.py
rename to keras/src/layers/rnn/conv_lstm2d.py
index 1819e5a9ba32..6837eea99298 100644
--- a/keras/layers/rnn/conv_lstm2d.py
+++ b/keras/src/layers/rnn/conv_lstm2d.py
@@ -1,5 +1,5 @@
-from keras.api_export import keras_export
-from keras.layers.rnn.conv_lstm import ConvLSTM
+from keras.src.api_export import keras_export
+from keras.src.layers.rnn.conv_lstm import ConvLSTM


 @keras_export("keras.layers.ConvLSTM2D")
diff --git a/keras/layers/rnn/conv_lstm2d_test.py b/keras/src/layers/rnn/conv_lstm2d_test.py
similarity index 96%
rename from keras/layers/rnn/conv_lstm2d_test.py
rename to keras/src/layers/rnn/conv_lstm2d_test.py
index 5c2bb6c0855c..b3846b64058c 100644
--- a/keras/layers/rnn/conv_lstm2d_test.py
+++ b/keras/src/layers/rnn/conv_lstm2d_test.py
@@ -1,10 +1,10 @@
 import numpy as np
 import pytest

-from keras import backend
-from keras import initializers
-from keras import layers
-from keras import testing
+from keras.src import backend
+from keras.src import initializers
+from keras.src import layers
+from keras.src import testing


 class ConvLSTM2DTest(testing.TestCase):
diff --git a/keras/layers/rnn/conv_lstm3d.py b/keras/src/layers/rnn/conv_lstm3d.py
similarity index 98%
rename from keras/layers/rnn/conv_lstm3d.py
rename to keras/src/layers/rnn/conv_lstm3d.py
index e22bb2cc6cab..534750abebef 100644
--- a/keras/layers/rnn/conv_lstm3d.py
+++ b/keras/src/layers/rnn/conv_lstm3d.py
@@ -1,5 +1,5 @@
-from keras.api_export import keras_export
-from keras.layers.rnn.conv_lstm import ConvLSTM
+from keras.src.api_export import keras_export
+from keras.src.layers.rnn.conv_lstm import ConvLSTM


 @keras_export("keras.layers.ConvLSTM3D")
diff --git a/keras/layers/rnn/conv_lstm3d_test.py b/keras/src/layers/rnn/conv_lstm3d_test.py
similarity index 96%
rename from keras/layers/rnn/conv_lstm3d_test.py
rename to keras/src/layers/rnn/conv_lstm3d_test.py
index e0008bbc3b45..b6c23326539f 100644
--- a/keras/layers/rnn/conv_lstm3d_test.py
+++ b/keras/src/layers/rnn/conv_lstm3d_test.py
@@ -1,10 +1,10 @@
 import numpy as np
 import pytest

-from keras import backend
-from keras import initializers
-from keras import layers
-from keras import testing
+from keras.src import backend
+from keras.src import initializers
+from keras.src import layers
+from keras.src import testing


 class ConvLSTM1DTest(testing.TestCase):
diff --git a/keras/layers/rnn/conv_lstm_test.py b/keras/src/layers/rnn/conv_lstm_test.py
similarity index 91%
rename from keras/layers/rnn/conv_lstm_test.py
rename to keras/src/layers/rnn/conv_lstm_test.py
index 76123a8cd0f1..e66fed91b62c 100644
--- a/keras/layers/rnn/conv_lstm_test.py
+++ b/keras/src/layers/rnn/conv_lstm_test.py
@@ -1,10 +1,10 @@
 import numpy as np

-from keras import backend
-from keras import initializers
-from keras import testing
-from keras.layers.rnn.conv_lstm import ConvLSTM
-from keras.layers.rnn.conv_lstm import ConvLSTMCell
+from keras.src import backend
+from keras.src import initializers
+from keras.src import testing
+from keras.src.layers.rnn.conv_lstm import ConvLSTM
+from keras.src.layers.rnn.conv_lstm import ConvLSTMCell


 class ConvLSTMCellTest(testing.TestCase):
diff --git a/keras/layers/rnn/dropout_rnn_cell.py b/keras/src/layers/rnn/dropout_rnn_cell.py
similarity index 97%
rename from keras/layers/rnn/dropout_rnn_cell.py
rename to keras/src/layers/rnn/dropout_rnn_cell.py
index a4d4d69fa552..33a522224c7e 100644
--- a/keras/layers/rnn/dropout_rnn_cell.py
+++ b/keras/src/layers/rnn/dropout_rnn_cell.py
@@ -1,5 +1,5 @@
-from keras import backend
-from keras import ops
+from keras.src import backend
+from keras.src import ops


 class DropoutRNNCell:
diff --git a/keras/layers/rnn/dropout_rnn_cell_test.py b/keras/src/layers/rnn/dropout_rnn_cell_test.py
similarity index 94%
rename from keras/layers/rnn/dropout_rnn_cell_test.py
rename to keras/src/layers/rnn/dropout_rnn_cell_test.py
index 2f4e5a658c02..01f3d2e00acf 100644
--- a/keras/layers/rnn/dropout_rnn_cell_test.py
+++ b/keras/src/layers/rnn/dropout_rnn_cell_test.py
@@ -1,10 +1,10 @@
 import pytest

-from keras import backend
-from keras import layers
-from keras import ops
-from keras import testing
-from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell
+from keras.src import backend
+from keras.src import layers
+from keras.src import ops
+from keras.src import testing
+from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell


 class RNNCellWithDropout(layers.Layer, DropoutRNNCell):
diff --git a/keras/layers/rnn/gru.py b/keras/src/layers/rnn/gru.py
similarity index 98%
rename from keras/layers/rnn/gru.py
rename to keras/src/layers/rnn/gru.py
index d1b03e256cb0..f489bd6638fc 100644
--- a/keras/layers/rnn/gru.py
+++ b/keras/src/layers/rnn/gru.py
@@ -1,15 +1,15 @@
-from keras import activations
-from keras import backend
-from keras import constraints
-from keras import initializers
-from keras import ops
-from keras import regularizers
-from keras import tree
-from keras.api_export import keras_export
-from keras.layers.input_spec import InputSpec
-from keras.layers.layer import Layer
-from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell
-from keras.layers.rnn.rnn import RNN
+from keras.src import activations
+from keras.src import backend
+from keras.src import constraints
+from keras.src import initializers
+from keras.src import ops
+from keras.src import regularizers
+from keras.src import tree
+from keras.src.api_export import keras_export
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
+from keras.src.layers.rnn.rnn import RNN


 @keras_export("keras.layers.GRUCell")
diff --git a/keras/layers/rnn/gru_test.py b/keras/src/layers/rnn/gru_test.py
similarity index 99%
rename from keras/layers/rnn/gru_test.py
rename to keras/src/layers/rnn/gru_test.py
index 14f5248d78b0..529220803ddf 100644
--- a/keras/layers/rnn/gru_test.py
+++ b/keras/src/layers/rnn/gru_test.py
@@ -2,9 +2,9 @@
 import pytest
 from absl.testing import parameterized

-from keras import initializers
-from keras import layers
-from keras import testing
+from keras.src import initializers
+from keras.src import layers
+from keras.src import testing


 class GRUTest(testing.TestCase, parameterized.TestCase):
diff --git a/keras/layers/rnn/lstm.py b/keras/src/layers/rnn/lstm.py
similarity index 98%
rename from keras/layers/rnn/lstm.py
rename to keras/src/layers/rnn/lstm.py
index 13c32278418f..33055fd197ec 100644
--- a/keras/layers/rnn/lstm.py
+++ b/keras/src/layers/rnn/lstm.py
@@ -1,15 +1,15 @@
-from keras import activations
-from keras import backend
-from keras import constraints
-from keras import initializers
-from keras import ops
-from keras import regularizers
-from keras import tree
-from keras.api_export import keras_export
-from keras.layers.input_spec import InputSpec
-from keras.layers.layer import Layer
-from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell
-from keras.layers.rnn.rnn import RNN
+from keras.src import activations
+from keras.src import backend
+from keras.src import constraints
+from keras.src import initializers
+from keras.src import ops
+from keras.src import regularizers
+from keras.src import tree
+from keras.src.api_export import keras_export
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
+from keras.src.layers.rnn.rnn import RNN


 @keras_export("keras.layers.LSTMCell")
diff --git a/keras/layers/rnn/lstm_test.py b/keras/src/layers/rnn/lstm_test.py
similarity index 99%
rename from keras/layers/rnn/lstm_test.py
rename to keras/src/layers/rnn/lstm_test.py
index 8811cc760160..bdf262d66ac4 100644
--- a/keras/layers/rnn/lstm_test.py
+++ b/keras/src/layers/rnn/lstm_test.py
@@ -2,9 +2,9 @@
 import pytest
 from absl.testing import parameterized

-from keras import initializers
-from keras import layers
-from keras import testing
+from keras.src import initializers
+from keras.src import layers
+from keras.src import testing


 class LSTMTest(testing.TestCase, parameterized.TestCase):
diff --git a/keras/layers/rnn/rnn.py b/keras/src/layers/rnn/rnn.py
similarity index 97%
rename from keras/layers/rnn/rnn.py
rename to keras/src/layers/rnn/rnn.py
index 6880ab26488a..a8a55718f0d7 100644
--- a/keras/layers/rnn/rnn.py
+++ b/keras/src/layers/rnn/rnn.py
@@ -1,12 +1,12 @@
-from keras import backend
-from keras import ops
-from keras import tree
-from keras.api_export import keras_export
-from keras.layers.layer import Layer
-from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell
-from keras.layers.rnn.stacked_rnn_cells import StackedRNNCells
-from keras.saving import serialization_lib
-from keras.utils import tracking
+from keras.src import backend
+from keras.src import ops
+from keras.src import tree
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer
+from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
+from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells
+from keras.src.saving import serialization_lib
+from keras.src.utils import tracking


 @keras_export("keras.layers.RNN")
@@ -133,8 +133,8 @@ class RNN(Layer):
     Examples:

     ```python
-    from keras.layers import RNN
-    from keras import ops
+    from keras.src.layers import RNN
+    from keras.src import ops

     # First, let's define a RNN Cell, as a layer subclass.
     class MinimalRNNCell(keras.layers.Layer):
diff --git a/keras/layers/rnn/rnn_test.py b/keras/src/layers/rnn/rnn_test.py
similarity index 99%
rename from keras/layers/rnn/rnn_test.py
rename to keras/src/layers/rnn/rnn_test.py
index dcfa075d75ff..f5e5a34efabe 100644
--- a/keras/layers/rnn/rnn_test.py
+++ b/keras/src/layers/rnn/rnn_test.py
@@ -1,9 +1,9 @@
 import numpy as np
 import pytest

-from keras import layers
-from keras import ops
-from keras import testing
+from keras.src import layers
+from keras.src import ops
+from keras.src import testing


 class OneStateRNNCell(layers.Layer):
diff --git a/keras/layers/rnn/simple_rnn.py b/keras/src/layers/rnn/simple_rnn.py
similarity index 97%
rename from keras/layers/rnn/simple_rnn.py
rename to keras/src/layers/rnn/simple_rnn.py
index a3e263069fda..79105f1539ea 100644
--- a/keras/layers/rnn/simple_rnn.py
+++ b/keras/src/layers/rnn/simple_rnn.py
@@ -1,14 +1,14 @@
-from keras import activations
-from keras import backend
-from keras import constraints
-from keras import initializers
-from keras import ops
-from keras import regularizers
-from keras.api_export import keras_export
-from keras.layers.input_spec import InputSpec
-from keras.layers.layer import Layer
-from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell
-from keras.layers.rnn.rnn import RNN
+from keras.src import activations
+from keras.src import backend
+from keras.src import constraints
+from keras.src import initializers
+from keras.src import ops
+from keras.src import regularizers
+from keras.src.api_export import keras_export
+from keras.src.layers.input_spec import InputSpec
+from keras.src.layers.layer import Layer
+from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
+from keras.src.layers.rnn.rnn import RNN


 @keras_export("keras.layers.SimpleRNNCell")
diff --git a/keras/layers/rnn/simple_rnn_test.py b/keras/src/layers/rnn/simple_rnn_test.py
similarity index 98%
rename from keras/layers/rnn/simple_rnn_test.py
rename to keras/src/layers/rnn/simple_rnn_test.py
index 19bf67d3275e..8493bdbee8a8 100644
--- a/keras/layers/rnn/simple_rnn_test.py
+++ b/keras/src/layers/rnn/simple_rnn_test.py
@@ -1,9 +1,9 @@
 import numpy as np
 import pytest

-from keras import initializers
-from keras import layers
-from keras import testing
+from keras.src import initializers
+from keras.src import layers
+from keras.src import testing


 class SimpleRNNTest(testing.TestCase):
diff --git a/keras/layers/rnn/stacked_rnn_cells.py b/keras/src/layers/rnn/stacked_rnn_cells.py
similarity index 96%
rename from keras/layers/rnn/stacked_rnn_cells.py
rename to keras/src/layers/rnn/stacked_rnn_cells.py
index 20502d4655c7..a3e1b601d4c7 100644
--- a/keras/layers/rnn/stacked_rnn_cells.py
+++ b/keras/src/layers/rnn/stacked_rnn_cells.py
@@ -1,8 +1,8 @@
-from keras import ops
-from keras import tree
-from keras.api_export import keras_export
-from keras.layers.layer import Layer
-from keras.saving import serialization_lib
+from keras.src import ops
+from keras.src import tree
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer
+from keras.src.saving import serialization_lib


 @keras_export("keras.layers.StackedRNNCells")
diff --git a/keras/layers/rnn/stacked_rnn_cells_test.py b/keras/src/layers/rnn/stacked_rnn_cells_test.py
similarity index 98%
rename from keras/layers/rnn/stacked_rnn_cells_test.py
rename to keras/src/layers/rnn/stacked_rnn_cells_test.py
index c3eff69b82ac..15d2d1d6054c 100644
--- a/keras/layers/rnn/stacked_rnn_cells_test.py
+++ b/keras/src/layers/rnn/stacked_rnn_cells_test.py
@@ -1,10 +1,10 @@
 import numpy as np
 import pytest

-from keras import layers
-from keras import testing
-from keras.layers.rnn.rnn_test import OneStateRNNCell
-from keras.layers.rnn.rnn_test import TwoStatesRNNCell
+from keras.src import layers
+from keras.src import testing
+from keras.src.layers.rnn.rnn_test import OneStateRNNCell
+from keras.src.layers.rnn.rnn_test import TwoStatesRNNCell


 class StackedRNNTest(testing.TestCase):
diff --git a/keras/layers/rnn/time_distributed.py b/keras/src/layers/rnn/time_distributed.py
similarity index 95%
rename from keras/layers/rnn/time_distributed.py
rename to keras/src/layers/rnn/time_distributed.py
index d1cc613ebd73..cac954a4f11b 100644
--- a/keras/layers/rnn/time_distributed.py
+++ b/keras/src/layers/rnn/time_distributed.py
@@ -1,10 +1,10 @@
 """Wrapper layer to apply every temporal slice of an input."""

-from keras import backend
-from keras import ops
-from keras.api_export import keras_export
-from keras.layers.core.wrapper import Wrapper
-from keras.layers.layer import Layer
+from keras.src import backend
+from keras.src import ops
+from keras.src.api_export import keras_export
+from keras.src.layers.core.wrapper import Wrapper
+from keras.src.layers.layer import Layer


 @keras_export("keras.layers.TimeDistributed")
diff --git a/keras/layers/rnn/time_distributed_test.py b/keras/src/layers/rnn/time_distributed_test.py
similarity index 94%
rename from keras/layers/rnn/time_distributed_test.py
rename to keras/src/layers/rnn/time_distributed_test.py
index cf0793bb7f84..f2ad37e9d110 100644
--- a/keras/layers/rnn/time_distributed_test.py
+++ b/keras/src/layers/rnn/time_distributed_test.py
@@ -1,11 +1,11 @@
 import numpy as np
 import pytest

-from keras import backend
-from keras import initializers
-from keras import layers
-from keras import ops
-from keras import testing
+from keras.src import backend
+from keras.src import initializers
+from keras.src import layers
+from keras.src import ops
+from keras.src import testing


 class TimeDistributedTest(testing.TestCase):
diff --git a/keras/legacy/__init__.py b/keras/src/legacy/__init__.py
similarity index 100%
rename from keras/legacy/__init__.py
rename to keras/src/legacy/__init__.py
diff --git a/keras/legacy/backend.py b/keras/src/legacy/backend.py
similarity index 99%
rename from keras/legacy/backend.py
rename to keras/src/legacy/backend.py
index a6256be0b852..dbb933112ad4 100644
--- a/keras/legacy/backend.py
+++ b/keras/src/legacy/backend.py
@@ -4,9 +4,9 @@

 import numpy as np

-from keras import backend
-from keras.api_export import keras_export
-from keras.utils.module_utils import tensorflow as tf
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.utils.module_utils import tensorflow as tf

 py_any = any
 py_all = all
diff --git a/keras/legacy/layers.py b/keras/src/legacy/layers.py
similarity index 97%
rename from keras/legacy/layers.py
rename to keras/src/legacy/layers.py
index 2ceaa151f6fc..97a369cb6480 100644
--- a/keras/legacy/layers.py
+++ b/keras/src/legacy/layers.py
@@ -6,10 +6,10 @@
 ThresholdedReLU
 """

-from keras import backend
-from keras.api_export import keras_export
-from keras.layers.layer import Layer
-from keras.utils.module_utils import tensorflow as tf
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.layers.layer import Layer
+from keras.src.utils.module_utils import tensorflow as tf


 @keras_export("keras._legacy.layers.AlphaDropout")
diff --git a/keras/legacy/losses.py b/keras/src/legacy/losses.py
similarity index 91%
rename from keras/legacy/losses.py
rename to keras/src/legacy/losses.py
index f0cd872bca55..a84284bfc38d 100644
--- a/keras/legacy/losses.py
+++ b/keras/src/legacy/losses.py
@@ -1,4 +1,4 @@
-from keras.api_export import keras_export
+from keras.src.api_export import keras_export


 @keras_export("keras._legacy.losses.Reduction")
diff --git a/keras/legacy/preprocessing/__init__.py b/keras/src/legacy/preprocessing/__init__.py
similarity index 100%
rename from keras/legacy/preprocessing/__init__.py
rename to keras/src/legacy/preprocessing/__init__.py
diff --git a/keras/legacy/preprocessing/image.py b/keras/src/legacy/preprocessing/image.py
similarity index 99%
rename from keras/legacy/preprocessing/image.py
rename to keras/src/legacy/preprocessing/image.py
index 4e9f74eef3d5..4a0e8b44d395 100644
--- a/keras/legacy/preprocessing/image.py
+++ b/keras/src/legacy/preprocessing/image.py
@@ -8,12 +8,12 @@

 import numpy as np

-from keras import backend
-from keras.api_export import keras_export
-from keras.trainers.data_adapters.py_dataset_adapter import PyDataset
-from keras.utils import image_utils
-from keras.utils import io_utils
-from keras.utils.module_utils import scipy
+from keras.src import backend
+from keras.src.api_export import keras_export
+from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
+from keras.src.utils import image_utils
+from keras.src.utils import io_utils
+from keras.src.utils.module_utils import scipy


 @keras_export("keras._legacy.preprocessing.image.Iterator")
diff --git a/keras/legacy/preprocessing/sequence.py b/keras/src/legacy/preprocessing/sequence.py
similarity index 98%
rename from keras/legacy/preprocessing/sequence.py
rename to keras/src/legacy/preprocessing/sequence.py
index f9f52fa802f2..1d0f360c50c7 100644
--- a/keras/legacy/preprocessing/sequence.py
+++ b/keras/src/legacy/preprocessing/sequence.py
@@ -5,8 +5,8 @@

 import numpy as np

-from keras.api_export import keras_export
-from keras.trainers.data_adapters.py_dataset_adapter import PyDataset
+from keras.src.api_export import keras_export
+from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset


 @keras_export("keras._legacy.preprocessing.sequence.TimeseriesGenerator")
diff --git a/keras/legacy/preprocessing/text.py b/keras/src/legacy/preprocessing/text.py
similarity index 99%
rename from keras/legacy/preprocessing/text.py
rename to keras/src/legacy/preprocessing/text.py
index 8f8a31bfa4fc..83946bfc03e1 100644
--- a/keras/legacy/preprocessing/text.py
+++ b/keras/src/legacy/preprocessing/text.py
@@ -7,7 +7,7 @@

 import numpy as np

-from keras.api_export import keras_export
+from keras.src.api_export import keras_export


 @keras_export("keras._legacy.preprocessing.text.text_to_word_sequence")
diff --git a/keras/legacy/saving/__init__.py b/keras/src/legacy/saving/__init__.py
similarity index 100%
rename from keras/legacy/saving/__init__.py
rename to keras/src/legacy/saving/__init__.py
diff --git a/keras/legacy/saving/json_utils.py b/keras/src/legacy/saving/json_utils.py
similarity index 97%
rename from keras/legacy/saving/json_utils.py
rename to keras/src/legacy/saving/json_utils.py
index 0a7f6254fdc5..0dbc578d25ab 100644
--- a/keras/legacy/saving/json_utils.py
+++ b/keras/src/legacy/saving/json_utils.py
@@ -7,9 +7,9 @@

 import numpy as np

-from keras.legacy.saving import serialization
-from keras.saving import serialization_lib
-from keras.utils.module_utils import tensorflow as tf
+from keras.src.legacy.saving import serialization
+from keras.src.saving import serialization_lib
+from keras.src.utils.module_utils import tensorflow as tf


 _EXTENSION_TYPE_SPEC = "_EXTENSION_TYPE_SPEC"
diff --git a/keras/legacy/saving/json_utils_test.py b/keras/src/legacy/saving/json_utils_test.py
similarity index 96%
rename from keras/legacy/saving/json_utils_test.py
rename to keras/src/legacy/saving/json_utils_test.py
index 8d82f52a40f7..3eca485bedc0 100644
--- a/keras/legacy/saving/json_utils_test.py
+++ b/keras/src/legacy/saving/json_utils_test.py
@@ -2,9 +2,9 @@

 import pytest

-from keras import backend
-from keras import testing
-from keras.legacy.saving import json_utils
+from keras.src import backend
+from keras.src import testing
+from keras.src.legacy.saving import json_utils

 if backend.backend() == "tensorflow":
     import tensorflow as tf
diff --git a/keras/legacy/saving/legacy_h5_format.py b/keras/src/legacy/saving/legacy_h5_format.py
similarity index 98%
rename from keras/legacy/saving/legacy_h5_format.py
rename to keras/src/legacy/saving/legacy_h5_format.py
index 1c4e897a42ea..05f7ceca22be 100644
--- a/keras/legacy/saving/legacy_h5_format.py
+++ b/keras/src/legacy/saving/legacy_h5_format.py
@@ -5,14 +5,14 @@
 import numpy as np
 from absl import logging

-from keras import backend
-from keras import optimizers
-from keras.backend.common import global_state
-from keras.legacy.saving import json_utils
-from keras.legacy.saving import saving_options
-from keras.legacy.saving import saving_utils
-from keras.saving import object_registration
-from keras.utils import io_utils
+from keras.src import backend
+from keras.src import optimizers
+from keras.src.backend.common import global_state
+from keras.src.legacy.saving import json_utils
+from keras.src.legacy.saving import saving_options
+from keras.src.legacy.saving import saving_utils
+from keras.src.saving import object_registration
+from keras.src.utils import io_utils

 try:
     import h5py
@@ -200,7 +200,7 @@ def save_weights_to_hdf5_group(f, model):
         f: HDF5 group.
         model: Model instance.
""" - from keras import __version__ as keras_version + from keras.src import __version__ as keras_version save_attributes_to_hdf5_group( f, "layer_names", [layer.name.encode("utf8") for layer in model.layers] diff --git a/keras/legacy/saving/legacy_h5_format_test.py b/keras/src/legacy/saving/legacy_h5_format_test.py similarity index 98% rename from keras/legacy/saving/legacy_h5_format_test.py rename to keras/src/legacy/saving/legacy_h5_format_test.py index c4e48e5eb07d..225b06f9ba44 100644 --- a/keras/legacy/saving/legacy_h5_format_test.py +++ b/keras/src/legacy/saving/legacy_h5_format_test.py @@ -4,13 +4,13 @@ import pytest import keras -from keras import layers -from keras import models -from keras import ops -from keras import testing -from keras.legacy.saving import legacy_h5_format -from keras.saving import object_registration -from keras.saving import serialization_lib +from keras.src import layers +from keras.src import models +from keras.src import ops +from keras.src import testing +from keras.src.legacy.saving import legacy_h5_format +from keras.src.saving import object_registration +from keras.src.saving import serialization_lib # TODO: more thorough testing. Correctness depends # on exact weight ordering for each layer, so we need @@ -233,7 +233,7 @@ class RegisteredSubLayer(layers.Layer): ) model = models.Sequential([layer]) with self.subTest("test_JSON"): - from keras.models.model import model_from_json + from keras.src.models.model import model_from_json model_json = model.to_json() self.assertIn("Foo>RegisteredSubLayer", model_json) diff --git a/keras/legacy/saving/saving_options.py b/keras/src/legacy/saving/saving_options.py similarity index 89% rename from keras/legacy/saving/saving_options.py rename to keras/src/legacy/saving/saving_options.py index 30ba5fb464e1..6f270fb23290 100644 --- a/keras/legacy/saving/saving_options.py +++ b/keras/src/legacy/saving/saving_options.py @@ -1,6 +1,6 @@ import contextlib -from keras.backend.common import global_state +from keras.src.backend.common import global_state @contextlib.contextmanager diff --git a/keras/legacy/saving/saving_utils.py b/keras/src/legacy/saving/saving_utils.py similarity index 95% rename from keras/legacy/saving/saving_utils.py rename to keras/src/legacy/saving/saving_utils.py index 69e3553bdf15..aec107802138 100644 --- a/keras/legacy/saving/saving_utils.py +++ b/keras/src/legacy/saving/saving_utils.py @@ -3,15 +3,15 @@ from absl import logging -from keras import backend -from keras import layers -from keras import losses -from keras import metrics as metrics_module -from keras import models -from keras import optimizers -from keras import tree -from keras.legacy.saving import serialization -from keras.saving import object_registration +from keras.src import backend +from keras.src import layers +from keras.src import losses +from keras.src import metrics as metrics_module +from keras.src import models +from keras.src import optimizers +from keras.src import tree +from keras.src.legacy.saving import serialization +from keras.src.saving import object_registration MODULE_OBJECTS = threading.local() @@ -92,7 +92,7 @@ def model_from_config(config, custom_objects=None): def model_metadata(model, include_optimizer=True, require_config=True): """Returns a dictionary containing the model metadata.""" - from keras import __version__ as keras_version + from keras.src import __version__ as keras_version model_config = {"class_name": model.__class__.__name__} try: diff --git a/keras/legacy/saving/serialization.py 
b/keras/src/legacy/saving/serialization.py similarity index 99% rename from keras/legacy/saving/serialization.py rename to keras/src/legacy/saving/serialization.py index 98640c68ff59..7fa7eb44c507 100644 --- a/keras/legacy/saving/serialization.py +++ b/keras/src/legacy/saving/serialization.py @@ -7,8 +7,8 @@ import weakref # isort: off -from keras.api_export import keras_export -from keras.saving import object_registration +from keras.src.api_export import keras_export +from keras.src.saving import object_registration # Flag that determines whether to skip the NotImplementedError when calling # get_config in custom models and layers. This is only enabled when saving to diff --git a/keras/losses/__init__.py b/keras/src/losses/__init__.py similarity index 64% rename from keras/losses/__init__.py rename to keras/src/losses/__init__.py index e2cc81d49cef..9652ceb057bf 100644 --- a/keras/losses/__init__.py +++ b/keras/src/losses/__init__.py @@ -1,48 +1,48 @@ import inspect -from keras.api_export import keras_export -from keras.losses.loss import Loss -from keras.losses.losses import BinaryCrossentropy -from keras.losses.losses import BinaryFocalCrossentropy -from keras.losses.losses import CategoricalCrossentropy -from keras.losses.losses import CategoricalFocalCrossentropy -from keras.losses.losses import CategoricalHinge -from keras.losses.losses import CosineSimilarity -from keras.losses.losses import Dice -from keras.losses.losses import Hinge -from keras.losses.losses import Huber -from keras.losses.losses import KLDivergence -from keras.losses.losses import LogCosh -from keras.losses.losses import LossFunctionWrapper -from keras.losses.losses import MeanAbsoluteError -from keras.losses.losses import MeanAbsolutePercentageError -from keras.losses.losses import MeanSquaredError -from keras.losses.losses import MeanSquaredLogarithmicError -from keras.losses.losses import Poisson -from keras.losses.losses import SparseCategoricalCrossentropy -from keras.losses.losses import SquaredHinge -from keras.losses.losses import Tversky -from keras.losses.losses import binary_crossentropy -from keras.losses.losses import binary_focal_crossentropy -from keras.losses.losses import categorical_crossentropy -from keras.losses.losses import categorical_focal_crossentropy -from keras.losses.losses import categorical_hinge -from keras.losses.losses import cosine_similarity -from keras.losses.losses import ctc -from keras.losses.losses import dice -from keras.losses.losses import hinge -from keras.losses.losses import huber -from keras.losses.losses import kl_divergence -from keras.losses.losses import log_cosh -from keras.losses.losses import mean_absolute_error -from keras.losses.losses import mean_absolute_percentage_error -from keras.losses.losses import mean_squared_error -from keras.losses.losses import mean_squared_logarithmic_error -from keras.losses.losses import poisson -from keras.losses.losses import sparse_categorical_crossentropy -from keras.losses.losses import squared_hinge -from keras.losses.losses import tversky -from keras.saving import serialization_lib +from keras.src.api_export import keras_export +from keras.src.losses.loss import Loss +from keras.src.losses.losses import BinaryCrossentropy +from keras.src.losses.losses import BinaryFocalCrossentropy +from keras.src.losses.losses import CategoricalCrossentropy +from keras.src.losses.losses import CategoricalFocalCrossentropy +from keras.src.losses.losses import CategoricalHinge +from keras.src.losses.losses import CosineSimilarity +from 
keras.src.losses.losses import Dice +from keras.src.losses.losses import Hinge +from keras.src.losses.losses import Huber +from keras.src.losses.losses import KLDivergence +from keras.src.losses.losses import LogCosh +from keras.src.losses.losses import LossFunctionWrapper +from keras.src.losses.losses import MeanAbsoluteError +from keras.src.losses.losses import MeanAbsolutePercentageError +from keras.src.losses.losses import MeanSquaredError +from keras.src.losses.losses import MeanSquaredLogarithmicError +from keras.src.losses.losses import Poisson +from keras.src.losses.losses import SparseCategoricalCrossentropy +from keras.src.losses.losses import SquaredHinge +from keras.src.losses.losses import Tversky +from keras.src.losses.losses import binary_crossentropy +from keras.src.losses.losses import binary_focal_crossentropy +from keras.src.losses.losses import categorical_crossentropy +from keras.src.losses.losses import categorical_focal_crossentropy +from keras.src.losses.losses import categorical_hinge +from keras.src.losses.losses import cosine_similarity +from keras.src.losses.losses import ctc +from keras.src.losses.losses import dice +from keras.src.losses.losses import hinge +from keras.src.losses.losses import huber +from keras.src.losses.losses import kl_divergence +from keras.src.losses.losses import log_cosh +from keras.src.losses.losses import mean_absolute_error +from keras.src.losses.losses import mean_absolute_percentage_error +from keras.src.losses.losses import mean_squared_error +from keras.src.losses.losses import mean_squared_logarithmic_error +from keras.src.losses.losses import poisson +from keras.src.losses.losses import sparse_categorical_crossentropy +from keras.src.losses.losses import squared_hinge +from keras.src.losses.losses import tversky +from keras.src.saving import serialization_lib ALL_OBJECTS = { # Base diff --git a/keras/losses/loss.py b/keras/src/losses/loss.py similarity index 97% rename from keras/losses/loss.py rename to keras/src/losses/loss.py index ad432e62dc37..ba4c78ebc5a5 100644 --- a/keras/losses/loss.py +++ b/keras/src/losses/loss.py @@ -1,8 +1,8 @@ -from keras import backend -from keras import ops -from keras import tree -from keras.api_export import keras_export -from keras.utils.naming import auto_name +from keras.src import backend +from keras.src import ops +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.utils.naming import auto_name @keras_export(["keras.Loss", "keras.losses.Loss"]) diff --git a/keras/losses/loss_test.py b/keras/src/losses/loss_test.py similarity index 97% rename from keras/losses/loss_test.py rename to keras/src/losses/loss_test.py index 929785fcc420..1d5725ffd3a1 100644 --- a/keras/losses/loss_test.py +++ b/keras/src/losses/loss_test.py @@ -1,12 +1,12 @@ import numpy as np import pytest -from keras import backend -from keras import losses as losses_module -from keras import ops -from keras import testing -from keras.losses.loss import Loss -from keras.losses.loss import squeeze_or_expand_to_same_rank +from keras.src import backend +from keras.src import losses as losses_module +from keras.src import ops +from keras.src import testing +from keras.src.losses.loss import Loss +from keras.src.losses.loss import squeeze_or_expand_to_same_rank class ExampleLoss(Loss): diff --git a/keras/losses/losses.py b/keras/src/losses/losses.py similarity index 99% rename from keras/losses/losses.py rename to keras/src/losses/losses.py index ef0a2229dafc..f3f997616a00 100644 --- 
a/keras/losses/losses.py +++ b/keras/src/losses/losses.py @@ -1,12 +1,12 @@ import warnings -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.losses.loss import Loss -from keras.losses.loss import squeeze_or_expand_to_same_rank -from keras.saving import serialization_lib -from keras.utils.numerical_utils import normalize +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.losses.loss import Loss +from keras.src.losses.loss import squeeze_or_expand_to_same_rank +from keras.src.saving import serialization_lib +from keras.src.utils.numerical_utils import normalize class LossFunctionWrapper(Loss): diff --git a/keras/losses/losses_test.py b/keras/src/losses/losses_test.py similarity index 99% rename from keras/losses/losses_test.py rename to keras/src/losses/losses_test.py index 59af6abf9c1e..b97a8a253c3e 100644 --- a/keras/losses/losses_test.py +++ b/keras/src/losses/losses_test.py @@ -1,9 +1,9 @@ import numpy as np import pytest -from keras import backend -from keras import testing -from keras.losses import losses +from keras.src import backend +from keras.src import testing +from keras.src.losses import losses class MeanSquaredErrorTest(testing.TestCase): diff --git a/keras/metrics/__init__.py b/keras/src/metrics/__init__.py similarity index 59% rename from keras/metrics/__init__.py rename to keras/src/metrics/__init__.py index 02e0f76dfcbe..fd5e89069770 100644 --- a/keras/metrics/__init__.py +++ b/keras/src/metrics/__init__.py @@ -1,52 +1,54 @@ import inspect -from keras.api_export import keras_export -from keras.metrics.accuracy_metrics import Accuracy -from keras.metrics.accuracy_metrics import BinaryAccuracy -from keras.metrics.accuracy_metrics import CategoricalAccuracy -from keras.metrics.accuracy_metrics import SparseCategoricalAccuracy -from keras.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy -from keras.metrics.accuracy_metrics import TopKCategoricalAccuracy -from keras.metrics.confusion_metrics import AUC -from keras.metrics.confusion_metrics import FalseNegatives -from keras.metrics.confusion_metrics import FalsePositives -from keras.metrics.confusion_metrics import Precision -from keras.metrics.confusion_metrics import PrecisionAtRecall -from keras.metrics.confusion_metrics import Recall -from keras.metrics.confusion_metrics import RecallAtPrecision -from keras.metrics.confusion_metrics import SensitivityAtSpecificity -from keras.metrics.confusion_metrics import SpecificityAtSensitivity -from keras.metrics.confusion_metrics import TrueNegatives -from keras.metrics.confusion_metrics import TruePositives -from keras.metrics.f_score_metrics import F1Score -from keras.metrics.f_score_metrics import FBetaScore -from keras.metrics.hinge_metrics import CategoricalHinge -from keras.metrics.hinge_metrics import Hinge -from keras.metrics.hinge_metrics import SquaredHinge -from keras.metrics.iou_metrics import BinaryIoU -from keras.metrics.iou_metrics import IoU -from keras.metrics.iou_metrics import MeanIoU -from keras.metrics.iou_metrics import OneHotIoU -from keras.metrics.iou_metrics import OneHotMeanIoU -from keras.metrics.metric import Metric -from keras.metrics.probabilistic_metrics import BinaryCrossentropy -from keras.metrics.probabilistic_metrics import CategoricalCrossentropy -from keras.metrics.probabilistic_metrics import KLDivergence -from keras.metrics.probabilistic_metrics import Poisson -from keras.metrics.probabilistic_metrics import 
SparseCategoricalCrossentropy -from keras.metrics.reduction_metrics import Mean -from keras.metrics.reduction_metrics import MeanMetricWrapper -from keras.metrics.reduction_metrics import Sum -from keras.metrics.regression_metrics import CosineSimilarity -from keras.metrics.regression_metrics import LogCoshError -from keras.metrics.regression_metrics import MeanAbsoluteError -from keras.metrics.regression_metrics import MeanAbsolutePercentageError -from keras.metrics.regression_metrics import MeanSquaredError -from keras.metrics.regression_metrics import MeanSquaredLogarithmicError -from keras.metrics.regression_metrics import R2Score -from keras.metrics.regression_metrics import RootMeanSquaredError -from keras.saving import serialization_lib -from keras.utils.naming import to_snake_case +from keras.src.api_export import keras_export +from keras.src.metrics.accuracy_metrics import Accuracy +from keras.src.metrics.accuracy_metrics import BinaryAccuracy +from keras.src.metrics.accuracy_metrics import CategoricalAccuracy +from keras.src.metrics.accuracy_metrics import SparseCategoricalAccuracy +from keras.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy +from keras.src.metrics.accuracy_metrics import TopKCategoricalAccuracy +from keras.src.metrics.confusion_metrics import AUC +from keras.src.metrics.confusion_metrics import FalseNegatives +from keras.src.metrics.confusion_metrics import FalsePositives +from keras.src.metrics.confusion_metrics import Precision +from keras.src.metrics.confusion_metrics import PrecisionAtRecall +from keras.src.metrics.confusion_metrics import Recall +from keras.src.metrics.confusion_metrics import RecallAtPrecision +from keras.src.metrics.confusion_metrics import SensitivityAtSpecificity +from keras.src.metrics.confusion_metrics import SpecificityAtSensitivity +from keras.src.metrics.confusion_metrics import TrueNegatives +from keras.src.metrics.confusion_metrics import TruePositives +from keras.src.metrics.f_score_metrics import F1Score +from keras.src.metrics.f_score_metrics import FBetaScore +from keras.src.metrics.hinge_metrics import CategoricalHinge +from keras.src.metrics.hinge_metrics import Hinge +from keras.src.metrics.hinge_metrics import SquaredHinge +from keras.src.metrics.iou_metrics import BinaryIoU +from keras.src.metrics.iou_metrics import IoU +from keras.src.metrics.iou_metrics import MeanIoU +from keras.src.metrics.iou_metrics import OneHotIoU +from keras.src.metrics.iou_metrics import OneHotMeanIoU +from keras.src.metrics.metric import Metric +from keras.src.metrics.probabilistic_metrics import BinaryCrossentropy +from keras.src.metrics.probabilistic_metrics import CategoricalCrossentropy +from keras.src.metrics.probabilistic_metrics import KLDivergence +from keras.src.metrics.probabilistic_metrics import Poisson +from keras.src.metrics.probabilistic_metrics import ( + SparseCategoricalCrossentropy, +) +from keras.src.metrics.reduction_metrics import Mean +from keras.src.metrics.reduction_metrics import MeanMetricWrapper +from keras.src.metrics.reduction_metrics import Sum +from keras.src.metrics.regression_metrics import CosineSimilarity +from keras.src.metrics.regression_metrics import LogCoshError +from keras.src.metrics.regression_metrics import MeanAbsoluteError +from keras.src.metrics.regression_metrics import MeanAbsolutePercentageError +from keras.src.metrics.regression_metrics import MeanSquaredError +from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError +from 
keras.src.metrics.regression_metrics import R2Score +from keras.src.metrics.regression_metrics import RootMeanSquaredError +from keras.src.saving import serialization_lib +from keras.src.utils.naming import to_snake_case ALL_OBJECTS = { # Base diff --git a/keras/metrics/accuracy_metrics.py b/keras/src/metrics/accuracy_metrics.py similarity index 98% rename from keras/metrics/accuracy_metrics.py rename to keras/src/metrics/accuracy_metrics.py index 330c09ce775e..53a9b207cfdf 100644 --- a/keras/metrics/accuracy_metrics.py +++ b/keras/src/metrics/accuracy_metrics.py @@ -1,8 +1,8 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.losses.loss import squeeze_or_expand_to_same_rank -from keras.metrics import reduction_metrics +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.losses.loss import squeeze_or_expand_to_same_rank +from keras.src.metrics import reduction_metrics def accuracy(y_true, y_pred): diff --git a/keras/metrics/accuracy_metrics_test.py b/keras/src/metrics/accuracy_metrics_test.py similarity index 99% rename from keras/metrics/accuracy_metrics_test.py rename to keras/src/metrics/accuracy_metrics_test.py index 06207e3e8114..a2adc1d4f23b 100644 --- a/keras/metrics/accuracy_metrics_test.py +++ b/keras/src/metrics/accuracy_metrics_test.py @@ -2,8 +2,8 @@ import numpy as np -from keras import testing -from keras.metrics import accuracy_metrics +from keras.src import testing +from keras.src.metrics import accuracy_metrics class AccuracyTest(testing.TestCase): diff --git a/keras/metrics/confusion_metrics.py b/keras/src/metrics/confusion_metrics.py similarity index 99% rename from keras/metrics/confusion_metrics.py rename to keras/src/metrics/confusion_metrics.py index 18791977d463..175932e47127 100644 --- a/keras/metrics/confusion_metrics.py +++ b/keras/src/metrics/confusion_metrics.py @@ -1,13 +1,13 @@ import numpy as np -from keras import activations -from keras import backend -from keras import initializers -from keras import ops -from keras.api_export import keras_export -from keras.metrics import metrics_utils -from keras.metrics.metric import Metric -from keras.utils.python_utils import to_list +from keras.src import activations +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.metrics import metrics_utils +from keras.src.metrics.metric import Metric +from keras.src.utils.python_utils import to_list class _ConfusionMatrixConditionCount(Metric): diff --git a/keras/metrics/confusion_metrics_test.py b/keras/src/metrics/confusion_metrics_test.py similarity index 99% rename from keras/metrics/confusion_metrics_test.py rename to keras/src/metrics/confusion_metrics_test.py index 849aef9d6a2b..ba5934cc342f 100644 --- a/keras/metrics/confusion_metrics_test.py +++ b/keras/src/metrics/confusion_metrics_test.py @@ -5,12 +5,12 @@ from absl import logging from absl.testing import parameterized -from keras import layers -from keras import metrics -from keras import models -from keras import ops -from keras import testing -from keras.metrics import metrics_utils +from keras.src import layers +from keras.src import metrics +from keras.src import models +from keras.src import ops +from keras.src import testing +from keras.src.metrics import metrics_utils class FalsePositivesTest(testing.TestCase): diff --git a/keras/metrics/f_score_metrics.py 
b/keras/src/metrics/f_score_metrics.py similarity index 98% rename from keras/metrics/f_score_metrics.py rename to keras/src/metrics/f_score_metrics.py index eda9c2576593..a51119cb48e4 100644 --- a/keras/metrics/f_score_metrics.py +++ b/keras/src/metrics/f_score_metrics.py @@ -1,8 +1,8 @@ -from keras import backend -from keras import initializers -from keras import ops -from keras.api_export import keras_export -from keras.metrics.metric import Metric +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.metrics.metric import Metric @keras_export("keras.metrics.FBetaScore") diff --git a/keras/metrics/f_score_metrics_test.py b/keras/src/metrics/f_score_metrics_test.py similarity index 99% rename from keras/metrics/f_score_metrics_test.py rename to keras/src/metrics/f_score_metrics_test.py index c5b700664cdf..baf7acced00b 100644 --- a/keras/metrics/f_score_metrics_test.py +++ b/keras/src/metrics/f_score_metrics_test.py @@ -1,8 +1,8 @@ import numpy as np from absl.testing import parameterized -from keras import testing -from keras.metrics import f_score_metrics +from keras.src import testing +from keras.src.metrics import f_score_metrics class FBetaScoreTest(parameterized.TestCase, testing.TestCase): diff --git a/keras/metrics/hinge_metrics.py b/keras/src/metrics/hinge_metrics.py similarity index 92% rename from keras/metrics/hinge_metrics.py rename to keras/src/metrics/hinge_metrics.py index 8ca0530ce2e7..4678b3fa1718 100644 --- a/keras/metrics/hinge_metrics.py +++ b/keras/src/metrics/hinge_metrics.py @@ -1,8 +1,8 @@ -from keras.api_export import keras_export -from keras.losses.losses import categorical_hinge -from keras.losses.losses import hinge -from keras.losses.losses import squared_hinge -from keras.metrics import reduction_metrics +from keras.src.api_export import keras_export +from keras.src.losses.losses import categorical_hinge +from keras.src.losses.losses import hinge +from keras.src.losses.losses import squared_hinge +from keras.src.metrics import reduction_metrics @keras_export("keras.metrics.Hinge") diff --git a/keras/metrics/hinge_metrics_test.py b/keras/src/metrics/hinge_metrics_test.py similarity index 98% rename from keras/metrics/hinge_metrics_test.py rename to keras/src/metrics/hinge_metrics_test.py index 6e217d33ea35..26d67b98ee6d 100644 --- a/keras/metrics/hinge_metrics_test.py +++ b/keras/src/metrics/hinge_metrics_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras import testing -from keras.metrics import hinge_metrics +from keras.src import testing +from keras.src.metrics import hinge_metrics class HingeTest(testing.TestCase): diff --git a/keras/metrics/iou_metrics.py b/keras/src/metrics/iou_metrics.py similarity index 99% rename from keras/metrics/iou_metrics.py rename to keras/src/metrics/iou_metrics.py index e8dd9594b5d7..f39222ede85c 100644 --- a/keras/metrics/iou_metrics.py +++ b/keras/src/metrics/iou_metrics.py @@ -1,9 +1,9 @@ -from keras import backend -from keras import initializers -from keras import ops -from keras.api_export import keras_export -from keras.metrics.metric import Metric -from keras.metrics.metrics_utils import confusion_matrix +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.metrics.metric import Metric +from keras.src.metrics.metrics_utils import confusion_matrix class _IoUBase(Metric): diff --git 
a/keras/metrics/iou_metrics_test.py b/keras/src/metrics/iou_metrics_test.py similarity index 99% rename from keras/metrics/iou_metrics_test.py rename to keras/src/metrics/iou_metrics_test.py index 907af12ceac7..76887dfc0655 100644 --- a/keras/metrics/iou_metrics_test.py +++ b/keras/src/metrics/iou_metrics_test.py @@ -1,10 +1,10 @@ import numpy as np import pytest -from keras import layers -from keras import models -from keras import testing -from keras.metrics import iou_metrics as metrics +from keras.src import layers +from keras.src import models +from keras.src import testing +from keras.src.metrics import iou_metrics as metrics class IoUTest(testing.TestCase): diff --git a/keras/metrics/metric.py b/keras/src/metrics/metric.py similarity index 97% rename from keras/metrics/metric.py rename to keras/src/metrics/metric.py index c4c43f8549f1..27f39f94d799 100644 --- a/keras/metrics/metric.py +++ b/keras/src/metrics/metric.py @@ -1,9 +1,9 @@ -from keras import backend -from keras import initializers -from keras import ops -from keras.api_export import keras_export -from keras.utils.naming import auto_name -from keras.utils.tracking import Tracker +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.utils.naming import auto_name +from keras.src.utils.tracking import Tracker @keras_export(["keras.Metric", "keras.metrics.Metric"]) diff --git a/keras/metrics/metric_test.py b/keras/src/metrics/metric_test.py similarity index 96% rename from keras/metrics/metric_test.py rename to keras/src/metrics/metric_test.py index 56d3737e597f..346e6140c89b 100644 --- a/keras/metrics/metric_test.py +++ b/keras/src/metrics/metric_test.py @@ -1,11 +1,11 @@ import numpy as np -from keras import backend -from keras import initializers -from keras import metrics as metrics_module -from keras import ops -from keras import testing -from keras.metrics.metric import Metric +from keras.src import backend +from keras.src import initializers +from keras.src import metrics as metrics_module +from keras.src import ops +from keras.src import testing +from keras.src.metrics.metric import Metric class ExampleMetric(Metric): diff --git a/keras/metrics/metrics_utils.py b/keras/src/metrics/metrics_utils.py similarity index 99% rename from keras/metrics/metrics_utils.py rename to keras/src/metrics/metrics_utils.py index 8018363277e3..32f02bbb3ccb 100644 --- a/keras/metrics/metrics_utils.py +++ b/keras/src/metrics/metrics_utils.py @@ -2,10 +2,10 @@ import numpy as np -from keras import backend -from keras import ops -from keras.losses.loss import squeeze_or_expand_to_same_rank -from keras.utils.python_utils import to_list +from keras.src import backend +from keras.src import ops +from keras.src.losses.loss import squeeze_or_expand_to_same_rank +from keras.src.utils.python_utils import to_list NEG_INF = -1e10 diff --git a/keras/metrics/probabilistic_metrics.py b/keras/src/metrics/probabilistic_metrics.py similarity index 96% rename from keras/metrics/probabilistic_metrics.py rename to keras/src/metrics/probabilistic_metrics.py index b6f4551e0796..1abcd55623fc 100644 --- a/keras/metrics/probabilistic_metrics.py +++ b/keras/src/metrics/probabilistic_metrics.py @@ -1,10 +1,10 @@ -from keras.api_export import keras_export -from keras.losses.losses import binary_crossentropy -from keras.losses.losses import categorical_crossentropy -from keras.losses.losses import kl_divergence -from keras.losses.losses import poisson -from 
keras.losses.losses import sparse_categorical_crossentropy -from keras.metrics import reduction_metrics +from keras.src.api_export import keras_export +from keras.src.losses.losses import binary_crossentropy +from keras.src.losses.losses import categorical_crossentropy +from keras.src.losses.losses import kl_divergence +from keras.src.losses.losses import poisson +from keras.src.losses.losses import sparse_categorical_crossentropy +from keras.src.metrics import reduction_metrics @keras_export("keras.metrics.KLDivergence") diff --git a/keras/metrics/probabilistic_metrics_test.py b/keras/src/metrics/probabilistic_metrics_test.py similarity index 99% rename from keras/metrics/probabilistic_metrics_test.py rename to keras/src/metrics/probabilistic_metrics_test.py index 1ff76a846f99..0950b277893b 100644 --- a/keras/metrics/probabilistic_metrics_test.py +++ b/keras/src/metrics/probabilistic_metrics_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras import metrics -from keras import testing +from keras.src import metrics +from keras.src import testing class KLDivergenceTest(testing.TestCase): diff --git a/keras/metrics/reduction_metrics.py b/keras/src/metrics/reduction_metrics.py similarity index 96% rename from keras/metrics/reduction_metrics.py rename to keras/src/metrics/reduction_metrics.py index 88c6af118e5a..90f45c008d96 100644 --- a/keras/metrics/reduction_metrics.py +++ b/keras/src/metrics/reduction_metrics.py @@ -1,10 +1,10 @@ -from keras import backend -from keras import initializers -from keras import losses -from keras import ops -from keras.api_export import keras_export -from keras.metrics.metric import Metric -from keras.saving import serialization_lib +from keras.src import backend +from keras.src import initializers +from keras.src import losses +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.metrics.metric import Metric +from keras.src.saving import serialization_lib def reduce_to_samplewise_values(values, sample_weight, reduce_fn, dtype): diff --git a/keras/metrics/reduction_metrics_test.py b/keras/src/metrics/reduction_metrics_test.py similarity index 97% rename from keras/metrics/reduction_metrics_test.py rename to keras/src/metrics/reduction_metrics_test.py index 7f56fde5f04b..eb0443a6d85c 100644 --- a/keras/metrics/reduction_metrics_test.py +++ b/keras/src/metrics/reduction_metrics_test.py @@ -1,8 +1,8 @@ import numpy as np -from keras import testing -from keras.metrics import reduction_metrics -from keras.saving import register_keras_serializable +from keras.src import testing +from keras.src.metrics import reduction_metrics +from keras.src.saving import register_keras_serializable class SumTest(testing.TestCase): diff --git a/keras/metrics/regression_metrics.py b/keras/src/metrics/regression_metrics.py similarity index 97% rename from keras/metrics/regression_metrics.py rename to keras/src/metrics/regression_metrics.py index 7997be11d901..220e87c20929 100644 --- a/keras/metrics/regression_metrics.py +++ b/keras/src/metrics/regression_metrics.py @@ -1,16 +1,16 @@ import warnings -from keras import initializers -from keras import ops -from keras.api_export import keras_export -from keras.losses.loss import squeeze_or_expand_to_same_rank -from keras.losses.losses import log_cosh -from keras.losses.losses import mean_absolute_error -from keras.losses.losses import mean_absolute_percentage_error -from keras.losses.losses import mean_squared_error -from keras.losses.losses import mean_squared_logarithmic_error -from keras.metrics 
import reduction_metrics -from keras.utils.numerical_utils import normalize +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.losses.loss import squeeze_or_expand_to_same_rank +from keras.src.losses.losses import log_cosh +from keras.src.losses.losses import mean_absolute_error +from keras.src.losses.losses import mean_absolute_percentage_error +from keras.src.losses.losses import mean_squared_error +from keras.src.losses.losses import mean_squared_logarithmic_error +from keras.src.metrics import reduction_metrics +from keras.src.utils.numerical_utils import normalize @keras_export("keras.metrics.MeanSquaredError") diff --git a/keras/metrics/regression_metrics_test.py b/keras/src/metrics/regression_metrics_test.py similarity index 99% rename from keras/metrics/regression_metrics_test.py rename to keras/src/metrics/regression_metrics_test.py index 7a3dbc5b5cbe..e7276f58c70e 100644 --- a/keras/metrics/regression_metrics_test.py +++ b/keras/src/metrics/regression_metrics_test.py @@ -1,8 +1,8 @@ import numpy as np from absl.testing import parameterized -from keras import testing -from keras.metrics import regression_metrics as metrics +from keras.src import testing +from keras.src.metrics import regression_metrics as metrics class MeanSquaredErrorTest(testing.TestCase): diff --git a/keras/src/models/__init__.py b/keras/src/models/__init__.py new file mode 100644 index 000000000000..1f3f73c99961 --- /dev/null +++ b/keras/src/models/__init__.py @@ -0,0 +1,3 @@ +from keras.src.models.functional import Functional +from keras.src.models.model import Model +from keras.src.models.sequential import Sequential diff --git a/keras/models/cloning.py b/keras/src/models/cloning.py similarity index 96% rename from keras/models/cloning.py rename to keras/src/models/cloning.py index e17655dc582e..3875b3522a6e 100644 --- a/keras/models/cloning.py +++ b/keras/src/models/cloning.py @@ -1,13 +1,13 @@ -from keras import backend -from keras import tree -from keras import utils -from keras.api_export import keras_export -from keras.layers import Input -from keras.layers import InputLayer -from keras.models.functional import Functional -from keras.models.functional import functional_like_constructor -from keras.models.sequential import Sequential -from keras.saving import serialization_lib +from keras.src import backend +from keras.src import tree +from keras.src import utils +from keras.src.api_export import keras_export +from keras.src.layers import Input +from keras.src.layers import InputLayer +from keras.src.models.functional import Functional +from keras.src.models.functional import functional_like_constructor +from keras.src.models.sequential import Sequential +from keras.src.saving import serialization_lib @keras_export("keras.models.clone_model") diff --git a/keras/models/cloning_test.py b/keras/src/models/cloning_test.py similarity index 96% rename from keras/models/cloning_test.py rename to keras/src/models/cloning_test.py index 340856f33a89..d9a46ac29cfa 100644 --- a/keras/models/cloning_test.py +++ b/keras/src/models/cloning_test.py @@ -2,11 +2,11 @@ import pytest from absl.testing import parameterized -from keras import layers -from keras import models -from keras import testing -from keras import tree -from keras.models.cloning import clone_model +from keras.src import layers +from keras.src import models +from keras.src import testing +from keras.src import tree +from keras.src.models.cloning import clone_model def 
get_mlp_functional_model(shared_layers=False): diff --git a/keras/models/functional.py b/keras/src/models/functional.py similarity index 97% rename from keras/models/functional.py rename to keras/src/models/functional.py index de6c8d82a5f1..20e2266e00e6 100644 --- a/keras/models/functional.py +++ b/keras/src/models/functional.py @@ -3,24 +3,24 @@ import typing import warnings -from keras import backend -from keras import ops -from keras import tree -from keras.backend.common import global_state -from keras.layers.core.input_layer import Input -from keras.layers.core.input_layer import InputLayer -from keras.layers.input_spec import InputSpec -from keras.layers.layer import Layer -from keras.legacy.saving import saving_utils -from keras.legacy.saving import serialization as legacy_serialization -from keras.models.model import Model -from keras.ops.function import Function -from keras.ops.function import _build_map -from keras.ops.function import make_node_key -from keras.ops.node import KerasHistory -from keras.ops.node import Node -from keras.saving import serialization_lib -from keras.utils import tracking +from keras.src import backend +from keras.src import ops +from keras.src import tree +from keras.src.backend.common import global_state +from keras.src.layers.core.input_layer import Input +from keras.src.layers.core.input_layer import InputLayer +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.legacy.saving import saving_utils +from keras.src.legacy.saving import serialization as legacy_serialization +from keras.src.models.model import Model +from keras.src.ops.function import Function +from keras.src.ops.function import _build_map +from keras.src.ops.function import make_node_key +from keras.src.ops.node import KerasHistory +from keras.src.ops.node import Node +from keras.src.saving import serialization_lib +from keras.src.utils import tracking class Functional(Function, Model): diff --git a/keras/models/functional_test.py b/keras/src/models/functional_test.py similarity index 98% rename from keras/models/functional_test.py rename to keras/src/models/functional_test.py index 432cc3273772..d8acbcae4f3c 100644 --- a/keras/models/functional_test.py +++ b/keras/src/models/functional_test.py @@ -3,13 +3,13 @@ import numpy as np import pytest -from keras import backend -from keras import layers -from keras import testing -from keras.layers.core.input_layer import Input -from keras.layers.input_spec import InputSpec -from keras.models import Functional -from keras.models import Model +from keras.src import backend +from keras.src import layers +from keras.src import testing +from keras.src.layers.core.input_layer import Input +from keras.src.layers.input_spec import InputSpec +from keras.src.models import Functional +from keras.src.models import Model class FunctionalTest(testing.TestCase): diff --git a/keras/models/model.py b/keras/src/models/model.py similarity index 94% rename from keras/models/model.py rename to keras/src/models/model.py index 65d915cba7bd..5678f34c6828 100644 --- a/keras/models/model.py +++ b/keras/src/models/model.py @@ -3,24 +3,26 @@ import typing import warnings -from keras import backend -from keras import utils -from keras.api_export import keras_export -from keras.layers.layer import Layer -from keras.models.variable_mapping import map_trackable_variables -from keras.saving import saving_api -from keras.trainers import trainer as base_trainer -from keras.utils import summary_utils -from keras.utils 
import traceback_utils +from keras.src import backend +from keras.src import utils +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.models.variable_mapping import map_trackable_variables +from keras.src.saving import saving_api +from keras.src.trainers import trainer as base_trainer +from keras.src.utils import summary_utils +from keras.src.utils import traceback_utils if backend.backend() == "tensorflow": - from keras.backend.tensorflow.trainer import TensorFlowTrainer as Trainer + from keras.src.backend.tensorflow.trainer import ( + TensorFlowTrainer as Trainer, + ) elif backend.backend() == "jax": - from keras.backend.jax.trainer import JAXTrainer as Trainer + from keras.src.backend.jax.trainer import JAXTrainer as Trainer elif backend.backend() == "torch": - from keras.backend.torch.trainer import TorchTrainer as Trainer + from keras.src.backend.torch.trainer import TorchTrainer as Trainer elif backend.backend() == "numpy": - from keras.backend.numpy.trainer import NumpyTrainer as Trainer + from keras.src.backend.numpy.trainer import NumpyTrainer as Trainer else: raise RuntimeError( f"Backend '{backend.backend()}' must implement the Trainer class." @@ -136,14 +138,14 @@ def call(self, inputs, training=False): def __new__(cls, *args, **kwargs): # Signature detection for usage of `Model` as a `Functional` if functional_init_arguments(args, kwargs) and cls == Model: - from keras.models import functional + from keras.src.models import functional return functional.Functional(*args, **kwargs) return typing.cast(Model, super().__new__(cls)) def __init__(self, *args, **kwargs): Trainer.__init__(self) - from keras.models import functional + from keras.src.models import functional # Signature detection for usage of a `Model` subclass # as a `Functional` subclass @@ -357,7 +359,7 @@ def quantize(self, mode): mode: The mode of the quantization. Only 'int8' is supported at this time. """ - from keras.dtype_policies import QUANTIZATION_MODES + from keras.src.dtype_policies import QUANTIZATION_MODES if not self.built: raise ValueError( @@ -442,7 +444,7 @@ def to_json(self, **kwargs): Returns: A JSON string. """ - from keras.saving import serialization_lib + from keras.src.saving import serialization_lib model_config = serialization_lib.serialize_keras_object(self) return json.dumps(model_config, **kwargs) @@ -481,13 +483,13 @@ def export(self, filepath, format="tf_saved_model"): use the lower-level `keras.export.ExportArchive` class. The `export()` method relies on `ExportArchive` internally. """ - from keras.export import export_lib + from keras.src.export import export_lib export_lib.export_model(self, filepath) @classmethod def from_config(cls, config, custom_objects=None): - from keras.models.functional import Functional + from keras.src.models.functional import Functional functional_config_keys = [ "name", @@ -510,7 +512,7 @@ def from_config(cls, config, custom_objects=None): if is_functional_config and revivable_as_functional: # Revive Functional model # (but not Functional subclasses with a custom __init__) - from keras.models.functional import functional_from_config + from keras.src.models.functional import functional_from_config return functional_from_config( cls, config, custom_objects=custom_objects @@ -567,7 +569,7 @@ def model_from_json(json_string, custom_objects=None): Returns: A Keras model instance (uncompiled). 
""" - from keras.saving import serialization_lib + from keras.src.saving import serialization_lib model_config = json.loads(json_string) return serialization_lib.deserialize_keras_object( @@ -585,7 +587,7 @@ def functional_init_arguments(args, kwargs): def inject_functional_model_class(cls): """Inject `Functional` into the hierarchy of this class if needed.""" - from keras.models import functional + from keras.src.models import functional if cls == Model: return functional.Functional diff --git a/keras/models/model_test.py b/keras/src/models/model_test.py similarity index 98% rename from keras/models/model_test.py rename to keras/src/models/model_test.py index 23f647b7d067..871fc4bff19e 100644 --- a/keras/models/model_test.py +++ b/keras/src/models/model_test.py @@ -2,13 +2,13 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import testing -from keras.layers.core.input_layer import Input -from keras.models.functional import Functional -from keras.models.model import Model -from keras.models.model import model_from_json +from keras.src import backend +from keras.src import layers +from keras.src import testing +from keras.src.layers.core.input_layer import Input +from keras.src.models.functional import Functional +from keras.src.models.model import Model +from keras.src.models.model import model_from_json def _get_model(): diff --git a/keras/models/sequential.py b/keras/src/models/sequential.py similarity index 96% rename from keras/models/sequential.py rename to keras/src/models/sequential.py index e094a5df5eee..ecdaa4058e8d 100644 --- a/keras/models/sequential.py +++ b/keras/src/models/sequential.py @@ -2,16 +2,16 @@ import inspect import typing -from keras import tree -from keras.api_export import keras_export -from keras.backend.common import global_state -from keras.layers.core.input_layer import InputLayer -from keras.layers.layer import Layer -from keras.legacy.saving import saving_utils -from keras.legacy.saving import serialization as legacy_serialization -from keras.models.functional import Functional -from keras.models.model import Model -from keras.saving import serialization_lib +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state +from keras.src.layers.core.input_layer import InputLayer +from keras.src.layers.layer import Layer +from keras.src.legacy.saving import saving_utils +from keras.src.legacy.saving import serialization as legacy_serialization +from keras.src.models.functional import Functional +from keras.src.models.model import Model +from keras.src.saving import serialization_lib @keras_export(["keras.Sequential", "keras.models.Sequential"]) diff --git a/keras/models/sequential_test.py b/keras/src/models/sequential_test.py similarity index 97% rename from keras/models/sequential_test.py rename to keras/src/models/sequential_test.py index 0df2d64cd925..12c0703ab45a 100644 --- a/keras/models/sequential_test.py +++ b/keras/src/models/sequential_test.py @@ -1,12 +1,12 @@ import numpy as np import pytest -from keras import backend -from keras import layers -from keras import testing -from keras.layers.core.input_layer import Input -from keras.models.functional import Functional -from keras.models.sequential import Sequential +from keras.src import backend +from keras.src import layers +from keras.src import testing +from keras.src.layers.core.input_layer import Input +from keras.src.models.functional import Functional +from 
keras.src.models.sequential import Sequential @pytest.mark.requires_trainable_backend diff --git a/keras/models/variable_mapping.py b/keras/src/models/variable_mapping.py similarity index 91% rename from keras/models/variable_mapping.py rename to keras/src/models/variable_mapping.py index ec04015a74b2..ed9deb7340ec 100644 --- a/keras/models/variable_mapping.py +++ b/keras/src/models/variable_mapping.py @@ -1,7 +1,7 @@ -from keras.layers.layer import Layer -from keras.metrics.metric import Metric -from keras.optimizers.optimizer import Optimizer -from keras.saving import saving_lib +from keras.src.layers.layer import Layer +from keras.src.metrics.metric import Metric +from keras.src.optimizers.optimizer import Optimizer +from keras.src.saving import saving_lib def map_trackable_variables(trackable, store, visited_trackables): diff --git a/keras/models/variable_mapping_test.py b/keras/src/models/variable_mapping_test.py similarity index 94% rename from keras/models/variable_mapping_test.py rename to keras/src/models/variable_mapping_test.py index 6eadce71bade..652e578289ce 100644 --- a/keras/models/variable_mapping_test.py +++ b/keras/src/models/variable_mapping_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras import testing -from keras.saving import saving_lib_test +from keras.src import testing +from keras.src.saving import saving_lib_test class VariableMappingTest(testing.TestCase): diff --git a/keras/src/ops/__init__.py b/keras/src/ops/__init__.py new file mode 100644 index 000000000000..754923c9fea5 --- /dev/null +++ b/keras/src/ops/__init__.py @@ -0,0 +1,16 @@ +# from keras.src.ops.numpy import Matmul, matmul +# from keras.src.ops.numpy import Add, add +# from keras.src.ops.numpy import Multiply, multiply + +from keras.src.backend import cast +from keras.src.backend import cond +from keras.src.backend import is_tensor +from keras.src.backend import name_scope +from keras.src.backend import random +from keras.src.ops import image +from keras.src.ops import operation_utils +from keras.src.ops.core import * # noqa: F403 +from keras.src.ops.linalg import * # noqa: F403 +from keras.src.ops.math import * # noqa: F403 +from keras.src.ops.nn import * # noqa: F403 +from keras.src.ops.numpy import * # noqa: F403 diff --git a/keras/ops/core.py b/keras/src/ops/core.py similarity index 98% rename from keras/ops/core.py rename to keras/src/ops/core.py index c542f79f8847..5f581fa8678f 100644 --- a/keras/ops/core.py +++ b/keras/src/ops/core.py @@ -16,13 +16,13 @@ import numpy as np -from keras import backend -from keras import tree -from keras.api_export import keras_export -from keras.backend import KerasTensor -from keras.backend import any_symbolic_tensors -from keras.ops.operation import Operation -from keras.utils import traceback_utils +from keras.src import backend +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import any_symbolic_tensors +from keras.src.ops.operation import Operation +from keras.src.utils import traceback_utils class Scatter(Operation): diff --git a/keras/ops/core_test.py b/keras/src/ops/core_test.py similarity index 98% rename from keras/ops/core_test.py rename to keras/src/ops/core_test.py index fe1284d1252b..31d553853b63 100644 --- a/keras/ops/core_test.py +++ b/keras/src/ops/core_test.py @@ -5,17 +5,17 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import losses -from keras import models -from keras 
import ops -from keras import optimizers -from keras import testing -from keras import tree -from keras.backend.common import dtypes -from keras.backend.common.keras_tensor import KerasTensor -from keras.ops import core +from keras.src import backend +from keras.src import layers +from keras.src import losses +from keras.src import models +from keras.src import ops +from keras.src import optimizers +from keras.src import testing +from keras.src import tree +from keras.src.backend.common import dtypes +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.ops import core class CoreOpsStaticShapeTest(testing.TestCase): diff --git a/keras/ops/function.py b/keras/src/ops/function.py similarity index 98% rename from keras/ops/function.py rename to keras/src/ops/function.py index 7ae8244c77c2..8b6930ac12ab 100644 --- a/keras/ops/function.py +++ b/keras/src/ops/function.py @@ -1,10 +1,10 @@ import collections -from keras import tree -from keras.api_export import keras_export -from keras.backend import KerasTensor -from keras.backend.config import backend -from keras.ops.operation import Operation +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend.config import backend +from keras.src.ops.operation import Operation @keras_export("keras.Function") diff --git a/keras/ops/function_test.py b/keras/src/ops/function_test.py similarity index 94% rename from keras/ops/function_test.py rename to keras/src/ops/function_test.py index 34de13c814e7..d1ebc838d314 100644 --- a/keras/ops/function_test.py +++ b/keras/src/ops/function_test.py @@ -2,13 +2,13 @@ import numpy as np -from keras import testing -from keras.backend.common import keras_tensor -from keras.layers import Dense -from keras.layers import Input -from keras.models import Model -from keras.ops import function -from keras.ops import numpy as knp +from keras.src import testing +from keras.src.backend.common import keras_tensor +from keras.src.layers import Dense +from keras.src.layers import Input +from keras.src.models import Model +from keras.src.ops import function +from keras.src.ops import numpy as knp class FunctionTest(testing.TestCase): diff --git a/keras/ops/image.py b/keras/src/ops/image.py similarity index 99% rename from keras/ops/image.py rename to keras/src/ops/image.py index 960290500729..398defe615a4 100644 --- a/keras/ops/image.py +++ b/keras/src/ops/image.py @@ -1,10 +1,10 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.backend import KerasTensor -from keras.backend import any_symbolic_tensors -from keras.ops.operation import Operation -from keras.ops.operation_utils import compute_conv_output_shape +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import any_symbolic_tensors +from keras.src.ops.operation import Operation +from keras.src.ops.operation_utils import compute_conv_output_shape class RGBToGrayscale(Operation): @@ -78,7 +78,7 @@ def rgb_to_grayscale( Examples: >>> import numpy as np - >>> from keras import ops + >>> from keras.src import ops >>> x = np.random.random((2, 4, 4, 3)) >>> y = ops.image.rgb_to_grayscale(x) >>> y.shape diff --git a/keras/ops/image_test.py b/keras/src/ops/image_test.py similarity index 99% rename from keras/ops/image_test.py rename to keras/src/ops/image_test.py index 2f8b24a3e1ec..1f3fb7938063 100644 
--- a/keras/ops/image_test.py +++ b/keras/src/ops/image_test.py @@ -6,10 +6,10 @@ import tensorflow as tf from absl.testing import parameterized -from keras import backend -from keras import testing -from keras.backend.common.keras_tensor import KerasTensor -from keras.ops import image as kimage +from keras.src import backend +from keras.src import testing +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.ops import image as kimage class ImageOpsDynamicShapeTest(testing.TestCase): diff --git a/keras/ops/linalg.py b/keras/src/ops/linalg.py similarity index 98% rename from keras/ops/linalg.py rename to keras/src/ops/linalg.py index 0636b185cc85..cd2ac98c7090 100644 --- a/keras/ops/linalg.py +++ b/keras/src/ops/linalg.py @@ -1,9 +1,9 @@ -from keras import backend -from keras.api_export import keras_export -from keras.backend import KerasTensor -from keras.backend import any_symbolic_tensors -from keras.ops.operation import Operation -from keras.ops.operation_utils import reduce_shape +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import any_symbolic_tensors +from keras.src.ops.operation import Operation +from keras.src.ops.operation_utils import reduce_shape class Cholesky(Operation): diff --git a/keras/ops/linalg_test.py b/keras/src/ops/linalg_test.py similarity index 98% rename from keras/ops/linalg_test.py rename to keras/src/ops/linalg_test.py index 30f7368cef8f..36ab32b202d9 100644 --- a/keras/ops/linalg_test.py +++ b/keras/src/ops/linalg_test.py @@ -1,12 +1,12 @@ import numpy as np from absl.testing import parameterized -from keras import backend -from keras import ops -from keras import testing -from keras.backend.common.keras_tensor import KerasTensor -from keras.ops import linalg -from keras.testing.test_utils import named_product +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.ops import linalg +from keras.src.testing.test_utils import named_product class LinalgOpsDynamicShapeTest(testing.TestCase): diff --git a/keras/ops/math.py b/keras/src/ops/math.py similarity index 99% rename from keras/ops/math.py rename to keras/src/ops/math.py index 5d81eed75770..15ecad5acb0e 100644 --- a/keras/ops/math.py +++ b/keras/src/ops/math.py @@ -1,11 +1,11 @@ """Commonly used math operations not included in NumPy.""" -from keras import backend -from keras.api_export import keras_export -from keras.backend import KerasTensor -from keras.backend import any_symbolic_tensors -from keras.ops.operation import Operation -from keras.ops.operation_utils import reduce_shape +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import any_symbolic_tensors +from keras.src.ops.operation import Operation +from keras.src.ops.operation_utils import reduce_shape class SegmentSum(Operation): diff --git a/keras/ops/math_test.py b/keras/src/ops/math_test.py similarity index 99% rename from keras/ops/math_test.py rename to keras/src/ops/math_test.py index 5fdebb5f63d7..60db9fc70f6c 100644 --- a/keras/ops/math_test.py +++ b/keras/src/ops/math_test.py @@ -5,11 +5,11 @@ import scipy.signal from absl.testing import parameterized -from keras import backend -from keras import testing -from keras.backend.common import dtypes -from keras.backend.common.keras_tensor import KerasTensor -from 
keras.ops import math as kmath +from keras.src import backend +from keras.src import testing +from keras.src.backend.common import dtypes +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.ops import math as kmath def _stft( diff --git a/keras/ops/nn.py b/keras/src/ops/nn.py similarity index 99% rename from keras/ops/nn.py rename to keras/src/ops/nn.py index d1a724d94070..4ec642b018a6 100644 --- a/keras/ops/nn.py +++ b/keras/src/ops/nn.py @@ -2,17 +2,17 @@ import warnings -from keras import backend -from keras.api_export import keras_export -from keras.backend import KerasTensor -from keras.backend import any_symbolic_tensors -from keras.backend import standardize_data_format -from keras.backend.common.backend_utils import ( +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import any_symbolic_tensors +from keras.src.backend import standardize_data_format +from keras.src.backend.common.backend_utils import ( compute_conv_transpose_output_shape, ) -from keras.ops import operation_utils -from keras.ops.operation import Operation -from keras.ops.operation_utils import reduce_shape +from keras.src.ops import operation_utils +from keras.src.ops.operation import Operation +from keras.src.ops.operation_utils import reduce_shape class Relu(Operation): diff --git a/keras/ops/nn_test.py b/keras/src/ops/nn_test.py similarity index 98% rename from keras/ops/nn_test.py rename to keras/src/ops/nn_test.py index 43c95af174fd..0e13aac7163a 100644 --- a/keras/ops/nn_test.py +++ b/keras/src/ops/nn_test.py @@ -3,28 +3,34 @@ from absl.testing import parameterized import keras -from keras import backend -from keras import layers -from keras import losses -from keras import models -from keras import ops -from keras import testing -from keras.backend.common import dtypes -from keras.backend.common import standardize_dtype -from keras.backend.common.keras_tensor import KerasTensor -from keras.layers.convolutional.conv_test import np_conv1d -from keras.layers.convolutional.conv_test import np_conv2d -from keras.layers.convolutional.conv_test import np_conv3d -from keras.layers.convolutional.conv_transpose_test import np_conv1d_transpose -from keras.layers.convolutional.conv_transpose_test import np_conv2d_transpose -from keras.layers.convolutional.depthwise_conv_test import np_depthwise_conv2d -from keras.layers.pooling.average_pooling_test import np_avgpool1d -from keras.layers.pooling.average_pooling_test import np_avgpool2d -from keras.layers.pooling.max_pooling_test import np_maxpool1d -from keras.layers.pooling.max_pooling_test import np_maxpool2d -from keras.ops import nn as knn -from keras.ops import numpy as knp -from keras.testing.test_utils import named_product +from keras.src import backend +from keras.src import layers +from keras.src import losses +from keras.src import models +from keras.src import ops +from keras.src import testing +from keras.src.backend.common import dtypes +from keras.src.backend.common import standardize_dtype +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.layers.convolutional.conv_test import np_conv1d +from keras.src.layers.convolutional.conv_test import np_conv2d +from keras.src.layers.convolutional.conv_test import np_conv3d +from keras.src.layers.convolutional.conv_transpose_test import ( + np_conv1d_transpose, +) +from keras.src.layers.convolutional.conv_transpose_test import ( + np_conv2d_transpose, +) +from 
keras.src.layers.convolutional.depthwise_conv_test import ( + np_depthwise_conv2d, +) +from keras.src.layers.pooling.average_pooling_test import np_avgpool1d +from keras.src.layers.pooling.average_pooling_test import np_avgpool2d +from keras.src.layers.pooling.max_pooling_test import np_maxpool1d +from keras.src.layers.pooling.max_pooling_test import np_maxpool2d +from keras.src.ops import nn as knn +from keras.src.ops import numpy as knp +from keras.src.testing.test_utils import named_product class NNOpsDynamicShapeTest(testing.TestCase, parameterized.TestCase): @@ -1809,7 +1815,7 @@ def test_moments_sync(self): reason="synchronized=True only implemented for TF backend", ) def test_moments_sync_with_distribution_strategy(self, dtype): - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf # Config 2 CPUs for testing. logical_cpus = tf.config.list_logical_devices("CPU") diff --git a/keras/ops/node.py b/keras/src/ops/node.py similarity index 97% rename from keras/ops/node.py rename to keras/src/ops/node.py index dc775920c0f9..7e05de88fcf5 100644 --- a/keras/ops/node.py +++ b/keras/src/ops/node.py @@ -1,8 +1,8 @@ import collections -from keras import tree -from keras.backend import KerasTensor -from keras.ops.symbolic_arguments import SymbolicArguments +from keras.src import tree +from keras.src.backend import KerasTensor +from keras.src.ops.symbolic_arguments import SymbolicArguments class Node: diff --git a/keras/ops/node_test.py b/keras/src/ops/node_test.py similarity index 93% rename from keras/ops/node_test.py rename to keras/src/ops/node_test.py index b00781f5f9c2..7ed8227b3c2f 100644 --- a/keras/ops/node_test.py +++ b/keras/src/ops/node_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras import Layer -from keras import testing -from keras.backend import KerasTensor -from keras.ops.node import Node +from keras.src import Layer +from keras.src import testing +from keras.src.backend import KerasTensor +from keras.src.ops.node import Node class DummyLayer(Layer): diff --git a/keras/ops/numpy.py b/keras/src/ops/numpy.py similarity index 99% rename from keras/ops/numpy.py rename to keras/src/ops/numpy.py index 94306eaa702d..266eb3047024 100644 --- a/keras/ops/numpy.py +++ b/keras/src/ops/numpy.py @@ -147,17 +147,17 @@ import numpy as np -from keras import backend -from keras.api_export import keras_export -from keras.backend import KerasTensor -from keras.backend import any_symbolic_tensors -from keras.backend.common import dtypes -from keras.backend.common.backend_utils import canonicalize_axis -from keras.backend.common.backend_utils import to_tuple_or_list -from keras.ops import operation_utils -from keras.ops.operation import Operation -from keras.ops.operation_utils import broadcast_shapes -from keras.ops.operation_utils import reduce_shape +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend import any_symbolic_tensors +from keras.src.backend.common import dtypes +from keras.src.backend.common.backend_utils import canonicalize_axis +from keras.src.backend.common.backend_utils import to_tuple_or_list +from keras.src.ops import operation_utils +from keras.src.ops.operation import Operation +from keras.src.ops.operation_utils import broadcast_shapes +from keras.src.ops.operation_utils import reduce_shape def shape_equal(shape1, shape2, axis=None, allow_none=True): @@ -1884,7 +1884,7 @@ def diag(x, k=0): The extracted diagonal or 
constructed diagonal tensor. Examples: - >>> from keras import ops + >>> from keras.src import ops >>> x = ops.arange(9).reshape((3, 3)) >>> x array([[0, 1, 2], @@ -1978,7 +1978,7 @@ def diagonal(x, offset=0, axis1=0, axis2=1): Tensor of diagonals. Examples: - >>> from keras import ops + >>> from keras.src import ops >>> x = ops.arange(4).reshape((2, 2)) >>> x array([[0, 1], @@ -2047,7 +2047,7 @@ def diff(a, n=1, axis=-1): Tensor of diagonals. Examples: - >>> from keras import ops + >>> from keras.src import ops >>> x = ops.convert_to_tensor([1, 2, 4, 7, 0]) >>> ops.diff(x) array([ 1, 2, 3, -7]) @@ -2366,7 +2366,7 @@ def einsum(subscripts, *operands): The calculation based on the Einstein summation convention. Example: - >>> from keras import ops + >>> from keras.src import ops >>> a = ops.arange(25).reshape(5, 5) >>> b = ops.arange(5) >>> c = ops.arange(6).reshape(2, 3) @@ -3748,7 +3748,7 @@ def meshgrid(*x, indexing="xy"): Sequence of N tensors. Example: - >>> from keras import ops + >>> from keras.src import ops >>> x = ops.array([1, 2, 3]) >>> y = ops.array([4, 5, 6]) diff --git a/keras/ops/numpy_test.py b/keras/src/ops/numpy_test.py similarity index 99% rename from keras/ops/numpy_test.py rename to keras/src/ops/numpy_test.py index 5f8ed2fc0856..6cea9ef86066 100644 --- a/keras/ops/numpy_test.py +++ b/keras/src/ops/numpy_test.py @@ -9,13 +9,13 @@ from absl.testing import parameterized import keras -from keras import backend -from keras import testing -from keras.backend.common import dtypes -from keras.backend.common import standardize_dtype -from keras.backend.common.keras_tensor import KerasTensor -from keras.ops import numpy as knp -from keras.testing.test_utils import named_product +from keras.src import backend +from keras.src import testing +from keras.src.backend.common import dtypes +from keras.src.backend.common import standardize_dtype +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.ops import numpy as knp +from keras.src.testing.test_utils import named_product class NumpyTwoInputOpsDynamicShapeTest(testing.TestCase): diff --git a/keras/ops/operation.py b/keras/src/ops/operation.py similarity index 95% rename from keras/ops/operation.py rename to keras/src/ops/operation.py index 547e8f3f387d..10b79d591406 100644 --- a/keras/ops/operation.py +++ b/keras/src/ops/operation.py @@ -1,15 +1,15 @@ import inspect import textwrap -from keras import backend -from keras import dtype_policies -from keras import tree -from keras.api_export import keras_export -from keras.backend.common.keras_tensor import any_symbolic_tensors -from keras.ops.node import Node -from keras.utils import python_utils -from keras.utils import traceback_utils -from keras.utils.naming import auto_name +from keras.src import backend +from keras.src import dtype_policies +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend.common.keras_tensor import any_symbolic_tensors +from keras.src.ops.node import Node +from keras.src.utils import python_utils +from keras.src.utils import traceback_utils +from keras.src.utils.naming import auto_name @keras_export("keras.Operation") @@ -120,7 +120,7 @@ def __new__(cls, *args, **kwargs): try: instance._lock = False if auto_config: - from keras.saving import serialization_lib + from keras.src.saving import serialization_lib instance._auto_config = serialization_lib.SerializableDict( **kwargs diff --git a/keras/ops/operation_test.py b/keras/src/ops/operation_test.py similarity index 96% rename from 
keras/ops/operation_test.py rename to keras/src/ops/operation_test.py index e01ef075e3cb..63183759fecb 100644 --- a/keras/ops/operation_test.py +++ b/keras/src/ops/operation_test.py @@ -1,10 +1,10 @@ import numpy as np -from keras import backend -from keras import testing -from keras.backend.common import keras_tensor -from keras.ops import numpy as knp -from keras.ops import operation +from keras.src import backend +from keras.src import testing +from keras.src.backend.common import keras_tensor +from keras.src.ops import numpy as knp +from keras.src.ops import operation class OpWithMultipleInputs(operation.Operation): diff --git a/keras/ops/operation_utils.py b/keras/src/ops/operation_utils.py similarity index 98% rename from keras/ops/operation_utils.py rename to keras/src/ops/operation_utils.py index e8d335bb286a..ac0961da4854 100644 --- a/keras/ops/operation_utils.py +++ b/keras/src/ops/operation_utils.py @@ -2,10 +2,10 @@ import numpy as np -from keras import tree -from keras.api_export import keras_export -from keras.backend.common.backend_utils import canonicalize_axis -from keras.backend.common.backend_utils import to_tuple_or_list +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend.common.backend_utils import canonicalize_axis +from keras.src.backend.common.backend_utils import to_tuple_or_list def broadcast_shapes(shape1, shape2): diff --git a/keras/ops/operation_utils_test.py b/keras/src/ops/operation_utils_test.py similarity index 97% rename from keras/ops/operation_utils_test.py rename to keras/src/ops/operation_utils_test.py index 20c140c02b5a..b5acf9d29260 100644 --- a/keras/ops/operation_utils_test.py +++ b/keras/src/ops/operation_utils_test.py @@ -1,8 +1,8 @@ -from keras import backend -from keras import ops -from keras import testing -from keras.layers.core import input_layer -from keras.ops import operation_utils +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.layers.core import input_layer +from keras.src.ops import operation_utils class OperationUtilsTest(testing.TestCase): diff --git a/keras/ops/symbolic_arguments.py b/keras/src/ops/symbolic_arguments.py similarity index 95% rename from keras/ops/symbolic_arguments.py rename to keras/src/ops/symbolic_arguments.py index 33ec86998672..b5ff9b9687f8 100644 --- a/keras/ops/symbolic_arguments.py +++ b/keras/src/ops/symbolic_arguments.py @@ -1,5 +1,5 @@ -from keras import tree -from keras.backend import KerasTensor +from keras.src import tree +from keras.src.backend import KerasTensor class SymbolicArguments: diff --git a/keras/ops/symbolic_arguments_test.py b/keras/src/ops/symbolic_arguments_test.py similarity index 95% rename from keras/ops/symbolic_arguments_test.py rename to keras/src/ops/symbolic_arguments_test.py index 3fda859e6c58..b212032154ec 100644 --- a/keras/ops/symbolic_arguments_test.py +++ b/keras/src/ops/symbolic_arguments_test.py @@ -1,7 +1,7 @@ -from keras import testing -from keras import tree -from keras.backend import KerasTensor -from keras.ops.symbolic_arguments import SymbolicArguments +from keras.src import testing +from keras.src import tree +from keras.src.backend import KerasTensor +from keras.src.ops.symbolic_arguments import SymbolicArguments class SymbolicArgumentsTest(testing.TestCase): diff --git a/keras/optimizers/__init__.py b/keras/src/optimizers/__init__.py similarity index 81% rename from keras/optimizers/__init__.py rename to keras/src/optimizers/__init__.py index 
6d94932a530b..d00c96d98954 100644 --- a/keras/optimizers/__init__.py +++ b/keras/src/optimizers/__init__.py @@ -1,18 +1,18 @@ -from keras.api_export import keras_export -from keras.optimizers.adadelta import Adadelta -from keras.optimizers.adafactor import Adafactor -from keras.optimizers.adagrad import Adagrad -from keras.optimizers.adam import Adam -from keras.optimizers.adamax import Adamax -from keras.optimizers.adamw import AdamW -from keras.optimizers.ftrl import Ftrl -from keras.optimizers.lion import Lion -from keras.optimizers.loss_scale_optimizer import LossScaleOptimizer -from keras.optimizers.nadam import Nadam -from keras.optimizers.optimizer import Optimizer -from keras.optimizers.rmsprop import RMSprop -from keras.optimizers.sgd import SGD -from keras.saving import serialization_lib +from keras.src.api_export import keras_export +from keras.src.optimizers.adadelta import Adadelta +from keras.src.optimizers.adafactor import Adafactor +from keras.src.optimizers.adagrad import Adagrad +from keras.src.optimizers.adam import Adam +from keras.src.optimizers.adamax import Adamax +from keras.src.optimizers.adamw import AdamW +from keras.src.optimizers.ftrl import Ftrl +from keras.src.optimizers.lion import Lion +from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer +from keras.src.optimizers.nadam import Nadam +from keras.src.optimizers.optimizer import Optimizer +from keras.src.optimizers.rmsprop import RMSprop +from keras.src.optimizers.sgd import SGD +from keras.src.saving import serialization_lib ALL_OBJECTS = { Optimizer, diff --git a/keras/optimizers/adadelta.py b/keras/src/optimizers/adadelta.py similarity index 97% rename from keras/optimizers/adadelta.py rename to keras/src/optimizers/adadelta.py index fa56ee5b4b70..1f2f3835aec4 100644 --- a/keras/optimizers/adadelta.py +++ b/keras/src/optimizers/adadelta.py @@ -1,6 +1,6 @@ -from keras import ops -from keras.api_export import keras_export -from keras.optimizers import optimizer +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer @keras_export(["keras.optimizers.Adadelta"]) diff --git a/keras/optimizers/adadelta_test.py b/keras/src/optimizers/adadelta_test.py similarity index 95% rename from keras/optimizers/adadelta_test.py rename to keras/src/optimizers/adadelta_test.py index c7c9dd7d32bf..9da72612fc87 100644 --- a/keras/optimizers/adadelta_test.py +++ b/keras/src/optimizers/adadelta_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras import backend -from keras import ops -from keras import testing -from keras.optimizers.adadelta import Adadelta +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.optimizers.adadelta import Adadelta class AdadeltaTest(testing.TestCase): diff --git a/keras/optimizers/adafactor.py b/keras/src/optimizers/adafactor.py similarity index 98% rename from keras/optimizers/adafactor.py rename to keras/src/optimizers/adafactor.py index 7782f35e66ea..8635f1d9d8c0 100644 --- a/keras/optimizers/adafactor.py +++ b/keras/src/optimizers/adafactor.py @@ -1,7 +1,7 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.optimizers import optimizer +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer @keras_export(["keras.optimizers.Adafactor"]) diff --git a/keras/optimizers/adafactor_test.py 
b/keras/src/optimizers/adafactor_test.py similarity index 97% rename from keras/optimizers/adafactor_test.py rename to keras/src/optimizers/adafactor_test.py index b5621963f15b..b928400c34e4 100644 --- a/keras/optimizers/adafactor_test.py +++ b/keras/src/optimizers/adafactor_test.py @@ -3,9 +3,9 @@ import numpy as np -from keras import backend -from keras import testing -from keras.optimizers.adafactor import Adafactor +from keras.src import backend +from keras.src import testing +from keras.src.optimizers.adafactor import Adafactor class AdafactorTest(testing.TestCase): diff --git a/keras/optimizers/adagrad.py b/keras/src/optimizers/adagrad.py similarity index 95% rename from keras/optimizers/adagrad.py rename to keras/src/optimizers/adagrad.py index 9e28644f9951..836356ba13c2 100644 --- a/keras/optimizers/adagrad.py +++ b/keras/src/optimizers/adagrad.py @@ -1,7 +1,7 @@ -from keras import initializers -from keras import ops -from keras.api_export import keras_export -from keras.optimizers import optimizer +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer @keras_export(["keras.optimizers.Adagrad"]) diff --git a/keras/optimizers/adagrad_test.py b/keras/src/optimizers/adagrad_test.py similarity index 95% rename from keras/optimizers/adagrad_test.py rename to keras/src/optimizers/adagrad_test.py index a724540fad16..43d2bcbd7afa 100644 --- a/keras/optimizers/adagrad_test.py +++ b/keras/src/optimizers/adagrad_test.py @@ -3,10 +3,10 @@ import numpy as np -from keras import backend -from keras import ops -from keras import testing -from keras.optimizers.adagrad import Adagrad +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.optimizers.adagrad import Adagrad class AdagradTest(testing.TestCase): diff --git a/keras/optimizers/adam.py b/keras/src/optimizers/adam.py similarity index 97% rename from keras/optimizers/adam.py rename to keras/src/optimizers/adam.py index f1db87ff9b77..585819322404 100644 --- a/keras/optimizers/adam.py +++ b/keras/src/optimizers/adam.py @@ -1,6 +1,6 @@ -from keras import ops -from keras.api_export import keras_export -from keras.optimizers import optimizer +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer @keras_export(["keras.optimizers.Adam"]) diff --git a/keras/optimizers/adam_test.py b/keras/src/optimizers/adam_test.py similarity index 96% rename from keras/optimizers/adam_test.py rename to keras/src/optimizers/adam_test.py index 1dcc876a1ddc..6f8430d3c75d 100644 --- a/keras/optimizers/adam_test.py +++ b/keras/src/optimizers/adam_test.py @@ -2,10 +2,10 @@ import pytest import keras -from keras import backend -from keras import ops -from keras import testing -from keras.optimizers.adam import Adam +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.optimizers.adam import Adam class AdamTest(testing.TestCase): diff --git a/keras/optimizers/adamax.py b/keras/src/optimizers/adamax.py similarity index 97% rename from keras/optimizers/adamax.py rename to keras/src/optimizers/adamax.py index 870dfc0aeeab..338afcc5735c 100644 --- a/keras/optimizers/adamax.py +++ b/keras/src/optimizers/adamax.py @@ -1,6 +1,6 @@ -from keras import ops -from keras.api_export import keras_export -from keras.optimizers import optimizer +from keras.src import ops +from keras.src.api_export import keras_export +from 
keras.src.optimizers import optimizer @keras_export(["keras.optimizers.Adamax"]) diff --git a/keras/optimizers/adamax_test.py b/keras/src/optimizers/adamax_test.py similarity index 95% rename from keras/optimizers/adamax_test.py rename to keras/src/optimizers/adamax_test.py index f040d508b6a2..4084ade7450d 100644 --- a/keras/optimizers/adamax_test.py +++ b/keras/src/optimizers/adamax_test.py @@ -3,10 +3,10 @@ import numpy as np -from keras import backend -from keras import ops -from keras import testing -from keras.optimizers.adamax import Adamax +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.optimizers.adamax import Adamax class AdamaxTest(testing.TestCase): diff --git a/keras/optimizers/adamw.py b/keras/src/optimizers/adamw.py similarity index 96% rename from keras/optimizers/adamw.py rename to keras/src/optimizers/adamw.py index cda4e5bd4558..e52d3b7188a1 100644 --- a/keras/optimizers/adamw.py +++ b/keras/src/optimizers/adamw.py @@ -1,6 +1,6 @@ -from keras.api_export import keras_export -from keras.optimizers import adam -from keras.optimizers import optimizer +from keras.src.api_export import keras_export +from keras.src.optimizers import adam +from keras.src.optimizers import optimizer @keras_export(["keras.optimizers.AdamW"]) diff --git a/keras/optimizers/adamw_test.py b/keras/src/optimizers/adamw_test.py similarity index 95% rename from keras/optimizers/adamw_test.py rename to keras/src/optimizers/adamw_test.py index 37ee3865a27c..efe71ef87e38 100644 --- a/keras/optimizers/adamw_test.py +++ b/keras/src/optimizers/adamw_test.py @@ -3,10 +3,10 @@ import numpy as np -from keras import backend -from keras import ops -from keras import testing -from keras.optimizers.adamw import AdamW +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.optimizers.adamw import AdamW class AdamWTest(testing.TestCase): diff --git a/keras/optimizers/base_optimizer.py b/keras/src/optimizers/base_optimizer.py similarity index 99% rename from keras/optimizers/base_optimizer.py rename to keras/src/optimizers/base_optimizer.py index 8da8ffaf52a3..b368a203bebf 100644 --- a/keras/optimizers/base_optimizer.py +++ b/keras/src/optimizers/base_optimizer.py @@ -3,13 +3,13 @@ import numpy as np -from keras import backend -from keras import initializers -from keras import ops -from keras.optimizers.schedules import learning_rate_schedule -from keras.saving import serialization_lib -from keras.utils import tracking -from keras.utils.naming import auto_name +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.optimizers.schedules import learning_rate_schedule +from keras.src.saving import serialization_lib +from keras.src.utils import tracking +from keras.src.utils.naming import auto_name class BaseOptimizer: diff --git a/keras/optimizers/ftrl.py b/keras/src/optimizers/ftrl.py similarity index 98% rename from keras/optimizers/ftrl.py rename to keras/src/optimizers/ftrl.py index 05930db69fac..1bb56518dc9d 100644 --- a/keras/optimizers/ftrl.py +++ b/keras/src/optimizers/ftrl.py @@ -1,7 +1,7 @@ -from keras import initializers -from keras import ops -from keras.api_export import keras_export -from keras.optimizers import optimizer +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer @keras_export(["keras.optimizers.Ftrl"]) diff --git 
a/keras/optimizers/ftrl_test.py b/keras/src/optimizers/ftrl_test.py similarity index 96% rename from keras/optimizers/ftrl_test.py rename to keras/src/optimizers/ftrl_test.py index c1688b8c2bef..4e27f25d0ff3 100644 --- a/keras/optimizers/ftrl_test.py +++ b/keras/src/optimizers/ftrl_test.py @@ -3,9 +3,9 @@ import numpy as np -from keras import backend -from keras import testing -from keras.optimizers.ftrl import Ftrl +from keras.src import backend +from keras.src import testing +from keras.src.optimizers.ftrl import Ftrl class FtrlTest(testing.TestCase): diff --git a/keras/optimizers/lion.py b/keras/src/optimizers/lion.py similarity index 97% rename from keras/optimizers/lion.py rename to keras/src/optimizers/lion.py index 49fb7f309d22..d63e736266b9 100644 --- a/keras/optimizers/lion.py +++ b/keras/src/optimizers/lion.py @@ -1,6 +1,6 @@ -from keras import ops -from keras.api_export import keras_export -from keras.optimizers import optimizer +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer @keras_export(["keras.optimizers.Lion"]) diff --git a/keras/optimizers/lion_test.py b/keras/src/optimizers/lion_test.py similarity index 95% rename from keras/optimizers/lion_test.py rename to keras/src/optimizers/lion_test.py index b9cf8f1263cd..b62773a426f2 100644 --- a/keras/optimizers/lion_test.py +++ b/keras/src/optimizers/lion_test.py @@ -2,10 +2,10 @@ import pytest import keras -from keras import backend -from keras import ops -from keras import testing -from keras.optimizers.lion import Lion +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.optimizers.lion import Lion class LionTest(testing.TestCase): diff --git a/keras/optimizers/loss_scale_optimizer.py b/keras/src/optimizers/loss_scale_optimizer.py similarity index 97% rename from keras/optimizers/loss_scale_optimizer.py rename to keras/src/optimizers/loss_scale_optimizer.py index e032ca6c0412..42306685eee1 100644 --- a/keras/optimizers/loss_scale_optimizer.py +++ b/keras/src/optimizers/loss_scale_optimizer.py @@ -1,10 +1,10 @@ -from keras import backend -from keras import initializers -from keras import ops -from keras.api_export import keras_export -from keras.optimizers import optimizer -from keras.saving import serialization_lib -from keras.utils import tracking +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer +from keras.src.saving import serialization_lib +from keras.src.utils import tracking @keras_export( @@ -207,7 +207,7 @@ def _common_apply(self, grads, trainable_variables=None): def _tf_apply(self, grads, trainable_variables=None): """Tensorflow specific logic for apply, which handles distribution.""" - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf if tf.distribute.in_cross_replica_context(): raise ValueError("apply() must be called in a replica context.") diff --git a/keras/optimizers/loss_scale_optimizer_test.py b/keras/src/optimizers/loss_scale_optimizer_test.py similarity index 95% rename from keras/optimizers/loss_scale_optimizer_test.py rename to keras/src/optimizers/loss_scale_optimizer_test.py index 28a81ea8c0f9..ace067f1ab1d 100644 --- a/keras/optimizers/loss_scale_optimizer_test.py +++ b/keras/src/optimizers/loss_scale_optimizer_test.py @@ -1,11 +1,11 @@ import numpy as np from absl.testing import 
parameterized -from keras import backend -from keras import ops -from keras import testing -from keras.optimizers.loss_scale_optimizer import LossScaleOptimizer -from keras.optimizers.sgd import SGD +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer +from keras.src.optimizers.sgd import SGD class LossScaleOptimizerTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/optimizers/nadam.py b/keras/src/optimizers/nadam.py similarity index 97% rename from keras/optimizers/nadam.py rename to keras/src/optimizers/nadam.py index d9b74d9d1942..77454e9f94f9 100644 --- a/keras/optimizers/nadam.py +++ b/keras/src/optimizers/nadam.py @@ -1,7 +1,7 @@ -from keras import backend -from keras import ops -from keras.api_export import keras_export -from keras.optimizers import optimizer +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer @keras_export(["keras.optimizers.Nadam"]) diff --git a/keras/optimizers/nadam_test.py b/keras/src/optimizers/nadam_test.py similarity index 96% rename from keras/optimizers/nadam_test.py rename to keras/src/optimizers/nadam_test.py index c50d070b8ca3..8a6c85034472 100644 --- a/keras/optimizers/nadam_test.py +++ b/keras/src/optimizers/nadam_test.py @@ -3,10 +3,10 @@ import numpy as np -from keras import backend -from keras import ops -from keras import testing -from keras.optimizers.nadam import Nadam +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.optimizers.nadam import Nadam class NadamTest(testing.TestCase): diff --git a/keras/optimizers/optimizer.py b/keras/src/optimizers/optimizer.py similarity index 61% rename from keras/optimizers/optimizer.py rename to keras/src/optimizers/optimizer.py index 7fafbdf1bbeb..cd9c29cfba29 100644 --- a/keras/optimizers/optimizer.py +++ b/keras/src/optimizers/optimizer.py @@ -1,17 +1,17 @@ -from keras import backend -from keras.api_export import keras_export -from keras.optimizers import base_optimizer +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.optimizers import base_optimizer if backend.backend() == "tensorflow": - from keras.backend.tensorflow.optimizer import ( + from keras.src.backend.tensorflow.optimizer import ( TFOptimizer as BackendOptimizer, ) elif backend.backend() == "torch": - from keras.backend.torch.optimizers import ( + from keras.src.backend.torch.optimizers import ( TorchOptimizer as BackendOptimizer, ) elif backend.backend() == "jax": - from keras.backend.jax.optimizer import JaxOptimizer as BackendOptimizer + from keras.src.backend.jax.optimizer import JaxOptimizer as BackendOptimizer else: class BackendOptimizer(base_optimizer.BaseOptimizer): diff --git a/keras/optimizers/optimizer_sparse_test.py b/keras/src/optimizers/optimizer_sparse_test.py similarity index 98% rename from keras/optimizers/optimizer_sparse_test.py rename to keras/src/optimizers/optimizer_sparse_test.py index a398c53009f8..c06dbd52d6bb 100644 --- a/keras/optimizers/optimizer_sparse_test.py +++ b/keras/src/optimizers/optimizer_sparse_test.py @@ -3,10 +3,10 @@ import pytest from absl.testing import parameterized -from keras import backend -from keras import ops -from keras import optimizers -from keras import testing +from keras.src import backend +from keras.src import ops +from keras.src import optimizers +from keras.src import 
testing class ScatterUpdateOptimizer(optimizers.Optimizer): diff --git a/keras/optimizers/optimizer_test.py b/keras/src/optimizers/optimizer_test.py similarity index 98% rename from keras/optimizers/optimizer_test.py rename to keras/src/optimizers/optimizer_test.py index 2f8ef0c85151..23d47477907c 100644 --- a/keras/optimizers/optimizer_test.py +++ b/keras/src/optimizers/optimizer_test.py @@ -3,12 +3,12 @@ import numpy as np import pytest -from keras import backend -from keras import constraints -from keras import layers -from keras import models -from keras import optimizers -from keras import testing +from keras.src import backend +from keras.src import constraints +from keras.src import layers +from keras.src import models +from keras.src import optimizers +from keras.src import testing class OptimizerTest(testing.TestCase): diff --git a/keras/optimizers/rmsprop.py b/keras/src/optimizers/rmsprop.py similarity index 98% rename from keras/optimizers/rmsprop.py rename to keras/src/optimizers/rmsprop.py index bf7b13eea244..ad7c4a079c4d 100644 --- a/keras/optimizers/rmsprop.py +++ b/keras/src/optimizers/rmsprop.py @@ -1,6 +1,6 @@ -from keras import ops -from keras.api_export import keras_export -from keras.optimizers import optimizer +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer @keras_export(["keras.optimizers.RMSprop"]) diff --git a/keras/optimizers/rmsprop_test.py b/keras/src/optimizers/rmsprop_test.py similarity index 95% rename from keras/optimizers/rmsprop_test.py rename to keras/src/optimizers/rmsprop_test.py index 862e0b8687dc..f22dc82801bc 100644 --- a/keras/optimizers/rmsprop_test.py +++ b/keras/src/optimizers/rmsprop_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras import backend -from keras import ops -from keras import testing -from keras.optimizers.rmsprop import RMSprop +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.optimizers.rmsprop import RMSprop class RMSpropTest(testing.TestCase): diff --git a/keras/src/optimizers/schedules/__init__.py b/keras/src/optimizers/schedules/__init__.py new file mode 100644 index 000000000000..a6812ebb0827 --- /dev/null +++ b/keras/src/optimizers/schedules/__init__.py @@ -0,0 +1,16 @@ +from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecay +from keras.src.optimizers.schedules.learning_rate_schedule import ( + CosineDecayRestarts, +) +from keras.src.optimizers.schedules.learning_rate_schedule import ( + ExponentialDecay, +) +from keras.src.optimizers.schedules.learning_rate_schedule import ( + InverseTimeDecay, +) +from keras.src.optimizers.schedules.learning_rate_schedule import ( + PiecewiseConstantDecay, +) +from keras.src.optimizers.schedules.learning_rate_schedule import ( + PolynomialDecay, +) diff --git a/keras/optimizers/schedules/learning_rate_schedule.py b/keras/src/optimizers/schedules/learning_rate_schedule.py similarity index 99% rename from keras/optimizers/schedules/learning_rate_schedule.py rename to keras/src/optimizers/schedules/learning_rate_schedule.py index 3b95bf59ea0c..74c13aafbe53 100644 --- a/keras/optimizers/schedules/learning_rate_schedule.py +++ b/keras/src/optimizers/schedules/learning_rate_schedule.py @@ -2,9 +2,9 @@ import math -from keras import ops -from keras.api_export import keras_export -from keras.saving import serialization_lib +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.saving import serialization_lib 
@keras_export("keras.optimizers.schedules.LearningRateSchedule") diff --git a/keras/optimizers/schedules/learning_rate_schedule_test.py b/keras/src/optimizers/schedules/learning_rate_schedule_test.py similarity index 98% rename from keras/optimizers/schedules/learning_rate_schedule_test.py rename to keras/src/optimizers/schedules/learning_rate_schedule_test.py index 56e0c2d3c28f..052db9e93945 100644 --- a/keras/optimizers/schedules/learning_rate_schedule_test.py +++ b/keras/src/optimizers/schedules/learning_rate_schedule_test.py @@ -5,12 +5,12 @@ import numpy as np import pytest -from keras import backend -from keras import layers -from keras import optimizers -from keras import testing -from keras.models import Sequential -from keras.optimizers import schedules +from keras.src import backend +from keras.src import layers +from keras.src import optimizers +from keras.src import testing +from keras.src.models import Sequential +from keras.src.optimizers import schedules class TestFitLRSchedulesFlow(testing.TestCase): diff --git a/keras/optimizers/sgd.py b/keras/src/optimizers/sgd.py similarity index 97% rename from keras/optimizers/sgd.py rename to keras/src/optimizers/sgd.py index 3880bb7caa2d..85a8c8647445 100644 --- a/keras/optimizers/sgd.py +++ b/keras/src/optimizers/sgd.py @@ -1,6 +1,6 @@ -from keras import ops -from keras.api_export import keras_export -from keras.optimizers import optimizer +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.optimizers import optimizer @keras_export("keras.optimizers.SGD") diff --git a/keras/optimizers/sgd_test.py b/keras/src/optimizers/sgd_test.py similarity index 96% rename from keras/optimizers/sgd_test.py rename to keras/src/optimizers/sgd_test.py index b8eaeaff7d53..a0fc2d46c53b 100644 --- a/keras/optimizers/sgd_test.py +++ b/keras/src/optimizers/sgd_test.py @@ -2,10 +2,10 @@ import numpy as np -from keras import backend -from keras import ops -from keras import testing -from keras.optimizers.sgd import SGD +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.optimizers.sgd import SGD class SGDTest(testing.TestCase): diff --git a/keras/quantizers/__init__.py b/keras/src/quantizers/__init__.py similarity index 70% rename from keras/quantizers/__init__.py rename to keras/src/quantizers/__init__.py index 1cecf1902d15..b12d5cc84d70 100644 --- a/keras/quantizers/__init__.py +++ b/keras/src/quantizers/__init__.py @@ -1,14 +1,14 @@ import inspect -from keras.api_export import keras_export -from keras.quantizers.quantizers import AbsMaxQuantizer -from keras.quantizers.quantizers import Quantizer -from keras.quantizers.quantizers import abs_max_quantize -from keras.quantizers.quantizers import compute_float8_amax_history -from keras.quantizers.quantizers import compute_float8_scale -from keras.quantizers.quantizers import quantize_and_dequantize -from keras.saving import serialization_lib -from keras.utils.naming import to_snake_case +from keras.src.api_export import keras_export +from keras.src.quantizers.quantizers import AbsMaxQuantizer +from keras.src.quantizers.quantizers import Quantizer +from keras.src.quantizers.quantizers import abs_max_quantize +from keras.src.quantizers.quantizers import compute_float8_amax_history +from keras.src.quantizers.quantizers import compute_float8_scale +from keras.src.quantizers.quantizers import quantize_and_dequantize +from keras.src.saving import serialization_lib +from keras.src.utils.naming import to_snake_case ALL_OBJECTS 
= {Quantizer, AbsMaxQuantizer} ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS} diff --git a/keras/quantizers/quantizers.py b/keras/src/quantizers/quantizers.py similarity index 97% rename from keras/quantizers/quantizers.py rename to keras/src/quantizers/quantizers.py index 5c8a4425cbad..ccf7cc42de06 100644 --- a/keras/quantizers/quantizers.py +++ b/keras/src/quantizers/quantizers.py @@ -1,8 +1,8 @@ import ml_dtypes -from keras import backend -from keras import ops -from keras.api_export import keras_export +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export """Int8-related classes and methods""" diff --git a/keras/quantizers/quantizers_test.py b/keras/src/quantizers/quantizers_test.py similarity index 96% rename from keras/quantizers/quantizers_test.py rename to keras/src/quantizers/quantizers_test.py index bf68fd36fbd1..83702bc20f56 100644 --- a/keras/quantizers/quantizers_test.py +++ b/keras/src/quantizers/quantizers_test.py @@ -1,7 +1,7 @@ -from keras import ops -from keras import quantizers -from keras import random -from keras import testing +from keras.src import ops +from keras.src import quantizers +from keras.src import random +from keras.src import testing class QuantizersTest(testing.TestCase): diff --git a/keras/src/random/__init__.py b/keras/src/random/__init__.py new file mode 100644 index 000000000000..4ba54c78837c --- /dev/null +++ b/keras/src/random/__init__.py @@ -0,0 +1,9 @@ +from keras.src.random.random import categorical +from keras.src.random.random import dropout +from keras.src.random.random import gamma +from keras.src.random.random import normal +from keras.src.random.random import randint +from keras.src.random.random import shuffle +from keras.src.random.random import truncated_normal +from keras.src.random.random import uniform +from keras.src.random.seed_generator import SeedGenerator diff --git a/keras/random/random.py b/keras/src/random/random.py similarity index 99% rename from keras/random/random.py rename to keras/src/random/random.py index 5e874f9e46db..72282921de7b 100644 --- a/keras/random/random.py +++ b/keras/src/random/random.py @@ -1,5 +1,5 @@ -from keras import backend -from keras.api_export import keras_export +from keras.src import backend +from keras.src.api_export import keras_export @keras_export("keras.random.normal") diff --git a/keras/random/random_test.py b/keras/src/random/random_test.py similarity index 97% rename from keras/random/random_test.py rename to keras/src/random/random_test.py index 95fdfe831121..a7358edbc253 100644 --- a/keras/random/random_test.py +++ b/keras/src/random/random_test.py @@ -3,15 +3,15 @@ from absl.testing import parameterized import keras -from keras import backend -from keras import ops -from keras import testing -from keras.backend.common import dtypes -from keras.backend.common import standardize_dtype -from keras.random import random -from keras.random import seed_generator -from keras.testing.test_utils import named_product -from keras.utils.rng_utils import set_random_seed +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.backend.common import dtypes +from keras.src.backend.common import standardize_dtype +from keras.src.random import random +from keras.src.random import seed_generator +from keras.src.testing.test_utils import named_product +from keras.src.utils.rng_utils import set_random_seed class RandomTest(testing.TestCase, parameterized.TestCase): @@ -129,7 +129,7 @@ def 
test_dropout_jax_jit_stateless(self): @jax.jit def train_step(x): - with keras.backend.StatelessScope(): + with keras.src.backend.StatelessScope(): x = keras.layers.Dropout(rate=0.1)(x, training=True) return x diff --git a/keras/random/seed_generator.py b/keras/src/random/seed_generator.py similarity index 94% rename from keras/random/seed_generator.py rename to keras/src/random/seed_generator.py index ce4e4b1880a2..3dfcd5615640 100644 --- a/keras/random/seed_generator.py +++ b/keras/src/random/seed_generator.py @@ -2,11 +2,11 @@ import numpy as np -from keras import backend -from keras.api_export import keras_export -from keras.backend.common import global_state -from keras.utils import jax_utils -from keras.utils.naming import auto_name +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state +from keras.src.utils import jax_utils +from keras.src.utils.naming import auto_name @keras_export("keras.random.SeedGenerator") @@ -132,7 +132,7 @@ def make_default_seed(): def draw_seed(seed): - from keras.backend import convert_to_tensor + from keras.src.backend import convert_to_tensor if isinstance(seed, SeedGenerator): return seed.next() diff --git a/keras/random/seed_generator_test.py b/keras/src/random/seed_generator_test.py similarity index 95% rename from keras/random/seed_generator_test.py rename to keras/src/random/seed_generator_test.py index f0a6dd179f57..9ab5c132fdd0 100644 --- a/keras/random/seed_generator_test.py +++ b/keras/src/random/seed_generator_test.py @@ -1,10 +1,10 @@ import numpy as np import pytest -from keras import backend -from keras import ops -from keras import testing -from keras.random import seed_generator +from keras.src import backend +from keras.src import ops +from keras.src import testing +from keras.src.random import seed_generator class SeedGeneratorTest(testing.TestCase): diff --git a/keras/regularizers/__init__.py b/keras/src/regularizers/__init__.py similarity index 75% rename from keras/regularizers/__init__.py rename to keras/src/regularizers/__init__.py index b31d1904bfe3..64ffad22a6e4 100644 --- a/keras/regularizers/__init__.py +++ b/keras/src/regularizers/__init__.py @@ -1,13 +1,13 @@ import inspect -from keras.api_export import keras_export -from keras.regularizers.regularizers import L1 -from keras.regularizers.regularizers import L1L2 -from keras.regularizers.regularizers import L2 -from keras.regularizers.regularizers import OrthogonalRegularizer -from keras.regularizers.regularizers import Regularizer -from keras.saving import serialization_lib -from keras.utils.naming import to_snake_case +from keras.src.api_export import keras_export +from keras.src.regularizers.regularizers import L1 +from keras.src.regularizers.regularizers import L1L2 +from keras.src.regularizers.regularizers import L2 +from keras.src.regularizers.regularizers import OrthogonalRegularizer +from keras.src.regularizers.regularizers import Regularizer +from keras.src.saving import serialization_lib +from keras.src.utils.naming import to_snake_case ALL_OBJECTS = { Regularizer, diff --git a/keras/regularizers/regularizers.py b/keras/src/regularizers/regularizers.py similarity index 98% rename from keras/regularizers/regularizers.py rename to keras/src/regularizers/regularizers.py index 56d3ec6d09b8..99459fe32fb7 100644 --- a/keras/regularizers/regularizers.py +++ b/keras/src/regularizers/regularizers.py @@ -1,8 +1,8 @@ import math -from keras import ops -from keras.api_export import keras_export -from 
keras.utils.numerical_utils import normalize +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.utils.numerical_utils import normalize @keras_export(["keras.Regularizer", "keras.regularizers.Regularizer"]) diff --git a/keras/regularizers/regularizers_test.py b/keras/src/regularizers/regularizers_test.py similarity index 97% rename from keras/regularizers/regularizers_test.py rename to keras/src/regularizers/regularizers_test.py index c728f3fefa3c..288f494ede2f 100644 --- a/keras/regularizers/regularizers_test.py +++ b/keras/src/regularizers/regularizers_test.py @@ -1,9 +1,9 @@ import numpy as np -from keras import backend -from keras import regularizers -from keras import testing -from keras.regularizers.regularizers import validate_float_arg +from keras.src import backend +from keras.src import regularizers +from keras.src import testing +from keras.src.regularizers.regularizers import validate_float_arg class RegularizersTest(testing.TestCase): diff --git a/keras/src/saving/__init__.py b/keras/src/saving/__init__.py new file mode 100644 index 000000000000..3af25ce633af --- /dev/null +++ b/keras/src/saving/__init__.py @@ -0,0 +1,9 @@ +from keras.src.saving.object_registration import CustomObjectScope +from keras.src.saving.object_registration import custom_object_scope +from keras.src.saving.object_registration import get_custom_objects +from keras.src.saving.object_registration import get_registered_name +from keras.src.saving.object_registration import get_registered_object +from keras.src.saving.object_registration import register_keras_serializable +from keras.src.saving.saving_api import load_model +from keras.src.saving.serialization_lib import deserialize_keras_object +from keras.src.saving.serialization_lib import serialize_keras_object diff --git a/keras/saving/object_registration.py b/keras/src/saving/object_registration.py similarity index 98% rename from keras/saving/object_registration.py rename to keras/src/saving/object_registration.py index ee910ed954b7..978e4f762a67 100644 --- a/keras/saving/object_registration.py +++ b/keras/src/saving/object_registration.py @@ -1,7 +1,7 @@ import inspect -from keras.api_export import keras_export -from keras.backend.common import global_state +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state GLOBAL_CUSTOM_OBJECTS = {} GLOBAL_CUSTOM_NAMES = {} diff --git a/keras/saving/object_registration_test.py b/keras/src/saving/object_registration_test.py similarity index 97% rename from keras/saving/object_registration_test.py rename to keras/src/saving/object_registration_test.py index 4da7606dd454..ece59e7e208a 100644 --- a/keras/saving/object_registration_test.py +++ b/keras/src/saving/object_registration_test.py @@ -1,7 +1,7 @@ import keras -from keras import testing -from keras.saving import object_registration -from keras.saving import serialization_lib +from keras.src import testing +from keras.src.saving import object_registration +from keras.src.saving import serialization_lib class TestObjectRegistration(testing.TestCase): diff --git a/keras/saving/saving_api.py b/keras/src/saving/saving_api.py similarity index 97% rename from keras/saving/saving_api.py rename to keras/src/saving/saving_api.py index cec7c3e793ee..a99ddae8008d 100644 --- a/keras/saving/saving_api.py +++ b/keras/src/saving/saving_api.py @@ -3,11 +3,11 @@ from absl import logging -from keras.api_export import keras_export -from keras.legacy.saving import legacy_h5_format -from keras.saving 
import saving_lib -from keras.utils import file_utils -from keras.utils import io_utils +from keras.src.api_export import keras_export +from keras.src.legacy.saving import legacy_h5_format +from keras.src.saving import saving_lib +from keras.src.utils import file_utils +from keras.src.utils import io_utils try: import h5py diff --git a/keras/saving/saving_api_test.py b/keras/src/saving/saving_api_test.py similarity index 97% rename from keras/saving/saving_api_test.py rename to keras/src/saving/saving_api_test.py index 8201c3e0a57a..024fae99678d 100644 --- a/keras/saving/saving_api_test.py +++ b/keras/src/saving/saving_api_test.py @@ -5,11 +5,11 @@ from absl import logging from absl.testing import parameterized -from keras import layers -from keras.models import Sequential -from keras.saving import saving_api -from keras.testing import test_case -from keras.testing.test_utils import named_product +from keras.src import layers +from keras.src.models import Sequential +from keras.src.saving import saving_api +from keras.src.testing import test_case +from keras.src.testing.test_utils import named_product class SaveModelTests(test_case.TestCase): diff --git a/keras/saving/saving_lib.py b/keras/src/saving/saving_lib.py similarity index 97% rename from keras/saving/saving_lib.py rename to keras/src/saving/saving_lib.py index 187e58aa9dea..7de68802e91c 100644 --- a/keras/saving/saving_lib.py +++ b/keras/src/saving/saving_lib.py @@ -10,19 +10,19 @@ import ml_dtypes import numpy as np -from keras import backend -from keras.backend.common import global_state -from keras.layers.layer import Layer -from keras.losses.loss import Loss -from keras.metrics.metric import Metric -from keras.optimizers.optimizer import Optimizer -from keras.saving.serialization_lib import ObjectSharingScope -from keras.saving.serialization_lib import deserialize_keras_object -from keras.saving.serialization_lib import serialize_keras_object -from keras.trainers.compile_utils import CompileMetrics -from keras.utils import file_utils -from keras.utils import naming -from keras.version import __version__ as keras_version +from keras.src import backend +from keras.src.backend.common import global_state +from keras.src.layers.layer import Layer +from keras.src.losses.loss import Loss +from keras.src.metrics.metric import Metric +from keras.src.optimizers.optimizer import Optimizer +from keras.src.saving.serialization_lib import ObjectSharingScope +from keras.src.saving.serialization_lib import deserialize_keras_object +from keras.src.saving.serialization_lib import serialize_keras_object +from keras.src.trainers.compile_utils import CompileMetrics +from keras.src.utils import file_utils +from keras.src.utils import naming +from keras.src.version import __version__ as keras_version try: import h5py @@ -319,8 +319,8 @@ def _name_key(name): def _walk_trackable(trackable): - from keras.models import Functional - from keras.models import Sequential + from keras.src.models import Functional + from keras.src.models import Sequential if isinstance(trackable, Sequential): obj_type = "Sequential" diff --git a/keras/saving/saving_lib_test.py b/keras/src/saving/saving_lib_test.py similarity index 99% rename from keras/saving/saving_lib_test.py rename to keras/src/saving/saving_lib_test.py index 5b3ee214824f..6b2d483d7591 100644 --- a/keras/saving/saving_lib_test.py +++ b/keras/src/saving/saving_lib_test.py @@ -12,9 +12,9 @@ import pytest import keras -from keras import ops -from keras import testing -from keras.saving import saving_lib +from 
keras.src import ops +from keras.src import testing +from keras.src.saving import saving_lib @keras.saving.register_keras_serializable(package="my_custom_package") @@ -614,7 +614,7 @@ def test_save_to_fileobj(self) -> None: @pytest.mark.requires_trainable_backend class SavingAPITest(testing.TestCase): def test_saving_api_errors(self): - from keras.saving import saving_api + from keras.src.saving import saving_api model = _get_basic_functional_model() diff --git a/keras/saving/serialization_lib.py b/keras/src/saving/serialization_lib.py similarity index 98% rename from keras/saving/serialization_lib.py rename to keras/src/saving/serialization_lib.py index 327cefcc0949..40125572809b 100644 --- a/keras/saving/serialization_lib.py +++ b/keras/src/saving/serialization_lib.py @@ -7,13 +7,13 @@ import numpy as np -from keras import api_export -from keras import backend -from keras.api_export import keras_export -from keras.backend.common import global_state -from keras.saving import object_registration -from keras.utils import python_utils -from keras.utils.module_utils import tensorflow as tf +from keras.src import api_export +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state +from keras.src.saving import object_registration +from keras.src.utils import python_utils +from keras.src.utils.module_utils import tensorflow as tf PLAIN_TYPES = (str, int, float, bool) diff --git a/keras/saving/serialization_lib_test.py b/keras/src/saving/serialization_lib_test.py similarity index 99% rename from keras/saving/serialization_lib_test.py rename to keras/src/saving/serialization_lib_test.py index 701a903d661f..06ed6ac7198f 100644 --- a/keras/saving/serialization_lib_test.py +++ b/keras/src/saving/serialization_lib_test.py @@ -6,9 +6,9 @@ import pytest import keras -from keras import ops -from keras import testing -from keras.saving import serialization_lib +from keras.src import ops +from keras.src import testing +from keras.src.saving import serialization_lib def custom_fn(x): diff --git a/keras/src/testing/__init__.py b/keras/src/testing/__init__.py new file mode 100644 index 000000000000..ae554ff85857 --- /dev/null +++ b/keras/src/testing/__init__.py @@ -0,0 +1,5 @@ +from keras.src.testing.test_case import TestCase +from keras.src.testing.test_case import jax_uses_gpu +from keras.src.testing.test_case import tensorflow_uses_gpu +from keras.src.testing.test_case import torch_uses_gpu +from keras.src.testing.test_case import uses_gpu diff --git a/keras/testing/test_case.py b/keras/src/testing/test_case.py similarity index 97% rename from keras/testing/test_case.py rename to keras/src/testing/test_case.py index 74656e6f0efb..0b6fd9d40f3f 100644 --- a/keras/testing/test_case.py +++ b/keras/src/testing/test_case.py @@ -5,17 +5,17 @@ import numpy as np -from keras import backend -from keras import distribution -from keras import ops -from keras import tree -from keras import utils -from keras.backend.common import is_float_dtype -from keras.backend.common import standardize_dtype -from keras.backend.common.global_state import clear_session -from keras.backend.common.keras_tensor import KerasTensor -from keras.models import Model -from keras.utils import traceback_utils +from keras.src import backend +from keras.src import distribution +from keras.src import ops +from keras.src import tree +from keras.src import utils +from keras.src.backend.common import is_float_dtype +from keras.src.backend.common import standardize_dtype +from 
keras.src.backend.common.global_state import clear_session +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.models import Model +from keras.src.utils import traceback_utils class TestCase(unittest.TestCase): @@ -100,9 +100,9 @@ def assertSparse(self, x, sparse=True): ) def run_class_serialization_test(self, instance, custom_objects=None): - from keras.saving import custom_object_scope - from keras.saving import deserialize_keras_object - from keras.saving import serialize_keras_object + from keras.src.saving import custom_object_scope + from keras.src.saving import deserialize_keras_object + from keras.src.saving import serialize_keras_object # get_config roundtrip cls = instance.__class__ @@ -599,7 +599,7 @@ def create_keras_tensors(input_shape, dtype, sparse): def create_eager_tensors(input_shape, dtype, sparse): - from keras.backend import random + from keras.src.backend import random if set(tree.flatten(dtype)).difference( ["float16", "float32", "float64", "int16", "int32", "int64"] diff --git a/keras/testing/test_utils.py b/keras/src/testing/test_utils.py similarity index 100% rename from keras/testing/test_utils.py rename to keras/src/testing/test_utils.py diff --git a/keras/testing/test_utils_test.py b/keras/src/testing/test_utils_test.py similarity index 99% rename from keras/testing/test_utils_test.py rename to keras/src/testing/test_utils_test.py index c7bb9b6a86e2..f0b6591c79de 100644 --- a/keras/testing/test_utils_test.py +++ b/keras/src/testing/test_utils_test.py @@ -1,8 +1,8 @@ import numpy as np from absl.testing import parameterized -from keras.testing import test_case -from keras.testing import test_utils +from keras.src.testing import test_case +from keras.src.testing import test_utils class GetTestDataTest(test_case.TestCase): diff --git a/keras/trainers/__init__.py b/keras/src/trainers/__init__.py similarity index 100% rename from keras/trainers/__init__.py rename to keras/src/trainers/__init__.py diff --git a/keras/trainers/compile_utils.py b/keras/src/trainers/compile_utils.py similarity index 99% rename from keras/trainers/compile_utils.py rename to keras/src/trainers/compile_utils.py index 2049d4b8546b..729c26921336 100644 --- a/keras/trainers/compile_utils.py +++ b/keras/src/trainers/compile_utils.py @@ -1,9 +1,9 @@ -from keras import backend -from keras import losses as losses_module -from keras import metrics as metrics_module -from keras import ops -from keras import tree -from keras.utils.naming import get_object_name +from keras.src import backend +from keras.src import losses as losses_module +from keras.src import metrics as metrics_module +from keras.src import ops +from keras.src import tree +from keras.src.utils.naming import get_object_name class MetricsList(metrics_module.Metric): diff --git a/keras/trainers/compile_utils_test.py b/keras/src/trainers/compile_utils_test.py similarity index 97% rename from keras/trainers/compile_utils_test.py rename to keras/src/trainers/compile_utils_test.py index 0d53dcacc352..122a9f357f69 100644 --- a/keras/trainers/compile_utils_test.py +++ b/keras/src/trainers/compile_utils_test.py @@ -1,13 +1,13 @@ import numpy as np from absl.testing import parameterized -from keras import backend -from keras import metrics as losses_module -from keras import metrics as metrics_module -from keras import ops -from keras import testing -from keras.trainers.compile_utils import CompileLoss -from keras.trainers.compile_utils import CompileMetrics +from keras.src import backend +from keras.src import 
metrics as losses_module +from keras.src import metrics as metrics_module +from keras.src import ops +from keras.src import testing +from keras.src.trainers.compile_utils import CompileLoss +from keras.src.trainers.compile_utils import CompileMetrics class TestCompileMetrics(testing.TestCase): diff --git a/keras/trainers/data_adapters/__init__.py b/keras/src/trainers/data_adapters/__init__.py similarity index 89% rename from keras/trainers/data_adapters/__init__.py rename to keras/src/trainers/data_adapters/__init__.py index 84088342895d..41f2a91f11a0 100644 --- a/keras/trainers/data_adapters/__init__.py +++ b/keras/src/trainers/data_adapters/__init__.py @@ -1,15 +1,15 @@ import types -from keras.distribution import distribution_lib -from keras.trainers.data_adapters import array_data_adapter -from keras.trainers.data_adapters import py_dataset_adapter -from keras.trainers.data_adapters.array_data_adapter import ArrayDataAdapter -from keras.trainers.data_adapters.generator_data_adapter import ( +from keras.src.distribution import distribution_lib +from keras.src.trainers.data_adapters import array_data_adapter +from keras.src.trainers.data_adapters import py_dataset_adapter +from keras.src.trainers.data_adapters.array_data_adapter import ArrayDataAdapter +from keras.src.trainers.data_adapters.generator_data_adapter import ( GeneratorDataAdapter, ) -from keras.trainers.data_adapters.py_dataset_adapter import PyDatasetAdapter -from keras.trainers.data_adapters.tf_dataset_adapter import TFDatasetAdapter -from keras.trainers.data_adapters.torch_data_loader_adapter import ( +from keras.src.trainers.data_adapters.py_dataset_adapter import PyDatasetAdapter +from keras.src.trainers.data_adapters.tf_dataset_adapter import TFDatasetAdapter +from keras.src.trainers.data_adapters.torch_data_loader_adapter import ( TorchDataLoaderAdapter, ) diff --git a/keras/trainers/data_adapters/array_data_adapter.py b/keras/src/trainers/data_adapters/array_data_adapter.py similarity index 97% rename from keras/trainers/data_adapters/array_data_adapter.py rename to keras/src/trainers/data_adapters/array_data_adapter.py index 26cc77e609e2..3832d8e553ae 100644 --- a/keras/trainers/data_adapters/array_data_adapter.py +++ b/keras/src/trainers/data_adapters/array_data_adapter.py @@ -3,10 +3,10 @@ import numpy as np -from keras import tree -from keras.trainers.data_adapters import array_slicing -from keras.trainers.data_adapters import data_adapter_utils -from keras.trainers.data_adapters.data_adapter import DataAdapter +from keras.src import tree +from keras.src.trainers.data_adapters import array_slicing +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.data_adapters.data_adapter import DataAdapter class ArrayDataAdapter(DataAdapter): @@ -103,7 +103,7 @@ def slice_and_convert_to_numpy(sliceable, indices=None): return self._get_iterator(slice_and_convert_to_numpy, inputs) def get_tf_dataset(self): - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf shuffle = self._shuffle batch_size = self._batch_size @@ -243,7 +243,7 @@ def grab_one(x): return dataset.prefetch(tf.data.AUTOTUNE) def get_jax_iterator(self): - from keras.backend.jax.core import convert_to_tensor + from keras.src.backend.jax.core import convert_to_tensor inputs = array_slicing.convert_to_sliceable( self._inputs, target_backend="jax" @@ -260,7 +260,7 @@ def slice_and_convert_to_jax(sliceable, indices=None): def get_torch_dataloader(self): import torch - 
from keras.backend.torch.core import convert_to_tensor + from keras.src.backend.torch.core import convert_to_tensor class ArrayDataset(torch.utils.data.Dataset): def __init__(self, array): diff --git a/keras/trainers/data_adapters/array_data_adapter_test.py b/keras/src/trainers/data_adapters/array_data_adapter_test.py similarity index 98% rename from keras/trainers/data_adapters/array_data_adapter_test.py rename to keras/src/trainers/data_adapters/array_data_adapter_test.py index c1eccc6c290b..46eb4fcc194e 100644 --- a/keras/trainers/data_adapters/array_data_adapter_test.py +++ b/keras/src/trainers/data_adapters/array_data_adapter_test.py @@ -7,10 +7,10 @@ import torch from absl.testing import parameterized -from keras import backend -from keras import testing -from keras.testing.test_utils import named_product -from keras.trainers.data_adapters import array_data_adapter +from keras.src import backend +from keras.src import testing +from keras.src.testing.test_utils import named_product +from keras.src.trainers.data_adapters import array_data_adapter class TestArrayDataAdapter(testing.TestCase, parameterized.TestCase): diff --git a/keras/trainers/data_adapters/array_slicing.py b/keras/src/trainers/data_adapters/array_slicing.py similarity index 95% rename from keras/trainers/data_adapters/array_slicing.py rename to keras/src/trainers/data_adapters/array_slicing.py index eab94fa3ed5b..50279aa11087 100644 --- a/keras/trainers/data_adapters/array_slicing.py +++ b/keras/src/trainers/data_adapters/array_slicing.py @@ -3,9 +3,9 @@ import numpy as np -from keras import backend -from keras import tree -from keras.trainers.data_adapters import data_adapter_utils +from keras.src import backend +from keras.src import tree +from keras.src.trainers.data_adapters import data_adapter_utils try: import pandas @@ -124,7 +124,7 @@ class NumpySliceable(Sliceable): class TensorflowSliceable(Sliceable): def __getitem__(self, indices): - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf if isinstance(indices, slice): return self.array[indices] @@ -133,13 +133,13 @@ def __getitem__(self, indices): @classmethod def cast(cls, x, dtype): - from keras.backend.tensorflow.core import cast + from keras.src.backend.tensorflow.core import cast return cast(x, dtype) @classmethod def convert_to_numpy(cls, x): - from keras.backend.tensorflow.core import convert_to_numpy + from keras.src.backend.tensorflow.core import convert_to_numpy return convert_to_numpy(x) @@ -175,7 +175,7 @@ def convert_to_jax_compatible(cls, x): @classmethod def convert_to_torch_compatible(cls, x): - from keras.backend.tensorflow import sparse as tf_sparse + from keras.src.backend.tensorflow import sparse as tf_sparse return tf_sparse.sparse_to_dense(x) @@ -186,7 +186,7 @@ def __getitem__(self, indices): @classmethod def convert_to_numpy(cls, x): - from keras.backend.jax.core import convert_to_numpy + from keras.src.backend.jax.core import convert_to_numpy return convert_to_numpy(x) @@ -206,13 +206,13 @@ def convert_to_torch_compatible(cls, x): class TorchSliceable(Sliceable): @classmethod def cast(cls, x, dtype): - from keras.backend.torch.core import cast + from keras.src.backend.torch.core import cast return cast(x, dtype) @classmethod def convert_to_numpy(cls, x): - from keras.backend.torch.core import convert_to_numpy + from keras.src.backend.torch.core import convert_to_numpy return convert_to_numpy(x) @@ -293,7 +293,7 @@ def convert_to_torch_compatible(cls, x): def 
to_tensorflow_sparse_wrapper(sparse): - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf row_ids = sparse.indices[:, 0] row_splits = tf.experimental.RowPartition.from_value_rowids( @@ -308,7 +308,7 @@ def to_tensorflow_sparse_wrapper(sparse): def slice_tensorflow_sparse_wrapper(sparse_wrapper, indices): - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf if isinstance(indices, slice): sparse_indices = sparse_wrapper.ragged_indices[indices] diff --git a/keras/trainers/data_adapters/data_adapter.py b/keras/src/trainers/data_adapters/data_adapter.py similarity index 100% rename from keras/trainers/data_adapters/data_adapter.py rename to keras/src/trainers/data_adapters/data_adapter.py diff --git a/keras/trainers/data_adapters/data_adapter_utils.py b/keras/src/trainers/data_adapters/data_adapter_utils.py similarity index 95% rename from keras/trainers/data_adapters/data_adapter_utils.py rename to keras/src/trainers/data_adapters/data_adapter_utils.py index 7768c9295e24..83dae01e146d 100644 --- a/keras/trainers/data_adapters/data_adapter_utils.py +++ b/keras/src/trainers/data_adapters/data_adapter_utils.py @@ -1,8 +1,8 @@ import numpy as np -from keras import backend -from keras import tree -from keras.api_export import keras_export +from keras.src import backend +from keras.src import tree +from keras.src.api_export import keras_export NUM_BATCHES_FOR_TENSOR_SPEC = 2 @@ -135,7 +135,7 @@ def get_tensor_spec(batches): identical, but the shape at each leaf may be different. Returns: the common tensor spec for all the batches. """ - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf def get_single_tensor_spec(*tensors): x = tensors[0] @@ -176,7 +176,7 @@ def get_single_tensor_spec(*tensors): def get_jax_iterator(iterable): - from keras.backend.jax.core import convert_to_tensor + from keras.src.backend.jax.core import convert_to_tensor for batch in iterable: yield tree.map_structure(convert_to_tensor, batch) @@ -201,7 +201,7 @@ def convert_to_numpy(x): def get_torch_dataloader(iterable): import torch.utils.data as torch_data - from keras.backend.torch.core import convert_to_tensor + from keras.src.backend.torch.core import convert_to_tensor class ConverterIterableDataset(torch_data.IterableDataset): def __init__(self, iterable): @@ -279,7 +279,7 @@ def is_scipy_sparse(x): def scipy_sparse_to_tf_sparse(x): - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf coo = x.tocoo() indices = np.concatenate( @@ -303,6 +303,6 @@ def tf_sparse_to_jax_sparse(x): def jax_sparse_to_tf_sparse(x): - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf return tf.SparseTensor(x.indices, x.data, x.shape) diff --git a/keras/trainers/data_adapters/generator_data_adapter.py b/keras/src/trainers/data_adapters/generator_data_adapter.py similarity index 89% rename from keras/trainers/data_adapters/generator_data_adapter.py rename to keras/src/trainers/data_adapters/generator_data_adapter.py index 6e26c9585ccd..0cbf3d64cf49 100644 --- a/keras/trainers/data_adapters/generator_data_adapter.py +++ b/keras/src/trainers/data_adapters/generator_data_adapter.py @@ -1,8 +1,8 @@ import itertools -from keras import tree -from keras.trainers.data_adapters import data_adapter_utils -from 
keras.trainers.data_adapters.data_adapter import DataAdapter +from keras.src import tree +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.data_adapters.data_adapter import DataAdapter class GeneratorDataAdapter(DataAdapter): @@ -26,7 +26,7 @@ def get_numpy_iterator(self): return data_adapter_utils.get_numpy_iterator(self.generator) def get_jax_iterator(self): - from keras.backend.jax.core import convert_to_tensor + from keras.src.backend.jax.core import convert_to_tensor def convert_to_jax(x): if data_adapter_utils.is_scipy_sparse(x): @@ -39,7 +39,7 @@ def convert_to_jax(x): yield tree.map_structure(convert_to_jax, batch) def get_tf_dataset(self): - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf def convert_to_tf(x): if data_adapter_utils.is_scipy_sparse(x): diff --git a/keras/trainers/data_adapters/generator_data_adapter_test.py b/keras/src/trainers/data_adapters/generator_data_adapter_test.py similarity index 97% rename from keras/trainers/data_adapters/generator_data_adapter_test.py rename to keras/src/trainers/data_adapters/generator_data_adapter_test.py index f06e79610429..4d6ebdc5597c 100644 --- a/keras/trainers/data_adapters/generator_data_adapter_test.py +++ b/keras/src/trainers/data_adapters/generator_data_adapter_test.py @@ -9,9 +9,9 @@ from absl.testing import parameterized from jax import numpy as jnp -from keras import testing -from keras.testing.test_utils import named_product -from keras.trainers.data_adapters import generator_data_adapter +from keras.src import testing +from keras.src.testing.test_utils import named_product +from keras.src.trainers.data_adapters import generator_data_adapter def example_generator(x, y, sample_weight=None, batch_size=32): diff --git a/keras/trainers/data_adapters/py_dataset_adapter.py b/keras/src/trainers/data_adapters/py_dataset_adapter.py similarity index 98% rename from keras/trainers/data_adapters/py_dataset_adapter.py rename to keras/src/trainers/data_adapters/py_dataset_adapter.py index 451d8aa50c0d..71ab2a67736a 100644 --- a/keras/trainers/data_adapters/py_dataset_adapter.py +++ b/keras/src/trainers/data_adapters/py_dataset_adapter.py @@ -9,9 +9,9 @@ import numpy as np -from keras.api_export import keras_export -from keras.trainers.data_adapters import data_adapter_utils -from keras.trainers.data_adapters.data_adapter import DataAdapter +from keras.src.api_export import keras_export +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.data_adapters.data_adapter import DataAdapter @keras_export(["keras.utils.PyDataset", "keras.utils.Sequence"]) @@ -260,7 +260,7 @@ def get_jax_iterator(self): return data_adapter_utils.get_jax_iterator(self._get_iterator()) def get_tf_dataset(self): - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf if self._output_signature is None: num_samples = min( diff --git a/keras/trainers/data_adapters/py_dataset_adapter_test.py b/keras/src/trainers/data_adapters/py_dataset_adapter_test.py similarity index 97% rename from keras/trainers/data_adapters/py_dataset_adapter_test.py rename to keras/src/trainers/data_adapters/py_dataset_adapter_test.py index cc88631853dd..b1be7002ac54 100644 --- a/keras/trainers/data_adapters/py_dataset_adapter_test.py +++ b/keras/src/trainers/data_adapters/py_dataset_adapter_test.py @@ -7,10 +7,10 @@ import torch from absl.testing import parameterized -from keras import testing 
-from keras.testing.test_utils import named_product -from keras.trainers.data_adapters import py_dataset_adapter -from keras.utils.rng_utils import set_random_seed +from keras.src import testing +from keras.src.testing.test_utils import named_product +from keras.src.trainers.data_adapters import py_dataset_adapter +from keras.src.utils.rng_utils import set_random_seed class ExamplePyDataset(py_dataset_adapter.PyDataset): diff --git a/keras/trainers/data_adapters/tf_dataset_adapter.py b/keras/src/trainers/data_adapters/tf_dataset_adapter.py similarity index 89% rename from keras/trainers/data_adapters/tf_dataset_adapter.py rename to keras/src/trainers/data_adapters/tf_dataset_adapter.py index 6f2362dc9b4d..fcd4c9893852 100644 --- a/keras/trainers/data_adapters/tf_dataset_adapter.py +++ b/keras/src/trainers/data_adapters/tf_dataset_adapter.py @@ -1,6 +1,6 @@ -from keras import tree -from keras.trainers.data_adapters import data_adapter_utils -from keras.trainers.data_adapters.data_adapter import DataAdapter +from keras.src import tree +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.data_adapters.data_adapter import DataAdapter class TFDatasetAdapter(DataAdapter): @@ -17,7 +17,7 @@ def __init__(self, dataset, class_weight=None, distribution=None): shard the input dataset into per worker/process dataset instance. """ - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf if not isinstance( dataset, (tf.data.Dataset, tf.distribute.DistributedDataset) @@ -35,7 +35,7 @@ def __init__(self, dataset, class_weight=None, distribution=None): self._dataset = dataset def get_numpy_iterator(self): - from keras.backend.tensorflow.core import convert_to_numpy + from keras.src.backend.tensorflow.core import convert_to_numpy for batch in self._dataset: yield tree.map_structure(convert_to_numpy, batch) @@ -43,9 +43,9 @@ def get_numpy_iterator(self): def get_jax_iterator(self): import jax.experimental.sparse as jax_sparse - from keras.backend.jax.core import convert_to_tensor - from keras.backend.tensorflow.core import convert_to_numpy - from keras.utils.module_utils import tensorflow as tf + from keras.src.backend.jax.core import convert_to_tensor + from keras.src.backend.tensorflow.core import convert_to_numpy + from keras.src.utils.module_utils import tensorflow as tf def convert_to_jax(x): # We use numpy as an intermediary because the conversion @@ -107,7 +107,7 @@ def make_class_weight_map_fn(class_weight): A function that can be used with `tf.data.Dataset.map` to apply class weighting. 
""" - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf class_weight_tensor = tf.convert_to_tensor( [ diff --git a/keras/trainers/data_adapters/tf_dataset_adapter_test.py b/keras/src/trainers/data_adapters/tf_dataset_adapter_test.py similarity index 98% rename from keras/trainers/data_adapters/tf_dataset_adapter_test.py rename to keras/src/trainers/data_adapters/tf_dataset_adapter_test.py index 5d4a7576f45b..ad48c2d3c241 100644 --- a/keras/trainers/data_adapters/tf_dataset_adapter_test.py +++ b/keras/src/trainers/data_adapters/tf_dataset_adapter_test.py @@ -6,9 +6,9 @@ import torch from absl.testing import parameterized -from keras import testing -from keras.testing.test_utils import named_product -from keras.trainers.data_adapters import tf_dataset_adapter +from keras.src import testing +from keras.src.testing.test_utils import named_product +from keras.src.trainers.data_adapters import tf_dataset_adapter class TestTFDatasetAdapter(testing.TestCase, parameterized.TestCase): diff --git a/keras/trainers/data_adapters/torch_data_loader_adapter.py b/keras/src/trainers/data_adapters/torch_data_loader_adapter.py similarity index 91% rename from keras/trainers/data_adapters/torch_data_loader_adapter.py rename to keras/src/trainers/data_adapters/torch_data_loader_adapter.py index 78df412c68f3..59a89050c5ce 100644 --- a/keras/trainers/data_adapters/torch_data_loader_adapter.py +++ b/keras/src/trainers/data_adapters/torch_data_loader_adapter.py @@ -2,9 +2,9 @@ import numpy as np -from keras import tree -from keras.trainers.data_adapters import data_adapter_utils -from keras.trainers.data_adapters.data_adapter import DataAdapter +from keras.src import tree +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.trainers.data_adapters.data_adapter import DataAdapter class TorchDataLoaderAdapter(DataAdapter): @@ -44,7 +44,7 @@ def get_jax_iterator(self): return data_adapter_utils.get_jax_iterator(self.get_numpy_iterator()) def get_tf_dataset(self): - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf if self._output_signature is None: batches = list( diff --git a/keras/trainers/data_adapters/torch_data_loader_adapter_test.py b/keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py similarity index 97% rename from keras/trainers/data_adapters/torch_data_loader_adapter_test.py rename to keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py index 9922667cecc3..e86f570d6925 100644 --- a/keras/trainers/data_adapters/torch_data_loader_adapter_test.py +++ b/keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py @@ -6,9 +6,9 @@ import torch from absl.testing import parameterized -from keras import testing -from keras.testing.test_utils import named_product -from keras.trainers.data_adapters.torch_data_loader_adapter import ( +from keras.src import testing +from keras.src.testing.test_utils import named_product +from keras.src.trainers.data_adapters.torch_data_loader_adapter import ( TorchDataLoaderAdapter, ) diff --git a/keras/trainers/epoch_iterator.py b/keras/src/trainers/epoch_iterator.py similarity index 98% rename from keras/trainers/epoch_iterator.py rename to keras/src/trainers/epoch_iterator.py index 9711a6cfa12d..fa55d78d864f 100644 --- a/keras/trainers/epoch_iterator.py +++ b/keras/src/trainers/epoch_iterator.py @@ -41,7 +41,7 @@ import warnings -from keras.trainers import data_adapters +from keras.src.trainers 
import data_adapters class EpochIterator: diff --git a/keras/trainers/epoch_iterator_test.py b/keras/src/trainers/epoch_iterator_test.py similarity index 97% rename from keras/trainers/epoch_iterator_test.py rename to keras/src/trainers/epoch_iterator_test.py index 0c731b0bc79b..832520e19fc3 100644 --- a/keras/trainers/epoch_iterator_test.py +++ b/keras/src/trainers/epoch_iterator_test.py @@ -2,10 +2,10 @@ import pytest import tensorflow as tf -from keras import backend -from keras import testing -from keras.trainers import data_adapters -from keras.trainers import epoch_iterator +from keras.src import backend +from keras.src import testing +from keras.src.trainers import data_adapters +from keras.src.trainers import epoch_iterator class TestEpochIterator(testing.TestCase): diff --git a/keras/trainers/trainer.py b/keras/src/trainers/trainer.py similarity index 98% rename from keras/trainers/trainer.py rename to keras/src/trainers/trainer.py index 0ee156f9c5e2..0d8fdb85cd6d 100644 --- a/keras/trainers/trainer.py +++ b/keras/src/trainers/trainer.py @@ -1,18 +1,18 @@ import platform import warnings -from keras import backend -from keras import metrics as metrics_module -from keras import ops -from keras import optimizers -from keras import tree -from keras.optimizers.loss_scale_optimizer import LossScaleOptimizer -from keras.saving import serialization_lib -from keras.trainers.compile_utils import CompileLoss -from keras.trainers.compile_utils import CompileMetrics -from keras.trainers.data_adapters import data_adapter_utils -from keras.utils import traceback_utils -from keras.utils import tracking +from keras.src import backend +from keras.src import metrics as metrics_module +from keras.src import ops +from keras.src import optimizers +from keras.src import tree +from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer +from keras.src.saving import serialization_lib +from keras.src.trainers.compile_utils import CompileLoss +from keras.src.trainers.compile_utils import CompileMetrics +from keras.src.trainers.data_adapters import data_adapter_utils +from keras.src.utils import traceback_utils +from keras.src.utils import tracking class Trainer: @@ -995,7 +995,7 @@ def model_supports_jit(model): # XLA not supported with TF on MacOS GPU if platform.system() == "Darwin" and "arm" in platform.processor().lower(): if backend.backend() == "tensorflow": - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf if tf.config.list_physical_devices("GPU"): return False diff --git a/keras/trainers/trainer_test.py b/keras/src/trainers/trainer_test.py similarity index 98% rename from keras/trainers/trainer_test.py rename to keras/src/trainers/trainer_test.py index cf02da6b558a..6e125a31cf07 100644 --- a/keras/trainers/trainer_test.py +++ b/keras/src/trainers/trainer_test.py @@ -5,27 +5,29 @@ from absl.testing import parameterized import keras -from keras import backend -from keras import initializers -from keras import layers -from keras import losses -from keras import metrics -from keras import models -from keras import ops -from keras import optimizers -from keras import testing -from keras.callbacks.callback import Callback -from keras.optimizers.rmsprop import RMSprop -from keras.testing.test_utils import named_product +from keras.src import backend +from keras.src import initializers +from keras.src import layers +from keras.src import losses +from keras.src import metrics +from keras.src import models +from keras.src import 
ops +from keras.src import optimizers +from keras.src import testing +from keras.src.callbacks.callback import Callback +from keras.src.optimizers.rmsprop import RMSprop +from keras.src.testing.test_utils import named_product if backend.backend() == "jax": - from keras.backend.jax.trainer import JAXTrainer as Trainer + from keras.src.backend.jax.trainer import JAXTrainer as Trainer elif backend.backend() == "torch": - from keras.backend.torch.trainer import TorchTrainer as Trainer + from keras.src.backend.torch.trainer import TorchTrainer as Trainer elif backend.backend() == "tensorflow": - from keras.backend.tensorflow.trainer import TensorFlowTrainer as Trainer + from keras.src.backend.tensorflow.trainer import ( + TensorFlowTrainer as Trainer, + ) elif backend.backend() == "numpy": - from keras.backend.numpy.trainer import NumpyTrainer as Trainer + from keras.src.backend.numpy.trainer import NumpyTrainer as Trainer else: raise ImportError(f"Invalid backend: {backend.backend()}") @@ -966,7 +968,7 @@ def call(self, x): def get_functional(self): ExampleLayer = self.get_layer() - class ExampleFunctional(keras.Functional): + class ExampleFunctional(keras.src.Functional): def __init__(self, input_shape=(None,)): inputs = keras.Input(input_shape) outputs = ExampleLayer()(inputs) @@ -996,7 +998,7 @@ def __init__(self, input_shape=(None,)): reason="Only tensorflow supports raggeds", ) def test_trainer_with_raggeds(self, model_class): - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf def loss_fn(y, y_pred, sample_weight=None): return 0 diff --git a/keras/src/tree/__init__.py b/keras/src/tree/__init__.py new file mode 100644 index 000000000000..ba755043cb9b --- /dev/null +++ b/keras/src/tree/__init__.py @@ -0,0 +1,10 @@ +from keras.src.tree.tree_api import assert_same_structure +from keras.src.tree.tree_api import flatten +from keras.src.tree.tree_api import is_nested +from keras.src.tree.tree_api import lists_to_tuples +from keras.src.tree.tree_api import map_shape_structure +from keras.src.tree.tree_api import map_structure +from keras.src.tree.tree_api import map_structure_up_to +from keras.src.tree.tree_api import pack_sequence_as +from keras.src.tree.tree_api import register_tree_node_class +from keras.src.tree.tree_api import traverse diff --git a/keras/tree/dmtree_impl.py b/keras/src/tree/dmtree_impl.py similarity index 99% rename from keras/tree/dmtree_impl.py rename to keras/src/tree/dmtree_impl.py index 916fb35b257b..441e84a35073 100644 --- a/keras/tree/dmtree_impl.py +++ b/keras/src/tree/dmtree_impl.py @@ -1,4 +1,4 @@ -from keras.utils.module_utils import dmtree +from keras.src.utils.module_utils import dmtree def register_tree_node_class(cls): diff --git a/keras/tree/optree_impl.py b/keras/src/tree/optree_impl.py similarity index 99% rename from keras/tree/optree_impl.py rename to keras/src/tree/optree_impl.py index c823381b56f3..8ada42b0fb24 100644 --- a/keras/tree/optree_impl.py +++ b/keras/src/tree/optree_impl.py @@ -5,7 +5,7 @@ import optree import optree.utils -from keras.backend.config import backend +from keras.src.backend.config import backend def register_tree_node_class(cls): diff --git a/keras/tree/tree_api.py b/keras/src/tree/tree_api.py similarity index 97% rename from keras/tree/tree_api.py rename to keras/src/tree/tree_api.py index 09a99daa10e9..1bd833c8d0ab 100644 --- a/keras/tree/tree_api.py +++ b/keras/src/tree/tree_api.py @@ -1,11 +1,11 @@ -from keras.api_export import keras_export -from 
keras.utils.module_utils import dmtree -from keras.utils.module_utils import optree +from keras.src.api_export import keras_export +from keras.src.utils.module_utils import dmtree +from keras.src.utils.module_utils import optree if optree.available: - from keras.tree import optree_impl as tree_impl + from keras.src.tree import optree_impl as tree_impl elif dmtree.available: - from keras.tree import dmtree_impl as tree_impl + from keras.src.tree import dmtree_impl as tree_impl else: raise ImportError( "To use Keras, you need to have `optree` installed. " diff --git a/keras/tree/tree_test.py b/keras/src/tree/tree_test.py similarity index 99% rename from keras/tree/tree_test.py rename to keras/src/tree/tree_test.py index 7335b7d3f377..24ddbe5ce0fa 100644 --- a/keras/tree/tree_test.py +++ b/keras/src/tree/tree_test.py @@ -2,9 +2,9 @@ import numpy as np -from keras import ops -from keras import testing -from keras import tree +from keras.src import ops +from keras.src import testing +from keras.src import tree STRUCTURE1 = (((1, 2), 3), 4, (5, 6)) STRUCTURE2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) diff --git a/keras/src/utils/__init__.py b/keras/src/utils/__init__.py new file mode 100644 index 000000000000..c503a2043776 --- /dev/null +++ b/keras/src/utils/__init__.py @@ -0,0 +1,26 @@ +from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory +from keras.src.utils.dataset_utils import split_dataset +from keras.src.utils.file_utils import get_file +from keras.src.utils.image_dataset_utils import image_dataset_from_directory +from keras.src.utils.image_utils import array_to_img +from keras.src.utils.image_utils import img_to_array +from keras.src.utils.image_utils import load_img +from keras.src.utils.image_utils import save_img +from keras.src.utils.io_utils import disable_interactive_logging +from keras.src.utils.io_utils import enable_interactive_logging +from keras.src.utils.io_utils import is_interactive_logging_enabled +from keras.src.utils.model_visualization import model_to_dot +from keras.src.utils.model_visualization import plot_model +from keras.src.utils.numerical_utils import normalize +from keras.src.utils.numerical_utils import to_categorical +from keras.src.utils.progbar import Progbar +from keras.src.utils.python_utils import default +from keras.src.utils.python_utils import is_default +from keras.src.utils.python_utils import removeprefix +from keras.src.utils.python_utils import removesuffix +from keras.src.utils.rng_utils import set_random_seed +from keras.src.utils.sequence_utils import pad_sequences +from keras.src.utils.text_dataset_utils import text_dataset_from_directory +from keras.src.utils.timeseries_dataset_utils import ( + timeseries_dataset_from_array, +) diff --git a/keras/utils/argument_validation.py b/keras/src/utils/argument_validation.py similarity index 100% rename from keras/utils/argument_validation.py rename to keras/src/utils/argument_validation.py diff --git a/keras/utils/audio_dataset_utils.py b/keras/src/utils/audio_dataset_utils.py similarity index 98% rename from keras/utils/audio_dataset_utils.py rename to keras/src/utils/audio_dataset_utils.py index 3927b3aa383d..ac1bab223b82 100644 --- a/keras/utils/audio_dataset_utils.py +++ b/keras/src/utils/audio_dataset_utils.py @@ -1,9 +1,9 @@ import numpy as np -from keras.api_export import keras_export -from keras.utils import dataset_utils -from keras.utils.module_utils import tensorflow as tf -from keras.utils.module_utils import tensorflow_io as tfio +from 
keras.src.api_export import keras_export +from keras.src.utils import dataset_utils +from keras.src.utils.module_utils import tensorflow as tf +from keras.src.utils.module_utils import tensorflow_io as tfio ALLOWED_FORMATS = (".wav",) diff --git a/keras/utils/audio_dataset_utils_test.py b/keras/src/utils/audio_dataset_utils_test.py similarity index 99% rename from keras/utils/audio_dataset_utils_test.py rename to keras/src/utils/audio_dataset_utils_test.py index 668465f53c44..f382a24c14b7 100644 --- a/keras/utils/audio_dataset_utils_test.py +++ b/keras/src/utils/audio_dataset_utils_test.py @@ -2,9 +2,9 @@ import numpy as np -from keras import testing -from keras.utils import audio_dataset_utils -from keras.utils.module_utils import tensorflow as tf +from keras.src import testing +from keras.src.utils import audio_dataset_utils +from keras.src.utils.module_utils import tensorflow as tf class AudioDatasetFromDirectoryTest(testing.TestCase): diff --git a/keras/utils/backend_utils.py b/keras/src/utils/backend_utils.py similarity index 86% rename from keras/utils/backend_utils.py rename to keras/src/utils/backend_utils.py index 9d358354f3a7..9a82fd464eb2 100644 --- a/keras/utils/backend_utils.py +++ b/keras/src/utils/backend_utils.py @@ -3,9 +3,9 @@ import os import sys -from keras import backend as backend_module -from keras.api_export import keras_export -from keras.backend.common import global_state +from keras.src import backend as backend_module +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state def in_tf_graph(): @@ -13,7 +13,7 @@ def in_tf_graph(): return True if "tensorflow" in sys.modules: - from keras.utils.module_utils import tensorflow as tf + from keras.src.utils.module_utils import tensorflow as tf return not tf.executing_eagerly() return False @@ -67,23 +67,23 @@ def reset(self): def __getattr__(self, name): if self._backend == "tensorflow": - from keras.backend import tensorflow as tf_backend + from keras.src.backend import tensorflow as tf_backend return getattr(tf_backend, name) if self._backend == "jax": - from keras.backend import jax as jax_backend + from keras.src.backend import jax as jax_backend return getattr(jax_backend, name) if self._backend == "torch": - from keras.backend import torch as torch_backend + from keras.src.backend import torch as torch_backend return getattr(torch_backend, name) if self._backend == "numpy": # TODO (ariG23498): - # The import `from keras.backend import numpy as numpy_backend` + # The import `from keras.src.backend import numpy as numpy_backend` # is not working. This is a temporary fix. 
# The import is redirected to `keras.backend.numpy.numpy.py` - from keras import backend as numpy_backend + from keras.src import backend as numpy_backend return getattr(numpy_backend, name) diff --git a/keras/utils/code_stats.py b/keras/src/utils/code_stats.py similarity index 100% rename from keras/utils/code_stats.py rename to keras/src/utils/code_stats.py diff --git a/keras/utils/code_stats_test.py b/keras/src/utils/code_stats_test.py similarity index 98% rename from keras/utils/code_stats_test.py rename to keras/src/utils/code_stats_test.py index bc1ccec8fc56..c1639ca9f907 100644 --- a/keras/utils/code_stats_test.py +++ b/keras/src/utils/code_stats_test.py @@ -2,8 +2,8 @@ import sys from io import StringIO -from keras.testing import test_case -from keras.utils.code_stats import count_loc +from keras.src.testing import test_case +from keras.src.utils.code_stats import count_loc class TestCountLoc(test_case.TestCase): diff --git a/keras/utils/dataset_utils.py b/keras/src/utils/dataset_utils.py similarity index 99% rename from keras/utils/dataset_utils.py rename to keras/src/utils/dataset_utils.py index 22dda610ff8d..ff27552278f4 100644 --- a/keras/utils/dataset_utils.py +++ b/keras/src/utils/dataset_utils.py @@ -6,9 +6,9 @@ import numpy as np -from keras.api_export import keras_export -from keras.utils import io_utils -from keras.utils.module_utils import tensorflow as tf +from keras.src.api_export import keras_export +from keras.src.utils import io_utils +from keras.src.utils.module_utils import tensorflow as tf @keras_export("keras.utils.split_dataset") @@ -248,7 +248,9 @@ def _get_next_sample( Yields: data_sample: The next sample. """ - from keras.trainers.data_adapters.data_adapter_utils import is_torch_tensor + from keras.src.trainers.data_adapters.data_adapter_utils import ( + is_torch_tensor, + ) try: dataset_iterator = iter(dataset_iterator) diff --git a/keras/utils/dataset_utils_test.py b/keras/src/utils/dataset_utils_test.py similarity index 98% rename from keras/utils/dataset_utils_test.py rename to keras/src/utils/dataset_utils_test.py index 9d044b1920b6..11cb275ff815 100644 --- a/keras/utils/dataset_utils_test.py +++ b/keras/src/utils/dataset_utils_test.py @@ -1,8 +1,8 @@ import numpy as np -from keras.testing import test_case -from keras.utils.dataset_utils import split_dataset -from keras.utils.module_utils import tensorflow as tf +from keras.src.testing import test_case +from keras.src.utils.dataset_utils import split_dataset +from keras.src.utils.module_utils import tensorflow as tf class DatasetUtilsTest(test_case.TestCase): diff --git a/keras/utils/dtype_utils.py b/keras/src/utils/dtype_utils.py similarity index 96% rename from keras/utils/dtype_utils.py rename to keras/src/utils/dtype_utils.py index cf6f54891b53..44ac7d4f65a3 100644 --- a/keras/utils/dtype_utils.py +++ b/keras/src/utils/dtype_utils.py @@ -1,5 +1,5 @@ -from keras import backend -from keras import ops +from keras.src import backend +from keras.src import ops DTYPE_TO_SIZE = { **{f"float{i}": i for i in (16, 32, 64)}, diff --git a/keras/utils/dtype_utils_test.py b/keras/src/utils/dtype_utils_test.py similarity index 97% rename from keras/utils/dtype_utils_test.py rename to keras/src/utils/dtype_utils_test.py index 29f2ef984203..390db6fd72d7 100644 --- a/keras/utils/dtype_utils_test.py +++ b/keras/src/utils/dtype_utils_test.py @@ -1,6 +1,6 @@ -from keras.backend.common.keras_tensor import KerasTensor -from keras.testing import test_case -from keras.utils import dtype_utils +from 
keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.testing import test_case +from keras.src.utils import dtype_utils class DtypeSizeTests(test_case.TestCase): diff --git a/keras/utils/file_utils.py b/keras/src/utils/file_utils.py similarity index 98% rename from keras/utils/file_utils.py rename to keras/src/utils/file_utils.py index 36623d088361..e625a9f131ce 100644 --- a/keras/utils/file_utils.py +++ b/keras/src/utils/file_utils.py @@ -9,11 +9,11 @@ import zipfile from urllib.request import urlretrieve -from keras.api_export import keras_export -from keras.backend import config -from keras.utils import io_utils -from keras.utils.module_utils import gfile -from keras.utils.progbar import Progbar +from keras.src.api_export import keras_export +from keras.src.backend import config +from keras.src.utils import io_utils +from keras.src.utils.module_utils import gfile +from keras.src.utils.progbar import Progbar def path_to_string(path): diff --git a/keras/utils/file_utils_test.py b/keras/src/utils/file_utils_test.py similarity index 99% rename from keras/utils/file_utils_test.py rename to keras/src/utils/file_utils_test.py index 852adbca3dd3..c09f47acd1aa 100644 --- a/keras/utils/file_utils_test.py +++ b/keras/src/utils/file_utils_test.py @@ -8,8 +8,8 @@ import zipfile from unittest.mock import patch -from keras.testing import test_case -from keras.utils import file_utils +from keras.src.testing import test_case +from keras.src.utils import file_utils class PathToStringTest(test_case.TestCase): diff --git a/keras/utils/image_dataset_utils.py b/keras/src/utils/image_dataset_utils.py similarity index 98% rename from keras/utils/image_dataset_utils.py rename to keras/src/utils/image_dataset_utils.py index 9f340301023c..30317c96780b 100755 --- a/keras/utils/image_dataset_utils.py +++ b/keras/src/utils/image_dataset_utils.py @@ -1,10 +1,10 @@ import numpy as np -from keras.api_export import keras_export -from keras.backend.config import standardize_data_format -from keras.utils import dataset_utils -from keras.utils import image_utils -from keras.utils.module_utils import tensorflow as tf +from keras.src.api_export import keras_export +from keras.src.backend.config import standardize_data_format +from keras.src.utils import dataset_utils +from keras.src.utils import image_utils +from keras.src.utils.module_utils import tensorflow as tf ALLOWLIST_FORMATS = (".bmp", ".gif", ".jpeg", ".jpg", ".png") @@ -415,7 +415,7 @@ def load_image( ) if crop_to_aspect_ratio: - from keras.backend import tensorflow as tf_backend + from keras.src.backend import tensorflow as tf_backend if data_format == "channels_first": img = tf.transpose(img, (2, 0, 1)) diff --git a/keras/utils/image_dataset_utils_test.py b/keras/src/utils/image_dataset_utils_test.py similarity index 98% rename from keras/utils/image_dataset_utils_test.py rename to keras/src/utils/image_dataset_utils_test.py index f2b2981cabe8..e6d006ab7c0e 100644 --- a/keras/utils/image_dataset_utils_test.py +++ b/keras/src/utils/image_dataset_utils_test.py @@ -2,11 +2,11 @@ import numpy as np -from keras import backend -from keras import testing -from keras.utils import image_dataset_utils -from keras.utils import image_utils -from keras.utils.module_utils import tensorflow as tf +from keras.src import backend +from keras.src import testing +from keras.src.utils import image_dataset_utils +from keras.src.utils import image_utils +from keras.src.utils.module_utils import tensorflow as tf class ImageDatasetFromDirectoryTest(testing.TestCase): 
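The renames above relocate implementation modules under `keras/src/` and rewrite every internal import accordingly. What the diff itself does not show is how user-facing imports keep working after the move. Below is a minimal sketch of the re-export pattern this restructuring implies, assuming a public shim package; the shim path and its exact contents are illustrative assumptions, not part of this patch, though the re-exported names are taken from the new `keras/src/utils/__init__.py` added earlier in this patch.

# keras/utils/__init__.py -- hypothetical public shim, not part of this diff.
# The public package re-exports symbols from the private implementation
# package, so user code is unaffected by the keras/ -> keras/src/ move.
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.progbar import Progbar
from keras.src.utils.rng_utils import set_random_seed

# User-facing imports stay exactly as before:
#   import keras
#   keras.utils.set_random_seed(1337)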
diff --git a/keras/utils/image_utils.py b/keras/src/utils/image_utils.py similarity index 99% rename from keras/utils/image_utils.py rename to keras/src/utils/image_utils.py index 59722afbfe97..8f5e805c5f75 100644 --- a/keras/utils/image_utils.py +++ b/keras/src/utils/image_utils.py @@ -6,8 +6,8 @@ import numpy as np -from keras import backend -from keras.api_export import keras_export +from keras.src import backend +from keras.src.api_export import keras_export try: from PIL import Image as pil_image diff --git a/keras/utils/io_utils.py b/keras/src/utils/io_utils.py similarity index 97% rename from keras/utils/io_utils.py rename to keras/src/utils/io_utils.py index b2308230117c..32322f405c33 100644 --- a/keras/utils/io_utils.py +++ b/keras/src/utils/io_utils.py @@ -2,8 +2,8 @@ from absl import logging -from keras.api_export import keras_export -from keras.backend.common import global_state +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state @keras_export( diff --git a/keras/utils/io_utils_test.py b/keras/src/utils/io_utils_test.py similarity index 96% rename from keras/utils/io_utils_test.py rename to keras/src/utils/io_utils_test.py index 20eb2add559a..235314de3016 100644 --- a/keras/utils/io_utils_test.py +++ b/keras/src/utils/io_utils_test.py @@ -1,7 +1,7 @@ from unittest.mock import patch -from keras.testing import test_case -from keras.utils import io_utils +from keras.src.testing import test_case +from keras.src.utils import io_utils class TestIoUtils(test_case.TestCase): diff --git a/keras/utils/jax_layer.py b/keras/src/utils/jax_layer.py similarity index 98% rename from keras/utils/jax_layer.py rename to keras/src/utils/jax_layer.py index 83786b0b74f6..9c97f0ac28d4 100644 --- a/keras/utils/jax_layer.py +++ b/keras/src/utils/jax_layer.py @@ -2,14 +2,14 @@ import numpy as np -from keras import backend -from keras import tree -from keras.api_export import keras_export -from keras.layers.layer import Layer -from keras.saving import serialization_lib -from keras.utils import jax_utils -from keras.utils import tracking -from keras.utils.module_utils import jax +from keras.src import backend +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.saving import serialization_lib +from keras.src.utils import jax_utils +from keras.src.utils import tracking +from keras.src.utils.module_utils import jax @keras_export("keras.layers.JaxLayer") diff --git a/keras/utils/jax_layer_test.py b/keras/src/utils/jax_layer_test.py similarity index 98% rename from keras/utils/jax_layer_test.py rename to keras/src/utils/jax_layer_test.py index ba7104946b04..e3b088c78849 100644 --- a/keras/utils/jax_layer_test.py +++ b/keras/src/utils/jax_layer_test.py @@ -7,18 +7,18 @@ import tensorflow as tf from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import metrics -from keras import models -from keras import saving -from keras import testing -from keras import tree -from keras import utils -from keras.export import export_lib -from keras.saving import object_registration -from keras.utils.jax_layer import FlaxLayer -from keras.utils.jax_layer import JaxLayer +from keras.src import backend +from keras.src import layers +from keras.src import metrics +from keras.src import models +from keras.src import saving +from keras.src import testing +from keras.src import tree +from keras.src import utils +from keras.src.export import export_lib +from 
keras.src.saving import object_registration +from keras.src.utils.jax_layer import FlaxLayer +from keras.src.utils.jax_layer import JaxLayer try: import flax diff --git a/keras/utils/jax_utils.py b/keras/src/utils/jax_utils.py similarity index 88% rename from keras/utils/jax_utils.py rename to keras/src/utils/jax_utils.py index 35e26697914c..2ac944eb967d 100644 --- a/keras/utils/jax_utils.py +++ b/keras/src/utils/jax_utils.py @@ -1,4 +1,4 @@ -from keras import backend +from keras.src import backend def is_in_jax_tracing_scope(x=None): diff --git a/keras/utils/model_visualization.py b/keras/src/utils/model_visualization.py similarity index 98% rename from keras/utils/model_visualization.py rename to keras/src/utils/model_visualization.py index 332eb6d97cc6..ee0a47bfaa39 100644 --- a/keras/utils/model_visualization.py +++ b/keras/src/utils/model_visualization.py @@ -3,9 +3,9 @@ import os import sys -from keras import tree -from keras.api_export import keras_export -from keras.utils import io_utils +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.utils import io_utils try: # pydot-ng is a fork of pydot that is better maintained. @@ -202,7 +202,7 @@ def model_to_dot( a `pydot.Cluster` instance representing nested model if `subgraph=True`. """ - from keras.ops.function import make_node_key + from keras.src.ops.function import make_node_key if not model.built: raise ValueError( @@ -211,10 +211,10 @@ def model_to_dot( "the model on a batch of data." ) - from keras.models import functional - from keras.models import sequential + from keras.src.models import functional + from keras.src.models import sequential - # from keras.layers import Wrapper + # from keras.src.layers import Wrapper if not check_pydot(): raise ImportError( diff --git a/keras/utils/module_utils.py b/keras/src/utils/module_utils.py similarity index 100% rename from keras/utils/module_utils.py rename to keras/src/utils/module_utils.py diff --git a/keras/utils/naming.py b/keras/src/utils/naming.py similarity index 94% rename from keras/utils/naming.py rename to keras/src/utils/naming.py index b16f429fcd06..28107f0f30f4 100644 --- a/keras/utils/naming.py +++ b/keras/src/utils/naming.py @@ -1,8 +1,8 @@ import collections import re -from keras.api_export import keras_export -from keras.backend.common import global_state +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state def auto_name(prefix): diff --git a/keras/utils/naming_test.py b/keras/src/utils/naming_test.py similarity index 98% rename from keras/utils/naming_test.py rename to keras/src/utils/naming_test.py index 6be61fdbefe3..00e3f6bdda30 100644 --- a/keras/utils/naming_test.py +++ b/keras/src/utils/naming_test.py @@ -1,5 +1,5 @@ -from keras.testing import test_case -from keras.utils import naming +from keras.src.testing import test_case +from keras.src.utils import naming class NamingUtilsTest(test_case.TestCase): diff --git a/keras/utils/numerical_utils.py b/keras/src/utils/numerical_utils.py similarity index 97% rename from keras/utils/numerical_utils.py rename to keras/src/utils/numerical_utils.py index db922c4f07ef..05fb82abc522 100644 --- a/keras/utils/numerical_utils.py +++ b/keras/src/utils/numerical_utils.py @@ -1,7 +1,7 @@ import numpy as np -from keras import backend -from keras.api_export import keras_export +from keras.src import backend +from keras.src.api_export import keras_export @keras_export("keras.utils.normalize") @@ -19,7 +19,7 @@ def normalize(x, axis=-1, order=2): 
Returns: A normalized copy of the array. """ - from keras import ops + from keras.src import ops if isinstance(x, np.ndarray): # NumPy input diff --git a/keras/utils/numerical_utils_test.py b/keras/src/utils/numerical_utils_test.py similarity index 96% rename from keras/utils/numerical_utils_test.py rename to keras/src/utils/numerical_utils_test.py index d4cc7de575ad..2cb8c4c5e782 100644 --- a/keras/utils/numerical_utils_test.py +++ b/keras/src/utils/numerical_utils_test.py @@ -1,9 +1,9 @@ import numpy as np from absl.testing import parameterized -from keras import backend -from keras import testing -from keras.utils import numerical_utils +from keras.src import backend +from keras.src import testing +from keras.src.utils import numerical_utils NUM_CLASSES = 5 diff --git a/keras/utils/progbar.py b/keras/src/utils/progbar.py similarity index 98% rename from keras/utils/progbar.py rename to keras/src/utils/progbar.py index 1f60b1818e3d..e2b61a041b02 100644 --- a/keras/utils/progbar.py +++ b/keras/src/utils/progbar.py @@ -3,9 +3,9 @@ import sys import time -from keras import backend -from keras.api_export import keras_export -from keras.utils import io_utils +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.utils import io_utils @keras_export("keras.utils.Progbar") diff --git a/keras/utils/python_utils.py b/keras/src/utils/python_utils.py similarity index 100% rename from keras/utils/python_utils.py rename to keras/src/utils/python_utils.py diff --git a/keras/utils/python_utils_test.py b/keras/src/utils/python_utils_test.py similarity index 97% rename from keras/utils/python_utils_test.py rename to keras/src/utils/python_utils_test.py index 33d7e72e3d1a..2ca2a72d341c 100644 --- a/keras/utils/python_utils_test.py +++ b/keras/src/utils/python_utils_test.py @@ -1,8 +1,8 @@ import base64 import marshal -from keras import testing -from keras.utils import python_utils +from keras.src import testing +from keras.src.utils import python_utils class PythonUtilsTest(testing.TestCase): diff --git a/keras/utils/rng_utils.py b/keras/src/utils/rng_utils.py similarity index 92% rename from keras/utils/rng_utils.py rename to keras/src/utils/rng_utils.py index be438d587bcd..15804d0e43e6 100644 --- a/keras/utils/rng_utils.py +++ b/keras/src/utils/rng_utils.py @@ -2,9 +2,9 @@ import numpy as np -from keras import backend -from keras.api_export import keras_export -from keras.utils.module_utils import tensorflow as tf +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.utils.module_utils import tensorflow as tf @keras_export("keras.utils.set_random_seed") diff --git a/keras/utils/rng_utils_test.py b/keras/src/utils/rng_utils_test.py similarity index 89% rename from keras/utils/rng_utils_test.py rename to keras/src/utils/rng_utils_test.py index f6ec741c496c..aef96ddacc43 100644 --- a/keras/utils/rng_utils_test.py +++ b/keras/src/utils/rng_utils_test.py @@ -3,9 +3,9 @@ import tensorflow as tf import keras -from keras import backend -from keras.testing import test_case -from keras.utils import rng_utils +from keras.src import backend +from keras.src.testing import test_case +from keras.src.utils import rng_utils class TestRandomSeedSetting(test_case.TestCase): diff --git a/keras/utils/sequence_utils.py b/keras/src/utils/sequence_utils.py similarity index 99% rename from keras/utils/sequence_utils.py rename to keras/src/utils/sequence_utils.py index 5ba2903fe1ac..8d0c67a08e08 100644 --- a/keras/utils/sequence_utils.py +++ 
b/keras/src/utils/sequence_utils.py @@ -1,6 +1,6 @@ import numpy as np -from keras.api_export import keras_export +from keras.src.api_export import keras_export @keras_export( diff --git a/keras/utils/sequence_utils_test.py b/keras/src/utils/sequence_utils_test.py similarity index 98% rename from keras/utils/sequence_utils_test.py rename to keras/src/utils/sequence_utils_test.py index 7051331fd9d0..0714bd469a92 100644 --- a/keras/utils/sequence_utils_test.py +++ b/keras/src/utils/sequence_utils_test.py @@ -1,5 +1,5 @@ -from keras import testing -from keras.utils import sequence_utils +from keras.src import testing +from keras.src.utils import sequence_utils class PadSequencesTest(testing.TestCase): diff --git a/keras/utils/summary_utils.py b/keras/src/utils/summary_utils.py similarity index 98% rename from keras/utils/summary_utils.py rename to keras/src/utils/summary_utils.py index 18e3ac539cb3..94c82af7ff84 100644 --- a/keras/utils/summary_utils.py +++ b/keras/src/utils/summary_utils.py @@ -10,10 +10,10 @@ # for below imports import rich.table -from keras import backend -from keras import tree -from keras.utils import dtype_utils -from keras.utils import io_utils +from keras.src import backend +from keras.src import tree +from keras.src.utils import dtype_utils +from keras.src.utils import io_utils def count_params(weights): @@ -131,8 +131,8 @@ def print_summary( matches `layer_range[1]`. By default (`None`) all layers in the model are included in the summary. """ - from keras.models import Functional - from keras.models import Sequential + from keras.src.models import Functional + from keras.src.models import Sequential if not print_fn and not io_utils.is_interactive_logging_enabled(): print_fn = io_utils.print_msg diff --git a/keras/utils/summary_utils_test.py b/keras/src/utils/summary_utils_test.py similarity index 92% rename from keras/utils/summary_utils_test.py rename to keras/src/utils/summary_utils_test.py index 51f764a74a40..7b917da4f848 100644 --- a/keras/utils/summary_utils_test.py +++ b/keras/src/utils/summary_utils_test.py @@ -2,10 +2,10 @@ import pytest from absl.testing import parameterized -from keras import layers -from keras import models -from keras import testing -from keras.utils import summary_utils +from keras.src import layers +from keras.src import models +from keras.src import testing +from keras.src.utils import summary_utils class SummaryUtilsTest(testing.TestCase, parameterized.TestCase): diff --git a/keras/utils/text_dataset_utils.py b/keras/src/utils/text_dataset_utils.py similarity index 98% rename from keras/utils/text_dataset_utils.py rename to keras/src/utils/text_dataset_utils.py index 12d2aacd1dc0..d8e5ece971c5 100644 --- a/keras/utils/text_dataset_utils.py +++ b/keras/src/utils/text_dataset_utils.py @@ -1,8 +1,8 @@ import numpy as np -from keras.api_export import keras_export -from keras.utils import dataset_utils -from keras.utils.module_utils import tensorflow as tf +from keras.src.api_export import keras_export +from keras.src.utils import dataset_utils +from keras.src.utils.module_utils import tensorflow as tf @keras_export( diff --git a/keras/utils/text_dataset_utils_test.py b/keras/src/utils/text_dataset_utils_test.py similarity index 99% rename from keras/utils/text_dataset_utils_test.py rename to keras/src/utils/text_dataset_utils_test.py index 023c8f6c7345..6e59b1bb67a3 100644 --- a/keras/utils/text_dataset_utils_test.py +++ b/keras/src/utils/text_dataset_utils_test.py @@ -2,8 +2,8 @@ import random import string -from keras import testing 
-from keras.utils import text_dataset_utils +from keras.src import testing +from keras.src.utils import text_dataset_utils class TextDatasetFromDirectoryTest(testing.TestCase): diff --git a/keras/utils/tf_utils.py b/keras/src/utils/tf_utils.py similarity index 98% rename from keras/utils/tf_utils.py rename to keras/src/utils/tf_utils.py index 5a622a3a0efe..ea8e45aaf7c7 100644 --- a/keras/utils/tf_utils.py +++ b/keras/src/utils/tf_utils.py @@ -1,4 +1,4 @@ -from keras.utils.module_utils import tensorflow as tf +from keras.src.utils.module_utils import tensorflow as tf def expand_dims(inputs, axis): diff --git a/keras/utils/timeseries_dataset_utils.py b/keras/src/utils/timeseries_dataset_utils.py similarity index 98% rename from keras/utils/timeseries_dataset_utils.py rename to keras/src/utils/timeseries_dataset_utils.py index c0a60482fb32..bf0997b98bbe 100644 --- a/keras/utils/timeseries_dataset_utils.py +++ b/keras/src/utils/timeseries_dataset_utils.py @@ -1,7 +1,7 @@ import numpy as np -from keras.api_export import keras_export -from keras.utils.module_utils import tensorflow as tf +from keras.src.api_export import keras_export +from keras.src.utils.module_utils import tensorflow as tf @keras_export( diff --git a/keras/utils/timeseries_dataset_utils_test.py b/keras/src/utils/timeseries_dataset_utils_test.py similarity index 98% rename from keras/utils/timeseries_dataset_utils_test.py rename to keras/src/utils/timeseries_dataset_utils_test.py index a68ee36ae216..98c75a425e3c 100644 --- a/keras/utils/timeseries_dataset_utils_test.py +++ b/keras/src/utils/timeseries_dataset_utils_test.py @@ -1,7 +1,7 @@ import numpy as np -from keras import testing -from keras.utils import timeseries_dataset_utils +from keras.src import testing +from keras.src.utils import timeseries_dataset_utils class TimeseriesDatasetTest(testing.TestCase): diff --git a/keras/utils/torch_utils.py b/keras/src/utils/torch_utils.py similarity index 93% rename from keras/utils/torch_utils.py rename to keras/src/utils/torch_utils.py index 60d0f7642b65..11cc136f508f 100644 --- a/keras/utils/torch_utils.py +++ b/keras/src/utils/torch_utils.py @@ -2,10 +2,10 @@ from packaging.version import parse -from keras.api_export import keras_export -from keras.layers import Layer -from keras.ops import convert_to_numpy -from keras.ops import convert_to_tensor +from keras.src.api_export import keras_export +from keras.src.layers import Layer +from keras.src.ops import convert_to_numpy +from keras.src.ops import convert_to_tensor @keras_export("keras.layers.TorchModuleWrapper") @@ -33,7 +33,7 @@ class TorchModuleWrapper(Layer): import torch.nn.functional as F import keras - from keras.layers import TorchModuleWrapper + from keras.src.layers import TorchModuleWrapper class Classifier(keras.Model): def __init__(self, **kwargs): @@ -79,7 +79,7 @@ def __init__(self, module, name=None, **kwargs): super().__init__(name=name, **kwargs) import torch.nn as nn - from keras.backend.torch.core import get_device + from keras.src.backend.torch.core import get_device if ( isinstance(module, nn.modules.lazy.LazyModuleMixin) @@ -98,7 +98,7 @@ def parameters(self, recurse=True): return self.module.parameters(recurse=recurse) def _track_module_parameters(self): - from keras.backend.torch import Variable + from keras.src.backend.torch import Variable for param in self.module.parameters(): # The Variable will reuse the raw `param` diff --git a/keras/utils/torch_utils_test.py b/keras/src/utils/torch_utils_test.py similarity index 97% rename from 
keras/utils/torch_utils_test.py rename to keras/src/utils/torch_utils_test.py index 9b8ff31d7fd8..7e972f5b1b56 100644 --- a/keras/utils/torch_utils_test.py +++ b/keras/src/utils/torch_utils_test.py @@ -5,12 +5,12 @@ import torch from absl.testing import parameterized -from keras import backend -from keras import layers -from keras import models -from keras import saving -from keras import testing -from keras.utils.torch_utils import TorchModuleWrapper +from keras.src import backend +from keras.src import layers +from keras.src import models +from keras.src import saving +from keras.src import testing +from keras.src.utils.torch_utils import TorchModuleWrapper class Classifier(models.Model): diff --git a/keras/utils/traceback_utils.py b/keras/src/utils/traceback_utils.py similarity index 98% rename from keras/utils/traceback_utils.py rename to keras/src/utils/traceback_utils.py index 2b0bddefad46..88c3e9ac0ba2 100644 --- a/keras/utils/traceback_utils.py +++ b/keras/src/utils/traceback_utils.py @@ -4,10 +4,10 @@ import types from functools import wraps -from keras import backend -from keras import tree -from keras.api_export import keras_export -from keras.backend.common import global_state +from keras.src import backend +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state _EXCLUDED_PATHS = ( os.path.abspath(os.path.join(__file__, "..", "..")), diff --git a/keras/utils/tracking.py b/keras/src/utils/tracking.py similarity index 96% rename from keras/utils/tracking.py rename to keras/src/utils/tracking.py index 4a3a76c15d6c..02678de336a0 100644 --- a/keras/utils/tracking.py +++ b/keras/src/utils/tracking.py @@ -1,9 +1,9 @@ from functools import wraps -from keras import tree -from keras.backend.common.global_state import get_global_attribute -from keras.backend.common.global_state import set_global_attribute -from keras.utils import python_utils +from keras.src import tree +from keras.src.backend.common.global_state import get_global_attribute +from keras.src.backend.common.global_state import set_global_attribute +from keras.src.utils import python_utils class DotNotTrackScope: @@ -234,7 +234,7 @@ def clear(self): super().clear() def tree_flatten(self): - from keras.utils.module_utils import optree + from keras.src.utils.module_utils import optree # For optree keys, values = optree.utils.unzip2( @@ -244,7 +244,7 @@ def tree_flatten(self): @classmethod def tree_unflatten(cls, keys, values): - from keras.utils.module_utils import optree + from keras.src.utils.module_utils import optree # For optree return cls(optree.utils.safe_zip(keys, values)) diff --git a/keras/utils/tracking_test.py b/keras/src/utils/tracking_test.py similarity index 96% rename from keras/utils/tracking_test.py rename to keras/src/utils/tracking_test.py index e05379a8971c..dd5e9fc90037 100644 --- a/keras/utils/tracking_test.py +++ b/keras/src/utils/tracking_test.py @@ -1,8 +1,8 @@ import collections -from keras import backend -from keras import testing -from keras.utils import tracking +from keras.src import backend +from keras.src import testing +from keras.src.utils import tracking class TrackingTest(testing.TestCase): diff --git a/keras/version.py b/keras/src/version.py similarity index 75% rename from keras/version.py rename to keras/src/version.py index 6d3cb0e144eb..3e7f01c4e942 100644 --- a/keras/version.py +++ b/keras/src/version.py @@ -1,4 +1,4 @@ -from keras.api_export import keras_export +from keras.src.api_export import keras_export # Unique 
source of truth for the version number. __version__ = "3.2.1" diff --git a/keras/testing/__init__.py b/keras/testing/__init__.py deleted file mode 100644 index 34dab7c0c52d..000000000000 --- a/keras/testing/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from keras.testing.test_case import TestCase -from keras.testing.test_case import jax_uses_gpu -from keras.testing.test_case import tensorflow_uses_gpu -from keras.testing.test_case import torch_uses_gpu -from keras.testing.test_case import uses_gpu diff --git a/keras/tree/__init__.py b/keras/tree/__init__.py deleted file mode 100644 index fc6a783879de..000000000000 --- a/keras/tree/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from keras.tree.tree_api import assert_same_structure -from keras.tree.tree_api import flatten -from keras.tree.tree_api import is_nested -from keras.tree.tree_api import lists_to_tuples -from keras.tree.tree_api import map_shape_structure -from keras.tree.tree_api import map_structure -from keras.tree.tree_api import map_structure_up_to -from keras.tree.tree_api import pack_sequence_as -from keras.tree.tree_api import register_tree_node_class -from keras.tree.tree_api import traverse diff --git a/keras/utils/__init__.py b/keras/utils/__init__.py deleted file mode 100644 index d02fc32c066d..000000000000 --- a/keras/utils/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from keras.utils.audio_dataset_utils import audio_dataset_from_directory -from keras.utils.dataset_utils import split_dataset -from keras.utils.file_utils import get_file -from keras.utils.image_dataset_utils import image_dataset_from_directory -from keras.utils.image_utils import array_to_img -from keras.utils.image_utils import img_to_array -from keras.utils.image_utils import load_img -from keras.utils.image_utils import save_img -from keras.utils.io_utils import disable_interactive_logging -from keras.utils.io_utils import enable_interactive_logging -from keras.utils.io_utils import is_interactive_logging_enabled -from keras.utils.model_visualization import model_to_dot -from keras.utils.model_visualization import plot_model -from keras.utils.numerical_utils import normalize -from keras.utils.numerical_utils import to_categorical -from keras.utils.progbar import Progbar -from keras.utils.python_utils import default -from keras.utils.python_utils import is_default -from keras.utils.python_utils import removeprefix -from keras.utils.python_utils import removesuffix -from keras.utils.rng_utils import set_random_seed -from keras.utils.sequence_utils import pad_sequences -from keras.utils.text_dataset_utils import text_dataset_from_directory -from keras.utils.timeseries_dataset_utils import timeseries_dataset_from_array diff --git a/pip_build.py b/pip_build.py index 2e3cb7f13e99..887c7119e269 100644 --- a/pip_build.py +++ b/pip_build.py @@ -20,10 +20,9 @@ import glob import os import pathlib +import re import shutil -import namex - # Needed because importing torch after TF causes the runtime to crash import torch # noqa: F401 @@ -33,112 +32,6 @@ to_copy = ["setup.py", "README.md"] -def ignore_files(_, filenames): - return [f for f in filenames if f.endswith("_test.py")] - - -def copy_source_to_build_directory(root_path): - # Copy sources (`keras/` directory and setup files) to build - # directory - os.chdir(root_path) - os.mkdir(build_directory) - shutil.copytree( - package, os.path.join(build_directory, package), ignore=ignore_files - ) - for fname in to_copy: - shutil.copy(fname, os.path.join(f"{build_directory}", fname)) - os.chdir(build_directory) - - -def 
run_namex_conversion(): - # Restructure the codebase so that source files live in `keras/src` - namex.convert_codebase(package, code_directory="src") - - # Generate API __init__.py files in `keras/` - namex.generate_api_files(package, code_directory="src", verbose=True) - - -def create_legacy_directory(): - # Make keras/_tf_keras/ by copying keras/ - tf_keras_dirpath_parent = os.path.join(package, "_tf_keras") - tf_keras_dirpath = os.path.join(tf_keras_dirpath_parent, "keras") - os.makedirs(tf_keras_dirpath) - with open(os.path.join(tf_keras_dirpath_parent, "__init__.py"), "w") as f: - f.write("from keras._tf_keras import keras\n") - with open(os.path.join(package, "__init__.py")) as f: - init_file = f.read() - init_file = init_file.replace( - "from keras import _legacy", - "from keras import _tf_keras", - ) - with open(os.path.join(package, "__init__.py"), "w") as f: - f.write(init_file) - with open(os.path.join(tf_keras_dirpath, "__init__.py"), "w") as f: - f.write(init_file) - for dirname in os.listdir(package): - dirpath = os.path.join(package, dirname) - if os.path.isdir(dirpath) and dirname not in ( - "_legacy", - "_tf_keras", - "src", - ): - shutil.copytree( - dirpath, - os.path.join(tf_keras_dirpath, dirname), - ignore=ignore_files, - ) - - # Copy keras/_legacy/ file contents to keras/_tf_keras/keras - legacy_submodules = [ - path[:-3] - for path in os.listdir(os.path.join(package, "src", "legacy")) - if path.endswith(".py") - ] - legacy_submodules += [ - path - for path in os.listdir(os.path.join(package, "src", "legacy")) - if os.path.isdir(os.path.join(package, "src", "legacy", path)) - ] - - for root, _, fnames in os.walk(os.path.join(package, "_legacy")): - for fname in fnames: - if fname.endswith(".py"): - legacy_fpath = os.path.join(root, fname) - tf_keras_root = root.replace("/_legacy", "/_tf_keras/keras") - core_api_fpath = os.path.join( - root.replace("/_legacy", ""), fname - ) - if not os.path.exists(tf_keras_root): - os.makedirs(tf_keras_root) - tf_keras_fpath = os.path.join(tf_keras_root, fname) - with open(legacy_fpath) as f: - legacy_contents = f.read() - legacy_contents = legacy_contents.replace( - "keras._legacy", "keras._tf_keras.keras" - ) - if os.path.exists(core_api_fpath): - with open(core_api_fpath) as f: - core_api_contents = f.read() - core_api_contents = core_api_contents.replace( - "from keras import _tf_keras\n", "" - ) - for legacy_submodule in legacy_submodules: - core_api_contents = core_api_contents.replace( - f"from keras import {legacy_submodule}\n", - "", - ) - core_api_contents = core_api_contents.replace( - f"keras.{legacy_submodule}", - f"keras._tf_keras.keras.{legacy_submodule}", - ) - legacy_contents = core_api_contents + "\n" + legacy_contents - with open(tf_keras_fpath, "w") as f: - f.write(legacy_contents) - - # Delete keras/_legacy/ - shutil.rmtree(os.path.join(package, "_legacy")) - - def export_version_string(version, is_nightly=False, rc_index=None): """Export Version and Package Name.""" if is_nightly: @@ -156,10 +49,15 @@ def export_version_string(version, is_nightly=False, rc_index=None): version += "rc" + str(rc_index) # Make sure to export the __version__ string - with open(os.path.join(package, "__init__.py")) as f: + with open(os.path.join(package, "src", "version.py")) as f: init_contents = f.read() - with open(os.path.join(package, "__init__.py"), "w") as f: - f.write(init_contents + "\n\n" + f'__version__ = "{version}"\n') + with open(os.path.join(package, "src", "version.py"), "w") as f: + init_contents = re.sub( + 
"\n__version__ = .*\n", + f'\n__version__ = "{version}"\n', + init_contents, + ) + f.write(init_contents) def build_and_save_output(root_path, __version__): @@ -188,20 +86,10 @@ def build_and_save_output(root_path, __version__): def build(root_path, is_nightly=False, rc_index=None): - if os.path.exists(build_directory): - raise ValueError(f"Directory already exists: {build_directory}") - - try: - copy_source_to_build_directory(root_path) - run_namex_conversion() - create_legacy_directory() - from keras.src.version import __version__ # noqa: E402 - - export_version_string(__version__, is_nightly, rc_index) - return build_and_save_output(root_path, __version__) - finally: - # Clean up: remove the build directory (no longer needed) - shutil.rmtree(build_directory) + from keras.src.version import __version__ # noqa: E402 + + export_version_string(__version__, is_nightly, rc_index) + return build_and_save_output(root_path, __version__) def install_whl(whl_fpath): diff --git a/setup.py b/setup.py index 1a3fbf43e8b7..a78f07dda269 100644 --- a/setup.py +++ b/setup.py @@ -23,10 +23,7 @@ def get_version(rel_path): HERE = pathlib.Path(__file__).parent README = (HERE / "README.md").read_text() -if os.path.exists("keras/version.py"): - VERSION = get_version("keras/version.py") -else: - VERSION = get_version("keras/__init__.py") +VERSION = get_version("keras/src/version.py") setup( name="keras", diff --git a/shell/api_gen.sh b/shell/api_gen.sh new file mode 100755 index 000000000000..92cf6c7c2471 --- /dev/null +++ b/shell/api_gen.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -Eeuo pipefail + +base_dir=$(dirname $(dirname $0)) + +echo "Generating api directory with public APIs..." +# Generate API Files +python3 "${base_dir}"/api_gen.py + +echo "Formatting api directory..." 
+# Format API Files +bash "${base_dir}"/shell/format.sh > /dev/null 2>&1 From 9fd057bfd4d70bee6061d957e6993eda6cccc0d1 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Tue, 16 Apr 2024 15:56:44 -0700 Subject: [PATCH 014/101] Update APIs --- keras/src/dtype_policies/dtype_policy.py | 11 ++--------- keras/src/quantizers/quantizers.py | 10 +++++----- keras/src/version.py | 2 +- 3 files changed, 8 insertions(+), 15 deletions(-) diff --git a/keras/src/dtype_policies/dtype_policy.py b/keras/src/dtype_policies/dtype_policy.py index 75b39f075a26..2618e118e2bf 100644 --- a/keras/src/dtype_policies/dtype_policy.py +++ b/keras/src/dtype_policies/dtype_policy.py @@ -194,9 +194,7 @@ def __repr__(self): return f'' -@keras_export( - ["keras.QuantizedDTypePolicy", "keras.dtype_policies.QuantizedDTypePolicy"] -) +@keras_export("keras.dtype_policies.QuantizedDTypePolicy") class QuantizedDTypePolicy(DTypePolicy): def __init__(self, name): super().__init__(name) @@ -258,12 +256,7 @@ def _get_all_valid_policies(self): return valid_policies -@keras_export( - [ - "keras.QuantizedFloat8DTypePolicy", - "keras.dtype_policies.QuantizedFloat8DTypePolicy", - ] -) +@keras_export("keras.dtype_policies.QuantizedFloat8DTypePolicy") class QuantizedFloat8DTypePolicy(QuantizedDTypePolicy): def __init__(self, name, amax_history_length=1024): super().__init__(name) diff --git a/keras/src/quantizers/quantizers.py b/keras/src/quantizers/quantizers.py index ccf7cc42de06..a5fddfbaab3c 100644 --- a/keras/src/quantizers/quantizers.py +++ b/keras/src/quantizers/quantizers.py @@ -57,7 +57,7 @@ def get_config(self): raise NotImplementedError(f"{self} does not implement get_config()") -@keras_export(["keras.quantizers.abs_max_quantize"]) +@keras_export("keras.quantizers.abs_max_quantize") def abs_max_quantize( inputs, axis, @@ -75,7 +75,7 @@ def abs_max_quantize( return outputs, scale -@keras_export(["keras.AbsMaxQuantizer", "keras.quantizers.AbsMaxQuantizer"]) +@keras_export("keras.quantizers.AbsMaxQuantizer") class AbsMaxQuantizer(Quantizer): def __init__( self, @@ -109,7 +109,7 @@ def get_config(self): """Float8-related methods""" -@keras_export(["keras.quantizers.compute_float8_scale"]) +@keras_export("keras.quantizers.compute_float8_scale") def compute_float8_scale(amax, scale, dtype_max, margin=0): # The algorithm for computing the new scale is sourced from # https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/jax.html#transformer_engine.jax.update_fp8_metas @@ -122,7 +122,7 @@ def compute_float8_scale(amax, scale, dtype_max, margin=0): return ops.reciprocal(sf) -@keras_export(["keras.quantizers.compute_float8_amax_history"]) +@keras_export("keras.quantizers.compute_float8_amax_history") def compute_float8_amax_history(x, amax_history): amax_update = ops.cast(ops.max(ops.abs(x)), amax_history.dtype) new_amax_history = ops.scatter_update( @@ -133,7 +133,7 @@ def compute_float8_amax_history(x, amax_history): return new_amax_history -@keras_export(["keras.quantizers.quantize_and_dequantize"]) +@keras_export("keras.quantizers.quantize_and_dequantize") def quantize_and_dequantize(inputs, scale, quantized_dtype, compute_dtype): # Quantize quantized_dtype_max = ops.cast( diff --git a/keras/src/version.py b/keras/src/version.py index 3e7f01c4e942..c0168d651dae 100644 --- a/keras/src/version.py +++ b/keras/src/version.py @@ -1,7 +1,7 @@ from keras.src.api_export import keras_export # Unique source of truth for the version number. 
-__version__ = "3.2.1" +__version__ = "3.3.0" @keras_export("keras.version") From e57b138cec3c9a204f7f686355a1a5bf76fe63e5 Mon Sep 17 00:00:00 2001 From: hertschuh <1091026+hertschuh@users.noreply.github.com> Date: Tue, 16 Apr 2024 18:59:20 -0700 Subject: [PATCH 015/101] Added metrics from custom `train_step`/`test_step` are now returned. (#19529) This works the same way as in Keras 2, whereby the metrics are returned directly from the logs if the set of keys doesn't match the model metrics. --- keras/src/backend/jax/trainer.py | 12 +++-- keras/src/backend/numpy/trainer.py | 5 +- keras/src/backend/tensorflow/trainer.py | 14 ++--- keras/src/backend/torch/trainer.py | 12 +++-- keras/src/trainers/trainer.py | 31 +++++++++++ keras/src/trainers/trainer_test.py | 70 +++++++++++++++++++++++++ 6 files changed, 125 insertions(+), 19 deletions(-) diff --git a/keras/src/backend/jax/trainer.py b/keras/src/backend/jax/trainer.py index 89ff3b0d1b7f..dd805251995d 100644 --- a/keras/src/backend/jax/trainer.py +++ b/keras/src/backend/jax/trainer.py @@ -437,7 +437,8 @@ def fit( } # Callbacks - callbacks.on_train_batch_end(step, self._pythonify_logs(logs)) + logs = self._pythonify_logs(logs) + callbacks.on_train_batch_end(step, logs) if self.stop_training: break @@ -446,12 +447,12 @@ def fit( # bottleneck. self.jax_state_sync() - # Override with model metrics instead of last step logs + # Override with model metrics instead of last step logs if needed. # The jax spmd_mode is need for multi-process context, since the # metrics values are replicated, and we don't want to do a all # gather, and only need the local copy of the value. with jax.spmd_mode("allow_all"): - epoch_logs = self.get_metrics_result() + epoch_logs = dict(self._get_metrics_result_or_logs(logs)) # Run validation. if validation_data is not None and self._should_eval( @@ -585,7 +586,8 @@ def evaluate( "non_trainable_variables": non_trainable_variables, "metrics_variables": metrics_variables, } - callbacks.on_test_batch_end(step, self._pythonify_logs(logs)) + logs = self._pythonify_logs(logs) + callbacks.on_test_batch_end(step, logs) if self.stop_evaluating: break @@ -596,7 +598,7 @@ def evaluate( # metrics values are replicated, and we don't want to do a all # gather, and only need the local copy of the value. 
         with jax.spmd_mode("allow_all"):
-            logs = self.get_metrics_result()
+            logs = self._get_metrics_result_or_logs(logs)
         callbacks.on_test_end(logs)
         self._jax_state = None
         if return_dict:
diff --git a/keras/src/backend/numpy/trainer.py b/keras/src/backend/numpy/trainer.py
index c92465874673..68eeba340f21 100644
--- a/keras/src/backend/numpy/trainer.py
+++ b/keras/src/backend/numpy/trainer.py
@@ -269,10 +269,11 @@ def evaluate(
         for step, data in epoch_iterator.enumerate_epoch():
             callbacks.on_test_batch_begin(step)
             logs = self.test_function(data)
-            callbacks.on_test_batch_end(step, self._pythonify_logs(logs))
+            logs = self._pythonify_logs(logs)
+            callbacks.on_test_batch_end(step, logs)
             if self.stop_evaluating:
                 break
-        logs = self.get_metrics_result()
+        logs = self._get_metrics_result_or_logs(logs)
         callbacks.on_test_end(logs)

         if return_dict:
diff --git a/keras/src/backend/tensorflow/trainer.py b/keras/src/backend/tensorflow/trainer.py
index 5a6b4aed945d..0f7498f929d5 100644
--- a/keras/src/backend/tensorflow/trainer.py
+++ b/keras/src/backend/tensorflow/trainer.py
@@ -312,14 +312,13 @@ def fit(
                 for step, iterator in epoch_iterator.enumerate_epoch():
                     callbacks.on_train_batch_begin(step)
                     logs = self.train_function(iterator)
-                    callbacks.on_train_batch_end(
-                        step, self._pythonify_logs(logs)
-                    )
+                    logs = self._pythonify_logs(logs)
+                    callbacks.on_train_batch_end(step, logs)
                     if self.stop_training:
                         break

-                # Override with model metrics instead of last step logs
-                epoch_logs = self.get_metrics_result()
+                # Override with model metrics instead of last step logs if needed.
+                epoch_logs = dict(self._get_metrics_result_or_logs(logs))

                 # Run validation.
                 if validation_data is not None and self._should_eval(
@@ -424,10 +423,11 @@ def evaluate(
             for step, iterator in epoch_iterator.enumerate_epoch():
                 callbacks.on_test_batch_begin(step)
                 logs = self.test_function(iterator)
-                callbacks.on_test_batch_end(step, self._pythonify_logs(logs))
+                logs = self._pythonify_logs(logs)
+                callbacks.on_test_batch_end(step, logs)
                 if self.stop_evaluating:
                     break
-            logs = self.get_metrics_result()
+            logs = self._get_metrics_result_or_logs(logs)
             callbacks.on_test_end(logs)

             if return_dict:
diff --git a/keras/src/backend/torch/trainer.py b/keras/src/backend/torch/trainer.py
index 0fb3ff529a47..1572d3c909c1 100644
--- a/keras/src/backend/torch/trainer.py
+++ b/keras/src/backend/torch/trainer.py
@@ -252,14 +252,15 @@ def fit(
                 callbacks.on_train_batch_begin(step)
                 logs = self.train_function(data)
+                logs = self._pythonify_logs(logs)

                 # Callbacks
-                callbacks.on_train_batch_end(step, self._pythonify_logs(logs))
+                callbacks.on_train_batch_end(step, logs)
                 if self.stop_training:
                     break

-            # Override with model metrics instead of last step logs
-            epoch_logs = self.get_metrics_result()
+            # Override with model metrics instead of last step logs if needed.
+            epoch_logs = dict(self._get_metrics_result_or_logs(logs))

             # Switch the torch Module back to testing mode.
            self.eval()

@@ -368,10 +369,11 @@ def evaluate(
         for step, data in epoch_iterator.enumerate_epoch():
             callbacks.on_test_batch_begin(step)
             logs = self.test_function(data)
-            callbacks.on_test_batch_end(step, self._pythonify_logs(logs))
+            logs = self._pythonify_logs(logs)
+            callbacks.on_test_batch_end(step, logs)
             if self.stop_evaluating:
                 break
-        logs = self.get_metrics_result()
+        logs = self._get_metrics_result_or_logs(logs)
         callbacks.on_test_end(logs)

         if return_dict:
diff --git a/keras/src/trainers/trainer.py b/keras/src/trainers/trainer.py
index 0d8fdb85cd6d..8475a72b0112 100644
--- a/keras/src/trainers/trainer.py
+++ b/keras/src/trainers/trainer.py
@@ -899,6 +899,37 @@ def _pythonify_logs(self, logs):
             result[key] = value
         return result

+    def _get_metrics_result_or_logs(self, logs):
+        """Returns model metrics as a dict if the keys match the input logs.
+
+        When the training / evaluation is performed with asynchronous steps,
+        the last scheduled `train / test_step` may not give the latest metrics
+        because it is not guaranteed to be executed last. This method gets
+        metrics from the model directly instead of relying on the return value
+        of the last step function.
+
+        When the user has custom train / test step functions, the metrics
+        returned may be different from `Model.metrics`. In those instances,
+        this function will be a no-op and return the logs passed in.
+
+        Args:
+            logs: A `dict` of metrics returned by the train / test step
+                function.
+
+        Returns:
+            A `dict` containing values of the metrics listed in `self.metrics`
+            when logs and model metrics keys match. Otherwise it returns the
+            input `logs`.
+        """
+        metric_logs = self.get_metrics_result()
+        # Verify that the train / test step logs and the metric logs have
+        # matching keys. They can differ when using custom step functions,
+        # in which case we return the logs from the last step.
+        if isinstance(logs, dict) and set(logs.keys()) == set(
+            metric_logs.keys()
+        ):
+            return metric_logs
+        return logs
+
     def _flatten_metrics_in_order(self, logs):
         """Turns `logs` dict into a list as per key order of `metrics_names`."""
         metric_names = []
diff --git a/keras/src/trainers/trainer_test.py b/keras/src/trainers/trainer_test.py
index 6e125a31cf07..3aed3253ee90 100644
--- a/keras/src/trainers/trainer_test.py
+++ b/keras/src/trainers/trainer_test.py
@@ -44,6 +44,30 @@ def __init__(self, units):
         Trainer.__init__(self)


+class CustomTrainTestStepModel(ExampleModel):
+    def train_step(self, data):
+        logs = super().train_step(data)
+        logs["my_custom_metric"] = 10.0
+        return logs
+
+    def test_step(self, data):
+        logs = super().test_step(data)
+        logs["my_custom_metric"] = 5.0
+        return logs
+
+
+class JaxCustomTrainTestStepModel(ExampleModel):
+    def train_step(self, state, data):
+        logs, state = super().train_step(state, data)
+        logs["my_custom_metric"] = 10.0
+        return logs, state
+
+    def test_step(self, state, data):
+        logs, state = super().test_step(state, data)
+        logs["my_custom_metric"] = 5.0
+        return logs, state
+
+
 class StructModel(Trainer, layers.Layer):
     def __init__(self, units):
         layers.Layer.__init__(self)
@@ -308,6 +332,27 @@ def test_fit_with_val_split(
         self.assertIn("loss", history)
         self.assertIn("val_loss", history)

+    @pytest.mark.requires_trainable_backend
+    def test_fit_with_custom_train_step(self):
+        if backend.backend() == "jax":
+            model = JaxCustomTrainTestStepModel(units=3)
+        else:
+            model = CustomTrainTestStepModel(units=3)
+        x = np.ones((100, 4))
+        y = np.zeros((100, 3))
+        batch_size = 16
+
+        model.compile(
+            optimizer=optimizers.SGD(),
+            loss=losses.MeanSquaredError(),
+            metrics=[metrics.MeanSquaredError()],
+        )
+        history = model.fit(x, y, batch_size=batch_size)
+        history = history.history
+        self.assertIn("loss", history)
+        self.assertIn("mean_squared_error", history)
+        self.assertAllClose(history["my_custom_metric"], 10.0)
+
     @parameterized.named_parameters(
         named_product(
             generator_type=["tf", "jax", "scipy"], mode=["eager", "graph"]
@@ -375,6 +420,31 @@ def test_evaluate_flow(self, run_eagerly, jit_compile):
         self.assertIn("mean_squared_error", output)
         self.assertAllClose(output["mean_squared_error"], 16.0)

+    @parameterized.named_parameters([("flat", False), ("dict", True)])
+    @pytest.mark.requires_trainable_backend
+    def test_evaluate_with_custom_test_step(self, return_dict):
+        if backend.backend() == "jax":
+            model = JaxCustomTrainTestStepModel(units=3)
+        else:
+            model = CustomTrainTestStepModel(units=3)
+        x = np.ones((100, 4))
+        y = np.zeros((100, 3))
+        batch_size = 16
+
+        model.compile(
+            optimizer=optimizers.SGD(),
+            loss=losses.MeanSquaredError(),
+            metrics=[metrics.MeanSquaredError()],
+        )
+        output = model.evaluate(
+            x, y, batch_size=batch_size, return_dict=return_dict
+        )
+        self.assertLen(output, 3)
+        if return_dict:
+            self.assertAllClose(output["my_custom_metric"], 5.0)
+        else:
+            self.assertAllClose(output[-1], 5.0)  # Custom metrics go last.
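The behavior these tests lock in can be exercised directly from user code. Below is a minimal sketch, not part of the patch: it assumes the public Keras 3 API on the TensorFlow or torch backend (on JAX, `train_step` also threads a `state` argument, as `JaxCustomTrainTestStepModel` above shows), and `my_custom_metric` is an illustrative name:

    import numpy as np
    import keras

    class MyModel(keras.Model):
        def train_step(self, data):
            logs = super().train_step(data)
            # Extra keys returned by a custom train_step are no longer
            # replaced by `Model.metrics`; they reach `History.history`.
            logs["my_custom_metric"] = 10.0
            return logs

    inputs = keras.Input(shape=(4,))
    model = MyModel(inputs, keras.layers.Dense(3)(inputs))
    model.compile(optimizer="sgd", loss="mse")
    history = model.fit(np.ones((32, 4)), np.zeros((32, 3)), verbose=0)
    assert "my_custom_metric" in history.history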
+ @parameterized.named_parameters( named_product( generator_type=["tf", "jax", "scipy"], mode=["eager", "graph"] From 13cb10daf9ca6d4b9ebffc522c945a3502ea1c42 Mon Sep 17 00:00:00 2001 From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com> Date: Tue, 16 Apr 2024 22:01:59 -0500 Subject: [PATCH 016/101] Use temp dir and abs path in `api_gen.py` (#19533) * Use temp dir and abs path * Use temp dir and abs path * Update Readme --- README.md | 6 ++++ api_gen.py | 98 ++++++++++++++++++++++++++++++++++-------------------- 2 files changed, 68 insertions(+), 36 deletions(-) diff --git a/README.md b/README.md index c89f3a7d86ae..b8a179b18f65 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,12 @@ pip install -r requirements.txt python pip_build.py --install ``` +3. Run API generation script when creating PRs that update `keras_export` public APIs: + +``` +./shell/api_gen.sh +``` + #### Adding GPU support The `requirements.txt` file will install a CPU-only version of TensorFlow, JAX, and PyTorch. For GPU support, we also diff --git a/api_gen.py b/api_gen.py index 51a0128861ac..28fac8fa4f10 100644 --- a/api_gen.py +++ b/api_gen.py @@ -18,28 +18,41 @@ def ignore_files(_, filenames): return [f for f in filenames if f.endswith("_test.py")] -def create_legacy_directory(): - API_DIR = os.path.join(package, "api") +def copy_source_to_build_directory(root_path): + # Copy sources (`keras/` directory and setup files) to build dir + build_dir = os.path.join(root_path, "tmp_build_dir") + if os.path.exists(build_dir): + shutil.rmtree(build_dir) + os.mkdir(build_dir) + shutil.copytree( + package, os.path.join(build_dir, package), ignore=ignore_files + ) + return build_dir + + +def create_legacy_directory(package_dir): + src_dir = os.path.join(package_dir, "src") + api_dir = os.path.join(package_dir, "api") # Make keras/_tf_keras/ by copying keras/ - tf_keras_dirpath_parent = os.path.join(API_DIR, "_tf_keras") + tf_keras_dirpath_parent = os.path.join(api_dir, "_tf_keras") tf_keras_dirpath = os.path.join(tf_keras_dirpath_parent, "keras") os.makedirs(tf_keras_dirpath, exist_ok=True) with open(os.path.join(tf_keras_dirpath_parent, "__init__.py"), "w") as f: f.write("from keras.api._tf_keras import keras\n") - with open(os.path.join(API_DIR, "__init__.py")) as f: + with open(os.path.join(api_dir, "__init__.py")) as f: init_file = f.read() init_file = init_file.replace( "from keras.api import _legacy", "from keras.api import _tf_keras", ) - with open(os.path.join(API_DIR, "__init__.py"), "w") as f: + with open(os.path.join(api_dir, "__init__.py"), "w") as f: f.write(init_file) # Remove the import of `_tf_keras` in `keras/_tf_keras/keras/__init__.py` init_file = init_file.replace("from keras.api import _tf_keras\n", "\n") with open(os.path.join(tf_keras_dirpath, "__init__.py"), "w") as f: f.write(init_file) - for dirname in os.listdir(API_DIR): - dirpath = os.path.join(API_DIR, dirname) + for dirname in os.listdir(api_dir): + dirpath = os.path.join(api_dir, dirname) if os.path.isdir(dirpath) and dirname not in ( "_legacy", "_tf_keras", @@ -57,16 +70,16 @@ def create_legacy_directory(): # Copy keras/_legacy/ file contents to keras/_tf_keras/keras legacy_submodules = [ path[:-3] - for path in os.listdir(os.path.join(package, "src", "legacy")) + for path in os.listdir(os.path.join(src_dir, "legacy")) if path.endswith(".py") ] legacy_submodules += [ path - for path in os.listdir(os.path.join(package, "src", "legacy")) - if os.path.isdir(os.path.join(package, "src", "legacy", path)) + for path in 
os.listdir(os.path.join(src_dir, "legacy")) + if os.path.isdir(os.path.join(src_dir, "legacy", path)) ] - for root, _, fnames in os.walk(os.path.join(package, "_legacy")): + for root, _, fnames in os.walk(os.path.join(package_dir, "_legacy")): for fname in fnames: if fname.endswith(".py"): legacy_fpath = os.path.join(root, fname) @@ -102,19 +115,18 @@ def create_legacy_directory(): f.write(legacy_contents) # Delete keras/api/_legacy/ - shutil.rmtree(os.path.join(API_DIR, "_legacy")) + shutil.rmtree(os.path.join(api_dir, "_legacy")) -def export_version_string(): - API_INIT = os.path.join(package, "api", "__init__.py") - with open(API_INIT) as f: +def export_version_string(api_init_fname): + with open(api_init_fname) as f: contents = f.read() - with open(API_INIT, "w") as f: + with open(api_init_fname, "w") as f: contents += "from keras.src.version import __version__\n" f.write(contents) -def update_package_init(): +def update_package_init(init_fname): contents = """ # Import everything from /api/ into keras. from keras.api import * # noqa: F403 @@ -142,34 +154,48 @@ def __dir__(): for name in globals().keys() if not (name.startswith("_") or name in ("src", "api")) ]""" - with open(os.path.join(package, "__init__.py")) as f: + with open(init_fname) as f: init_contents = f.read() - with open(os.path.join(package, "__init__.py"), "w") as f: + with open(init_fname, "w") as f: f.write(init_contents.replace("\nfrom keras import api", contents)) -if __name__ == "__main__": +def build(): # Backup the `keras/__init__.py` and restore it on error in api gen. - os.makedirs(os.path.join(package, "api"), exist_ok=True) - init_fname = os.path.join(package, "__init__.py") - backup_init_fname = os.path.join(package, "__init__.py.bak") + root_path = os.path.dirname(os.path.abspath(__file__)) + code_api_dir = os.path.join(root_path, package, "api") + code_init_fname = os.path.join(root_path, package, "__init__.py") + # Create temp build dir + build_dir = copy_source_to_build_directory(root_path) + build_api_dir = os.path.join(build_dir, package, "api") + build_init_fname = os.path.join(build_dir, package, "__init__.py") + build_api_init_fname = os.path.join(build_api_dir, "__init__.py") try: - if os.path.exists(init_fname): - shutil.move(init_fname, backup_init_fname) + os.chdir(build_dir) # Generates `keras/api` directory. 
+ if os.path.exists(build_api_dir): + shutil.rmtree(build_api_dir) + if os.path.exists(build_init_fname): + os.remove(build_init_fname) + os.makedirs(build_api_dir) namex.generate_api_files( "keras", code_directory="src", target_directory="api" ) # Creates `keras/__init__.py` importing from `keras/api` - update_package_init() - except Exception as e: - if os.path.exists(backup_init_fname): - shutil.move(backup_init_fname, init_fname) - raise e + update_package_init(build_init_fname) + # Add __version__ to keras package + export_version_string(build_api_init_fname) + # Creates `_tf_keras` with full keras API + create_legacy_directory(package_dir=os.path.join(build_dir, package)) + # Copy back the keras/api and keras/__init__.py from build directory + if os.path.exists(code_api_dir): + shutil.rmtree(code_api_dir) + shutil.copytree(build_api_dir, code_api_dir) + shutil.copy(build_init_fname, code_init_fname) finally: - if os.path.exists(backup_init_fname): - os.remove(backup_init_fname) - # Add __version__ to keras package - export_version_string() - # Creates `_tf_keras` with full keras API - create_legacy_directory() + # Clean up: remove the build directory (no longer needed) + shutil.rmtree(build_dir) + + +if __name__ == "__main__": + build() From 9e640584c7c2bb0dbac12a637625f6492fb06431 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Tue, 16 Apr 2024 20:03:00 -0700 Subject: [PATCH 017/101] Update API --- keras/api/__init__.py | 3 --- keras/api/_tf_keras/keras/__init__.py | 3 --- 2 files changed, 6 deletions(-) diff --git a/keras/api/__init__.py b/keras/api/__init__.py index d93460c26c53..1750a42e8699 100644 --- a/keras/api/__init__.py +++ b/keras/api/__init__.py @@ -38,8 +38,6 @@ from keras.src.backend.exports import name_scope from keras.src.dtype_policies.dtype_policy import DTypePolicy from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy -from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy -from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy from keras.src.initializers.initializer import Initializer from keras.src.layers.core.input_layer import Input from keras.src.layers.input_spec import InputSpec @@ -51,7 +49,6 @@ from keras.src.ops.function import Function from keras.src.ops.operation import Operation from keras.src.optimizers.optimizer import Optimizer -from keras.src.quantizers.quantizers import AbsMaxQuantizer from keras.src.quantizers.quantizers import Quantizer from keras.src.regularizers.regularizers import Regularizer from keras.src.version import __version__ diff --git a/keras/api/_tf_keras/keras/__init__.py b/keras/api/_tf_keras/keras/__init__.py index 767853b2be3b..334dc282386a 100644 --- a/keras/api/_tf_keras/keras/__init__.py +++ b/keras/api/_tf_keras/keras/__init__.py @@ -37,8 +37,6 @@ from keras.src.backend.exports import name_scope from keras.src.dtype_policies.dtype_policy import DTypePolicy from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy -from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy -from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy from keras.src.initializers.initializer import Initializer from keras.src.layers.core.input_layer import Input from keras.src.layers.input_spec import InputSpec @@ -50,7 +48,6 @@ from keras.src.ops.function import Function from keras.src.ops.operation import Operation from keras.src.optimizers.optimizer import Optimizer -from keras.src.quantizers.quantizers import AbsMaxQuantizer from 
keras.src.quantizers.quantizers import Quantizer from keras.src.regularizers.regularizers import Regularizer from keras.src.version import __version__ From 4adfbd4a2968a0e4c18dc3a1222396893822b026 Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Wed, 17 Apr 2024 11:54:51 +0800 Subject: [PATCH 018/101] Fix gradient accumulation when using `overwrite_with_gradient` during float8 training (#19534) * Fix gradient accumulation with `overwrite_with_gradient` in float8 training * Add comments * Fix annotation --- keras/src/backend/jax/optimizer.py | 23 +++---- keras/src/backend/tensorflow/optimizer.py | 4 +- .../optimizers/torch_parallel_optimizer.py | 4 +- keras/src/optimizers/adadelta.py | 4 ++ keras/src/optimizers/adafactor.py | 4 ++ keras/src/optimizers/adagrad.py | 4 ++ keras/src/optimizers/adam.py | 4 ++ keras/src/optimizers/adamax.py | 4 ++ keras/src/optimizers/adamw.py | 4 ++ keras/src/optimizers/base_optimizer.py | 64 +++++++++++++------ keras/src/optimizers/ftrl.py | 4 ++ keras/src/optimizers/lion.py | 4 ++ keras/src/optimizers/nadam.py | 4 ++ keras/src/optimizers/optimizer_test.py | 44 +++++++++++++ keras/src/optimizers/rmsprop.py | 6 +- keras/src/optimizers/sgd.py | 4 ++ 16 files changed, 149 insertions(+), 36 deletions(-) diff --git a/keras/src/backend/jax/optimizer.py b/keras/src/backend/jax/optimizer.py index cc461ce113a7..fb76e5b389ab 100644 --- a/keras/src/backend/jax/optimizer.py +++ b/keras/src/backend/jax/optimizer.py @@ -25,23 +25,24 @@ def _backend_apply_gradients(self, grads, trainable_variables): ] current_optimizer_vars_value = [v.value for v in self.variables] + # `trainable_variables` might have been filtered in previous + # processing steps, so we need to ensure the correct mapping between + # `self._accumulated_gradients` and `trainable_variables` + acc_grads = [ + self._accumulated_gradients[self._get_variable_index(v)] + for v in trainable_variables + ] + new_g_accs = jax.lax.cond( is_update_step, - lambda: [ - jnp.zeros(x.shape, dtype=x.dtype) - for x in self._accumulated_gradients - ], - lambda: [ - grads[i] + self._accumulated_gradients[i] - for i in range(len(grads)) - ], + lambda: [jnp.zeros(g.shape, dtype=g.dtype) for g in acc_grads], + lambda: [g + acc_g for g, acc_g in zip(grads, acc_grads)], ) grads = jax.lax.cond( is_update_step, lambda: [ - (grads[i] + self._accumulated_gradients[i]) / steps - for i in range(len(grads)) + (g + acc_g) / steps for g, acc_g in zip(grads, acc_grads) ], lambda: list(grads), ) @@ -66,7 +67,7 @@ def _backend_apply_gradients(self, grads, trainable_variables): for value, v in zip(new_opt_vars, self.variables): v.assign(value) - for n_g_acc, g_acc in zip(new_g_accs, self._accumulated_gradients): + for n_g_acc, g_acc in zip(new_g_accs, acc_grads): g_acc.assign(n_g_acc) else: diff --git a/keras/src/backend/tensorflow/optimizer.py b/keras/src/backend/tensorflow/optimizer.py index 1887ac52cf39..ca40a1f42b39 100644 --- a/keras/src/backend/tensorflow/optimizer.py +++ b/keras/src/backend/tensorflow/optimizer.py @@ -196,11 +196,11 @@ def _overwrite_model_variables_with_average_value( var, lambda a, b: a.assign(b), args=(average_var,) ) - def _backend_increment_gradient_accumulators(self, grads): + def _backend_increment_gradient_accumulators(self, grads, acc_grads): def update_accumulator(var, grad): var.assign(var + grad) - accumulators = [v.value for v in self._accumulated_gradients] + accumulators = [v.value for v in acc_grads] def _distributed_tf_increment_grad_acc( distribution, 
grads, accumulators diff --git a/keras/src/backend/torch/optimizers/torch_parallel_optimizer.py b/keras/src/backend/torch/optimizers/torch_parallel_optimizer.py index 4fe3802af226..a8fe778ee665 100644 --- a/keras/src/backend/torch/optimizers/torch_parallel_optimizer.py +++ b/keras/src/backend/torch/optimizers/torch_parallel_optimizer.py @@ -19,6 +19,6 @@ def _backend_reset_gradient_accumulators(self): torch._foreach_mul_(acc_list, 0.0) @torch_utils.no_grad - def _backend_increment_gradient_accumulators(self, grads): - acc_list = [v.value for v in self._accumulated_gradients] + def _backend_increment_gradient_accumulators(self, grads, acc_grads): + acc_list = [v.value for v in acc_grads] torch._foreach_add_(acc_list, grads, alpha=1.0) diff --git a/keras/src/optimizers/adadelta.py b/keras/src/optimizers/adadelta.py index 1f2f3835aec4..4ec7d936c242 100644 --- a/keras/src/optimizers/adadelta.py +++ b/keras/src/optimizers/adadelta.py @@ -49,6 +49,8 @@ def __init__( use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, + loss_scale_factor=None, + gradient_accumulation_steps=None, name="adadelta", **kwargs, ): @@ -61,6 +63,8 @@ def __init__( use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, + loss_scale_factor=loss_scale_factor, + gradient_accumulation_steps=gradient_accumulation_steps, name=name, **kwargs, ) diff --git a/keras/src/optimizers/adafactor.py b/keras/src/optimizers/adafactor.py index 8635f1d9d8c0..bf94b6f37fb5 100644 --- a/keras/src/optimizers/adafactor.py +++ b/keras/src/optimizers/adafactor.py @@ -56,6 +56,8 @@ def __init__( use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, + loss_scale_factor=None, + gradient_accumulation_steps=None, name="adafactor", **kwargs, ): @@ -69,6 +71,8 @@ def __init__( use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, + loss_scale_factor=loss_scale_factor, + gradient_accumulation_steps=gradient_accumulation_steps, **kwargs, ) self.beta_2_decay = beta_2_decay diff --git a/keras/src/optimizers/adagrad.py b/keras/src/optimizers/adagrad.py index 836356ba13c2..856a6c24e0b6 100644 --- a/keras/src/optimizers/adagrad.py +++ b/keras/src/optimizers/adagrad.py @@ -44,6 +44,8 @@ def __init__( use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, + loss_scale_factor=None, + gradient_accumulation_steps=None, name="adagrad", **kwargs, ): @@ -56,6 +58,8 @@ def __init__( use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, + loss_scale_factor=loss_scale_factor, + gradient_accumulation_steps=gradient_accumulation_steps, name=name, **kwargs, ) diff --git a/keras/src/optimizers/adam.py b/keras/src/optimizers/adam.py index 585819322404..b7da957e74ce 100644 --- a/keras/src/optimizers/adam.py +++ b/keras/src/optimizers/adam.py @@ -54,6 +54,8 @@ def __init__( use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, + loss_scale_factor=None, + gradient_accumulation_steps=None, name="adam", **kwargs, ): @@ -67,6 +69,8 @@ def __init__( use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, + loss_scale_factor=loss_scale_factor, + gradient_accumulation_steps=gradient_accumulation_steps, **kwargs, ) self.beta_1 = beta_1 diff --git a/keras/src/optimizers/adamax.py b/keras/src/optimizers/adamax.py index 338afcc5735c..d634c70224dc 100644 --- a/keras/src/optimizers/adamax.py +++ b/keras/src/optimizers/adamax.py @@ -63,6 +63,8 @@ def __init__( use_ema=False, ema_momentum=0.99, 
 ema_overwrite_frequency=None,
+        loss_scale_factor=None,
+        gradient_accumulation_steps=None,
         name="adamax",
         **kwargs,
     ):
@@ -76,6 +78,8 @@ def __init__(
             use_ema=use_ema,
             ema_momentum=ema_momentum,
             ema_overwrite_frequency=ema_overwrite_frequency,
+            loss_scale_factor=loss_scale_factor,
+            gradient_accumulation_steps=gradient_accumulation_steps,
             **kwargs,
         )
         self.beta_1 = beta_1
diff --git a/keras/src/optimizers/adamw.py b/keras/src/optimizers/adamw.py
index e52d3b7188a1..945002abdb87 100644
--- a/keras/src/optimizers/adamw.py
+++ b/keras/src/optimizers/adamw.py
@@ -64,6 +64,8 @@ def __init__(
         use_ema=False,
         ema_momentum=0.99,
         ema_overwrite_frequency=None,
+        loss_scale_factor=None,
+        gradient_accumulation_steps=None,
         name="adamw",
         **kwargs,
     ):
@@ -81,6 +83,8 @@ def __init__(
             use_ema=use_ema,
             ema_momentum=ema_momentum,
             ema_overwrite_frequency=ema_overwrite_frequency,
+            loss_scale_factor=loss_scale_factor,
+            gradient_accumulation_steps=gradient_accumulation_steps,
             **kwargs,
         )
diff --git a/keras/src/optimizers/base_optimizer.py b/keras/src/optimizers/base_optimizer.py
index b368a203bebf..94ca0ea1ceed 100644
--- a/keras/src/optimizers/base_optimizer.py
+++ b/keras/src/optimizers/base_optimizer.py
@@ -1,8 +1,6 @@
 import re
 import warnings

-import numpy as np
-
 from keras.src import backend
 from keras.src import initializers
 from keras.src import ops
@@ -375,27 +373,31 @@ def _backend_apply_gradients(self, grads, trainable_variables):
             is_update_step = (
                 self.iterations + 1
             ) % self.gradient_accumulation_steps == 0
+            # `trainable_variables` might have been filtered in previous
+            # processing steps, so we need to ensure the correct mapping between
+            # `self._accumulated_gradients` and `trainable_variables`
+            acc_grads = [
+                self._accumulated_gradients[self._get_variable_index(v)]
+                for v in trainable_variables
+            ]

-            def _update_step_fn(self, grads, trainable_variables):
+            def _update_step_fn(grads, trainable_variables):
                 # Run update step with accumulated grads + reset accumulators
                 steps = self.gradient_accumulation_steps
                 grads = [
-                    (grads[i] + self._accumulated_gradients[i]) / steps
-                    for i in range(len(grads))
+                    (g + acc_g) / steps for g, acc_g in zip(grads, acc_grads)
                 ]
                 self._backend_update_step(
                     grads, trainable_variables, self.learning_rate
                 )
                 self._backend_reset_gradient_accumulators()

-            def _grad_accumulation_fn(self, grads):
-                # Update gradient accumulators
-                self._backend_increment_gradient_accumulators(grads)
-
             ops.cond(
                 is_update_step,
-                lambda: _update_step_fn(self, grads, trainable_variables),
-                lambda: _grad_accumulation_fn(self, grads),
+                lambda: _update_step_fn(grads, trainable_variables),
+                lambda: self._backend_increment_gradient_accumulators(
+                    grads, acc_grads
+                ),
             )
         else:
             # Run update step.
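Restated outside the diff: the `ops.cond` above applies the averaged accumulated gradient and resets the accumulators on every `gradient_accumulation_steps`-th iteration, and only accumulates otherwise. A rough NumPy sketch of that contract, for orientation only (plain SGD with illustrative names, not part of the patch; variables with `overwrite_with_gradient=True` are handled separately below and accumulate with an elementwise maximum instead of a sum):

    import numpy as np

    def sgd_step_with_accumulation(v, acc, g, iteration, steps, lr=0.1):
        # Update step: apply the averaged gradient, then zero the
        # accumulator. Otherwise, just fold the new gradient in.
        if (iteration + 1) % steps == 0:
            v = v - lr * (g + acc) / steps
            acc = np.zeros_like(acc)
        else:
            acc = acc + g
        return v, acc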
@@ -434,14 +436,11 @@ def _backend_update_step(self, grads, trainable_variables, learning_rate): def _backend_reset_gradient_accumulators(self): for g_acc in self._accumulated_gradients: - g_acc.assign(np.zeros(g_acc.shape, dtype=g_acc.dtype)) - - def _backend_increment_gradient_accumulators(self, grads): - new_g_accs = [ - (grads[i] + self._accumulated_gradients[i]) - for i in range(len(grads)) - ] - for n_g_acc, g_acc in zip(new_g_accs, self._accumulated_gradients): + g_acc.assign(ops.zeros(g_acc.shape, dtype=g_acc.dtype)) + + def _backend_increment_gradient_accumulators(self, grads, acc_grads): + new_g_accs = [(g + acc_g) for g, acc_g in zip(grads, acc_grads)] + for n_g_acc, g_acc in zip(new_g_accs, acc_grads): g_acc.assign(n_g_acc) def stateless_apply(self, optimizer_variables, grads, trainable_variables): @@ -616,7 +615,32 @@ def _overwrite_variables_directly_with_gradients(self, grads, vars): for i in range(len(filtered_grads) - 1, -1, -1): g, v = filtered_grads[i], filtered_vars[i] if v.overwrite_with_gradient: - v.assign(g) + if self.gradient_accumulation_steps: + # Utilize a stateless manner for JAX compatibility + steps = self.gradient_accumulation_steps + is_update_step = (self.iterations + 1) % steps == 0 + acc_g = self._accumulated_gradients[ + self._get_variable_index(v) + ] + # `ops.maximum` is utilized for gradient accumulation for + # `overwrite_with_gradient=True` variables + new_g_acc = ops.cond( + is_update_step, + lambda: ops.zeros(g.shape, dtype=g.dtype), + lambda: ops.maximum(g, acc_g), + ) + new_g = ops.cond( + is_update_step, + lambda: ops.maximum(g, acc_g), + lambda: g, + ) + new_v = ops.cond( + is_update_step, lambda: new_g, lambda: v.value + ) + v.assign(new_v) + acc_g.assign(new_g_acc) + else: + v.assign(g) filtered_grads.pop(i) filtered_vars.pop(i) return filtered_grads, filtered_vars diff --git a/keras/src/optimizers/ftrl.py b/keras/src/optimizers/ftrl.py index 1bb56518dc9d..562e2ec03a08 100644 --- a/keras/src/optimizers/ftrl.py +++ b/keras/src/optimizers/ftrl.py @@ -91,6 +91,8 @@ def __init__( use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, + loss_scale_factor=None, + gradient_accumulation_steps=None, name="ftrl", **kwargs, ): @@ -104,6 +106,8 @@ def __init__( use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, + loss_scale_factor=loss_scale_factor, + gradient_accumulation_steps=gradient_accumulation_steps, **kwargs, ) diff --git a/keras/src/optimizers/lion.py b/keras/src/optimizers/lion.py index d63e736266b9..e9194b042660 100644 --- a/keras/src/optimizers/lion.py +++ b/keras/src/optimizers/lion.py @@ -53,6 +53,8 @@ def __init__( use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, + loss_scale_factor=None, + gradient_accumulation_steps=None, name="lion", **kwargs, ): @@ -66,6 +68,8 @@ def __init__( use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, + loss_scale_factor=loss_scale_factor, + gradient_accumulation_steps=gradient_accumulation_steps, **kwargs, ) self.beta_1 = beta_1 diff --git a/keras/src/optimizers/nadam.py b/keras/src/optimizers/nadam.py index 77454e9f94f9..e307be111942 100644 --- a/keras/src/optimizers/nadam.py +++ b/keras/src/optimizers/nadam.py @@ -49,6 +49,8 @@ def __init__( use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, + loss_scale_factor=None, + gradient_accumulation_steps=None, name="nadam", **kwargs, ): @@ -62,6 +64,8 @@ def __init__( use_ema=use_ema, ema_momentum=ema_momentum, 
ema_overwrite_frequency=ema_overwrite_frequency, + loss_scale_factor=loss_scale_factor, + gradient_accumulation_steps=gradient_accumulation_steps, **kwargs, ) self.beta_1 = beta_1 diff --git a/keras/src/optimizers/optimizer_test.py b/keras/src/optimizers/optimizer_test.py index 23d47477907c..6ab982d25d2e 100644 --- a/keras/src/optimizers/optimizer_test.py +++ b/keras/src/optimizers/optimizer_test.py @@ -267,6 +267,50 @@ def test_overwrite_with_gradient(self): self.assertAllClose(v, [[1.0, 1.0], [1.0, 1.0]]) self.assertAllClose(v2, [[0.0, 1.0], [2.0, 3.0]]) + def test_overwrite_with_gradient_with_gradient_accumulation(self): + v = backend.Variable([[1.0, 2.0], [3.0, 4.0]]) + v.overwrite_with_gradient = True + v2 = backend.Variable([[1.0, 2.0], [3.0, 4.0]]) + grad_ones = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]]) + grad_twos = backend.convert_to_tensor([[2.0, 2.0], [2.0, 2.0]]) + optimizer = optimizers.SGD( + learning_rate=1.0, gradient_accumulation_steps=2 + ) + + # Iteration 1 + optimizer.apply_gradients([(grad_ones, v), (grad_ones, v2)]) + self.assertAllClose(optimizer.iterations, 1) + self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]]) + self.assertAllClose(v2, [[1.0, 2.0], [3.0, 4.0]]) + self.assertAllClose( + optimizer._accumulated_gradients[0], [[1.0, 1.0], [1.0, 1.0]] + ) + self.assertAllClose( + optimizer._accumulated_gradients[1], [[1.0, 1.0], [1.0, 1.0]] + ) + # Iteration 2 + optimizer.apply_gradients([(grad_twos, v), (grad_twos, v2)]) + self.assertAllClose(optimizer.iterations, 2) + self.assertAllClose(v, [[2.0, 2.0], [2.0, 2.0]]) + self.assertAllClose(v2, [[-0.5, 0.5], [1.5, 2.5]]) + self.assertAllClose( + optimizer._accumulated_gradients[0], [[0.0, 0.0], [0.0, 0.0]] + ) + self.assertAllClose( + optimizer._accumulated_gradients[1], [[0.0, 0.0], [0.0, 0.0]] + ) + # Iteration 3 + optimizer.apply_gradients([(grad_ones, v), (grad_ones, v2)]) + self.assertAllClose(optimizer.iterations, 3) + self.assertAllClose(v, [[2.0, 2.0], [2.0, 2.0]]) + self.assertAllClose(v2, [[-0.5, 0.5], [1.5, 2.5]]) + self.assertAllClose( + optimizer._accumulated_gradients[0], [[1.0, 1.0], [1.0, 1.0]] + ) + self.assertAllClose( + optimizer._accumulated_gradients[1], [[1.0, 1.0], [1.0, 1.0]] + ) + def test_setting_lr_to_callable_untracks_lr_var(self): adam = optimizers.Adam(learning_rate=0.001) self.assertLen(adam.variables, 2) diff --git a/keras/src/optimizers/rmsprop.py b/keras/src/optimizers/rmsprop.py index ad7c4a079c4d..384bdc21639a 100644 --- a/keras/src/optimizers/rmsprop.py +++ b/keras/src/optimizers/rmsprop.py @@ -63,7 +63,9 @@ def __init__( global_clipnorm=None, use_ema=False, ema_momentum=0.99, - ema_overwrite_frequency=100, + ema_overwrite_frequency=None, + loss_scale_factor=None, + gradient_accumulation_steps=None, name="rmsprop", **kwargs, ): @@ -76,6 +78,8 @@ def __init__( use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, + loss_scale_factor=loss_scale_factor, + gradient_accumulation_steps=gradient_accumulation_steps, name=name, **kwargs, ) diff --git a/keras/src/optimizers/sgd.py b/keras/src/optimizers/sgd.py index 85a8c8647445..2a1b9cceba98 100644 --- a/keras/src/optimizers/sgd.py +++ b/keras/src/optimizers/sgd.py @@ -52,6 +52,8 @@ def __init__( use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, + loss_scale_factor=None, + gradient_accumulation_steps=None, name="SGD", **kwargs, ): @@ -65,6 +67,8 @@ def __init__( use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, + 
loss_scale_factor=loss_scale_factor, + gradient_accumulation_steps=gradient_accumulation_steps, **kwargs, ) if not isinstance(momentum, float) or momentum < 0 or momentum > 1: From 47b5ba5bbdfb2fb05e8fec4d5699393369509dfd Mon Sep 17 00:00:00 2001 From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com> Date: Wed, 17 Apr 2024 11:26:29 -0500 Subject: [PATCH 019/101] Update code path in ignore path (#19537) --- .kokoro/github/ubuntu/gpu/build.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.kokoro/github/ubuntu/gpu/build.sh b/.kokoro/github/ubuntu/gpu/build.sh index 4a35478029f7..1cc06ab1ee09 100644 --- a/.kokoro/github/ubuntu/gpu/build.sh +++ b/.kokoro/github/ubuntu/gpu/build.sh @@ -34,8 +34,8 @@ then python3 -c 'import tensorflow as tf;assert len(tf.config.list_physical_devices("GPU")) > 0' # TODO: keras/layers/merging/merging_test.py::MergingLayersTest::test_sparse_dot_2d Fatal Python error: Aborted - pytest keras --ignore keras/applications \ - --ignore keras/layers/merging/merging_test.py \ + pytest keras --ignore keras/src/applications \ + --ignore keras/src/layers/merging/merging_test.py \ --cov=keras fi @@ -51,11 +51,11 @@ then # TODO: keras/layers/merging/merging_test.py::MergingLayersTest::test_sparse_dot_2d Fatal Python error: Aborted # TODO: keras/trainers/data_adapters/py_dataset_adapter_test.py::PyDatasetAdapterTest::test_basic_flow0 Fatal Python error: Aborted # keras/backend/jax/distribution_lib_test.py is configured for CPU test for now. - pytest keras --ignore keras/applications \ - --ignore keras/layers/merging/merging_test.py \ - --ignore keras/trainers/data_adapters/py_dataset_adapter_test.py \ - --ignore keras/backend/jax/distribution_lib_test.py \ - --ignore keras/distribution/distribution_lib_test.py \ + pytest keras --ignore keras/src/applications \ + --ignore keras/src/layers/merging/merging_test.py \ + --ignore keras/src/trainers/data_adapters/py_dataset_adapter_test.py \ + --ignore keras/src/backend/jax/distribution_lib_test.py \ + --ignore keras/src/distribution/distribution_lib_test.py \ --cov=keras fi @@ -68,6 +68,6 @@ then # Raise error if GPU is not detected. 
python3 -c 'import torch;assert torch.cuda.is_available()' - pytest keras --ignore keras/applications \ + pytest keras --ignore keras/src/applications \ --cov=keras fi From 1c9e01c79cd8f6c7378f9acdd386867b2dad7e97 Mon Sep 17 00:00:00 2001 From: Sachin Prasad Date: Wed, 17 Apr 2024 11:03:43 -0700 Subject: [PATCH 020/101] Add operations per run (#19538) --- .github/workflows/stale-issue-pr.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/stale-issue-pr.yaml b/.github/workflows/stale-issue-pr.yaml index 034fb4c26698..a5c570dd780e 100644 --- a/.github/workflows/stale-issue-pr.yaml +++ b/.github/workflows/stale-issue-pr.yaml @@ -4,6 +4,8 @@ on: - cron: "30 1 * * *" jobs: close-issues: + # Don't do this in forks + if: github.repository == 'keras-team/keras' runs-on: ubuntu-latest permissions: issues: write @@ -12,6 +14,7 @@ jobs: - name: Awaiting response issues uses: actions/stale@v9 with: + operations-per-run: 500 days-before-issue-stale: 14 days-before-issue-close: 14 stale-issue-label: "stale" @@ -34,6 +37,7 @@ jobs: - name: Contribution issues uses: actions/stale@v9 with: + operations-per-run: 500 days-before-issue-stale: 180 days-before-issue-close: 365 stale-issue-label: "stale" From 69f3abd176afd664e7242be5f5666cd5068822be Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Wed, 17 Apr 2024 11:51:12 -0700 Subject: [PATCH 021/101] Include input shapes in model visualization. --- keras/src/utils/model_visualization.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/keras/src/utils/model_visualization.py b/keras/src/utils/model_visualization.py index ee0a47bfaa39..2784a83e3ca2 100644 --- a/keras/src/utils/model_visualization.py +++ b/keras/src/utils/model_visualization.py @@ -72,9 +72,10 @@ def make_layer_label(layer, **kwargs): '<' ) - colspan = max( - 1, sum(int(x) for x in (show_dtype, show_shapes, show_trainable)) - ) + colspan_max = sum(int(x) for x in (show_dtype, show_trainable)) + if show_shapes: + colspan_max += 2 + colspan = max(1, colspan_max) if show_layer_names: table += ( @@ -104,15 +105,25 @@ def make_layer_label(layer, **kwargs): cols = [] if show_shapes: - shape = None + input_shape = None + output_shape = None try: - shape = tree.map_structure(lambda x: x.shape, layer.output) + input_shape = tree.map_structure(lambda x: x.shape, layer.input) + output_shape = tree.map_structure(lambda x: x.shape, layer.output) except (ValueError, AttributeError): pass + if class_name != "InputLayer": + cols.append( + ( + '" + ) + ) cols.append( ( '" ) ) @@ -248,7 +259,6 @@ def model_to_dot( } if isinstance(model, sequential.Sequential): - # TODO layers = model.layers elif not isinstance(model, functional.Functional): # We treat subclassed models as a single node. 
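To try the visualization change, render a small model with shape display enabled; with this commit each non-input layer node lists its input shape in addition to its output shape. A quick sketch (model and filename are placeholders; requires the optional pydot and graphviz dependencies):

    import keras

    model = keras.Sequential([
        keras.layers.Input((28, 28, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(10),
    ])
    # Writes a graph in which each layer shows both input and output shapes.
    keras.utils.plot_model(model, "model.png", show_shapes=True)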
From 1228acbbf175bffa49103d8242412f8ccdf06df1 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Wed, 17 Apr 2024 14:44:39 -0700 Subject: [PATCH 022/101] Add pad_to_aspect_ratio feature in ops.image.resize --- keras/src/backend/jax/image.py | 158 ++++++++++++++++++ keras/src/backend/numpy/image.py | 93 +++++++++++ keras/src/backend/tensorflow/image.py | 122 ++++++++++++++ keras/src/backend/torch/image.py | 59 +++++++ keras/src/layers/preprocessing/random_zoom.py | 2 +- keras/src/ops/image.py | 32 ++++ keras/src/ops/image_test.py | 78 +++++++++ 7 files changed, 543 insertions(+), 1 deletion(-) diff --git a/keras/src/backend/jax/image.py b/keras/src/backend/jax/image.py index 7bba72e3f927..e14e178efda1 100644 --- a/keras/src/backend/jax/image.py +++ b/keras/src/backend/jax/image.py @@ -43,6 +43,9 @@ def resize( interpolation="bilinear", antialias=False, crop_to_aspect_ratio=False, + pad_to_aspect_ratio=False, + fill_mode="constant", + fill_value=0.0, data_format="channels_last", ): if interpolation not in RESIZE_INTERPOLATIONS: @@ -50,6 +53,16 @@ def resize( "Invalid value for argument `interpolation`. Expected of one " f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}" ) + if fill_mode != "constant": + raise ValueError( + "Invalid value for argument `fill_mode`. Only `'constant'` " + f"is supported. Received: fill_mode={fill_mode}" + ) + if pad_to_aspect_ratio and crop_to_aspect_ratio: + raise ValueError( + "Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` " + "can be `True`." + ) if not len(size) == 2: raise ValueError( "Argument `size` must be a tuple of two elements " @@ -62,6 +75,7 @@ def resize( size = (image.shape[0],) + size + (image.shape[-1],) else: size = (image.shape[0], image.shape[1]) + size + batch_size = image.shape[0] elif len(image.shape) == 3: if data_format == "channels_last": size = size + (image.shape[-1],) @@ -114,6 +128,150 @@ def resize( crop_box_hstart : crop_box_hstart + crop_height, crop_box_wstart : crop_box_wstart + crop_width, ] + elif pad_to_aspect_ratio: + shape = image.shape + if data_format == "channels_last": + height, width, channels = shape[-3], shape[-2], shape[-1] + else: + height, width, channels = shape[-2], shape[-1], shape[-3] + + pad_height = int(float(width * target_height) / target_width) + pad_height = max(height, pad_height) + pad_width = int(float(height * target_width) / target_height) + pad_width = max(width, pad_width) + img_box_hstart = int(float(pad_height - height) / 2) + img_box_wstart = int(float(pad_width - width) / 2) + if data_format == "channels_last": + if img_box_hstart > 0: + if len(image.shape) == 4: + padded_img = jnp.concatenate( + [ + jnp.ones( + (batch_size, img_box_hstart, width, channels), + dtype=image.dtype, + ) + * fill_value, + image, + jnp.ones( + (batch_size, img_box_hstart, width, channels), + dtype=image.dtype, + ) + * fill_value, + ], + axis=1, + ) + else: + padded_img = jnp.concatenate( + [ + jnp.ones( + (img_box_hstart, width, channels), + dtype=image.dtype, + ) + * fill_value, + image, + jnp.ones( + (img_box_hstart, width, channels), + dtype=image.dtype, + ) + * fill_value, + ], + axis=0, + ) + elif img_box_wstart > 0: + if len(image.shape) == 4: + padded_img = jnp.concatenate( + [ + jnp.ones( + (batch_size, height, img_box_wstart, channels), + dtype=image.dtype, + ) + * fill_value, + image, + jnp.ones( + (batch_size, height, img_box_wstart, channels), + dtype=image.dtype, + ) + * fill_value, + ], + axis=2, + ) + else: + padded_img = jnp.concatenate( + [ + jnp.ones( + (height, 
img_box_wstart, channels), + dtype=image.dtype, + ) + * fill_value, + image, + jnp.ones( + (height, img_box_wstart, channels), + dtype=image.dtype, + ) + * fill_value, + ], + axis=1, + ) + else: + padded_img = image + else: + if img_box_hstart > 0: + if len(image.shape) == 4: + padded_img = jnp.concatenate( + [ + jnp.ones( + (batch_size, channels, img_box_hstart, width) + ) + * fill_value, + image, + jnp.ones( + (batch_size, channels, img_box_hstart, width) + ) + * fill_value, + ], + axis=2, + ) + else: + padded_img = jnp.concatenate( + [ + jnp.ones((channels, img_box_hstart, width)) + * fill_value, + image, + jnp.ones((channels, img_box_hstart, width)) + * fill_value, + ], + axis=1, + ) + elif img_box_wstart > 0: + if len(image.shape) == 4: + padded_img = jnp.concatenate( + [ + jnp.ones( + (batch_size, channels, height, img_box_wstart) + ) + * fill_value, + image, + jnp.ones( + (batch_size, channels, height, img_box_wstart) + ) + * fill_value, + ], + axis=3, + ) + else: + padded_img = jnp.concatenate( + [ + jnp.ones((channels, height, img_box_wstart)) + * fill_value, + image, + jnp.ones((channels, height, img_box_wstart)) + * fill_value, + ], + axis=2, + ) + else: + padded_img = image + image = padded_img return jax.image.resize( image, size, method=interpolation, antialias=antialias diff --git a/keras/src/backend/numpy/image.py b/keras/src/backend/numpy/image.py index 2281c422d496..d5e3c617fb5c 100644 --- a/keras/src/backend/numpy/image.py +++ b/keras/src/backend/numpy/image.py @@ -42,6 +42,9 @@ def resize( interpolation="bilinear", antialias=False, crop_to_aspect_ratio=False, + pad_to_aspect_ratio=False, + fill_mode="constant", + fill_value=0.0, data_format="channels_last", ): if interpolation not in RESIZE_INTERPOLATIONS: @@ -49,6 +52,16 @@ def resize( "Invalid value for argument `interpolation`. Expected of one " f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}" ) + if fill_mode != "constant": + raise ValueError( + "Invalid value for argument `fill_mode`. Only `'constant'` " + f"is supported. Received: fill_mode={fill_mode}" + ) + if pad_to_aspect_ratio and crop_to_aspect_ratio: + raise ValueError( + "Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` " + "can be `True`." 
+ ) if not len(size) == 2: raise ValueError( "Argument `size` must be a tuple of two elements " @@ -113,6 +126,86 @@ def resize( crop_box_hstart : crop_box_hstart + crop_height, crop_box_wstart : crop_box_wstart + crop_width, ] + elif pad_to_aspect_ratio: + shape = image.shape + batch_size = image.shape[0] + if data_format == "channels_last": + height, width, channels = shape[-3], shape[-2], shape[-1] + else: + channels, height, width = shape[-3], shape[-2], shape[-1] + pad_height = int(float(width * target_height) / target_width) + pad_height = max(height, pad_height) + pad_width = int(float(height * target_width) / target_height) + pad_width = max(width, pad_width) + img_box_hstart = int(float(pad_height - height) / 2) + img_box_wstart = int(float(pad_width - width) / 2) + if data_format == "channels_last": + if len(image.shape) == 4: + padded_img = ( + np.ones( + ( + batch_size, + pad_height + height, + pad_width + width, + channels, + ), + dtype=image.dtype, + ) + * fill_value + ) + padded_img[ + :, + img_box_hstart : img_box_hstart + height, + img_box_wstart : img_box_wstart + width, + :, + ] = image + else: + padded_img = ( + np.ones( + (pad_height + height, pad_width + width, channels), + dtype=image.dtype, + ) + * fill_value + ) + padded_img[ + img_box_hstart : img_box_hstart + height, + img_box_wstart : img_box_wstart + width, + :, + ] = image + else: + if len(image.shape) == 4: + padded_img = ( + np.ones( + ( + batch_size, + channels, + pad_height + height, + pad_width + width, + ), + dtype=image.dtype, + ) + * fill_value + ) + padded_img[ + :, + :, + img_box_hstart : img_box_hstart + height, + img_box_wstart : img_box_wstart + width, + ] = image + else: + padded_img = ( + np.ones( + (channels, pad_height + height, pad_width + width), + dtype=image.dtype, + ) + * fill_value + ) + padded_img[ + :, + img_box_hstart : img_box_hstart + height, + img_box_wstart : img_box_wstart + width, + ] = image + image = padded_img return np.array( jax.image.resize(image, size, method=interpolation, antialias=antialias) diff --git a/keras/src/backend/tensorflow/image.py b/keras/src/backend/tensorflow/image.py index c03825dd7d16..927384401d2e 100644 --- a/keras/src/backend/tensorflow/image.py +++ b/keras/src/backend/tensorflow/image.py @@ -42,6 +42,9 @@ def resize( interpolation="bilinear", antialias=False, crop_to_aspect_ratio=False, + pad_to_aspect_ratio=False, + fill_mode="constant", + fill_value=0.0, data_format="channels_last", ): if interpolation not in RESIZE_INTERPOLATIONS: @@ -49,6 +52,16 @@ def resize( "Invalid value for argument `interpolation`. Expected of one " f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}" ) + if fill_mode != "constant": + raise ValueError( + "Invalid value for argument `fill_mode`. Only `'constant'` " + f"is supported. Received: fill_mode={fill_mode}" + ) + if pad_to_aspect_ratio and crop_to_aspect_ratio: + raise ValueError( + "Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` " + "can be `True`." 
+ ) if not len(size) == 2: raise ValueError( "Argument `size` must be a tuple of two elements " @@ -102,6 +115,115 @@ def resize( crop_box_wstart : crop_box_wstart + crop_width, :, ] + elif pad_to_aspect_ratio: + shape = tf.shape(image) + height, width = shape[-3], shape[-2] + target_height, target_width = size + pad_height = tf.cast( + tf.cast(width * target_height, "float32") / target_width, + "int32", + ) + pad_height = tf.maximum(height, pad_height) + pad_height = tf.cast(pad_height, "int32") + pad_width = tf.cast( + tf.cast(height * target_width, "float32") / target_height, + "int32", + ) + pad_width = tf.maximum(width, pad_width) + pad_width = tf.cast(pad_width, "int32") + + img_box_hstart = tf.cast( + tf.cast(pad_height - height, "float32") / 2, "int32" + ) + img_box_wstart = tf.cast( + tf.cast(pad_width - width, "float32") / 2, "int32" + ) + if len(image.shape) == 4: + batch_size = tf.shape(image)[0] + channels = tf.shape(image)[3] + padded_img = tf.cond( + img_box_hstart > 0, + lambda: tf.concat( + [ + tf.ones( + (batch_size, img_box_hstart, width, channels), + dtype=image.dtype, + ) + * fill_value, + image, + tf.ones( + (batch_size, img_box_hstart, width, channels), + dtype=image.dtype, + ) + * fill_value, + ], + axis=1, + ), + lambda: image, + ) + padded_img = tf.cond( + img_box_wstart > 0, + lambda: tf.concat( + [ + tf.ones( + (batch_size, height, img_box_wstart, channels), + dtype=image.dtype, + ) + * fill_value, + padded_img, + tf.ones( + (batch_size, height, img_box_wstart, channels), + dtype=image.dtype, + ) + * fill_value, + ], + axis=2, + ), + lambda: padded_img, + ) + else: + channels = tf.shape(image)[2] + padded_img = tf.cond( + img_box_hstart > 0, + lambda: tf.concat( + [ + tf.ones( + (img_box_hstart, width, channels), + dtype=image.dtype, + ) + * fill_value, + image, + tf.ones( + (img_box_hstart, width, channels), + dtype=image.dtype, + ) + * fill_value, + ], + axis=0, + ), + lambda: image, + ) + padded_img = tf.cond( + img_box_wstart > 0, + lambda: tf.concat( + [ + tf.ones( + (height, img_box_wstart, channels), + dtype=image.dtype, + ) + * fill_value, + padded_img, + tf.ones( + (height, img_box_wstart, channels), + dtype=image.dtype, + ) + * fill_value, + ], + axis=1, + ), + lambda: padded_img, + ) + image = padded_img resized = tf.image.resize( image, size, method=interpolation, antialias=antialias diff --git a/keras/src/backend/torch/image.py b/keras/src/backend/torch/image.py index 82f609612a78..548f827fb0e0 100644 --- a/keras/src/backend/torch/image.py +++ b/keras/src/backend/torch/image.py @@ -52,6 +52,9 @@ def resize( interpolation="bilinear", antialias=False, crop_to_aspect_ratio=False, + pad_to_aspect_ratio=False, + fill_mode="constant", + fill_value=0.0, data_format="channels_last", ): try: @@ -81,6 +84,16 @@ def resize( "Invalid value for argument `interpolation`. Expected of one " f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}" ) + if fill_mode != "constant": + raise ValueError( + "Invalid value for argument `fill_mode`. Only `'constant'` " + f"is supported. Received: fill_mode={fill_mode}" + ) + if pad_to_aspect_ratio and crop_to_aspect_ratio: + raise ValueError( + "Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` " + "can be `True`." 
+ ) if not len(size) == 2: raise ValueError( "Argument `size` must be a tuple of two elements " @@ -123,6 +136,52 @@ def resize( crop_box_hstart : crop_box_hstart + crop_height, crop_box_wstart : crop_box_wstart + crop_width, ] + elif pad_to_aspect_ratio: + shape = image.shape + height, width = shape[-2], shape[-1] + target_height, target_width = size + pad_height = int(float(width * target_height) / target_width) + pad_height = max(height, pad_height) + pad_width = int(float(height * target_width) / target_height) + pad_width = max(width, pad_width) + img_box_hstart = int(float(pad_height - height) / 2) + img_box_wstart = int(float(pad_width - width) / 2) + if len(image.shape) == 4: + batch_size = image.shape[0] + channels = image.shape[1] + padded_img = ( + torch.ones( + ( + batch_size, + channels, + pad_height + height, + pad_width + width, + ), + dtype=image.dtype, + ) + * fill_value + ) + padded_img[ + :, + :, + img_box_hstart : img_box_hstart + height, + img_box_wstart : img_box_wstart + width, + ] = image + else: + channels = image.shape[0] + padded_img = ( + torch.ones( + (channels, pad_height + height, pad_width + width), + dtype=image.dtype, + ) + * fill_value + ) + padded_img[ + :, + img_box_hstart : img_box_hstart + height, + img_box_wstart : img_box_wstart + width, + ] = image + image = padded_img resized = torchvision.transforms.functional.resize( img=image, diff --git a/keras/src/layers/preprocessing/random_zoom.py b/keras/src/layers/preprocessing/random_zoom.py index 332da8e0abaa..532e75bd9c44 100644 --- a/keras/src/layers/preprocessing/random_zoom.py +++ b/keras/src/layers/preprocessing/random_zoom.py @@ -68,7 +68,7 @@ class RandomZoom(TFDataLayer): interpolation: Interpolation mode. Supported values: `"nearest"`, `"bilinear"`. seed: Integer. Used to create a random seed. - fill_value: a float represents the value to be filled outside + fill_value: a float that represents the value to be filled outside the boundaries when `fill_mode="constant"`. data_format: string, either `"channels_last"` or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` diff --git a/keras/src/ops/image.py b/keras/src/ops/image.py index 398defe615a4..56e488b48fd8 100644 --- a/keras/src/ops/image.py +++ b/keras/src/ops/image.py @@ -111,6 +111,9 @@ def __init__( interpolation="bilinear", antialias=False, crop_to_aspect_ratio=False, + pad_to_aspect_ratio=False, + fill_mode="constant", + fill_value=0.0, data_format="channels_last", ): super().__init__() @@ -119,6 +122,9 @@ def __init__( self.antialias = antialias self.data_format = data_format self.crop_to_aspect_ratio = crop_to_aspect_ratio + self.pad_to_aspect_ratio = pad_to_aspect_ratio + self.fill_mode = fill_mode + self.fill_value = fill_value def call(self, image): return backend.image.resize( @@ -128,6 +134,9 @@ def call(self, image): antialias=self.antialias, data_format=self.data_format, crop_to_aspect_ratio=self.crop_to_aspect_ratio, + pad_to_aspect_ratio=self.pad_to_aspect_ratio, + fill_mode=self.fill_mode, + fill_value=self.fill_value, ) def compute_output_spec(self, image): @@ -160,6 +169,9 @@ def resize( interpolation="bilinear", antialias=False, crop_to_aspect_ratio=False, + pad_to_aspect_ratio=False, + fill_mode="constant", + fill_value=0.0, data_format="channels_last", ): """Resize images to size using the specified interpolation method. @@ -178,6 +190,15 @@ def resize( largest possible window in the image (of size `(height, width)`) that matches the target aspect ratio. 
By default (`crop_to_aspect_ratio=False`), aspect ratio may not be preserved. + pad_to_aspect_ratio: If `True`, pad the image without aspect + ratio distortion. When the original aspect ratio differs + from the target aspect ratio, the output image will be + evenly padded on the short side. + fill_mode: When using `pad_to_aspect_ratio=True`, padded areas + are filled according to the given mode. Only `"constant"` is + supported at this time + (fill with constant value, equal to `fill_value`). + fill_value: Float. Padding value to use when `pad_to_aspect_ratio=True`. data_format: string, either `"channels_last"` or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch, height, width, channels)` @@ -219,6 +240,11 @@ def resize( "channels)`, or `(batch_size, height, width, channels)`, but " f"got input with incorrect rank, of shape {image.shape}." ) + if pad_to_aspect_ratio and crop_to_aspect_ratio: + raise ValueError( + "Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` " + "can be `True`." + ) if any_symbolic_tensors((image,)): return Resize( size, @@ -226,6 +252,9 @@ def resize( antialias=antialias, data_format=data_format, crop_to_aspect_ratio=crop_to_aspect_ratio, + pad_to_aspect_ratio=pad_to_aspect_ratio, + fill_mode=fill_mode, + fill_value=fill_value, ).symbolic_call(image) return backend.image.resize( image, @@ -234,6 +263,9 @@ def resize( antialias=antialias, crop_to_aspect_ratio=crop_to_aspect_ratio, data_format=data_format, + pad_to_aspect_ratio=pad_to_aspect_ratio, + fill_mode=fill_mode, + fill_value=fill_value, ) diff --git a/keras/src/ops/image_test.py b/keras/src/ops/image_test.py index 1f3fb7938063..e7a3fe7face0 100644 --- a/keras/src/ops/image_test.py +++ b/keras/src/ops/image_test.py @@ -317,6 +317,84 @@ def test_resize(self, interpolation, antialias, data_format): self.assertEqual(tuple(out.shape), tuple(ref_out.shape)) self.assertAllClose(ref_out, out, atol=0.3) + @parameterized.parameters( + [ + ("channels_last",), + ("channels_first",), + ] + ) + def test_resize_with_crop(self, data_format): + if data_format == "channels_first": + x = np.random.random((3, 60, 50)) * 255 + else: + x = np.random.random((60, 50, 3)) * 255 + out = kimage.resize( + x, + size=(25, 25), + crop_to_aspect_ratio=True, + data_format=data_format, + ) + if data_format == "channels_first": + self.assertEqual(out.shape, (3, 25, 25)) + else: + self.assertEqual(out.shape, (25, 25, 3)) + + # Batched case + if data_format == "channels_first": + x = np.random.random((2, 3, 50, 60)) * 255 + else: + x = np.random.random((2, 50, 60, 3)) * 255 + out = kimage.resize( + x, + size=(25, 25), + crop_to_aspect_ratio=True, + data_format=data_format, + ) + if data_format == "channels_first": + self.assertEqual(out.shape, (2, 3, 25, 25)) + else: + self.assertEqual(out.shape, (2, 25, 25, 3)) + + @parameterized.parameters( + [ + ("channels_last", 2.0), + ("channels_first", 2.0), + ] + ) + def test_resize_with_pad(self, data_format, fill_value): + if data_format == "channels_first": + x = np.random.random((3, 60, 50)) * 255 + else: + x = np.random.random((60, 50, 3)) * 255 + out = kimage.resize( + x, + size=(25, 25), + pad_to_aspect_ratio=True, + data_format=data_format, + fill_value=fill_value, + ) + if data_format == "channels_first": + self.assertEqual(out.shape, (3, 25, 25)) + else: + self.assertEqual(out.shape, (25, 25, 3)) + + # Batched case + if data_format == "channels_first": + x = np.random.random((2, 3, 50, 60)) * 255 + else: + x = 
np.random.random((2, 50, 60, 3)) * 255 + out = kimage.resize( + x, + size=(25, 25), + pad_to_aspect_ratio=True, + data_format=data_format, + fill_value=fill_value, + ) + if data_format == "channels_first": + self.assertEqual(out.shape, (2, 3, 25, 25)) + else: + self.assertEqual(out.shape, (2, 25, 25, 3)) + @parameterized.parameters( [ ("bilinear", "constant", "channels_last"), From 7cd5c60f8143c664f1eaa06716d7d087571a8152 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Wed, 17 Apr 2024 14:49:09 -0700 Subject: [PATCH 023/101] Add pad_to_aspect_ratio feature in Resizing layer. --- keras/src/layers/preprocessing/resizing.py | 21 +++++++++++++++++++++ keras/src/ops/image.py | 2 +- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/keras/src/layers/preprocessing/resizing.py b/keras/src/layers/preprocessing/resizing.py index 6a6c5bab6a8c..51e4e5f39795 100644 --- a/keras/src/layers/preprocessing/resizing.py +++ b/keras/src/layers/preprocessing/resizing.py @@ -39,6 +39,15 @@ class Resizing(TFDataLayer): largest possible window in the image (of size `(height, width)`) that matches the target aspect ratio. By default (`crop_to_aspect_ratio=False`), aspect ratio may not be preserved. + pad_to_aspect_ratio: If `True`, pad the images without aspect + ratio distortion. When the original aspect ratio differs + from the target aspect ratio, the output image will be + evenly padded on the short side. + fill_mode: When using `pad_to_aspect_ratio=True`, padded areas + are filled according to the given mode. Only `"constant"` is + supported at this time + (fill with constant value, equal to `fill_value`). + fill_value: Float. Padding value to use when `pad_to_aspect_ratio=True`. data_format: string, either `"channels_last"` or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch, height, width, channels)` @@ -56,6 +65,9 @@ def __init__( width, interpolation="bilinear", crop_to_aspect_ratio=False, + pad_to_aspect_ratio=False, + fill_mode="constant", + fill_value=0.0, data_format=None, **kwargs, ): @@ -65,6 +77,9 @@ def __init__( self.interpolation = interpolation self.data_format = backend.standardize_data_format(data_format) self.crop_to_aspect_ratio = crop_to_aspect_ratio + self.pad_to_aspect_ratio = pad_to_aspect_ratio + self.fill_mode = fill_mode + self.fill_value = fill_value def call(self, inputs): size = (self.height, self.width) @@ -74,6 +89,9 @@ def call(self, inputs): interpolation=self.interpolation, data_format=self.data_format, crop_to_aspect_ratio=self.crop_to_aspect_ratio, + pad_to_aspect_ratio=self.pad_to_aspect_ratio, + fill_mode=self.fill_mode, + fill_value=self.fill_value, ) def compute_output_shape(self, input_shape): @@ -101,6 +119,9 @@ def get_config(self): "width": self.width, "interpolation": self.interpolation, "crop_to_aspect_ratio": self.crop_to_aspect_ratio, + "pad_to_aspect_ratio": self.pad_to_aspect_ratio, + "fill_mode": self.fill_mode, + "fill_value": self.fill_value, "data_format": self.data_format, } return {**base_config, **config} diff --git a/keras/src/ops/image.py b/keras/src/ops/image.py index 56e488b48fd8..bb817ec4abe2 100644 --- a/keras/src/ops/image.py +++ b/keras/src/ops/image.py @@ -190,7 +190,7 @@ def resize( largest possible window in the image (of size `(height, width)`) that matches the target aspect ratio. By default (`crop_to_aspect_ratio=False`), aspect ratio may not be preserved. 
- pad_to_aspect_ratio: If `True`, pad the image without aspect + pad_to_aspect_ratio: If `True`, pad the images without aspect ratio distortion. When the original aspect ratio differs from the target aspect ratio, the output image will be evenly padded on the short side. From b955665444622b88871c50d33a8dc19fc3954f2b Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Thu, 18 Apr 2024 12:02:28 +0800 Subject: [PATCH 024/101] Fix incorrect usage of `quantize` (#19541) * Add logic to prevent double quantization * Add detailed info for double quantization error * Update error msg --- keras/src/layers/core/dense.py | 7 ++---- keras/src/layers/core/dense_test.py | 22 +++++++++++++--- keras/src/layers/core/einsum_dense.py | 7 ++---- keras/src/layers/core/einsum_dense_test.py | 29 ++++++++++++++++++---- keras/src/layers/core/embedding.py | 3 +-- keras/src/layers/core/embedding_test.py | 10 +++++++- keras/src/layers/layer.py | 6 +++++ 7 files changed, 62 insertions(+), 22 deletions(-) diff --git a/keras/src/layers/core/dense.py b/keras/src/layers/core/dense.py index fad22e6b755e..e856feb98409 100644 --- a/keras/src/layers/core/dense.py +++ b/keras/src/layers/core/dense.py @@ -1,7 +1,6 @@ import ml_dtypes from keras.src import activations -from keras.src import backend from keras.src import constraints from keras.src import dtype_policies from keras.src import initializers @@ -347,6 +346,7 @@ def _int8_build( initializer=kernel_scale_initializer, trainable=False, ) + self._is_quantized = True def _float8_build(self): if not isinstance( @@ -396,6 +396,7 @@ def _float8_build(self): self.kernel_amax_history.overwrite_with_gradient = True self.outputs_grad_scale.overwrite_with_gradient = True self.outputs_grad_amax_history.overwrite_with_gradient = True + self._is_quantized = True def quantized_call(self, inputs): if self.dtype_policy.quantization_mode == "int8": @@ -552,8 +553,6 @@ def quantize(self, mode): self._tracker.unlock() if mode == "int8": - if backend.standardize_dtype(self._kernel.dtype) == "int8": - raise ValueError("`quantize` can only be done once per layer.") # Configure `self.inputs_quantizer` self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) # Quantize `self._kernel` to int8 and compute corresponding scale @@ -572,8 +571,6 @@ def quantize(self, mode): lambda shape, dtype: kernel_scale, ) elif mode == "float8": - if hasattr(self, "inputs_amax_history"): - raise ValueError("`quantize` can only be done once per layer.") self._float8_build() else: raise NotImplementedError( diff --git a/keras/src/layers/core/dense_test.py b/keras/src/layers/core/dense_test.py index 8a959543dc0f..f9f4e806973b 100644 --- a/keras/src/layers/core/dense_test.py +++ b/keras/src/layers/core/dense_test.py @@ -420,10 +420,24 @@ def test_quantize_when_already_quantized(self, mode): layer = layers.Dense(units=2) layer.build((None, 2)) layer.quantize(mode) - with self.assertRaisesRegex( - ValueError, "`quantize` can only be done once per layer." 
- ): - layer.quantize(mode) + for m in ["int8", "float8"]: + with self.assertRaisesRegex( + ValueError, "is already quantized with dtype_policy=" + ): + layer.quantize(m) + + @parameterized.named_parameters( + ("int8", "int8_from_float32"), + ("float8", "float8_from_float32"), + ) + def test_quantize_when_already_quantized_using_dtype_argument(self, mode): + layer = layers.Dense(units=2, dtype=mode) + layer.build((None, 2)) + for m in ["int8", "float8"]: + with self.assertRaisesRegex( + ValueError, "is already quantized with dtype_policy=" + ): + layer.quantize(m) @parameterized.named_parameters( ("int8", "int8_from_float32", 3), diff --git a/keras/src/layers/core/einsum_dense.py b/keras/src/layers/core/einsum_dense.py index 8fcecc17c9c4..f3b9cb31a1d8 100644 --- a/keras/src/layers/core/einsum_dense.py +++ b/keras/src/layers/core/einsum_dense.py @@ -5,7 +5,6 @@ import numpy as np from keras.src import activations -from keras.src import backend from keras.src import constraints from keras.src import dtype_policies from keras.src import initializers @@ -431,6 +430,7 @@ def _int8_build( initializer=kernel_scale_initializer, trainable=False, ) + self._is_quantized = True def _float8_build(self): if not isinstance( @@ -480,6 +480,7 @@ def _float8_build(self): self.kernel_amax_history.overwrite_with_gradient = True self.outputs_grad_scale.overwrite_with_gradient = True self.outputs_grad_amax_history.overwrite_with_gradient = True + self._is_quantized = True def quantized_call(self, inputs): if self.dtype_policy.quantization_mode == "int8": @@ -665,8 +666,6 @@ def quantize(self, mode): self._tracker.unlock() if mode == "int8": - if backend.standardize_dtype(self._kernel.dtype) == "int8": - raise ValueError("`quantize` can only be done once per layer.") ( self._input_reduced_axes, self._kernel_reduced_axes, @@ -709,8 +708,6 @@ def quantize(self, mode): lambda shape, dtype: kernel_scale, ) elif mode == "float8": - if hasattr(self, "inputs_amax_history"): - raise ValueError("`quantize` can only be done once per layer.") self._float8_build() else: raise NotImplementedError( diff --git a/keras/src/layers/core/einsum_dense_test.py b/keras/src/layers/core/einsum_dense_test.py index 7ce83dd75f84..098c3595b19f 100644 --- a/keras/src/layers/core/einsum_dense_test.py +++ b/keras/src/layers/core/einsum_dense_test.py @@ -508,15 +508,34 @@ class MyEinsumDense(layers.EinsumDense): def test_quantize_when_already_quantized(self, mode): layer = layers.EinsumDense( equation="ab,bcd->acd", - output_shape=(8, 32), + output_shape=(8, 16), bias_axes="d", ) layer.build((None, 3)) layer.quantize(mode) - with self.assertRaisesRegex( - ValueError, "`quantize` can only be done once per layer." 
- ): - layer.quantize(mode) + for m in ["int8", "float8"]: + with self.assertRaisesRegex( + ValueError, "is already quantized with dtype_policy=" + ): + layer.quantize(m) + + @parameterized.named_parameters( + ("int8", "int8_from_float32"), + ("float8", "float8_from_float32"), + ) + def test_quantize_when_already_quantized_using_dtype_argument(self, mode): + layer = layers.EinsumDense( + equation="ab,bcd->acd", + output_shape=(8, 16), + bias_axes="d", + dtype=mode, + ) + layer.build((None, 3)) + for m in ["int8", "float8"]: + with self.assertRaisesRegex( + ValueError, "is already quantized with dtype_policy=" + ): + layer.quantize(m) @parameterized.named_parameters( ("int8", "int8_from_float32", 3), diff --git a/keras/src/layers/core/embedding.py b/keras/src/layers/core/embedding.py index 03a9a61ee15b..1807a86b125e 100644 --- a/keras/src/layers/core/embedding.py +++ b/keras/src/layers/core/embedding.py @@ -325,6 +325,7 @@ def _int8_build( initializer=embeddings_scale_initializer, trainable=False, ) + self._is_quantized = True def quantized_call(self, inputs): if self.dtype_policy.quantization_mode == "int8": @@ -374,8 +375,6 @@ def quantize(self, mode): self._tracker.unlock() if mode == "int8": - if backend.standardize_dtype(self._embeddings.dtype) == "int8": - raise ValueError("`quantize` can only be done once per layer.") # Configure `self.inputs_quantizer` self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) # Quantize `self._embeddings` to int8 and compute corresponding diff --git a/keras/src/layers/core/embedding_test.py b/keras/src/layers/core/embedding_test.py index 6c0af85a3095..be2c21ad5008 100644 --- a/keras/src/layers/core/embedding_test.py +++ b/keras/src/layers/core/embedding_test.py @@ -314,7 +314,15 @@ def test_quantize_when_already_quantized(self): layer.build() layer.quantize("int8") with self.assertRaisesRegex( - ValueError, "`quantize` can only be done once per layer." + ValueError, "is already quantized with dtype_policy=" + ): + layer.quantize("int8") + + def test_quantize_when_already_quantized_using_dtype_argument(self): + layer = layers.Embedding(10, 16, dtype="int8_from_float32") + layer.build() + with self.assertRaisesRegex( + ValueError, "is already quantized with dtype_policy=" ): layer.quantize("int8") diff --git a/keras/src/layers/layer.py b/keras/src/layers/layer.py index eb251122b930..4f72ec4c0e93 100644 --- a/keras/src/layers/layer.py +++ b/keras/src/layers/layer.py @@ -1143,6 +1143,12 @@ def _check_quantize_args(self, mode, compute_dtype): f"Layer '{self.name}' (of type '{self.__class__.__name__}') " "is not built yet." ) + if getattr(self, "_is_quantized", False): + raise ValueError( + f"Layer '{self.name}' is already quantized with " + f"dtype_policy='{self.dtype_policy.name}'. " + f"Received: mode={mode}" + ) if mode not in dtype_policies.QUANTIZATION_MODES: raise ValueError( "Invalid quantization mode. " From ab017ea74896a3069f3386bf9ef5d7eee0f3d3a3 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Thu, 18 Apr 2024 11:18:36 -0700 Subject: [PATCH 025/101] Add eigh op. 
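The op mirrors `numpy.linalg.eigh` and targets symmetric/Hermitian inputs, where it is cheaper and numerically more stable than the general `eig`. A usage sketch modeled on the correctness test below (the input matrix is illustrative):

    import numpy as np
    from keras import ops

    a = np.random.rand(3, 3)
    x = a @ a.T  # symmetric by construction, so eigh applies
    w, v = ops.eigh(x)
    # w has shape (3,) and v has shape (3, 3); up to numerical error,
    # x is recovered as (v * w) @ v.T.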
--- keras/src/backend/jax/linalg.py | 4 +++ keras/src/backend/numpy/linalg.py | 4 +++ keras/src/backend/tensorflow/linalg.py | 4 +++ keras/src/backend/torch/linalg.py | 4 +++ keras/src/ops/linalg.py | 46 ++++++++++++++++++++++++-- keras/src/ops/linalg_test.py | 37 +++++++++++++++++---- 6 files changed, 89 insertions(+), 10 deletions(-) diff --git a/keras/src/backend/jax/linalg.py b/keras/src/backend/jax/linalg.py index bffdeba67dcc..7984a734e9d0 100644 --- a/keras/src/backend/jax/linalg.py +++ b/keras/src/backend/jax/linalg.py @@ -27,6 +27,10 @@ def eig(x): return jnp.linalg.eig(x) +def eigh(x): + return jnp.linalg.eigh(x) + + def inv(a): return jnp.linalg.inv(a) diff --git a/keras/src/backend/numpy/linalg.py b/keras/src/backend/numpy/linalg.py index 32d0f762fd76..b5a6c7e9410e 100644 --- a/keras/src/backend/numpy/linalg.py +++ b/keras/src/backend/numpy/linalg.py @@ -18,6 +18,10 @@ def eig(a): return np.linalg.eig(a) +def eigh(a): + return np.linalg.eigh(a) + + def inv(a): return np.linalg.inv(a) diff --git a/keras/src/backend/tensorflow/linalg.py b/keras/src/backend/tensorflow/linalg.py index b50a8b44e875..15459f411331 100644 --- a/keras/src/backend/tensorflow/linalg.py +++ b/keras/src/backend/tensorflow/linalg.py @@ -22,6 +22,10 @@ def eig(a): return tf.linalg.eig(a) +def eigh(a): + return tf.linalg.eigh(a) + + def inv(a): return tf.linalg.inv(a) diff --git a/keras/src/backend/torch/linalg.py b/keras/src/backend/torch/linalg.py index a9158cc64a62..81041782a1b8 100644 --- a/keras/src/backend/torch/linalg.py +++ b/keras/src/backend/torch/linalg.py @@ -19,6 +19,10 @@ def eig(x): return torch.linalg.eig(x) +def eigh(x): + return torch.linalg.eigh(x) + + def inv(x): return torch.linalg.inv(x) diff --git a/keras/src/ops/linalg.py b/keras/src/ops/linalg.py index cd2ac98c7090..9f1bf7368a7d 100644 --- a/keras/src/ops/linalg.py +++ b/keras/src/ops/linalg.py @@ -92,8 +92,8 @@ def call(self, x): return _eig(x) def compute_output_spec(self, x): - _assert_2d(x) _assert_square(x) + _assert_2d(x) return ( KerasTensor(x.shape[:-1], x.dtype), KerasTensor(x.shape, x.dtype), ) @@ -110,7 +110,6 @@ def eig(x): Returns: A tuple of two tensors: a tensor of shape `(..., M)` containing eigenvalues and a tensor of shape `(..., M, M)` containing eigenvectors. - """ if any_symbolic_tensors((x,)): return Eig().symbolic_call(x) @@ -119,11 +118,52 @@ def eig(x): def _eig(x): x = backend.convert_to_tensor(x) - _assert_2d(x) _assert_square(x) + _assert_2d(x) return backend.linalg.eig(x) +class Eigh(Operation): + + def __init__(self): + super().__init__() + + def call(self, x): + return _eigh(x) + + def compute_output_spec(self, x): + _assert_square(x) + _assert_2d(x) + return ( + KerasTensor(x.shape[:-1], x.dtype), + KerasTensor(x.shape, x.dtype), + ) + + +@keras_export(["keras.ops.eigh", "keras.ops.linalg.eigh"]) +def eigh(x): + """Computes the eigenvalues and eigenvectors of a complex Hermitian matrix. + + Args: + x: Input tensor of shape `(..., M, M)`. + + Returns: + A tuple of two tensors: a tensor of shape `(..., M)` containing + eigenvalues and a tensor of shape `(..., M, M)` containing eigenvectors.
+ + """ + if any_symbolic_tensors((x,)): + return Eigh().symbolic_call(x) + return _eigh(x) + + +def _eigh(x): + x = backend.convert_to_tensor(x) + _assert_square(x) + _assert_2d(x) + return backend.linalg.eigh(x) + + class Inv(Operation): def __init__(self): diff --git a/keras/src/ops/linalg_test.py b/keras/src/ops/linalg_test.py index 36ab32b202d9..4d62f6a3ce2b 100644 --- a/keras/src/ops/linalg_test.py +++ b/keras/src/ops/linalg_test.py @@ -42,13 +42,11 @@ def test_eig(self): self.assertEqual(w.shape, (None, 20)) self.assertEqual(v.shape, (None, 20, 20)) - x = KerasTensor([None, None, 20]) - with self.assertRaises(ValueError): - linalg.eig(x) - - x = KerasTensor([None, 20, 15]) - with self.assertRaises(ValueError): - linalg.eig(x) + def test_eigh(self): + x = KerasTensor([None, 20, 20]) + w, v = linalg.eigh(x) + self.assertEqual(w.shape, (None, 20)) + self.assertEqual(v.shape, (None, 20, 20)) def test_inv(self): x = KerasTensor([None, 20, 20]) @@ -208,6 +206,16 @@ def test_eig(self): with self.assertRaises(ValueError): linalg.eig(x) + def test_eigh(self): + x = KerasTensor([4, 3, 3]) + w, v = linalg.eigh(x) + self.assertEqual(w.shape, (4, 3)) + self.assertEqual(v.shape, (4, 3, 3)) + + x = KerasTensor([10, 20, 15]) + with self.assertRaises(ValueError): + linalg.eigh(x) + def test_inv(self): x = KerasTensor([4, 3, 3]) out = linalg.inv(x) @@ -346,6 +354,21 @@ def test_eig(self): x_reconstructed = (v * w[..., None, :]) @ v.transpose((0, 2, 1)) self.assertAllClose(x_reconstructed, x, atol=1e-4) + def test_eigh(self): + x = np.random.rand(2, 3, 3) + x = x @ x.transpose((0, 2, 1)) + if backend.backend() == "jax": + import jax + + if jax.default_backend() == "gpu": + # eigh not implemented for jax on gpu backend + with self.assertRaises(NotImplementedError): + linalg.eigh(x) + return + w, v = map(ops.convert_to_numpy, linalg.eigh(x)) + x_reconstructed = (v * w[..., None, :]) @ v.transpose((0, 2, 1)) + self.assertAllClose(x_reconstructed, x, atol=1e-4) + def test_inv(self): x = np.random.rand(4, 3, 3) x_inv = ops.convert_to_numpy(linalg.inv(x)) From 65d0e092236b60a6e9bf998cd9f844ffdee8a2d2 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Thu, 18 Apr 2024 12:04:47 -0700 Subject: [PATCH 026/101] Add keepdim in argmax/argmin. 
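The new flag follows NumPy semantics: with `keepdims=True` the reduced axis is kept with size one, so the result broadcasts against the input. A small illustration (values are arbitrary):

    from keras import ops

    x = [[1, 2, 3], [6, 5, 4]]
    print(ops.argmax(x, axis=1))                 # shape (2,):   [2, 0]
    print(ops.argmax(x, axis=1, keepdims=True))  # shape (2, 1): [[2], [0]]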
--- keras/src/backend/jax/numpy.py | 8 ++--- keras/src/backend/numpy/numpy.py | 8 ++--- keras/src/backend/tensorflow/numpy.py | 27 +++++++++++++--- keras/src/backend/torch/numpy.py | 8 ++--- keras/src/ops/numpy.py | 46 ++++++++++++++++----------- keras/src/ops/numpy_test.py | 20 ++++++++++++ 6 files changed, 83 insertions(+), 34 deletions(-) diff --git a/keras/src/backend/jax/numpy.py b/keras/src/backend/jax/numpy.py index 9520b21ec281..36605efc811e 100644 --- a/keras/src/backend/jax/numpy.py +++ b/keras/src/backend/jax/numpy.py @@ -339,12 +339,12 @@ def arctanh(x): return jnp.arctanh(x) -def argmax(x, axis=None): - return jnp.argmax(x, axis=axis) +def argmax(x, axis=None, keepdims=False): + return jnp.argmax(x, axis=axis, keepdims=keepdims) -def argmin(x, axis=None): - return jnp.argmin(x, axis=axis) +def argmin(x, axis=None, keepdims=False): + return jnp.argmin(x, axis=axis, keepdims=keepdims) def argsort(x, axis=-1): diff --git a/keras/src/backend/numpy/numpy.py b/keras/src/backend/numpy/numpy.py index fa098a029978..0793cee0d03b 100644 --- a/keras/src/backend/numpy/numpy.py +++ b/keras/src/backend/numpy/numpy.py @@ -227,14 +227,14 @@ def arctanh(x): return np.arctanh(x) -def argmax(x, axis=None): +def argmax(x, axis=None, keepdims=False): axis = standardize_axis_for_numpy(axis) - return np.argmax(x, axis=axis).astype("int32") + return np.argmax(x, axis=axis, keepdims=keepdims).astype("int32") -def argmin(x, axis=None): +def argmin(x, axis=None, keepdims=False): axis = standardize_axis_for_numpy(axis) - return np.argmin(x, axis=axis).astype("int32") + return np.argmin(x, axis=axis, keepdims=keepdims).astype("int32") def argsort(x, axis=-1): diff --git a/keras/src/backend/tensorflow/numpy.py b/keras/src/backend/tensorflow/numpy.py index d1a12e53a780..d033c40536be 100644 --- a/keras/src/backend/tensorflow/numpy.py +++ b/keras/src/backend/tensorflow/numpy.py @@ -745,16 +745,35 @@ def arctanh(x): return tf.math.atanh(x) +def _keepdims(x, y, axis): + if axis is None: + shape = [1 for _ in range(len(x.shape))] + else: + shape = [tf.shape(x)[i] for i in range(len(x.shape))] + for axis in tree.flatten(axis): + shape[axis] = 1 + y = tf.reshape(y, shape) + return y + + -def argmax(x, axis=None): +def argmax(x, axis=None, keepdims=False): + _x = x if axis is None: x = tf.reshape(x, [-1]) - return tf.cast(tf.argmax(x, axis=axis), dtype="int32") + y = tf.cast(tf.argmax(x, axis=axis), dtype="int32") + if keepdims: + y = _keepdims(_x, y, axis) + return y -def argmin(x, axis=None): +def argmin(x, axis=None, keepdims=False): + _x = x if axis is None: x = tf.reshape(x, [-1]) - return tf.cast(tf.argmin(x, axis=axis), dtype="int32") + y = tf.cast(tf.argmin(x, axis=axis), dtype="int32") + if keepdims: + y = _keepdims(_x, y, axis) + return y def argsort(x, axis=-1): diff --git a/keras/src/backend/torch/numpy.py b/keras/src/backend/torch/numpy.py index 13f6a47c77ac..257159e46440 100644 --- a/keras/src/backend/torch/numpy.py +++ b/keras/src/backend/torch/numpy.py @@ -318,24 +318,24 @@ def arctanh(x): return torch.arctanh(x) -def argmax(x, axis=None): +def argmax(x, axis=None, keepdims=False): x = convert_to_tensor(x) # TODO: torch.argmax doesn't support bool if standardize_dtype(x.dtype) == "bool": x = cast(x, "uint8") - return cast(torch.argmax(x, dim=axis), dtype="int32") + return cast(torch.argmax(x, dim=axis, keepdim=keepdims), dtype="int32") -def argmin(x, axis=None): +def argmin(x, axis=None, keepdims=False): x = convert_to_tensor(x) # TODO: torch.argmin doesn't support bool if standardize_dtype(x.dtype) ==
"bool": x = cast(x, "uint8") - return cast(torch.argmin(x, dim=axis), dtype="int32") + return cast(torch.argmin(x, dim=axis, keepdim=keepdims), dtype="int32") def argsort(x, axis=-1): diff --git a/keras/src/ops/numpy.py b/keras/src/ops/numpy.py index 266eb3047024..f4c8ffb7f71d 100644 --- a/keras/src/ops/numpy.py +++ b/keras/src/ops/numpy.py @@ -352,7 +352,7 @@ def all(x, axis=None, keepdims=False): for the last to the first axis. keepdims: If `True`, axes which are reduced are left in the result as dimensions with size one. With this option, the result will - broadcast correctly against the input array. Defaults to`False`. + broadcast correctly against the input array. Defaults to `False`. Returns: The tensor containing the logical AND reduction over the `axis`. @@ -416,7 +416,7 @@ def any(x, axis=None, keepdims=False): for the last to the first axis. keepdims: If `True`, axes which are reduced are left in the result as dimensions with size one. With this option, the result will - broadcast correctly against the input array. Defaults to`False`. + broadcast correctly against the input array. Defaults to `False`. Returns: The tensor containing the logical OR reduction over the `axis`. @@ -963,14 +963,17 @@ def arctanh(x): class Argmax(Operation): - def __init__(self, axis=None): + def __init__(self, axis=None, keepdims=False): super().__init__() self.axis = axis + self.keepdims = keepdims def call(self, x): - return backend.numpy.argmax(x, axis=self.axis) + return backend.numpy.argmax(x, axis=self.axis, keepdims=self.keepdims) def compute_output_spec(self, x): + if self.keepdims: + return KerasTensor(x.shape, dtype="int32") if self.axis is None: return KerasTensor([], dtype="int32") return KerasTensor( @@ -979,13 +982,15 @@ def compute_output_spec(self, x): @keras_export(["keras.ops.argmax", "keras.ops.numpy.argmax"]) -def argmax(x, axis=None): +def argmax(x, axis=None, keepdims=False): """Returns the indices of the maximum values along an axis. Args: x: Input tensor. axis: By default, the index is into the flattened tensor, otherwise along the specified axis. + keepdims: If this is set to `True`, the axes which are reduced are left + in the result as dimensions with size one. Defaults to `False`. Returns: Tensor of indices. It has the same shape as `x`, with the dimension @@ -1004,19 +1009,22 @@ def argmax(x, axis=None): array([2, 2], dtype=int32) """ if any_symbolic_tensors((x,)): - return Argmax(axis=axis).symbolic_call(x) - return backend.numpy.argmax(x, axis=axis) + return Argmax(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.argmax(x, axis=axis, keepdims=keepdims) class Argmin(Operation): - def __init__(self, axis=None): + def __init__(self, axis=None, keepdims=False): super().__init__() self.axis = axis + self.keepdims = keepdims def call(self, x): - return backend.numpy.argmin(x, axis=self.axis) + return backend.numpy.argmin(x, axis=self.axis, keepdims=self.keepdims) def compute_output_spec(self, x): + if self.keepdims: + return KerasTensor(x.shape, dtype="int32") if self.axis is None: return KerasTensor([], dtype="int32") return KerasTensor( @@ -1025,13 +1033,15 @@ def compute_output_spec(self, x): @keras_export(["keras.ops.argmin", "keras.ops.numpy.argmin"]) -def argmin(x, axis=None): +def argmin(x, axis=None, keepdims=False): """Returns the indices of the minium values along an axis. Args: x: Input tensor. axis: By default, the index is into the flattened tensor, otherwise along the specified axis. 
+ keepdims: If this is set to `True`, the axes which are reduced are left + in the result as dimensions with size one. Defaults to `False`. Returns: Tensor of indices. It has the same shape as `x`, with the dimension @@ -1050,8 +1060,8 @@ def argmin(x, axis=None): array([0, 0], dtype=int32) """ if any_symbolic_tensors((x,)): - return Argmin(axis=axis).symbolic_call(x) - return backend.numpy.argmin(x, axis=axis) + return Argmin(axis=axis, keepdims=keepdims).symbolic_call(x) + return backend.numpy.argmin(x, axis=axis, keepdims=keepdims) class Argsort(Operation): @@ -3165,7 +3175,7 @@ def linspace( num: Number of samples to generate. Defaults to `50`. Must be non-negative. endpoint: If `True`, `stop` is the last sample. Otherwise, it is - not included. Defaults to`True`. + not included. Defaults to `True`. retstep: If `True`, return `(samples, step)`, where `step` is the spacing between samples. dtype: The type of the output tensor. @@ -3502,7 +3512,7 @@ def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0): are returned. num: Number of samples to generate. Defaults to `50`. endpoint: If `True`, `stop` is the last sample. Otherwise, it is not - included. Defaults to`True`. + included. Defaults to `True`. base: The base of the log space. Defaults to `10`. dtype: The type of the output tensor. axis: The axis in the result to store the samples. Relevant only @@ -3605,8 +3615,8 @@ def max(x, axis=None, keepdims=False, initial=None): axis: Axis or axes along which to operate. By default, flattened input is used. keepdims: If this is set to `True`, the axes which are reduced are left - in the result as dimensions with size one. Defaults to`False`. - initial: The minimum value of an output element. Defaults to`None`. + in the result as dimensions with size one. Defaults to `False`. + initial: The minimum value of an output element. Defaults to `None`. Returns: Maximum of `x`. @@ -3798,8 +3808,8 @@ def min(x, axis=None, keepdims=False, initial=None): axis: Axis or axes along which to operate. By default, flattened input is used. keepdims: If this is set to `True`, the axes which are reduced are left - in the result as dimensions with size one. Defaults to`False`. - initial: The maximum value of an output element. Defaults to`None`. + in the result as dimensions with size one. Defaults to `False`. + initial: The maximum value of an output element. Defaults to `None`. Returns: Minimum of `x`. 
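As a side note on the `max`/`min` docstrings touched above, `initial` acts as an extra element in the reduction and therefore bounds the result. For example (illustrative values):

    from keras import ops

    x = [[1, 2], [3, 4]]
    # Column maxima are [3, 4]; with initial=5 each becomes max(., 5).
    print(ops.max(x, axis=0, initial=5))  # [5, 5]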
diff --git a/keras/src/ops/numpy_test.py b/keras/src/ops/numpy_test.py index 6cea9ef86066..f08fb7d1cecf 100644 --- a/keras/src/ops/numpy_test.py +++ b/keras/src/ops/numpy_test.py @@ -1004,16 +1004,20 @@ def test_arctanh(self): def test_argmax(self): x = KerasTensor((None, 3)) self.assertEqual(knp.argmax(x).shape, ()) + self.assertEqual(knp.argmax(x, keepdims=True).shape, (None, 3)) x = KerasTensor((None, 3, 3)) self.assertEqual(knp.argmax(x, axis=1).shape, (None, 3)) + self.assertEqual(knp.argmax(x, keepdims=True).shape, (None, 3, 3)) def test_argmin(self): x = KerasTensor((None, 3)) self.assertEqual(knp.argmin(x).shape, ()) + self.assertEqual(knp.argmin(x, keepdims=True).shape, (None, 3)) x = KerasTensor((None, 3, 3)) self.assertEqual(knp.argmin(x, axis=1).shape, (None, 3)) + self.assertEqual(knp.argmin(x, keepdims=True).shape, (None, 3, 3)) def test_argsort(self): x = KerasTensor((None, 3)) @@ -1567,10 +1571,12 @@ def test_arctanh(self): def test_argmax(self): x = KerasTensor((2, 3)) self.assertEqual(knp.argmax(x).shape, ()) + self.assertEqual(knp.argmax(x, keepdims=True).shape, (2, 3)) def test_argmin(self): x = KerasTensor((2, 3)) self.assertEqual(knp.argmin(x).shape, ()) + self.assertEqual(knp.argmin(x, keepdims=True).shape, (2, 3)) def test_argsort(self): x = KerasTensor((2, 3)) @@ -3044,17 +3050,31 @@ def test_argmax(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.argmax(x), np.argmax(x)) self.assertAllClose(knp.argmax(x, axis=1), np.argmax(x, axis=1)) + self.assertAllClose( + knp.argmax(x, keepdims=True), np.argmax(x, keepdims=True) + ) self.assertAllClose(knp.Argmax()(x), np.argmax(x)) self.assertAllClose(knp.Argmax(axis=1)(x), np.argmax(x, axis=1)) + self.assertAllClose(knp.Argmax()(x), np.argmax(x)) + self.assertAllClose( + knp.Argmax(keepdims=True)(x), np.argmax(x, keepdims=True) + ) + def test_argmin(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.argmin(x), np.argmin(x)) self.assertAllClose(knp.argmin(x, axis=1), np.argmin(x, axis=1)) + self.assertAllClose( + knp.argmin(x, keepdims=True), np.argmin(x, keepdims=True) + ) self.assertAllClose(knp.Argmin()(x), np.argmin(x)) self.assertAllClose(knp.Argmin(axis=1)(x), np.argmin(x, axis=1)) + self.assertAllClose( + knp.Argmin(keepdims=True)(x), np.argmin(x, keepdims=True) + ) def test_argsort(self): x = np.array([[1, 2, 3], [4, 5, 6]]) From 1135431db4513465f4bf6e65bd5111f68cd6b718 Mon Sep 17 00:00:00 2001 From: Uwe Schmidt Date: Thu, 18 Apr 2024 21:05:46 +0200 Subject: [PATCH 027/101] Fix small bug in model.save_weights (#19545) --- keras/src/models/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras/src/models/model.py b/keras/src/models/model.py index 5678f34c6828..ca12459354f8 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -315,7 +315,7 @@ def save_weights(self, filepath, overwrite=True): at the target location, or instead ask the user via an interactive prompt. """ - return saving_api.save_weights(self, filepath, overwrite=True) + return saving_api.save_weights(self, filepath, overwrite=overwrite) @traceback_utils.filter_traceback def load_weights(self, filepath, skip_mismatch=False, **kwargs): From c256741e5760bc61367515d743c14a8733b6f15b Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Thu, 18 Apr 2024 12:53:07 -0700 Subject: [PATCH 028/101] Update public APIs. 
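This exports `eigh` into the public `keras.ops` and `keras.ops.linalg`
namespaces. A minimal usage sketch, mirroring the reconstruction check in
`linalg_test.py` (illustrative only):

```python
import numpy as np
from keras import ops

x = np.random.rand(3, 3)
x = x @ x.T  # symmetric, so an eigendecomposition via `eigh` applies
w, v = ops.eigh(x)
w, v = ops.convert_to_numpy(w), ops.convert_to_numpy(v)
assert np.allclose((v * w[None, :]) @ v.T, x, atol=1e-4)  # x == v @ diag(w) @ v.T
```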
--- keras/api/_tf_keras/keras/ops/__init__.py | 1 + keras/api/_tf_keras/keras/ops/linalg/__init__.py | 1 + keras/api/ops/__init__.py | 1 + keras/api/ops/linalg/__init__.py | 1 + 4 files changed, 4 insertions(+) diff --git a/keras/api/_tf_keras/keras/ops/__init__.py b/keras/api/_tf_keras/keras/ops/__init__.py index 1253650e9bc0..f62b9ac82234 100644 --- a/keras/api/_tf_keras/keras/ops/__init__.py +++ b/keras/api/_tf_keras/keras/ops/__init__.py @@ -27,6 +27,7 @@ from keras.src.ops.linalg import cholesky from keras.src.ops.linalg import det from keras.src.ops.linalg import eig +from keras.src.ops.linalg import eigh from keras.src.ops.linalg import inv from keras.src.ops.linalg import lu_factor from keras.src.ops.linalg import norm diff --git a/keras/api/_tf_keras/keras/ops/linalg/__init__.py b/keras/api/_tf_keras/keras/ops/linalg/__init__.py index da392d6c2490..58120054320a 100644 --- a/keras/api/_tf_keras/keras/ops/linalg/__init__.py +++ b/keras/api/_tf_keras/keras/ops/linalg/__init__.py @@ -7,6 +7,7 @@ from keras.src.ops.linalg import cholesky from keras.src.ops.linalg import det from keras.src.ops.linalg import eig +from keras.src.ops.linalg import eigh from keras.src.ops.linalg import inv from keras.src.ops.linalg import lu_factor from keras.src.ops.linalg import norm diff --git a/keras/api/ops/__init__.py b/keras/api/ops/__init__.py index 1253650e9bc0..f62b9ac82234 100644 --- a/keras/api/ops/__init__.py +++ b/keras/api/ops/__init__.py @@ -27,6 +27,7 @@ from keras.src.ops.linalg import cholesky from keras.src.ops.linalg import det from keras.src.ops.linalg import eig +from keras.src.ops.linalg import eigh from keras.src.ops.linalg import inv from keras.src.ops.linalg import lu_factor from keras.src.ops.linalg import norm diff --git a/keras/api/ops/linalg/__init__.py b/keras/api/ops/linalg/__init__.py index da392d6c2490..58120054320a 100644 --- a/keras/api/ops/linalg/__init__.py +++ b/keras/api/ops/linalg/__init__.py @@ -7,6 +7,7 @@ from keras.src.ops.linalg import cholesky from keras.src.ops.linalg import det from keras.src.ops.linalg import eig +from keras.src.ops.linalg import eigh from keras.src.ops.linalg import inv from keras.src.ops.linalg import lu_factor from keras.src.ops.linalg import norm From 2f344347fd63a0f0ec1c7c736bdf161382c20f67 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Thu, 18 Apr 2024 14:48:20 -0700 Subject: [PATCH 029/101] eigh should work on JAX GPU --- keras/src/ops/linalg_test.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/keras/src/ops/linalg_test.py b/keras/src/ops/linalg_test.py index 4d62f6a3ce2b..e1f0decf64b0 100644 --- a/keras/src/ops/linalg_test.py +++ b/keras/src/ops/linalg_test.py @@ -357,14 +357,6 @@ def test_eig(self): def test_eigh(self): x = np.random.rand(2, 3, 3) x = x @ x.transpose((0, 2, 1)) - if backend.backend() == "jax": - import jax - - if jax.default_backend() == "gpu": - # eigh not implemented for jax on gpu backend - with self.assertRaises(NotImplementedError): - linalg.eigh(x) - return w, v = map(ops.convert_to_numpy, linalg.eigh(x)) x_reconstructed = (v * w[..., None, :]) @ v.transpose((0, 2, 1)) self.assertAllClose(x_reconstructed, x, atol=1e-4) From da9af61032d5eaf7b5c64bcc7b215bff42dbe19a Mon Sep 17 00:00:00 2001 From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com> Date: Thu, 18 Apr 2024 20:54:26 -0500 Subject: [PATCH 030/101] Copy init to keras/__init__.py (#19551) --- api_gen.py | 38 ++---------------------- keras/__init__.py | 76 ++++++++++++++++++++++++++++++----------------- 2 files changed, 51 
insertions(+), 63 deletions(-) diff --git a/api_gen.py b/api_gen.py index 28fac8fa4f10..c3f1b9c80b7a 100644 --- a/api_gen.py +++ b/api_gen.py @@ -47,6 +47,8 @@ def create_legacy_directory(package_dir): ) with open(os.path.join(api_dir, "__init__.py"), "w") as f: f.write(init_file) + with open(os.path.join(package_dir, "__init__.py"), "w") as f: + f.write(init_file) # Remove the import of `_tf_keras` in `keras/_tf_keras/keras/__init__.py` init_file = init_file.replace("from keras.api import _tf_keras\n", "\n") with open(os.path.join(tf_keras_dirpath, "__init__.py"), "w") as f: @@ -126,40 +128,6 @@ def export_version_string(api_init_fname): f.write(contents) -def update_package_init(init_fname): - contents = """ -# Import everything from /api/ into keras. -from keras.api import * # noqa: F403 -from keras.api import __version__ # Import * ignores names start with "_". - -import os - -# Add everything in /api/ to the module search path. -__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405 - -# Don't pollute namespace. -del os - -# Never autocomplete `.src` or `.api` on an imported keras object. -def __dir__(): - keys = dict.fromkeys((globals().keys())) - keys.pop("src") - keys.pop("api") - return list(keys) - - -# Don't import `.src` or `.api` during `from keras import *`. -__all__ = [ - name - for name in globals().keys() - if not (name.startswith("_") or name in ("src", "api")) -]""" - with open(init_fname) as f: - init_contents = f.read() - with open(init_fname, "w") as f: - f.write(init_contents.replace("\nfrom keras import api", contents)) - - def build(): # Backup the `keras/__init__.py` and restore it on error in api gen. root_path = os.path.dirname(os.path.abspath(__file__)) @@ -181,8 +149,6 @@ def build(): namex.generate_api_files( "keras", code_directory="src", target_directory="api" ) - # Creates `keras/__init__.py` importing from `keras/api` - update_package_init(build_init_fname) # Add __version__ to keras package export_version_string(build_api_init_fname) # Creates `_tf_keras` with full keras API diff --git a/keras/__init__.py b/keras/__init__.py index 6276b51e1f85..1750a42e8699 100644 --- a/keras/__init__.py +++ b/keras/__init__.py @@ -4,30 +4,52 @@ since your modifications would be overwritten. """ -import os - -# Import everything from /api/ into keras. -from keras.api import * # noqa: F403 -from keras.api import __version__ # Import * ignores names start with "_". - -# Add everything in /api/ to the module search path. -__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405 - -# Don't pollute namespace. -del os - - -# Never autocomplete `.src` or `.api` on an imported keras object. -def __dir__(): - keys = dict.fromkeys((globals().keys())) - keys.pop("src") - keys.pop("api") - return list(keys) - - -# Don't import `.src` or `.api` during `from keras import *`. 
-__all__ = [ - name - for name in globals().keys() - if not (name.startswith("_") or name in ("src", "api")) -] +from keras.api import _tf_keras +from keras.api import activations +from keras.api import applications +from keras.api import backend +from keras.api import callbacks +from keras.api import config +from keras.api import constraints +from keras.api import datasets +from keras.api import distribution +from keras.api import dtype_policies +from keras.api import export +from keras.api import initializers +from keras.api import layers +from keras.api import legacy +from keras.api import losses +from keras.api import metrics +from keras.api import mixed_precision +from keras.api import models +from keras.api import ops +from keras.api import optimizers +from keras.api import preprocessing +from keras.api import quantizers +from keras.api import random +from keras.api import regularizers +from keras.api import saving +from keras.api import tree +from keras.api import utils +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.backend.common.stateless_scope import StatelessScope +from keras.src.backend.exports import Variable +from keras.src.backend.exports import device +from keras.src.backend.exports import name_scope +from keras.src.dtype_policies.dtype_policy import DTypePolicy +from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy +from keras.src.initializers.initializer import Initializer +from keras.src.layers.core.input_layer import Input +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.losses.loss import Loss +from keras.src.metrics.metric import Metric +from keras.src.models.model import Model +from keras.src.models.sequential import Sequential +from keras.src.ops.function import Function +from keras.src.ops.operation import Operation +from keras.src.optimizers.optimizer import Optimizer +from keras.src.quantizers.quantizers import Quantizer +from keras.src.regularizers.regularizers import Regularizer +from keras.src.version import __version__ +from keras.src.version import version From 04891e89da87c2a433beb12ff7dad59403c71671 Mon Sep 17 00:00:00 2001 From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com> Date: Thu, 18 Apr 2024 21:38:38 -0500 Subject: [PATCH 031/101] Revert "Copy init to keras/__init__.py (#19551)" (#19552) This reverts commit da9af61032d5eaf7b5c64bcc7b215bff42dbe19a. --- api_gen.py | 38 ++++++++++++++++++++++-- keras/__init__.py | 76 +++++++++++++++++------------------------------ 2 files changed, 63 insertions(+), 51 deletions(-) diff --git a/api_gen.py b/api_gen.py index c3f1b9c80b7a..28fac8fa4f10 100644 --- a/api_gen.py +++ b/api_gen.py @@ -47,8 +47,6 @@ def create_legacy_directory(package_dir): ) with open(os.path.join(api_dir, "__init__.py"), "w") as f: f.write(init_file) - with open(os.path.join(package_dir, "__init__.py"), "w") as f: - f.write(init_file) # Remove the import of `_tf_keras` in `keras/_tf_keras/keras/__init__.py` init_file = init_file.replace("from keras.api import _tf_keras\n", "\n") with open(os.path.join(tf_keras_dirpath, "__init__.py"), "w") as f: @@ -128,6 +126,40 @@ def export_version_string(api_init_fname): f.write(contents) +def update_package_init(init_fname): + contents = """ +# Import everything from /api/ into keras. +from keras.api import * # noqa: F403 +from keras.api import __version__ # Import * ignores names start with "_". + +import os + +# Add everything in /api/ to the module search path. 
+__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405 + +# Don't pollute namespace. +del os + +# Never autocomplete `.src` or `.api` on an imported keras object. +def __dir__(): + keys = dict.fromkeys((globals().keys())) + keys.pop("src") + keys.pop("api") + return list(keys) + + +# Don't import `.src` or `.api` during `from keras import *`. +__all__ = [ + name + for name in globals().keys() + if not (name.startswith("_") or name in ("src", "api")) +]""" + with open(init_fname) as f: + init_contents = f.read() + with open(init_fname, "w") as f: + f.write(init_contents.replace("\nfrom keras import api", contents)) + + def build(): # Backup the `keras/__init__.py` and restore it on error in api gen. root_path = os.path.dirname(os.path.abspath(__file__)) @@ -149,6 +181,8 @@ def build(): namex.generate_api_files( "keras", code_directory="src", target_directory="api" ) + # Creates `keras/__init__.py` importing from `keras/api` + update_package_init(build_init_fname) # Add __version__ to keras package export_version_string(build_api_init_fname) # Creates `_tf_keras` with full keras API diff --git a/keras/__init__.py b/keras/__init__.py index 1750a42e8699..6276b51e1f85 100644 --- a/keras/__init__.py +++ b/keras/__init__.py @@ -4,52 +4,30 @@ since your modifications would be overwritten. """ -from keras.api import _tf_keras -from keras.api import activations -from keras.api import applications -from keras.api import backend -from keras.api import callbacks -from keras.api import config -from keras.api import constraints -from keras.api import datasets -from keras.api import distribution -from keras.api import dtype_policies -from keras.api import export -from keras.api import initializers -from keras.api import layers -from keras.api import legacy -from keras.api import losses -from keras.api import metrics -from keras.api import mixed_precision -from keras.api import models -from keras.api import ops -from keras.api import optimizers -from keras.api import preprocessing -from keras.api import quantizers -from keras.api import random -from keras.api import regularizers -from keras.api import saving -from keras.api import tree -from keras.api import utils -from keras.src.backend.common.keras_tensor import KerasTensor -from keras.src.backend.common.stateless_scope import StatelessScope -from keras.src.backend.exports import Variable -from keras.src.backend.exports import device -from keras.src.backend.exports import name_scope -from keras.src.dtype_policies.dtype_policy import DTypePolicy -from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy -from keras.src.initializers.initializer import Initializer -from keras.src.layers.core.input_layer import Input -from keras.src.layers.input_spec import InputSpec -from keras.src.layers.layer import Layer -from keras.src.losses.loss import Loss -from keras.src.metrics.metric import Metric -from keras.src.models.model import Model -from keras.src.models.sequential import Sequential -from keras.src.ops.function import Function -from keras.src.ops.operation import Operation -from keras.src.optimizers.optimizer import Optimizer -from keras.src.quantizers.quantizers import Quantizer -from keras.src.regularizers.regularizers import Regularizer -from keras.src.version import __version__ -from keras.src.version import version +import os + +# Import everything from /api/ into keras. +from keras.api import * # noqa: F403 +from keras.api import __version__ # Import * ignores names start with "_". 
+ +# Add everything in /api/ to the module search path. +__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405 + +# Don't pollute namespace. +del os + + +# Never autocomplete `.src` or `.api` on an imported keras object. +def __dir__(): + keys = dict.fromkeys((globals().keys())) + keys.pop("src") + keys.pop("api") + return list(keys) + + +# Don't import `.src` or `.api` during `from keras import *`. +__all__ = [ + name + for name in globals().keys() + if not (name.startswith("_") or name in ("src", "api")) +] From 59213cec6a8853f9cdb16a58e88e9ec3e4944699 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Fri, 19 Apr 2024 10:52:35 -0700 Subject: [PATCH 032/101] sum-reduce inlined losses --- keras/src/trainers/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras/src/trainers/trainer.py b/keras/src/trainers/trainer.py index 8475a72b0112..f9bb1a7388e2 100644 --- a/keras/src/trainers/trainer.py +++ b/keras/src/trainers/trainer.py @@ -317,7 +317,7 @@ def metrics(self): if loss is not None: losses.append(loss) for loss in self.losses: - losses.append(ops.cast(loss, dtype=backend.floatx())) + losses.append(ops.sum(ops.cast(loss, dtype=backend.floatx()))) if backend.backend() != "jax" and len(losses) == 0: raise ValueError( "No loss to compute. Provide a `loss` argument in `compile()`." From b560406ce7e9dca3434be947ab697229a8087e3e Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Sat, 20 Apr 2024 02:14:36 +0800 Subject: [PATCH 033/101] Remove the dependency on `tensorflow.experimental.numpy` and support negative indices for `take` and `take_along_axis` (#19556) * Remove `tfnp` * Update numpy api * Improve test coverage * Improve test coverage * Fix `Tri` and `Eye` and increase test converage * Update `round` test * Fix `jnp.round` * Fix `diag` bug for iou_metrics --- keras/src/backend/jax/numpy.py | 13 +- keras/src/backend/tensorflow/linalg.py | 5 +- keras/src/backend/tensorflow/math.py | 5 +- keras/src/backend/tensorflow/numpy.py | 462 ++++++++++++++++++++----- keras/src/backend/tensorflow/random.py | 7 +- keras/src/backend/torch/numpy.py | 37 +- keras/src/ops/numpy.py | 28 +- keras/src/ops/numpy_test.py | 142 +++++++- 8 files changed, 566 insertions(+), 133 deletions(-) diff --git a/keras/src/backend/jax/numpy.py b/keras/src/backend/jax/numpy.py index 36605efc811e..73b569c8bf46 100644 --- a/keras/src/backend/jax/numpy.py +++ b/keras/src/backend/jax/numpy.py @@ -974,7 +974,18 @@ def tensordot(x1, x2, axes=2): @sparse.elementwise_unary(linear=False) def round(x, decimals=0): x = convert_to_tensor(x) - return jnp.round(x, decimals=decimals) + + # jnp.round doesn't support decimals < 0 for integers + x_dtype = standardize_dtype(x.dtype) + if "int" in x_dtype and decimals < 0: + factor = cast(math.pow(10, decimals), config.floatx()) + x = cast(x, config.floatx()) + x = jnp.multiply(x, factor) + x = jnp.round(x) + x = jnp.divide(x, factor) + return cast(x, x_dtype) + else: + return jnp.round(x, decimals=decimals) def tile(x, repeats): diff --git a/keras/src/backend/tensorflow/linalg.py b/keras/src/backend/tensorflow/linalg.py index 15459f411331..e76b00ea3fdf 100644 --- a/keras/src/backend/tensorflow/linalg.py +++ b/keras/src/backend/tensorflow/linalg.py @@ -1,5 +1,4 @@ import tensorflow as tf -from tensorflow.experimental import numpy as tfnp from keras.src.backend import config from keras.src.backend import standardize_dtype @@ -36,6 +35,8 @@ def lu_factor(a): def norm(x, ord=None, axis=None, 
keepdims=False): + from keras.src.backend.tensorflow.numpy import moveaxis + x = convert_to_tensor(x) x_shape = x.shape ndim = x_shape.rank @@ -129,7 +130,7 @@ def norm(x, ord=None, axis=None, keepdims=False): keepdims=keepdims, ) elif ord in ("nuc", 2, -2): - x = tfnp.moveaxis(x, axis, (-2, -1)) + x = moveaxis(x, axis, (-2, -1)) if ord == -2: x = tf.math.reduce_min( tf.linalg.svd(x, compute_uv=False), axis=-1 diff --git a/keras/src/backend/tensorflow/math.py b/keras/src/backend/tensorflow/math.py index ffc7f99f0da0..f9071b3c2a06 100644 --- a/keras/src/backend/tensorflow/math.py +++ b/keras/src/backend/tensorflow/math.py @@ -1,5 +1,4 @@ import tensorflow as tf -from tensorflow.experimental import numpy as tfnp from keras.src.backend import config from keras.src.backend import standardize_dtype @@ -260,6 +259,8 @@ def solve(a, b): def norm(x, ord=None, axis=None, keepdims=False): + from keras.src.backend.tensorflow.numpy import moveaxis + x = convert_to_tensor(x) x_shape = x.shape ndim = x_shape.rank @@ -328,7 +329,7 @@ def norm(x, ord=None, axis=None, keepdims=False): keepdims=keepdims, ) else: - x = tfnp.moveaxis(x, axis, (-2, -1)) + x = moveaxis(x, axis, (-2, -1)) if ord == -2: x = tf.math.reduce_min( tf.linalg.svd(x, compute_uv=False), axis=-1 diff --git a/keras/src/backend/tensorflow/numpy.py b/keras/src/backend/tensorflow/numpy.py index d033c40536be..3cd7c0441380 100644 --- a/keras/src/backend/tensorflow/numpy.py +++ b/keras/src/backend/tensorflow/numpy.py @@ -7,7 +7,6 @@ import numpy as np import tensorflow as tf -from tensorflow.experimental import numpy as tfnp from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops from keras.src import tree @@ -575,12 +574,18 @@ def mean(x, axis=None, keepdims=False): def max(x, axis=None, keepdims=False, initial=None): + x = convert_to_tensor(x) + # The TensorFlow numpy API implementation doesn't support `initial` so we # handle it manually here. if initial is not None: - return tf.math.maximum( - tfnp.max(x, axis=axis, keepdims=keepdims), initial - ) + if standardize_dtype(x.dtype) == "bool": + x = tf.reduce_any(x, axis=axis, keepdims=keepdims) + x = tf.math.maximum(tf.cast(x, "int32"), tf.cast(initial, "int32")) + return tf.cast(x, "bool") + else: + x = tf.reduce_max(x, axis=axis, keepdims=keepdims) + return tf.math.maximum(x, initial) # TensorFlow returns -inf by default for an empty list, but for consistency # with other backends and the numpy API we want to throw in this case. @@ -592,7 +597,10 @@ def max(x, axis=None, keepdims=False, initial=None): message="Cannot compute the max of an empty tensor.", ) - return tfnp.max(x, axis=axis, keepdims=keepdims) + if standardize_dtype(x.dtype) == "bool": + return tf.reduce_any(x, axis=axis, keepdims=keepdims) + else: + return tf.reduce_max(x, axis=axis, keepdims=keepdims) def ones(shape, dtype=None): @@ -650,8 +658,6 @@ def append(x1, x2, axis=None): def arange(start, stop=None, step=1, dtype=None): - # tfnp.arange has trouble with dynamic Tensors in compiled function. - # tf.range does not. 
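+    # Note: `tf.range`, used below, natively handles dynamic tensors inside
+    # compiled functions.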
if dtype is None: dtypes_to_resolve = [ getattr(start, "dtype", type(start)), @@ -797,27 +803,39 @@ def array(x, dtype=None): def average(x, axis=None, weights=None): x = convert_to_tensor(x) - axis = to_tuple_or_list(axis) - dtypes_to_resolve = [x.dtype, float] - if weights is not None: + + if weights is None: # Treat all weights as 1 + dtype = dtypes.result_type(x.dtype, float) + x = tf.cast(x, dtype) + avg = tf.reduce_mean(x, axis=axis) + else: weights = convert_to_tensor(weights) - dtypes_to_resolve.append(weights.dtype) - result_dtype = dtypes.result_type(*dtypes_to_resolve) - compute_dtype = result_dtype - # TODO: since tfnp.average incorrectly promote bfloat16 to float64, we - # need to cast to float32 first and then cast back to bfloat16 - if compute_dtype == "bfloat16": - compute_dtype = "float32" - x = tf.cast(x, compute_dtype) - if weights is not None: - weights = tf.cast(weights, compute_dtype) - if axis is None: - x = tfnp.average(x, weights=weights, axis=None) - return tf.cast(x, result_dtype) - for a in axis: - # `tfnp.average` does not handle multiple axes. - x = tfnp.average(x, weights=weights, axis=a) - return tf.cast(x, result_dtype) + dtype = dtypes.result_type(x.dtype, weights.dtype, float) + x = tf.cast(x, dtype) + weights = tf.cast(weights, dtype) + + def _rank_equal_case(): + weights_sum = tf.reduce_sum(weights, axis=axis) + return tf.reduce_sum(x * weights, axis=axis) / weights_sum + + def _rank_not_equal_case(): + weights_sum = tf.reduce_sum(weights) + axes = tf.convert_to_tensor([[axis], [0]]) + return tf.tensordot(x, weights, axes) / weights_sum + + if axis is None: + avg = _rank_equal_case() + else: + # We condition on rank rather than shape equality, because if we do + # the latter, when the shapes are partially unknown but the ranks + # are known and different, np_utils.cond will run shape checking on + # the true branch, which will raise a shape-checking error. 
+ avg = tf.cond( + tf.equal(tf.rank(x), tf.rank(weights)), + _rank_equal_case, + _rank_not_equal_case, + ) + return avg def broadcast_to(x, shape): @@ -876,7 +894,8 @@ def conj(x): @sparse.elementwise_unary def copy(x): - return tfnp.copy(x) + x = convert_to_tensor(x) + return tf.identity(x) @sparse.densifying_unary(1) @@ -908,17 +927,52 @@ def count_nonzero(x, axis=None): def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None): x1 = convert_to_tensor(x1) x2 = convert_to_tensor(x2) + if axis is not None: + axisa = axis + axisb = axis + axisc = axis + x1 = moveaxis(x1, axisa, -1) + x2 = moveaxis(x2, axisb, -1) + dtype = dtypes.result_type(x1.dtype, x2.dtype) x1 = tf.cast(x1, dtype) x2 = tf.cast(x2, dtype) - return tfnp.cross( - x1, - x2, - axisa=axisa, - axisb=axisb, - axisc=axisc, - axis=axis, + + def maybe_pad_zeros(x, size_of_last_dim): + def pad_zeros(x): + return tf.pad( + x, + tf.concat( + [ + tf.zeros([tf.rank(x) - 1, 2], "int32"), + tf.constant([[0, 1]], "int32"), + ], + axis=0, + ), + ) + + return tf.cond( + tf.equal(size_of_last_dim, 2), lambda: pad_zeros(x), lambda: x + ) + + x1_dim = tf.shape(x1)[-1] + x2_dim = tf.shape(x2)[-1] + x1 = maybe_pad_zeros(x1, x1_dim) + x2 = maybe_pad_zeros(x2, x2_dim) + + # Broadcast each other + shape = tf.shape(x1) + shape = tf.broadcast_dynamic_shape(shape, tf.shape(x2)) + x1 = tf.broadcast_to(x1, shape) + x2 = tf.broadcast_to(x2, shape) + + c = tf.linalg.cross(x1, x2) + c = tf.cond( + (x1_dim == 2) & (x2_dim == 2), + lambda: c[..., 2], + lambda: moveaxis(c, -1, axisc), ) + return c def cumprod(x, axis=None, dtype=None): @@ -944,20 +998,70 @@ def cumsum(x, axis=None, dtype=None): def diag(x, k=0): - return tfnp.diag(x, k=k) + x = convert_to_tensor(x) + if len(x.shape) == 1: + return tf.cond( + tf.equal(tf.size(x), 0), + lambda: tf.zeros([builtins.abs(k), builtins.abs(k)], dtype=x.dtype), + lambda: tf.linalg.diag(x, k=k), + ) + elif len(x.shape) == 2: + return diagonal(x, offset=k) + else: + raise ValueError(f"`x` must be 1d or 2d. Received: x.shape={x.shape}") def diagonal(x, offset=0, axis1=0, axis2=1): - return tfnp.diagonal( - x, - offset=offset, - axis1=axis1, - axis2=axis2, + x = convert_to_tensor(x) + x_rank = x.ndim + if ( + offset == 0 + and (axis1 == x_rank - 2 or axis1 == -2) + and (axis2 == x_rank - 1 or axis2 == -1) + ): + return tf.linalg.diag_part(x) + + x = moveaxis(x, (axis1, axis2), (-2, -1)) + x_shape = tf.shape(x) + + def _zeros(): + return tf.zeros(tf.concat([x_shape[:-1], [0]], 0), dtype=x.dtype) + + x, offset = tf.cond( + tf.logical_or( + tf.less_equal(offset, -1 * x_shape[-2]), + tf.greater_equal(offset, x_shape[-1]), + ), + _zeros, + lambda: (x, offset), ) + return tf.linalg.diag_part(x, k=offset) def diff(a, n=1, axis=-1): - return tfnp.diff(a, n=n, axis=axis) + a = convert_to_tensor(a) + if n == 0: + return a + elif n < 0: + raise ValueError(f"Order `n` must be non-negative. Received n={n}") + elif a.ndim == 0: + raise ValueError( + "`diff` requires input that is at least one dimensional. 
" + f"Received: a={a}" + ) + axis = canonicalize_axis(axis, a.ndim) + slice1 = [slice(None)] * a.ndim + slice2 = [slice(None)] * a.ndim + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + slice1_tuple = tuple(slice1) + slice2_tuple = tuple(slice2) + for _ in range(n): + if standardize_dtype(a.dtype) == "bool": + a = tf.not_equal(a[slice1_tuple], a[slice2_tuple]) + else: + a = tf.subtract(a[slice1_tuple], a[slice2_tuple]) + return a def digitize(x, bins): @@ -1152,7 +1256,6 @@ def isclose(x1, x2): @sparse.densifying_unary(True) def isfinite(x): - # `tfnp.isfinite` requires `enable_numpy_behavior`, so we reimplement it. x = convert_to_tensor(x) dtype_as_dtype = tf.as_dtype(x.dtype) if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric: @@ -1161,7 +1264,6 @@ def isfinite(x): def isinf(x): - # `tfnp.isinf` requires `enable_numpy_behavior`, so we reimplement it. x = convert_to_tensor(x) dtype_as_dtype = tf.as_dtype(x.dtype) if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric: @@ -1170,7 +1272,6 @@ def isinf(x): def isnan(x): - # `tfnp.isnan` requires `enable_numpy_behavior`, so we reimplement it. x = convert_to_tensor(x) dtype_as_dtype = tf.as_dtype(x.dtype) if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric: @@ -1206,15 +1307,37 @@ def linspace( float, ] dtype = dtypes.result_type(*dtypes_to_resolve) - return tfnp.linspace( - start, - stop, - num=num, - endpoint=endpoint, - retstep=retstep, - dtype=dtype, - axis=axis, - ) + else: + dtype = standardize_dtype(dtype) + start = convert_to_tensor(start, dtype=dtype) + stop = convert_to_tensor(stop, dtype=dtype) + if num < 0: + raise ValueError( + f"`num` must be a non-negative integer. Received: num={num}" + ) + step = tf.convert_to_tensor(np.nan) + if endpoint: + result = tf.linspace(start, stop, num, axis=axis) + if num > 1: + step = (stop - start) / (num - 1) + else: + # tf.linspace doesn't support endpoint=False, so we manually handle it + if num > 0: + step = (stop - start) / num + if num > 1: + new_stop = tf.cast(stop, step.dtype) - step + start = tf.cast(start, new_stop.dtype) + result = tf.linspace(start, new_stop, num, axis=axis) + else: + result = tf.linspace(start, stop, num, axis=axis) + if dtype is not None: + if "int" in dtype: + result = tf.floor(result) + result = tf.cast(result, dtype) + if retstep: + return (result, step) + else: + return result @sparse.densifying_unary(-np.inf) @@ -1271,9 +1394,6 @@ def logaddexp(x1, x2): dtype = dtypes.result_type(x1.dtype, x2.dtype, float) x1 = tf.cast(x1, dtype) x2 = tf.cast(x2, dtype) - - # Below is the same implementation as tfnp.logaddexp using all native - # ops to prevent incorrect promotion of bfloat16. 
delta = x1 - x2 return tf.where( tf.math.is_nan(delta), @@ -1300,24 +1420,15 @@ def logical_or(x1, x2): def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0): - if dtype is None: - dtypes_to_resolve = [ - getattr(start, "dtype", type(start)), - getattr(stop, "dtype", type(stop)), - float, - ] - dtype = dtypes.result_type(*dtypes_to_resolve) - start = tf.cast(start, dtype) - stop = tf.cast(stop, dtype) - return tfnp.logspace( - start, - stop, + result = linspace( + start=start, + stop=stop, num=num, endpoint=endpoint, - base=base, dtype=dtype, axis=axis, ) + return tf.pow(tf.cast(base, result.dtype), result) @sparse.elementwise_binary_union(tf.sparse.maximum, densify_mixed=True) @@ -1345,12 +1456,17 @@ def meshgrid(*x, indexing="xy"): def min(x, axis=None, keepdims=False, initial=None): x = convert_to_tensor(x) + # The TensorFlow numpy API implementation doesn't support `initial` so we # handle it manually here. if initial is not None: - return tf.math.minimum( - tfnp.min(x, axis=axis, keepdims=keepdims), initial - ) + if standardize_dtype(x.dtype) == "bool": + x = tf.reduce_all(x, axis=axis, keepdims=keepdims) + x = tf.math.minimum(tf.cast(x, "int32"), tf.cast(initial, "int32")) + return tf.cast(x, "bool") + else: + x = tf.reduce_min(x, axis=axis, keepdims=keepdims) + return tf.math.minimum(x, initial) # TensorFlow returns inf by default for an empty list, but for consistency # with other backends and the numpy API we want to throw in this case. @@ -1362,7 +1478,10 @@ def min(x, axis=None, keepdims=False, initial=None): message="Cannot compute the min of an empty tensor.", ) - return tfnp.min(x, axis=axis, keepdims=keepdims) + if standardize_dtype(x.dtype) == "bool": + return tf.reduce_all(x, axis=axis, keepdims=keepdims) + else: + return tf.reduce_min(x, axis=axis, keepdims=keepdims) @sparse.elementwise_binary_union(tf.sparse.minimum, densify_mixed=True) @@ -1392,7 +1511,24 @@ def mod(x1, x2): def moveaxis(x, source, destination): - return tfnp.moveaxis(x, source=source, destination=destination) + x = convert_to_tensor(x) + + _source = to_tuple_or_list(source) + _destination = to_tuple_or_list(destination) + _source = tuple(canonicalize_axis(i, x.ndim) for i in _source) + _destination = tuple(canonicalize_axis(i, x.ndim) for i in _destination) + if len(_source) != len(_destination): + raise ValueError( + "Inconsistent number of `source` and `destination`. " + f"Received: source={source}, destination={destination}" + ) + # Directly return x if no movement is required + if _source == _destination: + return x + perm = [i for i in range(x.ndim) if i not in _source] + for dest, src in sorted(zip(_destination, _source)): + perm.insert(dest, src) + return tf.transpose(x, perm) def nan_to_num(x): @@ -1615,8 +1751,6 @@ def reciprocal(x): def repeat(x, repeats, axis=None): - # tfnp.repeat has trouble with dynamic Tensors in compiled function. - # tf.repeat does not. x = convert_to_tensor(x) # TODO: tf.repeat doesn't support uint16 if standardize_dtype(x.dtype) == "uint16": @@ -1640,7 +1774,14 @@ def reshape(x, newshape): def roll(x, shift, axis=None): - return tfnp.roll(x, shift, axis=axis) + x = convert_to_tensor(x) + if axis is not None: + return tf.roll(x, shift=shift, axis=axis) + + # If axis is None, the roll happens as a 1-d tensor. 
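+    # We flatten, roll along axis 0, then restore the original shape.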
+ original_shape = tf.shape(x) + x = tf.roll(tf.reshape(x, [-1]), shift, 0) + return tf.reshape(x, original_shape) @sparse.elementwise_unary @@ -1695,14 +1836,12 @@ def split(x, indices_or_sections, axis=0): if not isinstance(indices_or_sections, int): # `tf.split` requires `num_or_size_splits`, so we need to convert # `indices_or_sections` to the appropriate format. - # The following implementation offers better compatibility for the - # tensor argument `indices_or_sections` than original `tfnp.split`. total_size = x.shape[axis] indices_or_sections = convert_to_tensor(indices_or_sections) start_size = indices_or_sections[0:1] end_size = total_size - indices_or_sections[-1:] num_or_size_splits = tf.concat( - [start_size, tfnp.diff(indices_or_sections), end_size], axis=0 + [start_size, diff(indices_or_sections), end_size], axis=0 ) else: num_or_size_splits = indices_or_sections @@ -1726,7 +1865,34 @@ def std(x, axis=None, keepdims=False): def swapaxes(x, axis1, axis2): - return tfnp.swapaxes(x, axis1=axis1, axis2=axis2) + x = convert_to_tensor(x) + + if ( + x.shape.rank is not None + and isinstance(axis1, int) + and isinstance(axis2, int) + ): + # This branch makes sure `perm` is statically known, to avoid a + # not-compile-time-constant XLA error. + axis1 = canonicalize_axis(axis1, x.ndim) + axis2 = canonicalize_axis(axis2, x.ndim) + + # Directly return x if no movement is required + if axis1 == axis2: + return x + + perm = list(range(x.ndim)) + perm[axis1] = axis2 + perm[axis2] = axis1 + else: + x_rank = tf.rank(x) + axis1 = tf.where(axis1 < 0, tf.add(axis1, x_rank), axis1) + axis2 = tf.where(axis2 < 0, tf.add(axis2, x_rank), axis2) + perm = tf.range(x_rank) + perm = tf.tensor_scatter_nd_update( + perm, [[axis1], [axis2]], [axis2, axis1] + ) + return tf.transpose(x, perm) def take(x, indices, axis=None): @@ -1737,9 +1903,7 @@ def take(x, indices, axis=None): f"`x.dtype={x.dtype}` when `indices` is a sparse tensor; " "densifying `indices`." ) - return tfnp.take( - x, convert_to_tensor(indices, sparse=False), axis=axis - ) + return take(x, convert_to_tensor(indices, sparse=False), axis=axis) if axis is None: x = tf.reshape(x, (-1,)) elif axis != 0: @@ -1748,9 +1912,7 @@ def take(x, indices, axis=None): f"`axis={axis}` when `indices` is a sparse tensor; " "densifying `indices`." ) - return tfnp.take( - x, convert_to_tensor(indices, sparse=False), axis=axis - ) + return take(x, convert_to_tensor(indices, sparse=False), axis=axis) output = tf.nn.safe_embedding_lookup_sparse( embedding_weights=tf.convert_to_tensor(x), sparse_ids=tf.sparse.expand_dims(indices, axis=-1), @@ -1758,11 +1920,72 @@ def take(x, indices, axis=None): ) output.set_shape(indices.shape + output.shape[len(indices.shape) :]) return output - return tfnp.take(x, indices, axis=axis) + + x = convert_to_tensor(x) + indices = convert_to_tensor(indices) + if axis is None: + x = tf.reshape(x, [-1]) + axis = 0 + # Correct the indices using "fill" mode which is the same as in jax + indices = tf.where( + indices < 0, + indices + tf.cast(tf.shape(x)[axis], indices.dtype), + indices, + ) + return tf.gather(x, indices, axis=axis) def take_along_axis(x, indices, axis=None): - return tfnp.take_along_axis(x, indices, axis=axis) + x = convert_to_tensor(x) + indices = convert_to_tensor(indices, "int64") + if axis is None: + if indices.ndim != 1: + raise ValueError( + "`indices` must be 1D if axis=None. 
" + f"Received: indices.shape={indices.shape}" + ) + return take_along_axis(tf.reshape(x, [-1]), indices, 0) + rank = tf.rank(x) + static_axis = axis + axis = axis + rank if axis < 0 else axis + + # Broadcast shapes to match, ensure that the axis of interest is not + # broadcast. + x_shape_original = tf.shape(x, out_type=indices.dtype) + indices_shape_original = tf.shape(indices, out_type=indices.dtype) + x_shape = tf.tensor_scatter_nd_update(x_shape_original, [[axis]], [1]) + indices_shape = tf.tensor_scatter_nd_update( + indices_shape_original, [[axis]], [1] + ) + broadcasted_shape = tf.broadcast_dynamic_shape(x_shape, indices_shape) + x_shape = tf.tensor_scatter_nd_update( + broadcasted_shape, [[axis]], [x_shape_original[axis]] + ) + indices_shape = tf.tensor_scatter_nd_update( + broadcasted_shape, [[axis]], [indices_shape_original[axis]] + ) + x = tf.broadcast_to(x, x_shape) + indices = tf.broadcast_to(indices, indices_shape) + + # Save indices shape so we can restore it later. + possible_result_shape = indices.shape + + # Correct the indices using "fill" mode which is the same as in jax + indices = tf.where(indices < 0, indices + x_shape[static_axis], indices) + + x = swapaxes(x, static_axis, -1) + indices = swapaxes(indices, static_axis, -1) + + x_shape = tf.shape(x) + x = tf.reshape(x, [-1, x_shape[-1]]) + indices_shape = tf.shape(indices) + indices = tf.reshape(indices, [-1, indices_shape[-1]]) + + result = tf.gather(x, indices, batch_dims=1) + result = tf.reshape(result, indices_shape) + result = swapaxes(result, static_axis, -1) + result.set_shape(possible_result_shape) + return result @sparse.elementwise_unary @@ -1800,7 +2023,6 @@ def tensordot(x1, x2, axes=2): @sparse.elementwise_unary def round(x, decimals=0): - # `tfnp.round` requires `enable_numpy_behavior`, so we reimplement it. if decimals == 0: return tf.round(x) x_dtype = x.dtype @@ -1821,7 +2043,6 @@ def round(x, decimals=0): def tile(x, repeats): - # The TFNP implementation is buggy, we roll our own. x = convert_to_tensor(x) repeats = tf.reshape(convert_to_tensor(repeats, dtype="int32"), [-1]) repeats_size = tf.size(repeats) @@ -1844,12 +2065,39 @@ def trace(x, offset=0, axis1=0, axis2=1): dtype = standardize_dtype(x.dtype) if dtype not in ("int64", "uint32", "uint64"): dtype = dtypes.result_type(dtype, "int32") - return tfnp.trace(x, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype) + x_shape = tf.shape(x) + x = moveaxis(x, (axis1, axis2), (-2, -1)) + # Mask out the diagonal and reduce. 
+ x = tf.where( + eye(x_shape[axis1], x_shape[axis2], k=offset, dtype="bool"), + x, + tf.zeros_like(x), + ) + # The output dtype is set to "int32" if the input dtype is "bool" + if standardize_dtype(x.dtype) == "bool": + x = tf.cast(x, "int32") + return tf.cast(tf.reduce_sum(x, axis=(-2, -1)), dtype) def tri(N, M=None, k=0, dtype=None): - dtype = dtype or config.floatx() - return tfnp.tri(N, M=M, k=k, dtype=dtype) + M = M if M is not None else N + dtype = standardize_dtype(dtype or config.floatx()) + if k < 0: + lower = -k - 1 + if lower > N: + r = tf.zeros([N, M], dtype=dtype) + else: + o = tf.ones([N, M], dtype="bool") + r = tf.cast( + tf.logical_not(tf.linalg.band_part(o, lower, -1)), dtype=dtype + ) + else: + o = tf.ones([N, M], dtype=dtype) + if k > M: + r = o + else: + r = tf.linalg.band_part(o, -1, k) + return r def tril(x, k=0): @@ -2069,7 +2317,29 @@ def sum(x, axis=None, keepdims=False): def eye(N, M=None, k=0, dtype=None): dtype = dtype or config.floatx() - return tfnp.eye(N, M=M, k=k, dtype=dtype) + if not M: + M = N + # Making sure N, M and k are `int` + N, M, k = int(N), int(M), int(k) + if k >= M or -k >= N: + # tf.linalg.diag will raise an error in this case + return zeros([N, M], dtype=dtype) + if k == 0: + return tf.eye(N, M, dtype=dtype) + # We need the precise length, otherwise tf.linalg.diag will raise an error + diag_len = builtins.min(N, M) + if k > 0: + if N >= M: + diag_len -= k + elif N + k > M: + diag_len = M - k + elif k <= 0: + if M >= N: + diag_len += k + elif M - k > N: + diag_len = N + k + diagonal_ = tf.ones([diag_len], dtype=dtype) + return tf.linalg.diag(diagonal=diagonal_, num_rows=N, num_cols=M, k=k) def floor_divide(x1, x2): diff --git a/keras/src/backend/tensorflow/random.py b/keras/src/backend/tensorflow/random.py index eeb38a6aa523..0212610085d8 100644 --- a/keras/src/backend/tensorflow/random.py +++ b/keras/src/backend/tensorflow/random.py @@ -1,5 +1,4 @@ import tensorflow as tf -from tensorflow.experimental import numpy as tfnp from keras.src.backend.common import standardize_dtype from keras.src.backend.config import floatx @@ -87,12 +86,14 @@ def dropout(inputs, rate, noise_shape=None, seed=None): def shuffle(x, axis=0, seed=None): + from keras.src.backend.tensorflow.numpy import swapaxes + seed = tf_draw_seed(seed) if axis == 0: return tf.random.experimental.stateless_shuffle(x, seed=seed) - x = tfnp.swapaxes(x, axis1=0, axis2=axis) + x = swapaxes(x, axis1=0, axis2=axis) x = tf.random.experimental.stateless_shuffle(x, seed=seed) - x = tfnp.swapaxes(x, axis1=0, axis2=axis) + x = swapaxes(x, axis1=0, axis2=axis) return x diff --git a/keras/src/backend/torch/numpy.py b/keras/src/backend/torch/numpy.py index 257159e46440..9e252b8469b8 100644 --- a/keras/src/backend/torch/numpy.py +++ b/keras/src/backend/torch/numpy.py @@ -172,8 +172,11 @@ def max(x, axis=None, keepdims=False, initial=None): result = result.values if initial is not None: - initial = convert_to_tensor(initial) - return torch.maximum(result, torch.full(result.shape, initial)) + dtype = to_torch_dtype(result.dtype) + initial = convert_to_tensor(initial, dtype=dtype) + return torch.maximum( + result, torch.full(result.shape, initial, dtype=dtype) + ) return result @@ -744,8 +747,15 @@ def linspace( dtype = dtypes.result_type(*dtypes_to_resolve) dtype = to_torch_dtype(dtype) - if endpoint is False: - stop = stop - ((stop - start) / num) + step = convert_to_tensor(torch.nan) + if endpoint: + if num > 1: + step = (stop - start) / (num - 1) + else: + if num > 0: + step = (stop - start) / 
num + if num > 1: + stop = stop - ((stop - start) / num) if hasattr(start, "__len__") and hasattr(stop, "__len__"): start = convert_to_tensor(start, dtype=dtype) stop = convert_to_tensor(stop, dtype=dtype) @@ -766,7 +776,7 @@ def linspace( device=get_device(), ) if retstep is True: - return (linspace, num) + return (linspace, step) return linspace @@ -949,7 +959,8 @@ def min(x, axis=None, keepdims=False, initial=None): result = result.values if initial is not None: - initial = convert_to_tensor(initial) + dtype = to_torch_dtype(result.dtype) + initial = convert_to_tensor(initial, dtype=dtype) return torch.minimum(result, initial) return result @@ -1265,6 +1276,13 @@ def swapaxes(x, axis1, axis2): def take(x, indices, axis=None): x = convert_to_tensor(x) indices = convert_to_tensor(indices).long() + # Correct the indices using "fill" mode which is the same as in jax + x_dim = x.shape[axis] if axis is not None else x.shape[0] + indices = torch.where( + indices < 0, + indices + x_dim, + indices, + ) if x.ndim == 2 and axis == 0: # This case is equivalent to embedding lookup. return torch.nn.functional.embedding(indices, x) @@ -1285,6 +1303,13 @@ def take(x, indices, axis=None): def take_along_axis(x, indices, axis=None): x = convert_to_tensor(x) indices = convert_to_tensor(indices).long() + # Correct the indices using "fill" mode which is the same as in jax + x_dim = x.shape[axis] if axis is not None else x.shape[0] + indices = torch.where( + indices < 0, + indices + x_dim, + indices, + ) return torch.take_along_dim(x, indices, dim=axis) diff --git a/keras/src/ops/numpy.py b/keras/src/ops/numpy.py index f4c8ffb7f71d..44583a5766c2 100644 --- a/keras/src/ops/numpy.py +++ b/keras/src/ops/numpy.py @@ -5266,14 +5266,18 @@ def trace(x, offset=0, axis1=0, axis2=1): class Tri(Operation): - def call(self, N, M=None, k=0, dtype=None): - return backend.numpy.tri(N, M=M, k=k, dtype=dtype) + def __init__(self, k=0, dtype=None): + super().__init__() + self.k = k + self.dtype = dtype or backend.floatx() - def compute_output_spec(self, N, M=None, k=0, dtype=None): + def call(self, N, M=None): + return backend.numpy.tri(N=N, M=M, k=self.k, dtype=self.dtype) + + def compute_output_spec(self, N, M=None): if M is None: M = N - dtype = dtype or backend.floatx() - return KerasTensor((N, M), dtype=dtype) + return KerasTensor((N, M), dtype=self.dtype) @keras_export(["keras.ops.tri", "keras.ops.numpy.tri"]) @@ -6021,14 +6025,18 @@ def ones(shape, dtype=None): class Eye(Operation): - def call(self, N, M=None, k=0, dtype=None): - return backend.numpy.eye(N, M=M, k=k, dtype=dtype) + def __init__(self, k=0, dtype=None): + super().__init__() + self.k = k + self.dtype = dtype or backend.floatx() - def compute_output_spec(self, N, M=None, k=0, dtype=None): + def call(self, N, M=None): + return backend.numpy.eye(N, M=M, k=self.k, dtype=self.dtype) + + def compute_output_spec(self, N, M=None): if M is None: M = N - dtype = dtype or backend.floatx() - return KerasTensor((N, M), dtype=dtype) + return KerasTensor((N, M), dtype=self.dtype) @keras_export(["keras.ops.eye", "keras.ops.numpy.eye"]) diff --git a/keras/src/ops/numpy_test.py b/keras/src/ops/numpy_test.py index f08fb7d1cecf..d4cdb2e59e55 100644 --- a/keras/src/ops/numpy_test.py +++ b/keras/src/ops/numpy_test.py @@ -2170,6 +2170,14 @@ def test_cross(self): self.assertAllClose(knp.Cross()(x1, y3), np.cross(x1, y3)) self.assertAllClose(knp.Cross()(x2, y3), np.cross(x2, y3)) + # Test axis is not None + self.assertAllClose( + knp.cross(x1, y1, axis=-1), np.cross(x1, y1, 
axis=-1) + ) + self.assertAllClose( + knp.Cross(axis=-1)(x1, y1), np.cross(x1, y1, axis=-1) + ) + def test_einsum(self): x = np.arange(24).reshape([2, 3, 4]).astype("float32") y = np.arange(24).reshape([2, 4, 3]).astype("float32") @@ -2452,6 +2460,10 @@ def test_linspace(self): knp.Linspace(num=5, endpoint=False)(0, 10), np.linspace(0, 10, 5, endpoint=False), ) + self.assertAllClose( + knp.Linspace(num=0, endpoint=False)(0, 10), + np.linspace(0, 10, 0, endpoint=False), + ) start = np.zeros([2, 3, 4]) stop = np.ones([2, 3, 4]) @@ -2659,27 +2671,33 @@ def test_take(self): self.assertAllClose(knp.Take()(x, 0), np.take(x, 0)) self.assertAllClose(knp.Take(axis=1)(x, 0), np.take(x, 0, axis=1)) - # test with multi-dimensional indices + # Test with multi-dimensional indices rng = np.random.default_rng(0) x = rng.standard_normal((2, 3, 4, 5)) indices = rng.integers(0, 4, (6, 7)) self.assertAllClose( - knp.take(x, indices, axis=2), - np.take(x, indices, axis=2), + knp.take(x, indices, axis=2), np.take(x, indices, axis=2) ) - # test with negative axis + # Test with negative axis self.assertAllClose( - knp.take(x, indices, axis=-2), - np.take(x, indices, axis=-2), + knp.take(x, indices, axis=-2), np.take(x, indices, axis=-2) ) - # test with axis=None & x.ndim=2 + + # Test with axis=None & x.ndim=2 x = np.array(([1, 2], [3, 4])) indices = np.array([2, 3]) self.assertAllClose( knp.take(x, indices, axis=None), np.take(x, indices, axis=None) ) + # Test with negative indices + x = rng.standard_normal((2, 3, 4, 5)) + indices = rng.integers(-3, 0, (6, 7)) + self.assertAllClose( + knp.take(x, indices, axis=2), np.take(x, indices, axis=2) + ) + @parameterized.named_parameters( named_product( [ @@ -2744,6 +2762,30 @@ def test_take_along_axis(self): np.take_along_axis(x, indices, axis=2), ) + # Test with axis=None + x = np.arange(12).reshape([1, 1, 3, 4]) + indices = np.array([1, 2, 3], dtype=np.int32) + self.assertAllClose( + knp.take_along_axis(x, indices, axis=None), + np.take_along_axis(x, indices, axis=None), + ) + self.assertAllClose( + knp.TakeAlongAxis(axis=None)(x, indices), + np.take_along_axis(x, indices, axis=None), + ) + + # Test with negative indices + x = np.arange(12).reshape([1, 1, 3, 4]) + indices = np.full([1, 4, 1, 1], -1, dtype=np.int32) + self.assertAllClose( + knp.take_along_axis(x, indices, axis=2), + np.take_along_axis(x, indices, axis=2), + ) + self.assertAllClose( + knp.TakeAlongAxis(axis=2)(x, indices), + np.take_along_axis(x, indices, axis=2), + ) + def test_tensordot(self): x = np.arange(24).reshape([1, 2, 3, 4]).astype("float32") y = np.arange(24).reshape([3, 4, 1, 2]).astype("float32") @@ -3448,6 +3490,10 @@ def test_diff(self): self.assertAllClose(knp.diff(x, n=2, axis=0), np.diff(x, n=2, axis=0)) self.assertAllClose(knp.diff(x, n=2, axis=1), np.diff(x, n=2, axis=1)) + # Test n=0 + x = np.array([1, 2, 4, 7, 0]) + self.assertAllClose(knp.diff(x, n=0), np.diff(x, n=0)) + def test_dot(self): x = np.arange(24).reshape([2, 3, 4]).astype("float32") y = np.arange(12).reshape([4, 3]).astype("float32") @@ -3865,6 +3911,20 @@ def test_round(self): self.assertAllClose(knp.round(x), np.round(x)) self.assertAllClose(knp.Round()(x), np.round(x)) + # Test with decimal=1 + self.assertAllClose(knp.round(x, decimals=1), np.round(x, decimals=1)) + self.assertAllClose(knp.Round(decimals=1)(x), np.round(x, decimals=1)) + + # Test with integers + x = np.array([[1, 2, 3], [3, 2, 1]], dtype="int32") + self.assertAllClose(knp.round(x, decimals=1), np.round(x, decimals=1)) + 
self.assertAllClose(knp.Round(decimals=1)(x), np.round(x, decimals=1)) + + # Test with integers and decimal < 0 + x = np.array([[123, 234, 345], [345, 234, 123]], dtype="int32") + self.assertAllClose(knp.round(x, decimals=-1), np.round(x, decimals=-1)) + self.assertAllClose(knp.Round(decimals=-1)(x), np.round(x, decimals=-1)) + def test_sign(self): x = np.array([[1, -2, 3], [-3, 2, -1]]) self.assertAllClose(knp.sign(x), np.sign(x)) @@ -4028,6 +4088,14 @@ def test_tile(self): self.assertAllClose(knp.tile(x, [2, 3]), np.tile(x, [2, 3])) self.assertAllClose(knp.Tile([2, 3])(x), np.tile(x, [2, 3])) + # If repeats.ndim > x.ndim + self.assertAllClose(knp.tile(x, [2, 3, 4]), np.tile(x, [2, 3, 4])) + self.assertAllClose(knp.Tile([2, 3, 4])(x), np.tile(x, [2, 3, 4])) + + # If repeats.ndim < x.ndim + self.assertAllClose(knp.tile(x, [2]), np.tile(x, [2])) + self.assertAllClose(knp.Tile([2])(x), np.tile(x, [2])) + def test_trace(self): x = np.arange(24).reshape([1, 2, 3, 4]) self.assertAllClose(knp.trace(x), np.trace(x)) @@ -4186,7 +4254,22 @@ def test_eye(self): self.assertAllClose(knp.Eye()(3), np.eye(3)) self.assertAllClose(knp.Eye()(3, 4), np.eye(3, 4)) - self.assertAllClose(knp.Eye()(3, 4, 1), np.eye(3, 4, 1)) + self.assertAllClose(knp.Eye(k=1)(3, 4), np.eye(3, 4, k=1)) + + # Test k >= N + self.assertAllClose(knp.Eye(k=3)(3), np.eye(3, k=3)) + + # Test k > 0 and N >= M + self.assertAllClose(knp.Eye(k=1)(3), np.eye(3, k=1)) + + # Test k > 0 and N < M and N + k > M + self.assertAllClose(knp.Eye(k=2)(3, 4), np.eye(3, 4, k=2)) + + # Test k < 0 and M >= N + self.assertAllClose(knp.Eye(k=-1)(3), np.eye(3, k=-1)) + + # Test k < 0 and M < N and M - k > N + self.assertAllClose(knp.Eye(k=-2)(4, 3), np.eye(4, 3, k=-2)) def test_arange(self): self.assertAllClose(knp.arange(3), np.arange(3)) @@ -4228,7 +4311,16 @@ def test_tri(self): self.assertAllClose(knp.Tri()(3), np.tri(3)) self.assertAllClose(knp.Tri()(3, 4), np.tri(3, 4)) - self.assertAllClose(knp.Tri()(3, 4, 1), np.tri(3, 4, 1)) + self.assertAllClose(knp.Tri(k=1)(3, 4), np.tri(3, 4, 1)) + + # Test k < 0 + self.assertAllClose(knp.Tri(k=-1)(3), np.tri(3, k=-1)) + + # Test -k-1 > N + self.assertAllClose(knp.Tri(k=-5)(3), np.tri(3, k=-5)) + + # Test k > M + self.assertAllClose(knp.Tri(k=4)(3), np.tri(3, k=4)) def create_sparse_tensor(x, indices_from=None, start=0, delta=2): @@ -5074,6 +5166,18 @@ def test_max(self, dtype): self.assertEqual(standardize_dtype(knp.max(x).dtype), expected_dtype) self.assertEqual(knp.Max().symbolic_call(x).dtype, expected_dtype) + # Test with initial + initial = 1 + expected_dtype = standardize_dtype( + jnp.max(x_jax, initial=initial).dtype + ) + self.assertEqual( + standardize_dtype(knp.max(x, initial=initial).dtype), expected_dtype + ) + self.assertEqual( + knp.Max(initial=initial).symbolic_call(x).dtype, expected_dtype + ) + @parameterized.named_parameters(named_product(dtype=ALL_DTYPES)) def test_ones(self, dtype): import jax.numpy as jnp @@ -6045,19 +6149,19 @@ def test_eye(self, dtype): expected_dtype, ) self.assertEqual( - standardize_dtype(knp.Eye().symbolic_call(3, dtype=dtype).dtype), + standardize_dtype(knp.Eye(dtype=dtype).symbolic_call(3).dtype), expected_dtype, ) expected_dtype = standardize_dtype(jnp.eye(3, 4, 1, dtype=dtype).dtype) self.assertEqual( - standardize_dtype(knp.eye(3, 4, 1, dtype=dtype).dtype), + standardize_dtype(knp.eye(3, 4, k=1, dtype=dtype).dtype), expected_dtype, ) self.assertEqual( standardize_dtype( - knp.Eye().symbolic_call(3, 4, 1, dtype=dtype).dtype + knp.Eye(k=1, 
dtype=dtype).symbolic_call(3, 4).dtype ), expected_dtype, ) @@ -6743,6 +6847,18 @@ def test_min(self, dtype): self.assertEqual(standardize_dtype(knp.min(x).dtype), expected_dtype) self.assertEqual(knp.Min().symbolic_call(x).dtype, expected_dtype) + # Test with initial + initial = 0 + expected_dtype = standardize_dtype( + jnp.min(x_jax, initial=initial).dtype + ) + self.assertEqual( + standardize_dtype(knp.min(x, initial=initial).dtype), expected_dtype + ) + self.assertEqual( + knp.Min(initial=initial).symbolic_call(x).dtype, expected_dtype + ) + @parameterized.named_parameters( named_product(dtypes=itertools.combinations(ALL_DTYPES, 2)) ) @@ -7518,7 +7634,7 @@ def test_tri(self, dtype): expected_dtype, ) self.assertEqual( - standardize_dtype(knp.Tri().symbolic_call(3, dtype=dtype).dtype), + standardize_dtype(knp.Tri(dtype=dtype).symbolic_call(3).dtype), expected_dtype, ) From 2c59ea8afa2b6af1e4136980cc5ff0655b2883ab Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Fri, 19 Apr 2024 11:22:30 -0700 Subject: [PATCH 034/101] Add op.select. --- keras/src/backend/jax/numpy.py | 4 ++ keras/src/backend/numpy/numpy.py | 4 ++ keras/src/backend/tensorflow/numpy.py | 4 ++ keras/src/backend/torch/numpy.py | 9 ++++ keras/src/ops/numpy.py | 63 +++++++++++++++++++++++++++ keras/src/ops/numpy_test.py | 13 ++++++ 6 files changed, 97 insertions(+) diff --git a/keras/src/backend/jax/numpy.py b/keras/src/backend/jax/numpy.py index 36605efc811e..e681419e1fe5 100644 --- a/keras/src/backend/jax/numpy.py +++ b/keras/src/backend/jax/numpy.py @@ -1143,3 +1143,7 @@ def correlate(x1, x2, mode="valid"): x1 = convert_to_tensor(x1) x2 = convert_to_tensor(x2) return jnp.correlate(x1, x2, mode) + + +def select(condlist, choicelist, default=0): + return jnp.select(condlist, choicelist, default=default) diff --git a/keras/src/backend/numpy/numpy.py b/keras/src/backend/numpy/numpy.py index 0793cee0d03b..ecbd201c4e43 100644 --- a/keras/src/backend/numpy/numpy.py +++ b/keras/src/backend/numpy/numpy.py @@ -1090,3 +1090,7 @@ def correlate(x1, x2, mode="valid"): x1 = convert_to_tensor(x1, dtype) x2 = convert_to_tensor(x2, dtype) return np.correlate(x1, x2, mode) + + +def select(condlist, choicelist, default=0): + return np.select(condlist, choicelist, default=default) diff --git a/keras/src/backend/tensorflow/numpy.py b/keras/src/backend/tensorflow/numpy.py index d033c40536be..282435bdb91e 100644 --- a/keras/src/backend/tensorflow/numpy.py +++ b/keras/src/backend/tensorflow/numpy.py @@ -2132,3 +2132,7 @@ def correlate(x1, x2, mode="valid"): x2 = tf.reshape(x2, (x2_len, 1, 1)) return tf.squeeze(tf.nn.conv1d(x1, x2, stride=1, padding=mode.upper())) + + +def select(condlist, choicelist, default=0): + return tfnp.select(condlist, choicelist, default=default) diff --git a/keras/src/backend/torch/numpy.py b/keras/src/backend/torch/numpy.py index 257159e46440..378a68689d5b 100644 --- a/keras/src/backend/torch/numpy.py +++ b/keras/src/backend/torch/numpy.py @@ -1567,3 +1567,12 @@ def correlate(x1, x2, mode="valid"): result = result[..., start_idx : start_idx + x1_len] return torch.squeeze(result) + + +def select(condlist, choicelist, default=0): + condlist = [convert_to_tensor(c) for c in condlist] + choicelist = [convert_to_tensor(c) for c in choicelist] + out = convert_to_tensor(default) + for c, v in reversed(list(zip(condlist, choicelist))): + out = torch.where(c, v, out) + return out diff --git a/keras/src/ops/numpy.py b/keras/src/ops/numpy.py index f4c8ffb7f71d..51fe60be7b04 100644 --- a/keras/src/ops/numpy.py +++ 
b/keras/src/ops/numpy.py
@@ -6172,3 +6172,66 @@ def correlate(x1, x2, mode="valid"):
     if any_symbolic_tensors((x1, x2)):
         return Correlate(mode=mode).symbolic_call(x1, x2)
     return backend.numpy.correlate(x1, x2, mode=mode)
+
+
+class Select(Operation):
+    def __init__(self):
+        super().__init__()
+
+    def call(self, condlist, choicelist, default=0):
+        return backend.numpy.select(condlist, choicelist, default)
+
+    def compute_output_spec(self, condlist, choicelist, default=0):
+        first_element = choicelist[0]
+        return KerasTensor(first_element.shape, dtype=first_element.dtype)
+
+
+@keras_export(["keras.ops.select", "keras.ops.numpy.select"])
+def select(condlist, choicelist, default=0):
+    """Return elements from `choicelist`, based on conditions in `condlist`.
+
+    Args:
+        condlist: List of boolean tensors.
+            The list of conditions which determine from which array
+            in choicelist the output elements are taken.
+            When multiple conditions are satisfied,
+            the first one encountered in condlist is used.
+        choicelist: List of tensors.
+            The list of tensors from which the output elements are taken.
+            This list has to be of the same length as `condlist`.
+        default: Optional scalar value.
+            The element inserted in the output
+            when all conditions evaluate to `False`.
+
+    Returns:
+        Tensor where the output at position `m` is the `m`-th element
+        of the tensor in `choicelist` where the `m`-th element of the
+        corresponding tensor in `condlist` is `True`.
+
+    Example:
+
+    ```python
+    from keras import ops
+
+    x = ops.arange(6)
+    condlist = [x<3, x>3]
+    choicelist = [x, x**2]
+    ops.select(condlist, choicelist, 42)
+    # Returns: tensor([0, 1, 2, 42, 16, 25])
+    ```
+    """
+    if not isinstance(condlist, list) or not isinstance(choicelist, list):
+        raise ValueError(
+            "condlist and choicelist must be lists. Received: "
+            f"type(condlist) = {type(condlist)}, "
+            f"type(choicelist) = {type(choicelist)}"
+        )
+    if not condlist or not choicelist:
+        raise ValueError(
+            "condlist and choicelist must not be empty. 
Received: " + f"condlist = {condlist}, " + f"choicelist = {choicelist}" + ) + if any_symbolic_tensors(condlist + choicelist + [default]): + return Select().symbolic_call(condlist, choicelist, default) + return backend.numpy.select(condlist, choicelist, default) diff --git a/keras/src/ops/numpy_test.py b/keras/src/ops/numpy_test.py index f08fb7d1cecf..9fd00f7d75f1 100644 --- a/keras/src/ops/numpy_test.py +++ b/keras/src/ops/numpy_test.py @@ -4169,6 +4169,19 @@ def test_correlate_different_size(self): knp.Correlate(mode="full")(x, y), np.correlate(x, y, mode="full") ) + def test_select(self): + x = np.arange(6) + condlist = [x < 3, x > 3] + choicelist = [x, x**2] + y = knp.select(condlist, choicelist, 42) + self.assertAllClose(y, [0, 1, 2, 42, 16, 25]) + + x = backend.KerasTensor((6,)) + condlist = [x < 3, x > 3] + choicelist = [x, x**2] + y = knp.select(condlist, choicelist, 42) + self.assertEqual(y.shape, (6,)) + class NumpyArrayCreateOpsCorrectnessTest(testing.TestCase): def test_ones(self): From a4315076922ad265cfe95b48681b4ced11f542fd Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Fri, 19 Apr 2024 12:47:40 -0700 Subject: [PATCH 035/101] Add new API for select --- keras/api/_tf_keras/keras/ops/__init__.py | 1 + keras/api/_tf_keras/keras/ops/numpy/__init__.py | 1 + keras/api/ops/__init__.py | 1 + keras/api/ops/numpy/__init__.py | 1 + 4 files changed, 4 insertions(+) diff --git a/keras/api/_tf_keras/keras/ops/__init__.py b/keras/api/_tf_keras/keras/ops/__init__.py index f62b9ac82234..08222c5cf78d 100644 --- a/keras/api/_tf_keras/keras/ops/__init__.py +++ b/keras/api/_tf_keras/keras/ops/__init__.py @@ -190,6 +190,7 @@ from keras.src.ops.numpy import reshape from keras.src.ops.numpy import roll from keras.src.ops.numpy import round +from keras.src.ops.numpy import select from keras.src.ops.numpy import sign from keras.src.ops.numpy import sin from keras.src.ops.numpy import sinh diff --git a/keras/api/_tf_keras/keras/ops/numpy/__init__.py b/keras/api/_tf_keras/keras/ops/numpy/__init__.py index 1d5434e40288..e0fa7488feb0 100644 --- a/keras/api/_tf_keras/keras/ops/numpy/__init__.py +++ b/keras/api/_tf_keras/keras/ops/numpy/__init__.py @@ -112,6 +112,7 @@ from keras.src.ops.numpy import reshape from keras.src.ops.numpy import roll from keras.src.ops.numpy import round +from keras.src.ops.numpy import select from keras.src.ops.numpy import sign from keras.src.ops.numpy import sin from keras.src.ops.numpy import sinh diff --git a/keras/api/ops/__init__.py b/keras/api/ops/__init__.py index f62b9ac82234..08222c5cf78d 100644 --- a/keras/api/ops/__init__.py +++ b/keras/api/ops/__init__.py @@ -190,6 +190,7 @@ from keras.src.ops.numpy import reshape from keras.src.ops.numpy import roll from keras.src.ops.numpy import round +from keras.src.ops.numpy import select from keras.src.ops.numpy import sign from keras.src.ops.numpy import sin from keras.src.ops.numpy import sinh diff --git a/keras/api/ops/numpy/__init__.py b/keras/api/ops/numpy/__init__.py index 1d5434e40288..e0fa7488feb0 100644 --- a/keras/api/ops/numpy/__init__.py +++ b/keras/api/ops/numpy/__init__.py @@ -112,6 +112,7 @@ from keras.src.ops.numpy import reshape from keras.src.ops.numpy import roll from keras.src.ops.numpy import round +from keras.src.ops.numpy import select from keras.src.ops.numpy import sign from keras.src.ops.numpy import sin from keras.src.ops.numpy import sinh From 29d10d1a6c58d02a4f9629ececddac6280c340cd Mon Sep 17 00:00:00 2001 From: hertschuh <1091026+hertschuh@users.noreply.github.com> Date: Fri, 19 Apr 2024 
12:48:23 -0700 Subject: [PATCH 036/101] Make `ops.abs` and `ops.absolute` consistent between backends. (#19563) - The TensorFlow implementation was missing `convert_to_tensor` - The sparse annotation was unnecessarily applied twice - Now `abs` calls `absolute` in all backends Also fixed TensorFlow `ops.select`. --- keras/src/backend/jax/numpy.py | 4 +--- keras/src/backend/tensorflow/numpy.py | 4 ++-- keras/src/backend/torch/numpy.py | 8 ++++---- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/keras/src/backend/jax/numpy.py b/keras/src/backend/jax/numpy.py index 19d6b81e78dc..eae67ba65881 100644 --- a/keras/src/backend/jax/numpy.py +++ b/keras/src/backend/jax/numpy.py @@ -223,10 +223,8 @@ def absolute(x): return jnp.absolute(x) -@sparse.elementwise_unary(linear=False) def abs(x): - x = convert_to_tensor(x) - return jnp.absolute(x) + return absolute(x) def all(x, axis=None, keepdims=False): diff --git a/keras/src/backend/tensorflow/numpy.py b/keras/src/backend/tensorflow/numpy.py index 154249b7b083..3712fb3bf84d 100644 --- a/keras/src/backend/tensorflow/numpy.py +++ b/keras/src/backend/tensorflow/numpy.py @@ -615,6 +615,7 @@ def zeros(shape, dtype=None): @sparse.elementwise_unary def absolute(x): + x = convert_to_tensor(x) # uintx and bool are always non-negative dtype = standardize_dtype(x.dtype) if "uint" in dtype or dtype == "bool": @@ -622,7 +623,6 @@ def absolute(x): return tf.abs(x) -@sparse.elementwise_unary def abs(x): return absolute(x) @@ -2405,4 +2405,4 @@ def correlate(x1, x2, mode="valid"): def select(condlist, choicelist, default=0): - return tfnp.select(condlist, choicelist, default=default) + return tf.experimental.numpy.select(condlist, choicelist, default=default) diff --git a/keras/src/backend/torch/numpy.py b/keras/src/backend/torch/numpy.py index 7edbdfeb4548..56cf96140783 100644 --- a/keras/src/backend/torch/numpy.py +++ b/keras/src/backend/torch/numpy.py @@ -201,10 +201,6 @@ def zeros_like(x, dtype=None): def absolute(x): - return abs(x) - - -def abs(x): x = convert_to_tensor(x) # bool are always non-negative if standardize_dtype(x.dtype) == "bool": @@ -212,6 +208,10 @@ def abs(x): return torch.abs(x) +def abs(x): + return absolute(x) + + def all(x, axis=None, keepdims=False): x = convert_to_tensor(x) if axis is None: From 6e42834e318414a438c8f58bde90eb39b47437dc Mon Sep 17 00:00:00 2001 From: Luke Wood Date: Fri, 19 Apr 2024 20:34:29 -0400 Subject: [PATCH 037/101] Add pickle support for Keras model (#19555) * Implement unit tests for pickling * Reformat model_test * Reformat model_test * Rename depickle to unpickle * Rename depickle to unpickle * Reformat * remove a comment --- keras/src/models/model.py | 26 ++++++++++++++++++++++++++ keras/src/models/model_test.py | 27 ++++++++++++++++++++++++++- 2 files changed, 52 insertions(+), 1 deletion(-) diff --git a/keras/src/models/model.py b/keras/src/models/model.py index ca12459354f8..3ee5abd4c728 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -1,8 +1,10 @@ import inspect +import io import json import typing import warnings +import keras.src.saving.saving_lib as saving_lib from keras.src import backend from keras.src import utils from keras.src.api_export import keras_export @@ -348,6 +350,30 @@ def load_weights(self, filepath, skip_mismatch=False, **kwargs): self, filepath, skip_mismatch=skip_mismatch, **kwargs ) + # Note: renaming this function will cause old pickles to be broken. 
+ # This is probably not a huge deal, as pickle should not be a recommended + # saving format -- it should only be supported for use with distributed + # computing frameworks. + @classmethod + def _unpickle_model(cls, bytesio): + # pickle is not safe regardless of what you do. + return saving_lib._load_model_from_fileobj( + bytesio, custom_objects=None, compile=True, safe_mode=False + ) + + def __reduce__(self): + """__reduce__ is used to customize the behavior of `pickle.pickle()`. + + The method returns a tuple of two elements: a function, and a list of + arguments to pass to that function. In this case we just leverage the + keras saving library.""" + buf = io.BytesIO() + saving_lib._save_model_to_fileobj(self, buf, "h5") + return ( + self._unpickle_model, + (buf,), + ) + def quantize(self, mode): """Quantize the weights of the model. diff --git a/keras/src/models/model_test.py b/keras/src/models/model_test.py index 871fc4bff19e..7fa91c5b95d8 100644 --- a/keras/src/models/model_test.py +++ b/keras/src/models/model_test.py @@ -1,3 +1,5 @@ +import pickle + import numpy as np import pytest from absl.testing import parameterized @@ -116,6 +118,29 @@ def call(self, x): ) self.assertIsInstance(new_model, Functional) + @parameterized.named_parameters( + ("single_output_1", _get_model_single_output), + ("single_output_2", _get_model_single_output), + ("single_output_3", _get_model_single_output), + ("single_output_4", _get_model_single_output), + ("single_list_output_1", _get_model_single_output_list), + ("single_list_output_2", _get_model_single_output_list), + ("single_list_output_3", _get_model_single_output_list), + ("single_list_output_4", _get_model_single_output_list), + ) + def test_functional_pickling(self, model_fn): + model = model_fn() + self.assertIsInstance(model, Functional) + model.compile() + x = np.random.rand(8, 3) + + reloaded_pickle = pickle.loads(pickle.dumps(model)) + + pred_reloaded = reloaded_pickle.predict(x) + pred = model.predict(x) + + self.assertAllClose(np.array(pred_reloaded), np.array(pred)) + @parameterized.named_parameters( ("single_output_1", _get_model_single_output, None), ("single_output_2", _get_model_single_output, "list"), @@ -138,7 +163,7 @@ def test_functional_single_output(self, model_fn, loss_type): loss = [loss] elif loss_type == "dict": loss = {"output_a": loss} - elif loss_type == "dict_lsit": + elif loss_type == "dict_list": loss = {"output_a": [loss]} model.compile( optimizer="sgd", From ee8b2f87c52c9991f06b71b3a91a1f4350807f25 Mon Sep 17 00:00:00 2001 From: Luke Wood Date: Fri, 19 Apr 2024 21:28:49 -0400 Subject: [PATCH 038/101] Ellipsis Serialization and tests (#19564) * Serialization and tests * Serialization and tests * Serialization and tests --- keras/src/saving/serialization_lib.py | 7 +++++++ keras/src/saving/serialization_lib_test.py | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/keras/src/saving/serialization_lib.py b/keras/src/saving/serialization_lib.py index 40125572809b..3adc832884ee 100644 --- a/keras/src/saving/serialization_lib.py +++ b/keras/src/saving/serialization_lib.py @@ -162,6 +162,11 @@ def serialize_keras_object(obj): "step": serialize_keras_object(obj.step), }, } + # Ellipsis is an instance, and ellipsis class is not in global scope. + # checking equality also fails elsewhere in the library, so we have + # to dynamically get the type. 
+ if isinstance(obj, type(Ellipsis)): + return {"class_name": "__ellipsis__", "config": {}} if isinstance(obj, backend.KerasTensor): history = getattr(obj, "_keras_history", None) if history: @@ -613,6 +618,8 @@ class ModifiedMeanSquaredError(keras.losses.MeanSquaredError): return np.array(inner_config["value"], dtype=inner_config["dtype"]) if config["class_name"] == "__bytes__": return inner_config["value"].encode("utf-8") + if config["class_name"] == "__ellipsis__": + return Ellipsis if config["class_name"] == "__slice__": return slice( deserialize_keras_object( diff --git a/keras/src/saving/serialization_lib_test.py b/keras/src/saving/serialization_lib_test.py index 06ed6ac7198f..80df36f3eeb9 100644 --- a/keras/src/saving/serialization_lib_test.py +++ b/keras/src/saving/serialization_lib_test.py @@ -107,6 +107,10 @@ def tuples_to_lists_str(x): reserialized_str = tuples_to_lists_str(reserialized) self.assertEqual(serialized_str, reserialized_str) + def test_serialize_ellipsis(self): + _, deserialized, _ = self.roundtrip(Ellipsis) + self.assertEqual(..., deserialized) + def test_tensors_and_shapes(self): x = ops.random.normal((2, 2), dtype="float64") obj = {"x": x} From 86b08c81354fe6b32847cce2aaecf8a0f8bcce7a Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Fri, 19 Apr 2024 21:02:49 -0700 Subject: [PATCH 039/101] Make TF one_hot input dtype less strict. --- keras/src/backend/tensorflow/nn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras/src/backend/tensorflow/nn.py b/keras/src/backend/tensorflow/nn.py index 2167087198f0..f74976177b7a 100644 --- a/keras/src/backend/tensorflow/nn.py +++ b/keras/src/backend/tensorflow/nn.py @@ -446,7 +446,7 @@ def conv_transpose( def one_hot(x, num_classes, axis=-1, dtype="float32", sparse=False): - x = convert_to_tensor(x) + x = convert_to_tensor(x, dtype="int64") if dtype is None: dtype = "float32" if sparse: From 261fa4e5d76f825c6f3fd70414306d63e09e0474 Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Sat, 20 Apr 2024 23:18:12 +0800 Subject: [PATCH 040/101] Fix einsum `_int8_call` (#19570) --- keras/src/layers/core/einsum_dense.py | 4 +++- keras/src/layers/core/einsum_dense_test.py | 22 ++++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/keras/src/layers/core/einsum_dense.py b/keras/src/layers/core/einsum_dense.py index f3b9cb31a1d8..c872c75a92b8 100644 --- a/keras/src/layers/core/einsum_dense.py +++ b/keras/src/layers/core/einsum_dense.py @@ -408,7 +408,9 @@ def _int8_build( self._custom_gradient_equation, self._kernel_reverse_transpose_axes, ) = _analyze_quantization_info(self.equation, self.input_spec.ndim) - self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) + self.inputs_quantizer = quantizers.AbsMaxQuantizer( + axis=self._input_reduced_axes + ) self._kernel = self.add_weight( name="kernel", shape=kernel_shape, diff --git a/keras/src/layers/core/einsum_dense_test.py b/keras/src/layers/core/einsum_dense_test.py index 098c3595b19f..eaa102a4df33 100644 --- a/keras/src/layers/core/einsum_dense_test.py +++ b/keras/src/layers/core/einsum_dense_test.py @@ -469,6 +469,28 @@ def test_quantize_int8(self): backend.standardize_dtype(layer.kernel_scale.dtype), "float32" ) + @parameterized.named_parameters( + ("btnh,nhd->btd", "btnh,nhd->btd", (None, 8), (1, 2, 2, 4)), + ("btd,ndh->btnh", "btd,ndh->btnh", (None, 2, 8), (1, 2, 4)), + ("btd,df->btf", "btd,df->btf", (None, 4), (1, 2, 4)), + ) + @pytest.mark.skipif( + backend.backend() == 
"numpy", + reason=f"{backend.backend()} does not support ops.custom_gradient.", + ) + def test_quantize_int8_with_specific_equations( + self, equation, output_shape, input_shape + ): + layer = layers.EinsumDense(equation=equation, output_shape=output_shape) + layer.build(input_shape) + x = ops.random.uniform(input_shape) + y_float = layer(x) + + layer.quantize("int8") + y_quantized = layer(x) + mse = ops.mean(ops.square(y_float - y_quantized)) + self.assertLess(mse, 1e-3) # A weak correctness test + @parameterized.named_parameters( ("int8", "int8"), ("float8", "float8"), From 2e31633d4d09576970e15c913cbc804c8052a799 Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Sat, 20 Apr 2024 13:10:25 -0400 Subject: [PATCH 041/101] CTC Decoding for JAX and Tensorflow (#19366) * Tensorflow OP for CTC decoding * JAX op for CTC greedy decoding * Update CTC decoding documentation * Fix linting issues * Fix trailing whitespace * Simplify returns in tensorflow CTC wrapper * Fix CTC decoding error messages * Fix line too long * Bug fixes to JAX CTC greedy decoder * Force int typecast in TF CTC decoder * Unit tests for CTC greedy decoding * Add unit test for CTC beam search decoding * Fix mask index set location in JAX CTC decoding * CTC beam search decoding for JAX * Fix unhandled token repetitions in ctc_beam_search_decode * Fix merge_repeated bug in CTC beam search decode * Fix beam storage and repetition bugs in JAX ctc_decode * Remove trailing whitespace * Fix ordering bug for ties in JAX CTC beam search * Cast sequence lengths to integers in JAX ctc_decode * Remove line break in docstring * CTC beam search decoding for JAX * Fix unhandled token repetitions in ctc_beam_search_decode * Fix merge_repeated bug in CTC beam search decode * Fix beam storage and repetition bugs in JAX ctc_decode * Fix ordering bug for ties in JAX CTC beam search * Generate public api directory * Add not implemented errors for NumPy and Torch CTC decoding * Remove unused redefinition of JAX ctc_beam_search_decode --- keras/api/_tf_keras/keras/ops/__init__.py | 1 + keras/api/_tf_keras/keras/ops/nn/__init__.py | 1 + keras/api/ops/__init__.py | 1 + keras/api/ops/nn/__init__.py | 1 + keras/src/backend/jax/nn.py | 233 +++++++++++++++++++ keras/src/backend/numpy/nn.py | 14 ++ keras/src/backend/tensorflow/nn.py | 58 +++++ keras/src/backend/torch/nn.py | 14 ++ keras/src/ops/nn.py | 60 +++++ keras/src/ops/nn_test.py | 61 +++++ 10 files changed, 444 insertions(+) diff --git a/keras/api/_tf_keras/keras/ops/__init__.py b/keras/api/_tf_keras/keras/ops/__init__.py index 08222c5cf78d..ba5c5179db5a 100644 --- a/keras/api/_tf_keras/keras/ops/__init__.py +++ b/keras/api/_tf_keras/keras/ops/__init__.py @@ -56,6 +56,7 @@ from keras.src.ops.nn import categorical_crossentropy from keras.src.ops.nn import conv from keras.src.ops.nn import conv_transpose +from keras.src.ops.nn import ctc_decode from keras.src.ops.nn import ctc_loss from keras.src.ops.nn import depthwise_conv from keras.src.ops.nn import elu diff --git a/keras/api/_tf_keras/keras/ops/nn/__init__.py b/keras/api/_tf_keras/keras/ops/nn/__init__.py index 9452ea18a766..61efc22a5701 100644 --- a/keras/api/_tf_keras/keras/ops/nn/__init__.py +++ b/keras/api/_tf_keras/keras/ops/nn/__init__.py @@ -10,6 +10,7 @@ from keras.src.ops.nn import categorical_crossentropy from keras.src.ops.nn import conv from keras.src.ops.nn import conv_transpose +from keras.src.ops.nn import ctc_decode from keras.src.ops.nn import ctc_loss from keras.src.ops.nn import depthwise_conv from keras.src.ops.nn import elu 
diff --git a/keras/api/ops/__init__.py b/keras/api/ops/__init__.py index 08222c5cf78d..ba5c5179db5a 100644 --- a/keras/api/ops/__init__.py +++ b/keras/api/ops/__init__.py @@ -56,6 +56,7 @@ from keras.src.ops.nn import categorical_crossentropy from keras.src.ops.nn import conv from keras.src.ops.nn import conv_transpose +from keras.src.ops.nn import ctc_decode from keras.src.ops.nn import ctc_loss from keras.src.ops.nn import depthwise_conv from keras.src.ops.nn import elu diff --git a/keras/api/ops/nn/__init__.py b/keras/api/ops/nn/__init__.py index 9452ea18a766..61efc22a5701 100644 --- a/keras/api/ops/nn/__init__.py +++ b/keras/api/ops/nn/__init__.py @@ -10,6 +10,7 @@ from keras.src.ops.nn import categorical_crossentropy from keras.src.ops.nn import conv from keras.src.ops.nn import conv_transpose +from keras.src.ops.nn import ctc_decode from keras.src.ops.nn import ctc_loss from keras.src.ops.nn import depthwise_conv from keras.src.ops.nn import elu diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index 740c9b17e5a2..916ff885d6cd 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -659,3 +659,236 @@ def _iterate(prev, x): ) return -last_alpha_mask[jnp.arange(batch_size), target_length] + + +def ctc_greedy_decode( + inputs, + sequence_length, + merge_repeated=True, + mask_index=None, +): + inputs = jnp.array(inputs) + sequence_length = jnp.array(sequence_length, dtype=jnp.int32) + + if mask_index is None: + mask_index = inputs.shape[-1] - 1 + + indices = jnp.argmax(inputs, axis=-1) + scores = jnp.max(inputs, axis=-1) + + seqlen_mask = jnp.arange(inputs.shape[1])[None, :] + seqlen_mask = seqlen_mask >= sequence_length[:, None] + + if merge_repeated: + repeat = indices[:, 1:] == indices[:, :-1] + repeat = jnp.pad(repeat, ((0, 0), (1, 0))) + + indices = jnp.where(repeat, mask_index, indices) + else: + repeat = jnp.zeros_like(indices, dtype=bool) + + indices = jnp.where(seqlen_mask, mask_index, indices) + indices = [batch[batch != mask_index] for batch in indices] + max_len = max(len(batch) for batch in indices) + indices = jnp.array( + [jnp.pad(batch, (0, max_len - len(batch))) for batch in indices] + ) + + scores = jnp.where(seqlen_mask, 0.0, scores) + scores = -jnp.sum(scores, axis=1)[:, None] + + return [indices], scores + + +def ctc_beam_search_decode( + inputs, + sequence_length, + beam_width=100, + top_paths=1, + mask_index=None, +): + inputs = jnp.array(inputs) + sequence_length = jnp.array(sequence_length) + + batch_size, max_seq_len, num_classes = inputs.shape + inputs = jnn.log_softmax(inputs) + seqlen_mask = jnp.arange(max_seq_len)[None, :] >= sequence_length[:, None] + + if mask_index is None: + mask_index = num_classes - 1 + + # This is a workaround for the fact that jnp.argsort does not support + # the order parameter which is used to break ties when scores are equal. 
+ # For compatibility with the tensorflow implementation, we flip the inputs + # and the mask_index, and then flip the classes back to the correct indices + inputs = jnp.flip(inputs, axis=2) + mask_index = num_classes - mask_index - 1 + + _pad = -1 + + init_paths = jnp.full( + (batch_size, 2 * beam_width, max_seq_len), _pad, dtype=jnp.int32 + ) + + num_init_paths = jnp.min(jnp.array([num_classes, beam_width])) + max_classes = jnp.argsort(inputs[:, 0], axis=1)[:, -num_init_paths:] + init_classes = jnp.where(max_classes == mask_index, _pad, max_classes) + init_paths = init_paths.at[:, :num_init_paths, 0].set(init_classes) + + init_scores = ( + jnp.full((batch_size, 2 * beam_width), -jnp.inf) + .at[:, :num_init_paths] + .set(jnp.take_along_axis(inputs[:, 0], max_classes, axis=1)) + ) + init_masked = init_paths[:, :, 0] == _pad + + def _extend_paths(paths, scores, masked, x): + paths = jnp.repeat(paths, num_classes, axis=0) + scores = jnp.repeat(scores, num_classes) + masked = jnp.repeat(masked, num_classes) + + path_tail_index = jnp.argmax(paths == _pad, axis=1) + paths_arange = jnp.arange(2 * beam_width * num_classes) + path_tails = paths[paths_arange, path_tail_index - 1] + path_tails = jnp.where(path_tail_index == 0, _pad, path_tails) + + classes = jnp.arange(num_classes).at[mask_index].set(_pad) + classes = jnp.tile(classes, 2 * beam_width) + + prev_masked = masked + masked = classes == _pad + + masked_repeat = ~prev_masked & (path_tails == classes) + classes = jnp.where(masked_repeat, _pad, classes) + paths = paths.at[paths_arange, path_tail_index].set(classes) + + x = jnp.tile(x, 2 * beam_width) + scores = scores + x + + return paths, scores, masked + + def _merge_scores(unique_inverse, scores): + scores_max = jnp.max(scores) + scores_exp = jnp.exp(scores - scores_max) + scores = jnp.zeros_like(scores).at[unique_inverse].add(scores_exp) + scores = jnp.log(scores) + scores_max + return scores + + def _prune_paths(paths, scores, masked): + paths, unique_inverse = jnp.unique( + paths, + return_inverse=True, + size=2 * num_classes * beam_width, + axis=0, + fill_value=_pad, + ) + unique_inverse = jnp.squeeze(unique_inverse, axis=1) + + emit_scores = jnp.where(masked, -jnp.inf, scores) + mask_scores = jnp.where(masked, scores, -jnp.inf) + + emit_scores = _merge_scores(unique_inverse, emit_scores) + mask_scores = _merge_scores(unique_inverse, mask_scores) + + total_scores = jnp.logaddexp(emit_scores, mask_scores) + top_indices = jnp.argsort(total_scores)[-beam_width:] + + paths = paths[top_indices] + emit_scores = emit_scores[top_indices] + mask_scores = mask_scores[top_indices] + + paths = jnp.tile(paths, (2, 1)) + scores = jnp.concatenate([emit_scores, mask_scores]) + masked = jnp.concatenate( + [jnp.zeros(beam_width, bool), jnp.ones(beam_width, bool)] + ) + + return paths, scores, masked + + def _decode_step(paths, scores, masked, x): + paths, scores, masked = _extend_paths(paths, scores, masked, x) + paths, scores, masked = _prune_paths(paths, scores, masked) + return paths, scores, masked + + def _step(prev, x): + paths, scores, masked = prev + x, seqlen_mask = x + + paths, scores, masked = lax.cond( + seqlen_mask, + lambda paths, scores, masked, x: (paths, scores, masked), + _decode_step, + paths, + scores, + masked, + x, + ) + + return (paths, scores, masked), None + + def _decode_batch( + init_paths, init_scores, init_masked, inputs, seqlen_mask + ): + (paths, scores, masked), _ = lax.scan( + _step, + (init_paths, init_scores, init_masked), + (inputs[1:], seqlen_mask[1:]), + ) + + 
paths, unique_inverse = jnp.unique( + paths, + return_inverse=True, + size=2 * num_classes * beam_width, + axis=0, + fill_value=_pad, + ) + unique_inverse = jnp.squeeze(unique_inverse, axis=1) + scores = _merge_scores(unique_inverse, scores) + + top_indices = jnp.argsort(scores)[-top_paths:][::-1] + paths = paths[top_indices] + scores = scores[top_indices] + + return paths, scores + + paths, scores = jax.vmap(_decode_batch)( + init_paths, init_scores, init_masked, inputs, seqlen_mask + ) + + # convert classes back to the correct indices + paths = jnp.where(paths == _pad, _pad, num_classes - paths - 1) + + lengths = jnp.argmax(paths == _pad, axis=2) + lengths = jnp.max(lengths, axis=0) + paths = jnp.where(paths == _pad, 0, paths) + + paths = paths.transpose((1, 0, 2)) + paths = [path[:, :length] for path, length in zip(paths, lengths)] + + return paths, scores + + +def ctc_decode( + inputs, + sequence_length, + strategy, + beam_width=100, + top_paths=1, + merge_repeated=True, + mask_index=None, +): + if strategy == "greedy": + return ctc_greedy_decode( + inputs, + sequence_length, + merge_repeated=merge_repeated, + mask_index=mask_index, + ) + else: + return ctc_beam_search_decode( + inputs, + sequence_length, + beam_width=beam_width, + top_paths=top_paths, + mask_index=mask_index, + ) diff --git a/keras/src/backend/numpy/nn.py b/keras/src/backend/numpy/nn.py index 7dee370ef005..9e42d64536c2 100644 --- a/keras/src/backend/numpy/nn.py +++ b/keras/src/backend/numpy/nn.py @@ -615,3 +615,17 @@ def batch_normalization( res = res + offset return x * inv + res + + +def ctc_decode( + inputs, + sequence_length, + strategy, + beam_width=100, + top_paths=1, + merge_repeated=True, + mask_index=None, +): + raise NotImplementedError( + "NumPy backend does not yet support CTC decoding." + ) diff --git a/keras/src/backend/tensorflow/nn.py b/keras/src/backend/tensorflow/nn.py index f74976177b7a..296f7737c14a 100644 --- a/keras/src/backend/tensorflow/nn.py +++ b/keras/src/backend/tensorflow/nn.py @@ -828,3 +828,61 @@ def ctc_loss( blank_index=mask_index, logits_time_major=False, ) + + +def ctc_decode( + inputs, + sequence_length, + strategy, + beam_width=100, + top_paths=1, + merge_repeated=True, + mask_index=None, +): + """Decodes the output of a softmax using CTC (Connectionist Temporal + Classification). + + Arguments: + inputs: Tensor `(batch_size, max_length, num_classes)` containing the + output of the softmax. + sequence_length: Tensor `(batch_size,)` containing the sequence length + for each output sequence in the batch. + strategy: String for decoding strategy to use. Supported values + are "greedy" and "beam_search". + beam_width: Integer specifying the beam width to use for beam + search strategies. Ignored if `strategy` is "greedy". + top_paths: Integer specifying the number of top paths to return. + Ignored if `strategy` is "greedy". + merge_repeated: Boolean specifying whether to merge repeated labels + during decoding. + mask_index: Integer specifying the index of the blank label. + + Returns: + A tuple of a list of `SparseTensor` containing the decoded sequences + and a `Tensor` containing the negative of the sum of probability + logits (if strategy is `"greedy"`) or the log probability (if strategy + is `"beam_search"`) for each sequence. 
+ """ + inputs = tf.convert_to_tensor(inputs) + inputs = tf.transpose(inputs, (1, 0, 2)) + + sequence_length = tf.convert_to_tensor(sequence_length, dtype="int32") + if strategy == "greedy": + return tf.nn.ctc_greedy_decoder( + inputs=inputs, + sequence_length=sequence_length, + merge_repeated=merge_repeated, + blank_index=mask_index, + ) + elif strategy == "beam_search": + return tf.nn.ctc_beam_search_decoder( + inputs=inputs, + sequence_length=sequence_length, + beam_width=beam_width, + top_paths=top_paths, + ) + else: + raise ValueError( + f"Invalid strategy {strategy}. Supported values are " + "'greedy' and 'beam_search'." + ) diff --git a/keras/src/backend/torch/nn.py b/keras/src/backend/torch/nn.py index a5cbaab3ea47..d20f73b95ac2 100644 --- a/keras/src/backend/torch/nn.py +++ b/keras/src/backend/torch/nn.py @@ -763,3 +763,17 @@ def ctc_loss( blank=mask_index, reduction="none", ) + + +def ctc_decode( + inputs, + sequence_length, + strategy, + beam_width=100, + top_paths=1, + merge_repeated=True, + mask_index=None, +): + raise NotImplementedError( + "Torch backend does not yet support CTC decoding." + ) diff --git a/keras/src/ops/nn.py b/keras/src/ops/nn.py index 4ec642b018a6..06ae7af6452d 100644 --- a/keras/src/ops/nn.py +++ b/keras/src/ops/nn.py @@ -1873,6 +1873,66 @@ def ctc_loss(target, output, target_length, output_length, mask_index=0): ) +@keras_export( + [ + "keras.ops.ctc_decode", + "keras.ops.nn.ctc_decode", + ] +) +def ctc_decode( + inputs, + sequence_lengths, + strategy, + beam_width=100, + top_paths=1, + merge_repeated=True, + mask_index=None, +): + """Decodes the output of a CTC network. + + Args: + inputs: A tensor of shape `(batch_size, max_length, num_classes)` + containing the logits (output of the model). + sequence_lengths: A tensor of shape `(batch_size,)` containing the + sequence lengths for the batch. + strategy: A string for the decoding strategy. Supported values are + `"greedy"` and `"beam_search"`. + beam_width: An integer scalar beam width used in beam search. + Defaults to `100`. + top_paths: An integer scalar, the number of top paths to return. + Defaults to `1`. + merge_repeated: A boolean scalar, whether to merge repeated + labels in the output. Defaults to `True`. + mask_index: An integer scalar, the index of the mask character in + the vocabulary. Defaults to `None`. + + Returns: + A tuple containing: + + - a list of decoded sequences. + - a list of the negative of the sum of the probability logits + (if strategy is `"greedy"`) or the log probability (if strategy is + `"beam_search"`) for each sequence. + """ + + if any_symbolic_tensors((inputs, sequence_lengths)): + raise NotImplementedError( + "CTC decoding is not supported with KerasTensors. Use it " + "inside the call() method of a Layer or the predict_step " + "method of a model." 
+ ) + + return backend.nn.ctc_decode( + inputs=inputs, + sequence_length=sequence_lengths, + strategy=strategy, + beam_width=beam_width, + top_paths=top_paths, + merge_repeated=merge_repeated, + mask_index=mask_index, + ) + + class Normalize(Operation): def __init__(self, axis=-1, order=2): super().__init__() diff --git a/keras/src/ops/nn_test.py b/keras/src/ops/nn_test.py index 0e13aac7163a..3d155e2c973f 100644 --- a/keras/src/ops/nn_test.py +++ b/keras/src/ops/nn_test.py @@ -1903,6 +1903,67 @@ def test_ctc_loss(self): result = knn.ctc_loss(labels, outputs, label_length, output_length) self.assertAllClose(result, np.array([3.4411672, 1.91680186])) + @pytest.mark.skipif( + backend.backend() not in ["tensorflow", "jax"], + reason="CTC decode only supported for TF and JAX backends", + ) + def test_ctc_decode(self): + inputs = np.array( + [ + [ + [0.1, 0.4, 0.2, 0.4], + [0.3, 0.3, 0.4, 0.2], + [0.3, 0.2, 0.4, 0.3], + ], + [ + [0.1, 0.4, 0.7, 0.2], + [0.3, 0.3, 0.4, 0.1], + [0.2, 0.1, 0.1, 0.5], + ], + [ + [0.1, 0.4, 0.2, 0.7], + [0.3, 0.3, 0.2, 0.7], + [0.3, 0.2, 0.4, 0.1], + ], + ] + ) + labels = np.array([[1, 2], [2, 0], [0, 0]]) + score_labels = np.array([[-1.2], [-1.6], [-0.7]]) + + (decoded,), scores = knn.ctc_decode( + inputs, sequence_lengths=[3, 3, 1], strategy="greedy" + ) + + self.assertAllClose(decoded, labels) + self.assertAllClose(scores, score_labels) + + labels = [ + np.array([[1, 2], [2, 0], [0, 0]]), + np.array([[2, 0], [2, 0], [1, 0]]), + ] + score_labels = np.array( + [ + [-2.33578291, -2.44335217], + [-2.22499622, -2.25768432], + [-1.0633859, -1.3633859], + ] + ) + + beam_width = 4 + top_paths = 2 + + decoded, scores = knn.ctc_decode( + inputs, + sequence_lengths=[3, 3, 1], + strategy="beam_search", + beam_width=beam_width, + top_paths=top_paths, + ) + + for i in range(top_paths): + self.assertAllClose(decoded[i], labels[i]) + self.assertAllClose(scores, score_labels) + def test_normalize(self): x = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32) self.assertAllClose( From 6f45089e9c8211a9b587798247ccf6bfa8c7c029 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Sat, 20 Apr 2024 10:13:01 -0700 Subject: [PATCH 042/101] Docstring edits --- keras/src/backend/tensorflow/nn.py | 43 ------------------------------ keras/src/ops/nn.py | 10 +++---- 2 files changed, 5 insertions(+), 48 deletions(-) diff --git a/keras/src/backend/tensorflow/nn.py b/keras/src/backend/tensorflow/nn.py index 296f7737c14a..454ce5f7c275 100644 --- a/keras/src/backend/tensorflow/nn.py +++ b/keras/src/backend/tensorflow/nn.py @@ -797,25 +797,6 @@ def ctc_loss( output_length, mask_index=0, ): - """Runs CTC (Connectionist Temporal Classification) loss on each - batch element. - - Arguments: - target: Tensor `(batch_size, max_length)` containing the - target sequences in integer format. - output: Tensor `(batch_size, max_length, num_classes)` - containing the output of the softmax. - target_length: Tensor `(batch_size,)` containing the sequence length - for each target sequence in the batch. - output_length: Tensor `(batch_size,)` containing the sequence length - for each output sequence in the batch. - mask_index: The value in `target` and `output` that represents the - blank label. - - Returns: - A tensor of shape `(batch_size,)` containing the CTC loss for each - sample in the batch. 
- """ target = tf.convert_to_tensor(target) target = tf.cast(target, dtype="int32") output = tf.convert_to_tensor(output) @@ -839,30 +820,6 @@ def ctc_decode( merge_repeated=True, mask_index=None, ): - """Decodes the output of a softmax using CTC (Connectionist Temporal - Classification). - - Arguments: - inputs: Tensor `(batch_size, max_length, num_classes)` containing the - output of the softmax. - sequence_length: Tensor `(batch_size,)` containing the sequence length - for each output sequence in the batch. - strategy: String for decoding strategy to use. Supported values - are "greedy" and "beam_search". - beam_width: Integer specifying the beam width to use for beam - search strategies. Ignored if `strategy` is "greedy". - top_paths: Integer specifying the number of top paths to return. - Ignored if `strategy` is "greedy". - merge_repeated: Boolean specifying whether to merge repeated labels - during decoding. - mask_index: Integer specifying the index of the blank label. - - Returns: - A tuple of a list of `SparseTensor` containing the decoded sequences - and a `Tensor` containing the negative of the sum of probability - logits (if strategy is `"greedy"`) or the log probability (if strategy - is `"beam_search"`) for each sequence. - """ inputs = tf.convert_to_tensor(inputs) inputs = tf.transpose(inputs, (1, 0, 2)) diff --git a/keras/src/ops/nn.py b/keras/src/ops/nn.py index 06ae7af6452d..71af40e82384 100644 --- a/keras/src/ops/nn.py +++ b/keras/src/ops/nn.py @@ -1888,7 +1888,7 @@ def ctc_decode( merge_repeated=True, mask_index=None, ): - """Decodes the output of a CTC network. + """Decodes the output of a CTC model. Args: inputs: A tensor of shape `(batch_size, max_length, num_classes)` @@ -1898,9 +1898,9 @@ def ctc_decode( strategy: A string for the decoding strategy. Supported values are `"greedy"` and `"beam_search"`. beam_width: An integer scalar beam width used in beam search. - Defaults to `100`. + Defaults to 100. top_paths: An integer scalar, the number of top paths to return. - Defaults to `1`. + Defaults to 1. merge_repeated: A boolean scalar, whether to merge repeated labels in the output. Defaults to `True`. mask_index: An integer scalar, the index of the mask character in @@ -1909,8 +1909,8 @@ def ctc_decode( Returns: A tuple containing: - - a list of decoded sequences. - - a list of the negative of the sum of the probability logits + - A list of decoded sequences. + - A list of the negative of the sum of the probability logits (if strategy is `"greedy"`) or the log probability (if strategy is `"beam_search"`) for each sequence. """ From b44d810bf4c7fd9596467f03e6cd538340515d8f Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Sat, 20 Apr 2024 11:39:49 -0700 Subject: [PATCH 043/101] Expand nan_to_num args. 
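
A short sketch of the expanded signature (the expected values below come from the new unit test; by default NaN maps to 0 and the infinities map to the dtype's max and min):

```python
from keras import ops

x = ops.convert_to_tensor([1.0, float("nan"), float("inf"), -float("inf")])

ops.nan_to_num(x)  # -> [1., 0., 3.402823e38, -3.402823e38] for float32
ops.nan_to_num(x, nan=2.0, posinf=3.0, neginf=4.0)  # -> [1., 2., 3., 4.]
```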
--- keras/src/backend/jax/numpy.py | 4 ++-- keras/src/backend/numpy/numpy.py | 4 ++-- keras/src/backend/tensorflow/numpy.py | 18 +++++++++++------- keras/src/backend/torch/numpy.py | 4 ++-- keras/src/ops/numpy.py | 24 +++++++++++++++++++++--- keras/src/ops/numpy_test.py | 20 ++++++++++++++++++++ 6 files changed, 58 insertions(+), 16 deletions(-) diff --git a/keras/src/backend/jax/numpy.py b/keras/src/backend/jax/numpy.py index eae67ba65881..7b8b40330f58 100644 --- a/keras/src/backend/jax/numpy.py +++ b/keras/src/backend/jax/numpy.py @@ -768,9 +768,9 @@ def moveaxis(x, source, destination): return jnp.moveaxis(x, source=source, destination=destination) -def nan_to_num(x): +def nan_to_num(x, nan=0.0, posinf=None, neginf=None): x = convert_to_tensor(x) - return jnp.nan_to_num(x) + return jnp.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) def ndim(x): diff --git a/keras/src/backend/numpy/numpy.py b/keras/src/backend/numpy/numpy.py index ecbd201c4e43..50e1f610113e 100644 --- a/keras/src/backend/numpy/numpy.py +++ b/keras/src/backend/numpy/numpy.py @@ -688,8 +688,8 @@ def moveaxis(x, source, destination): return np.moveaxis(x, source=source, destination=destination) -def nan_to_num(x): - return np.nan_to_num(x) +def nan_to_num(x, nan=0.0, posinf=None, neginf=None): + return np.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) def ndim(x): diff --git a/keras/src/backend/tensorflow/numpy.py b/keras/src/backend/tensorflow/numpy.py index 3712fb3bf84d..af2c10ea94bb 100644 --- a/keras/src/backend/tensorflow/numpy.py +++ b/keras/src/backend/tensorflow/numpy.py @@ -1531,7 +1531,7 @@ def moveaxis(x, source, destination): return tf.transpose(x, perm) -def nan_to_num(x): +def nan_to_num(x, nan=0.0, posinf=None, neginf=None): x = convert_to_tensor(x) dtype = x.dtype @@ -1539,14 +1539,18 @@ def nan_to_num(x): if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric: return x - # Replace NaN with 0 - x = tf.where(tf.math.is_nan(x), tf.constant(0, dtype), x) + # Replace NaN with `nan` + x = tf.where(tf.math.is_nan(x), tf.constant(nan, dtype), x) - # Replace positive infinity with dtype.max - x = tf.where(tf.math.is_inf(x) & (x > 0), tf.constant(dtype.max, dtype), x) + # Replace positive infinity with `posinf` or `dtype.max` + if posinf is None: + posinf = dtype.max + x = tf.where(tf.math.is_inf(x) & (x > 0), tf.constant(posinf, dtype), x) - # Replace negative infinity with dtype.min - x = tf.where(tf.math.is_inf(x) & (x < 0), tf.constant(dtype.min, dtype), x) + # Replace negative infinity with `neginf` or `dtype.min` + if neginf is None: + neginf = dtype.min + x = tf.where(tf.math.is_inf(x) & (x < 0), tf.constant(neginf, dtype), x) return x diff --git a/keras/src/backend/torch/numpy.py b/keras/src/backend/torch/numpy.py index 56cf96140783..632594de2f64 100644 --- a/keras/src/backend/torch/numpy.py +++ b/keras/src/backend/torch/numpy.py @@ -994,9 +994,9 @@ def moveaxis(x, source, destination): return torch.moveaxis(x, source=source, destination=destination) -def nan_to_num(x): +def nan_to_num(x, nan=0.0, posinf=None, neginf=None): x = convert_to_tensor(x) - return torch.nan_to_num(x) + return torch.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) def ndim(x): diff --git a/keras/src/ops/numpy.py b/keras/src/ops/numpy.py index 9e902d430452..c312740d1ff9 100644 --- a/keras/src/ops/numpy.py +++ b/keras/src/ops/numpy.py @@ -3955,8 +3955,19 @@ def moveaxis(x, source, destination): class NanToNum(Operation): + def __init__(self, nan=0.0, posinf=None, neginf=None): + super().__init__() + self.nan = 
nan + self.posinf = posinf + self.neginf = neginf + def call(self, x): - return backend.numpy.nan_to_num(x) + return backend.numpy.nan_to_num( + x, nan=self.nan, posinf=self.posinf, neginf=self.neginf + ) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype=x.dtype) @keras_export( @@ -3965,16 +3976,23 @@ def call(self, x): "keras.ops.numpy.nan_to_num", ] ) -def nan_to_num(x): +def nan_to_num(x, nan=0.0, posinf=None, neginf=None): """Replace NaN with zero and infinity with large finite numbers. Args: x: Input data. + nan: Optional float or int. Value to replace `NaN` entries with. + posinf: Optional float or int. + Value to replace positive infinity with. + neginf: Optional float or int. + Value to replace negative infinity with. Returns: `x`, with non-finite values replaced. """ - return backend.numpy.nan_to_num(x) + if any_symbolic_tensors((x,)): + return NanToNum(nan=nan, posinf=posinf, neginf=neginf).symbolic_call(x) + return backend.numpy.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) class Ndim(Operation): diff --git a/keras/src/ops/numpy_test.py b/keras/src/ops/numpy_test.py index 007d75a49be2..1322d3549b02 100644 --- a/keras/src/ops/numpy_test.py +++ b/keras/src/ops/numpy_test.py @@ -4335,6 +4335,26 @@ def test_tri(self): # Test k > M self.assertAllClose(knp.Tri(k=4)(3), np.tri(3, k=4)) + def test_nan_to_num(self): + x = knp.array([1.0, np.nan, np.inf, -np.inf]) + self.assertAllClose( + knp.nan_to_num(x), [1.0, 0.0, 3.402823e38, -3.402823e38] + ) + self.assertAllClose( + knp.NanToNum()(x), [1.0, 0.0, 3.402823e38, -3.402823e38] + ) + self.assertAllClose( + knp.nan_to_num(x, nan=2, posinf=3, neginf=4), [1.0, 2.0, 3.0, 4.0] + ) + self.assertAllClose( + knp.NanToNum(nan=2, posinf=3, neginf=4)(x), [1.0, 2.0, 3.0, 4.0] + ) + + x = backend.KerasTensor((3, 4)) + self.assertEqual( + knp.NanToNum(nan=2, posinf=3, neginf=4)(x).shape, (3, 4) + ) + def create_sparse_tensor(x, indices_from=None, start=0, delta=2): if indices_from is not None: From 36d24c9d36e6d6c1dfa1647c96755bd7577df329 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Sat, 20 Apr 2024 12:17:59 -0700 Subject: [PATCH 044/101] Add vectorize op. 
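
A usage sketch mirroring the docstring example added below (illustrative; the TensorFlow and Torch wrappers in this commit only vectorize over a single tensor argument, and a later commit in this series reworks them through a shared `vectorize_impl` with `excluded`/`signature` support):

```python
from keras import ops

def myfunc(a, b):
    return a + b

# Maps `myfunc` over the leading (batch) axis of its inputs.
vfunc = ops.vectorize(myfunc)
y = vfunc([1, 2, 3, 4], 2)  # -> [3, 4, 5, 6]
```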
--- keras/api/_tf_keras/keras/ops/__init__.py | 1 + .../api/_tf_keras/keras/ops/numpy/__init__.py | 1 + keras/api/ops/__init__.py | 1 + keras/api/ops/numpy/__init__.py | 1 + keras/src/backend/jax/numpy.py | 4 ++ keras/src/backend/numpy/numpy.py | 4 ++ keras/src/backend/tensorflow/numpy.py | 8 ++++ keras/src/backend/torch/numpy.py | 9 ++++ keras/src/ops/numpy.py | 29 +++++++++++ keras/src/ops/numpy_test.py | 48 +++++++++++-------- 10 files changed, 86 insertions(+), 20 deletions(-) diff --git a/keras/api/_tf_keras/keras/ops/__init__.py b/keras/api/_tf_keras/keras/ops/__init__.py index ba5c5179db5a..dec15361c0ae 100644 --- a/keras/api/_tf_keras/keras/ops/__init__.py +++ b/keras/api/_tf_keras/keras/ops/__init__.py @@ -220,6 +220,7 @@ from keras.src.ops.numpy import true_divide from keras.src.ops.numpy import var from keras.src.ops.numpy import vdot +from keras.src.ops.numpy import vectorize from keras.src.ops.numpy import vstack from keras.src.ops.numpy import where from keras.src.ops.numpy import zeros diff --git a/keras/api/_tf_keras/keras/ops/numpy/__init__.py b/keras/api/_tf_keras/keras/ops/numpy/__init__.py index e0fa7488feb0..7cb19b551198 100644 --- a/keras/api/_tf_keras/keras/ops/numpy/__init__.py +++ b/keras/api/_tf_keras/keras/ops/numpy/__init__.py @@ -141,6 +141,7 @@ from keras.src.ops.numpy import true_divide from keras.src.ops.numpy import var from keras.src.ops.numpy import vdot +from keras.src.ops.numpy import vectorize from keras.src.ops.numpy import vstack from keras.src.ops.numpy import where from keras.src.ops.numpy import zeros diff --git a/keras/api/ops/__init__.py b/keras/api/ops/__init__.py index ba5c5179db5a..dec15361c0ae 100644 --- a/keras/api/ops/__init__.py +++ b/keras/api/ops/__init__.py @@ -220,6 +220,7 @@ from keras.src.ops.numpy import true_divide from keras.src.ops.numpy import var from keras.src.ops.numpy import vdot +from keras.src.ops.numpy import vectorize from keras.src.ops.numpy import vstack from keras.src.ops.numpy import where from keras.src.ops.numpy import zeros diff --git a/keras/api/ops/numpy/__init__.py b/keras/api/ops/numpy/__init__.py index e0fa7488feb0..7cb19b551198 100644 --- a/keras/api/ops/numpy/__init__.py +++ b/keras/api/ops/numpy/__init__.py @@ -141,6 +141,7 @@ from keras.src.ops.numpy import true_divide from keras.src.ops.numpy import var from keras.src.ops.numpy import vdot +from keras.src.ops.numpy import vectorize from keras.src.ops.numpy import vstack from keras.src.ops.numpy import where from keras.src.ops.numpy import zeros diff --git a/keras/src/backend/jax/numpy.py b/keras/src/backend/jax/numpy.py index 7b8b40330f58..4e2ec5c115bd 100644 --- a/keras/src/backend/jax/numpy.py +++ b/keras/src/backend/jax/numpy.py @@ -1023,6 +1023,10 @@ def vstack(xs): return jnp.vstack(xs) +def vectorize(pyfunc): + return jnp.vectorize(pyfunc) + + def where(condition, x1, x2): return jnp.where(condition, x1, x2) diff --git a/keras/src/backend/numpy/numpy.py b/keras/src/backend/numpy/numpy.py index 50e1f610113e..176c3842108a 100644 --- a/keras/src/backend/numpy/numpy.py +++ b/keras/src/backend/numpy/numpy.py @@ -937,6 +937,10 @@ def vstack(xs): return np.vstack(xs) +def vectorize(pyfunc): + return np.vectorize(pyfunc) + + def where(condition, x1, x2): if x1 is not None and x2 is not None: if not isinstance(x1, (int, float)): diff --git a/keras/src/backend/tensorflow/numpy.py b/keras/src/backend/tensorflow/numpy.py index af2c10ea94bb..aa82c6726ace 100644 --- a/keras/src/backend/tensorflow/numpy.py +++ b/keras/src/backend/tensorflow/numpy.py @@ -2156,6 
+2156,14 @@ def vstack(xs): return tf.concat(xs, axis=0) +def vectorize(pyfunc): + @functools.wraps(pyfunc) + def wrapped(x): + return tf.vectorized_map(pyfunc, x) + + return wrapped + + def where(condition, x1, x2): condition = tf.cast(condition, "bool") if x1 is not None and x2 is not None: diff --git a/keras/src/backend/torch/numpy.py b/keras/src/backend/torch/numpy.py index 632594de2f64..8948f66b0b83 100644 --- a/keras/src/backend/torch/numpy.py +++ b/keras/src/backend/torch/numpy.py @@ -1,4 +1,5 @@ import builtins +import functools import math import torch @@ -1412,6 +1413,14 @@ def vstack(xs): return torch.vstack(xs) +def vectorize(pyfunc): + @functools.wraps(pyfunc) + def wrapped(x): + return torch.vmap(pyfunc, x) + + return wrapped + + def where(condition, x1, x2): condition = convert_to_tensor(condition, dtype=bool) if x1 is not None and x2 is not None: diff --git a/keras/src/ops/numpy.py b/keras/src/ops/numpy.py index c312740d1ff9..f3f367674758 100644 --- a/keras/src/ops/numpy.py +++ b/keras/src/ops/numpy.py @@ -5415,6 +5415,35 @@ def vdot(x1, x2): return backend.numpy.vdot(x1, x2) +@keras_export(["keras.ops.vectorize", "keras.ops.numpy.vectorize"]) +def vectorize(pyfunc): + """Turn a function into a vectorized function. + + Example: + + ```python + def myfunc(a, b): + return a + b + + vfunc = np.vectorize(myfunc) + y = vfunc([1, 2, 3, 4], 2) # Returns Tensor([3, 4, 5, 6]) + ``` + + Args: + pyfunc: Callable of a single tensor argument. + + Returns: + A new function that applies `pyfunc` to every element + of its input along axis 0 (the batch axis). + """ + if not callable(pyfunc): + raise ValueError( + "Expected argument `pyfunc` to be a callable. " + f"Received: pyfunc={pyfunc}" + ) + return backend.numpy.vectorize(pyfunc) + + class Vstack(Operation): def call(self, xs): return backend.numpy.vstack(xs) diff --git a/keras/src/ops/numpy_test.py b/keras/src/ops/numpy_test.py index 1322d3549b02..ed69eeb1544e 100644 --- a/keras/src/ops/numpy_test.py +++ b/keras/src/ops/numpy_test.py @@ -4250,6 +4250,34 @@ def test_select(self): y = knp.select(condlist, choicelist, 42) self.assertEqual(y.shape, (6,)) + def test_nan_to_num(self): + x = knp.array([1.0, np.nan, np.inf, -np.inf]) + self.assertAllClose( + knp.nan_to_num(x), [1.0, 0.0, 3.402823e38, -3.402823e38] + ) + self.assertAllClose( + knp.NanToNum()(x), [1.0, 0.0, 3.402823e38, -3.402823e38] + ) + self.assertAllClose( + knp.nan_to_num(x, nan=2, posinf=3, neginf=4), [1.0, 2.0, 3.0, 4.0] + ) + self.assertAllClose( + knp.NanToNum(nan=2, posinf=3, neginf=4)(x), [1.0, 2.0, 3.0, 4.0] + ) + + x = backend.KerasTensor((3, 4)) + self.assertEqual( + knp.NanToNum(nan=2, posinf=3, neginf=4)(x).shape, (3, 4) + ) + + def test_vectorize(self): + def myfunc(a, b): + return a + b + + vfunc = np.vectorize(myfunc) + y = vfunc([1, 2, 3, 4], 2) + self.assertAllClose(y, [3, 4, 5, 6]) + class NumpyArrayCreateOpsCorrectnessTest(testing.TestCase): def test_ones(self): @@ -4335,26 +4363,6 @@ def test_tri(self): # Test k > M self.assertAllClose(knp.Tri(k=4)(3), np.tri(3, k=4)) - def test_nan_to_num(self): - x = knp.array([1.0, np.nan, np.inf, -np.inf]) - self.assertAllClose( - knp.nan_to_num(x), [1.0, 0.0, 3.402823e38, -3.402823e38] - ) - self.assertAllClose( - knp.NanToNum()(x), [1.0, 0.0, 3.402823e38, -3.402823e38] - ) - self.assertAllClose( - knp.nan_to_num(x, nan=2, posinf=3, neginf=4), [1.0, 2.0, 3.0, 4.0] - ) - self.assertAllClose( - knp.NanToNum(nan=2, posinf=3, neginf=4)(x), [1.0, 2.0, 3.0, 4.0] - ) - - x = backend.KerasTensor((3, 4)) - self.assertEqual( 
- knp.NanToNum(nan=2, posinf=3, neginf=4)(x).shape, (3, 4) - ) - def create_sparse_tensor(x, indices_from=None, start=0, delta=2): if indices_from is not None: From 5021ab79ac2ac30dfa85d9484ab8edab9cab9a88 Mon Sep 17 00:00:00 2001 From: AlexanderLavelle <73360008+AlexanderLavelle@users.noreply.github.com> Date: Sat, 20 Apr 2024 19:24:12 -0400 Subject: [PATCH 045/101] list insert requires index (#19575) --- keras/src/utils/tracking.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/keras/src/utils/tracking.py b/keras/src/utils/tracking.py index 02678de336a0..d24cfc3836a6 100644 --- a/keras/src/utils/tracking.py +++ b/keras/src/utils/tracking.py @@ -146,10 +146,10 @@ def append(self, value): self.tracker.track(value) super().append(value) - def insert(self, value): + def insert(self, index, value): if self.tracker: self.tracker.track(value) - super().insert(value) + super().insert(index, value) def extend(self, values): if self.tracker: From a6dfcbc0b329150e5c14e3b1ad01b695a797320c Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Sun, 21 Apr 2024 10:38:30 -0700 Subject: [PATCH 046/101] Add signature and exclude args to knp.vectorize. --- keras/src/backend/common/backend_utils.py | 223 ++++++++++++++++++++++ keras/src/backend/jax/numpy.py | 4 +- keras/src/backend/numpy/numpy.py | 4 +- keras/src/backend/tensorflow/numpy.py | 18 +- keras/src/backend/torch/numpy.py | 12 +- keras/src/ops/numpy.py | 17 +- keras/src/ops/numpy_test.py | 14 ++ 7 files changed, 276 insertions(+), 16 deletions(-) diff --git a/keras/src/backend/common/backend_utils.py b/keras/src/backend/common/backend_utils.py index 1d005be50b71..4be0d75d5f27 100644 --- a/keras/src/backend/common/backend_utils.py +++ b/keras/src/backend/common/backend_utils.py @@ -1,4 +1,6 @@ +import functools import operator +import re import warnings @@ -288,3 +290,224 @@ def to_tuple_or_list(value): if isinstance(value, int): return (value,) return value + + +### Code for ops.vectorize() used for TF and torch backends. 
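+# For example, the gufunc signature "(m,n),(n)->(m)" describes vectorized
+# matrix-vector multiplication: the inputs carry trailing core dimensions
+# (m, n) and (n,), the output carries a trailing (m,), and any leading
+# dimensions are treated as broadcast batch dimensions.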
+ +# See http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html +_DIMENSION_NAME = r"\w+" +_CORE_DIMENSION_LIST = "(?:{0:}(?:,{0:})*)?".format(_DIMENSION_NAME) +_ARGUMENT = rf"\({_CORE_DIMENSION_LIST}\)" +_ARGUMENT_LIST = "{0:}(?:,{0:})*".format(_ARGUMENT) +_SIGNATURE = "^{0:}->{0:}$".format(_ARGUMENT_LIST) + + +def _vectorize_parse_gufunc_signature( + signature, +): + if not re.match(_SIGNATURE, signature): + raise ValueError(f"not a valid gufunc signature: {signature}") + args, retvals = ( + [ + tuple(re.findall(_DIMENSION_NAME, arg)) + for arg in re.findall(_ARGUMENT, arg_list) + ] + for arg_list in signature.split("->") + ) + return args, retvals + + +def _vectorize_update_dim_sizes(dim_sizes, shape, core_dims, is_input=True): + num_core_dims = len(core_dims) + if is_input: + if len(shape) < num_core_dims: + raise ValueError( + f"input with shape {shape} does not " + "have enough dimensions for all core " + f"dimensions {core_dims}" + ) + else: + if len(shape) != num_core_dims: + raise ValueError( + f"output shape {shape} does not " + f"match core dimensions {core_dims}" + ) + + core_shape = shape[-num_core_dims:] if core_dims else () + for dim, size in zip(core_dims, core_shape): + if dim not in dim_sizes: + dim_sizes[dim] = size + elif size != dim_sizes[dim]: + raise ValueError( + f"inconsistent size for core dimension {dim}: " + f"{size} vs {dim_sizes[dim]}" + ) + + +def _vectorize_parse_input_dimensions( + args, + input_core_dims, +): + from keras.src import ops + + if len(args) != len(input_core_dims): + raise TypeError( + "wrong number of positional arguments: " + f"expected {len(input_core_dims)}, got {len(args)}" + ) + shapes = [] + dim_sizes: dict[str, int] = {} + for arg, core_dims in zip(args, input_core_dims): + _vectorize_update_dim_sizes( + dim_sizes, arg.shape, core_dims, is_input=True + ) + ndim = arg.ndim - len(core_dims) + shapes.append(arg.shape[:ndim]) + broadcast_shape = shapes[0] + for s in shapes: + broadcast_shape = ops.broadcast_shapes(broadcast_shape, s) + return broadcast_shape, dim_sizes + + +def _vectorize_check_output_dims( + func, + dim_sizes, + expected_output_core_dims, +): + from keras.src import ops + + def wrapped(*args): + out = func(*args) + if isinstance(out, (list, tuple)): + out_shapes = [ops.shape(x) for x in out] + else: + out_shapes = [out.shape] + + if expected_output_core_dims is None: + output_core_dims = [()] * len(out_shapes) + else: + output_core_dims = expected_output_core_dims + if len(output_core_dims) > 1 and not isinstance(out, tuple): + raise TypeError( + "output must be a tuple when multiple outputs " + f"are expected, got: {out}" + ) + if len(out_shapes) != len(output_core_dims): + raise TypeError( + "wrong number of output arguments: " + f"expected {len(output_core_dims)}, got {len(out_shapes)}" + ) + + sizes = dict(dim_sizes) + for shape, core_dims in zip(out_shapes, output_core_dims): + _vectorize_update_dim_sizes(sizes, shape, core_dims, is_input=False) + + return out + + return wrapped + + +def _vectorize_apply_excluded(func, excluded, args, kwargs): + if not excluded: + return func, args, kwargs + + dynamic_args = [arg for i, arg in enumerate(args) if i not in excluded] + dynamic_kwargs = { + key: val for key, val in kwargs.items() if key not in excluded + } + static_args = [ + (i, args[i]) + for i in sorted(e for e in excluded if isinstance(e, int)) + if i < len(args) + ] + static_kwargs = {key: val for key, val in kwargs.items() if key in excluded} + + def new_func(*args, **kwargs): + args = list(args) + 
for i, arg in static_args: + args.insert(i, arg) + return func(*args, **kwargs, **static_kwargs) + + return new_func, dynamic_args, dynamic_kwargs + + +def vectorize_impl(pyfunc, vmap_fn, *, excluded=None, signature=None): + """Implementation adapted from JAX and NumPy.""" + + from keras.src import ops + + excluded = None or set() + + @functools.wraps(pyfunc) + def wrapped(*args, **kwargs): + excluded_func, args, kwargs = _vectorize_apply_excluded( + pyfunc, excluded, args, kwargs + ) + + if signature is not None: + input_core_dims, output_core_dims = ( + _vectorize_parse_gufunc_signature(signature) + ) + else: + input_core_dims = [()] * len(args) + output_core_dims = None + + none_args = {i for i, arg in enumerate(args) if arg is None} + if any(none_args): + if any(input_core_dims[i] != () for i in none_args): + raise ValueError( + f"Cannot pass None at locations {none_args} " + f"with signature={signature}" + ) + excluded_func, args, _ = _vectorize_apply_excluded( + excluded_func, none_args, args, {} + ) + input_core_dims = [ + dim + for i, dim in enumerate(input_core_dims) + if i not in none_args + ] + + args = tuple(map(ops.convert_to_tensor, args)) + + broadcast_shape, dim_sizes = _vectorize_parse_input_dimensions( + args, input_core_dims + ) + checked_func = _vectorize_check_output_dims( + excluded_func, dim_sizes, output_core_dims + ) + squeezed_args = [] + rev_filled_shapes = [] + for arg, core_dims in zip(args, input_core_dims): + noncore_shape = arg.shape[: arg.ndim - len(core_dims)] + + pad_ndim = len(broadcast_shape) - len(noncore_shape) + filled_shape = pad_ndim * (1,) + noncore_shape + rev_filled_shapes.append(filled_shape[::-1]) + + squeeze_indices = tuple( + i for i, size in enumerate(noncore_shape) if size == 1 + ) + squeezed_arg = ops.squeeze(arg, axis=squeeze_indices) + squeezed_args.append(squeezed_arg) + + vectorized_func = checked_func + dims_to_expand = [] + for negdim, axis_sizes in enumerate(zip(*rev_filled_shapes)): + in_axes = tuple(None if size == 1 else 0 for size in axis_sizes) + if all(axis is None for axis in in_axes): + dims_to_expand.append(len(broadcast_shape) - 1 - negdim) + else: + vectorized_func = vmap_fn(vectorized_func, in_axes) + result = vectorized_func(*squeezed_args) + + if not dims_to_expand: + return result + elif isinstance(result, tuple): + return tuple( + ops.expand_dims(r, axis=dims_to_expand) for r in result + ) + else: + return ops.expand_dims(result, axis=dims_to_expand) + + return wrapped diff --git a/keras/src/backend/jax/numpy.py b/keras/src/backend/jax/numpy.py index 4e2ec5c115bd..9a048b300770 100644 --- a/keras/src/backend/jax/numpy.py +++ b/keras/src/backend/jax/numpy.py @@ -1023,8 +1023,8 @@ def vstack(xs): return jnp.vstack(xs) -def vectorize(pyfunc): - return jnp.vectorize(pyfunc) +def vectorize(pyfunc, *, excluded=None, signature=None): + return jnp.vectorize(pyfunc, excluded=excluded, signature=signature) def where(condition, x1, x2): diff --git a/keras/src/backend/numpy/numpy.py b/keras/src/backend/numpy/numpy.py index 176c3842108a..f3090a4d6163 100644 --- a/keras/src/backend/numpy/numpy.py +++ b/keras/src/backend/numpy/numpy.py @@ -937,8 +937,8 @@ def vstack(xs): return np.vstack(xs) -def vectorize(pyfunc): - return np.vectorize(pyfunc) +def vectorize(pyfunc, *, excluded=None, signature=None): + return np.vectorize(pyfunc, excluded=excluded, signature=signature) def where(condition, x1, x2): diff --git a/keras/src/backend/tensorflow/numpy.py b/keras/src/backend/tensorflow/numpy.py index aa82c6726ace..f69b9883af06 100644 --- 
+++ b/keras/src/backend/tensorflow/numpy.py
@@ -15,6 +15,7 @@
 from keras.src.backend.common import dtypes
 from keras.src.backend.common.backend_utils import canonicalize_axis
 from keras.src.backend.common.backend_utils import to_tuple_or_list
+from keras.src.backend.common.backend_utils import vectorize_impl
 from keras.src.backend.tensorflow import sparse
 from keras.src.backend.tensorflow.core import cast
 from keras.src.backend.tensorflow.core import convert_to_tensor
@@ -2156,14 +2157,25 @@ def vstack(xs):
     return tf.concat(xs, axis=0)
 
 
-def vectorize(pyfunc):
-    @functools.wraps(pyfunc)
+def _vmap_fn(fn, in_axes=0):
+    if in_axes != 0:
+        raise ValueError(
+            "`vectorize()` with the TensorFlow backend requires `in_axes=0`."
+        )
+
+    @functools.wraps(fn)
     def wrapped(x):
-        return tf.vectorized_map(pyfunc, x)
+        return tf.vectorized_map(fn, x)
 
     return wrapped
 
 
+def vectorize(pyfunc, *, excluded=None, signature=None):
+    return vectorize_impl(
+        pyfunc, _vmap_fn, excluded=excluded, signature=signature
+    )
+
+
 def where(condition, x1, x2):
     condition = tf.cast(condition, "bool")
     if x1 is not None and x2 is not None:
diff --git a/keras/src/backend/torch/numpy.py b/keras/src/backend/torch/numpy.py
index 8948f66b0b83..9438ed07dac5 100644
--- a/keras/src/backend/torch/numpy.py
+++ b/keras/src/backend/torch/numpy.py
@@ -1,5 +1,4 @@
 import builtins
-import functools
 import math
 
 import torch
@@ -9,6 +8,7 @@
 from keras.src.backend.common import dtypes
 from keras.src.backend.common.backend_utils import canonicalize_axis
 from keras.src.backend.common.backend_utils import to_tuple_or_list
+from keras.src.backend.common.backend_utils import vectorize_impl
 from keras.src.backend.common.variables import standardize_dtype
 from keras.src.backend.torch.core import cast
 from keras.src.backend.torch.core import convert_to_tensor
@@ -1413,12 +1413,10 @@ def vstack(xs):
     return torch.vstack(xs)
 
 
-def vectorize(pyfunc):
-    @functools.wraps(pyfunc)
-    def wrapped(x):
-        return torch.vmap(pyfunc, x)
-
-    return wrapped
+def vectorize(pyfunc, *, excluded=None, signature=None):
+    return vectorize_impl(
+        pyfunc, torch.vmap, excluded=excluded, signature=signature
+    )
 
 
 def where(condition, x1, x2):
diff --git a/keras/src/ops/numpy.py b/keras/src/ops/numpy.py
index f3f367674758..4ad40edc06da 100644
--- a/keras/src/ops/numpy.py
+++ b/keras/src/ops/numpy.py
@@ -5416,7 +5416,7 @@ def vdot(x1, x2):
 
 
 @keras_export(["keras.ops.vectorize", "keras.ops.numpy.vectorize"])
-def vectorize(pyfunc):
+def vectorize(pyfunc, *, excluded=None, signature=None):
     """Turn a function into a vectorized function.
 
     Example:
@@ -5431,6 +5431,17 @@ def myfunc(a, b):
 
     Args:
         pyfunc: Callable of a single tensor argument.
+        excluded: Optional set of integers representing
+            positional arguments for which the function
+            will not be vectorized.
+            These will be passed directly to `pyfunc` unmodified.
+        signature: Optional generalized universal function signature,
+            e.g., `"(m,n),(n)->(m)"` for vectorized
+            matrix-vector multiplication. If provided,
+            `pyfunc` will be called with (and expected to return)
+            arrays with shapes given by the size of corresponding
+            core dimensions. By default, `pyfunc` is assumed
+            to take scalar tensors as input and output.
 
     Returns:
         A new function that applies `pyfunc` to every element
@@ -5441,7 +5452,9 @@ def myfunc(a, b):
             "Expected argument `pyfunc` to be a callable. 
" f"Received: pyfunc={pyfunc}" ) - return backend.numpy.vectorize(pyfunc) + return backend.numpy.vectorize( + pyfunc, excluded=excluded, signature=signature + ) class Vstack(Operation): diff --git a/keras/src/ops/numpy_test.py b/keras/src/ops/numpy_test.py index ed69eeb1544e..f2046c3907d1 100644 --- a/keras/src/ops/numpy_test.py +++ b/keras/src/ops/numpy_test.py @@ -4271,6 +4271,7 @@ def test_nan_to_num(self): ) def test_vectorize(self): + # Basic functionality def myfunc(a, b): return a + b @@ -4278,6 +4279,19 @@ def myfunc(a, b): y = vfunc([1, 2, 3, 4], 2) self.assertAllClose(y, [3, 4, 5, 6]) + # Test signature arg + vfunc = knp.vectorize(knp.trace, signature="(d,d)->()") + out = vfunc(np.eye(4)) + self.assertAllClose( + out, np.vectorize(np.trace, signature="(d,d)->()")(np.eye(4)) + ) + + vfunc = knp.vectorize(knp.diag, signature="(d,d)->(d)") + out = vfunc(np.eye(4)) + self.assertAllClose( + out, np.vectorize(np.diag, signature="(d,d)->(d)")(np.eye(4)) + ) + class NumpyArrayCreateOpsCorrectnessTest(testing.TestCase): def test_ones(self): From 3afc089aa90687bd9fb5b74139ee72aa32286902 Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Mon, 22 Apr 2024 13:14:47 +0800 Subject: [PATCH 047/101] Fix the apis of `dtype_polices` (#19580) * Fix api of `dtype_polices` * Update docstring * Increase test coverage * Fix format --- .../keras/dtype_policies/__init__.py | 3 + keras/api/dtype_policies/__init__.py | 3 + keras/src/dtype_policies/__init__.py | 77 ++++++++++++++++++- keras/src/dtype_policies/dtype_policy.py | 5 ++ keras/src/dtype_policies/dtype_policy_test.py | 55 ++++++++++++- keras/src/losses/__init__.py | 4 +- 6 files changed, 140 insertions(+), 7 deletions(-) diff --git a/keras/api/_tf_keras/keras/dtype_policies/__init__.py b/keras/api/_tf_keras/keras/dtype_policies/__init__.py index da8364263a22..2abb181f5dfb 100644 --- a/keras/api/_tf_keras/keras/dtype_policies/__init__.py +++ b/keras/api/_tf_keras/keras/dtype_policies/__init__.py @@ -4,6 +4,9 @@ since your modifications would be overwritten. """ +from keras.src.dtype_policies import deserialize +from keras.src.dtype_policies import get +from keras.src.dtype_policies import serialize from keras.src.dtype_policies.dtype_policy import DTypePolicy from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy diff --git a/keras/api/dtype_policies/__init__.py b/keras/api/dtype_policies/__init__.py index da8364263a22..2abb181f5dfb 100644 --- a/keras/api/dtype_policies/__init__.py +++ b/keras/api/dtype_policies/__init__.py @@ -4,6 +4,9 @@ since your modifications would be overwritten. 
""" +from keras.src.dtype_policies import deserialize +from keras.src.dtype_policies import get +from keras.src.dtype_policies import serialize from keras.src.dtype_policies.dtype_policy import DTypePolicy from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy diff --git a/keras/src/dtype_policies/__init__.py b/keras/src/dtype_policies/__init__.py index ec84c2660417..03cff8015b97 100644 --- a/keras/src/dtype_policies/__init__.py +++ b/keras/src/dtype_policies/__init__.py @@ -1,23 +1,96 @@ from keras.src import backend +from keras.src.api_export import keras_export from keras.src.dtype_policies import dtype_policy from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES +from keras.src.dtype_policies.dtype_policy import DTypePolicy from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy +ALL_OBJECTS = { + DTypePolicy, + FloatDTypePolicy, + QuantizedDTypePolicy, + QuantizedFloat8DTypePolicy, +} +ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS} + +@keras_export("keras.dtype_policies.serialize") +def serialize(dtype_policy): + """Serializes `DTypePolicy` instance. + + Args: + dtype_policy: A Keras `DTypePolicy` instance. + + Returns: + `DTypePolicy` configuration dictionary. + """ + from keras.src.saving import serialization_lib + + return serialization_lib.serialize_keras_object(dtype_policy) + + +@keras_export("keras.dtype_policies.deserialize") +def deserialize(config, custom_objects=None): + """Deserializes a serialized `DTypePolicy` instance. + + Args: + config: `DTypePolicy` configuration. + custom_objects: Optional dictionary mapping names (strings) to custom + objects (classes and functions) to be considered during + deserialization. + + Returns: + A Keras `DTypePolicy` instance. + """ + from keras.src.saving import serialization_lib + + return serialization_lib.deserialize_keras_object( + config, + module_objects=ALL_OBJECTS_DICT, + custom_objects=custom_objects, + ) + + +@keras_export("keras.dtype_policies.get") def get(identifier): + """Retrieves a Keras `DTypePolicy` instance. + + The `identifier` may be the string name of a `DTypePolicy` class. + + >>> policy = dtype_policies.get("mixed_bfloat16") + >>> type(loss) + + + You can also specify `config` of the dtype policy to this function by + passing dict containing `class_name` and `config` as an identifier. Also + note that the `class_name` must map to a `DTypePolicy` class + + >>> identifier = {"class_name": "FloatDTypePolicy", + ... "config": {"name": "float32"}} + >>> policy = dtype_policies.get(identifier) + >>> type(loss) + + + Args: + identifier: A dtype policy identifier. One of `None` or string name of a + `DTypePolicy` or `DTypePolicy` configuration dictionary or a + `DTypePolicy` instance. + + Returns: + A Keras `DTypePolicy` instance. 
+ """ from keras.src.dtype_policies.dtype_policy import ( _get_quantized_dtype_policy_by_str, ) - from keras.src.saving import serialization_lib if identifier is None: return dtype_policy.dtype_policy() if isinstance(identifier, (FloatDTypePolicy, QuantizedDTypePolicy)): return identifier if isinstance(identifier, dict): - return serialization_lib.deserialize_keras_object(identifier) + return deserialize(identifier) if isinstance(identifier, str): if identifier.startswith(QUANTIZATION_MODES): return _get_quantized_dtype_policy_by_str(identifier) diff --git a/keras/src/dtype_policies/dtype_policy.py b/keras/src/dtype_policies/dtype_policy.py index 2618e118e2bf..a55eaa4c0659 100644 --- a/keras/src/dtype_policies/dtype_policy.py +++ b/keras/src/dtype_policies/dtype_policy.py @@ -293,6 +293,11 @@ def _get_all_valid_policies(self): ] return valid_policies + def get_config(self): + config = super().get_config() + config.update({"amax_history_length": self.amax_history_length}) + return config + @keras_export( [ diff --git a/keras/src/dtype_policies/dtype_policy_test.py b/keras/src/dtype_policies/dtype_policy_test.py index b040663781a8..b66df0779f30 100644 --- a/keras/src/dtype_policies/dtype_policy_test.py +++ b/keras/src/dtype_policies/dtype_policy_test.py @@ -1,5 +1,8 @@ from absl.testing import parameterized +from keras.src.dtype_policies import deserialize +from keras.src.dtype_policies import get +from keras.src.dtype_policies import serialize from keras.src.dtype_policies.dtype_policy import DTypePolicy from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy @@ -64,7 +67,7 @@ def test_get_config_from_config(self): new_policy = DTypePolicy.from_config(config) self.assertEqual(new_policy.name, "mixed_float16") - def test_serialization(self): + def test_python_serialization(self): """Test builtin serialization methods.""" import copy import pickle @@ -91,6 +94,16 @@ def test_serialization(self): repr(copied_policy), '' ) + def test_serialization(self): + policy = DTypePolicy("mixed_float16") + config = serialize(policy) + reloaded_policy = deserialize(config) + self.assertEqual(policy.name, reloaded_policy.name) + + # Test `dtype_policies.get` + reloaded_policy = get(config) + self.assertEqual(policy.name, reloaded_policy.name) + class FloatDTypePolicyTest(test_case.TestCase): def test_initialization_valid_name(self): @@ -154,6 +167,16 @@ def test_get_config_from_config(self): new_policy = FloatDTypePolicy.from_config(config) self.assertEqual(new_policy.name, "mixed_float16") + def test_serialization(self): + policy = FloatDTypePolicy("mixed_float16") + config = serialize(policy) + reloaded_policy = deserialize(config) + self.assertEqual(policy.name, reloaded_policy.name) + + # Test `dtype_policies.get` + reloaded_policy = get(config) + self.assertEqual(policy.name, reloaded_policy.name) + class QuantizedDTypePolicyTest(test_case.TestCase, parameterized.TestCase): @parameterized.named_parameters( @@ -224,7 +247,7 @@ def test_get_config_from_config(self): '', ), ) - def test_serialization(self, name, repr_str): + def test_python_serialization(self, name, repr_str): import copy import pickle @@ -244,6 +267,16 @@ def test_serialization(self, name, repr_str): copied_policy = pickle.load(f) self.assertEqual(repr(copied_policy), repr_str) + def test_serialization(self): + policy = QuantizedDTypePolicy("int8_from_float32") + config = serialize(policy) + reloaded_policy = deserialize(config) + 
self.assertEqual(policy.name, reloaded_policy.name) + + # Test `dtype_policies.get` + reloaded_policy = get(config) + self.assertEqual(policy.name, reloaded_policy.name) + def test_properties_for_float8(self): policy = QuantizedFloat8DTypePolicy("float8_from_mixed_bfloat16") self.assertEqual(policy.amax_history_length, 1024) @@ -256,7 +289,7 @@ def test_invalid_properties_for_float8(self): with self.assertRaisesRegex(TypeError, "must be an integer."): QuantizedFloat8DTypePolicy("float8_from_float32", 512.0) - def test_serialization_for_float8(self): + def test_python_serialization_for_float8(self): import copy import pickle @@ -288,6 +321,22 @@ def test_serialization_for_float8(self): ) self.assertEqual(copied_policy.amax_history_length, 123) + def test_serialization_for_float8(self): + policy = QuantizedFloat8DTypePolicy("float8_from_mixed_float16") + config = serialize(policy) + reloaded_policy = deserialize(config) + self.assertEqual(policy.name, reloaded_policy.name) + self.assertEqual( + policy.amax_history_length, reloaded_policy.amax_history_length + ) + + # Test `dtype_policies.get` + reloaded_policy = get(config) + self.assertEqual(policy.name, reloaded_policy.name) + self.assertEqual( + policy.amax_history_length, reloaded_policy.amax_history_length + ) + @parameterized.named_parameters( ("int8_from_mixed_bfloat16", "int8_from_mixed_bfloat16"), ("float8_from_mixed_bfloat16", "float8_from_mixed_bfloat16"), diff --git a/keras/src/losses/__init__.py b/keras/src/losses/__init__.py index 9652ceb057bf..3f4ef8d0f693 100644 --- a/keras/src/losses/__init__.py +++ b/keras/src/losses/__init__.py @@ -135,8 +135,8 @@ def deserialize(name, custom_objects=None): Args: name: Loss configuration. custom_objects: Optional dictionary mapping names (strings) to custom - objects (classes and functions) to be considered during - deserialization. + objects (classes and functions) to be considered during + deserialization. Returns: A Keras `Loss` instance or a loss function. 
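
For reference, the new helpers compose as follows. A minimal round-trip
sketch using only the endpoints exported above (`serialize`, `deserialize`
and `get` under `keras.dtype_policies`); the expected behavior mirrors the
tests added in this patch:

    from keras import dtype_policies

    policy = dtype_policies.QuantizedFloat8DTypePolicy("float8_from_float32")
    config = dtype_policies.serialize(policy)
    reloaded = dtype_policies.deserialize(config)
    assert reloaded.name == policy.name
    # `amax_history_length` survives the round trip thanks to the
    # `get_config()` override added to `QuantizedFloat8DTypePolicy`.
    assert reloaded.amax_history_length == policy.amax_history_length

    # `get` resolves strings, config dicts, and policy instances alike.
    assert dtype_policies.get(config).name == policy.name
    assert dtype_policies.get("mixed_float16").name == "mixed_float16"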
From afc92f5b33aaa8be5bec729a8398faec747b75ca Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Mon, 22 Apr 2024 19:40:35 +0800 Subject: [PATCH 048/101] Fix keys of `save_own_variables` and `load_own_variables` (#19581) --- keras/src/layers/convolutional/base_conv.py | 12 ++++--- keras/src/layers/core/dense.py | 40 +++++++++++---------- keras/src/layers/core/einsum_dense.py | 40 +++++++++++---------- keras/src/layers/core/embedding.py | 12 ++++--- 4 files changed, 60 insertions(+), 44 deletions(-) diff --git a/keras/src/layers/convolutional/base_conv.py b/keras/src/layers/convolutional/base_conv.py index 96a66e58e40c..ffb1e4a87805 100644 --- a/keras/src/layers/convolutional/base_conv.py +++ b/keras/src/layers/convolutional/base_conv.py @@ -307,9 +307,11 @@ def save_own_variables(self, store): # Do nothing if the layer isn't yet built if not self.built: return - store["0"] = self.kernel + target_variables = [self.kernel] if self.use_bias: - store["1"] = self.bias + target_variables.append(self.bias) + for i, variable in enumerate(target_variables): + store[str(i)] = variable def load_own_variables(self, store): if not self.lora_enabled: @@ -317,9 +319,11 @@ def load_own_variables(self, store): # Do nothing if the layer isn't yet built if not self.built: return - self._kernel.assign(store["0"]) + target_variables = [self._kernel] if self.use_bias: - self.bias.assign(store["1"]) + target_variables.append(self.bias) + for i, variable in enumerate(target_variables): + variable.assign(store[str(i)]) if self.lora_enabled: self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape)) self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape)) diff --git a/keras/src/layers/core/dense.py b/keras/src/layers/core/dense.py index e856feb98409..da32e01078b3 100644 --- a/keras/src/layers/core/dense.py +++ b/keras/src/layers/core/dense.py @@ -202,24 +202,26 @@ def save_own_variables(self, store): # The keys of the `store` will be saved as determined because the # default ordering will change after quantization kernel_value, kernel_scale = self._get_kernel_with_merged_lora() - store["0"] = kernel_value + target_variables = [kernel_value] if self.use_bias: - store["1"] = self.bias + target_variables.append(self.bias) if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): mode = self.dtype_policy.quantization_mode if mode == "int8": - store["2"] = kernel_scale + target_variables.append(kernel_scale) elif mode == "float8": - store["2"] = self.inputs_scale - store["3"] = self.inputs_amax_history - store["4"] = self.kernel_scale - store["5"] = self.kernel_amax_history - store["6"] = self.outputs_grad_scale - store["7"] = self.outputs_grad_amax_history + target_variables.append(self.inputs_scale) + target_variables.append(self.inputs_amax_history) + target_variables.append(self.kernel_scale) + target_variables.append(self.kernel_amax_history) + target_variables.append(self.outputs_grad_scale) + target_variables.append(self.outputs_grad_amax_history) else: raise NotImplementedError( self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) ) + for i, variable in enumerate(target_variables): + store[str(i)] = variable def load_own_variables(self, store): if not self.lora_enabled: @@ -229,24 +231,26 @@ def load_own_variables(self, store): return # The keys of the `store` will be saved as determined because the # default ordering will change after quantization - self._kernel.assign(store["0"]) + target_variables = [self._kernel] if self.use_bias: - 
self.bias.assign(store["1"]) + target_variables.append(self.bias) if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): mode = self.dtype_policy.quantization_mode if mode == "int8": - self.kernel_scale.assign(store["2"]) + target_variables.append(self.kernel_scale) elif mode == "float8": - self.inputs_scale.assign(store["2"]) - self.inputs_amax_history.assign(store["3"]) - self.kernel_scale.assign(store["4"]) - self.kernel_amax_history.assign(store["5"]) - self.outputs_grad_scale.assign(store["6"]) - self.outputs_grad_amax_history.assign(store["7"]) + target_variables.append(self.inputs_scale) + target_variables.append(self.inputs_amax_history) + target_variables.append(self.kernel_scale) + target_variables.append(self.kernel_amax_history) + target_variables.append(self.outputs_grad_scale) + target_variables.append(self.outputs_grad_amax_history) else: raise NotImplementedError( self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) ) + for i, variable in enumerate(target_variables): + variable.assign(store[str(i)]) if self.lora_enabled: self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape)) self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape)) diff --git a/keras/src/layers/core/einsum_dense.py b/keras/src/layers/core/einsum_dense.py index c872c75a92b8..bfc7eff4341f 100644 --- a/keras/src/layers/core/einsum_dense.py +++ b/keras/src/layers/core/einsum_dense.py @@ -257,24 +257,26 @@ def save_own_variables(self, store): # The keys of the `store` will be saved as determined because the # default ordering will change after quantization kernel_value, kernel_scale = self._get_kernel_with_merged_lora() - store["0"] = kernel_value + target_variables = [kernel_value] if self.bias is not None: - store["1"] = self.bias + target_variables.append(self.bias) if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): mode = self.dtype_policy.quantization_mode if mode == "int8": - store["2"] = kernel_scale + target_variables.append(kernel_scale) elif mode == "float8": - store["2"] = self.inputs_scale - store["3"] = self.inputs_amax_history - store["4"] = self.kernel_scale - store["5"] = self.kernel_amax_history - store["6"] = self.outputs_grad_scale - store["7"] = self.outputs_grad_amax_history + target_variables.append(self.inputs_scale) + target_variables.append(self.inputs_amax_history) + target_variables.append(self.kernel_scale) + target_variables.append(self.kernel_amax_history) + target_variables.append(self.outputs_grad_scale) + target_variables.append(self.outputs_grad_amax_history) else: raise NotImplementedError( self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) ) + for i, variable in enumerate(target_variables): + store[str(i)] = variable def load_own_variables(self, store): if not self.lora_enabled: @@ -284,24 +286,26 @@ def load_own_variables(self, store): return # The keys of the `store` will be saved as determined because the # default ordering will change after quantization - self._kernel.assign(store["0"]) + target_variables = [self._kernel] if self.bias is not None: - self.bias.assign(store["1"]) + target_variables.append(self.bias) if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): mode = self.dtype_policy.quantization_mode if mode == "int8": - self.kernel_scale.assign(store["2"]) + target_variables.append(self.kernel_scale) elif mode == "float8": - self.inputs_scale.assign(store["2"]) - self.inputs_amax_history.assign(store["3"]) - self.kernel_scale.assign(store["4"]) - self.kernel_amax_history.assign(store["5"]) - 
self.outputs_grad_scale.assign(store["6"]) - self.outputs_grad_amax_history.assign(store["7"]) + target_variables.append(self.inputs_scale) + target_variables.append(self.inputs_amax_history) + target_variables.append(self.kernel_scale) + target_variables.append(self.kernel_amax_history) + target_variables.append(self.outputs_grad_scale) + target_variables.append(self.outputs_grad_amax_history) else: raise NotImplementedError( self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) ) + for i, variable in enumerate(target_variables): + variable.assign(store[str(i)]) if self.lora_enabled: self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape)) self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape)) diff --git a/keras/src/layers/core/embedding.py b/keras/src/layers/core/embedding.py index 1807a86b125e..0de116379bd0 100644 --- a/keras/src/layers/core/embedding.py +++ b/keras/src/layers/core/embedding.py @@ -199,15 +199,17 @@ def save_own_variables(self, store): embeddings_value, embeddings_scale = ( self._get_embeddings_with_merged_lora() ) - store["0"] = embeddings_value + target_variables = [embeddings_value] if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): mode = self.dtype_policy.quantization_mode if mode == "int8": - store["1"] = embeddings_scale + target_variables.append(embeddings_scale) else: raise NotImplementedError( self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) ) + for i, variable in enumerate(target_variables): + store[str(i)] = variable def load_own_variables(self, store): if not self.lora_enabled: @@ -217,15 +219,17 @@ def load_own_variables(self, store): return # The keys of the `store` will be saved as determined because the # default ordering will change after quantization - self._embeddings.assign(store["0"]) + target_variables = [self._embeddings] if isinstance(self.dtype_policy, dtype_policies.QuantizedDTypePolicy): mode = self.dtype_policy.quantization_mode if mode == "int8": - self.embeddings_scale.assign(store["1"]) + target_variables.append(self.embeddings_scale) else: raise NotImplementedError( self.QUANTIZATION_MODE_ERROR_TEMPLATE.format(mode) ) + for i, variable in enumerate(target_variables): + variable.assign(store[str(i)]) if self.lora_enabled: self.lora_embeddings_a.assign( ops.zeros(self.lora_embeddings_a.shape) From d7f5d2957d74b89bbe2751ea1ef605a1387731cc Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Mon, 22 Apr 2024 09:38:54 -0700 Subject: [PATCH 049/101] Fix JAX CTC test. 
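
The guard added below makes the decoder robust to
`jnp.unique(..., return_inverse=True)` returning its inverse indices either
flat or with a trailing unit axis, which varies across JAX versions. A
standalone sketch of the normalization (illustrative values, not the
decoder's actual inputs):

    import jax.numpy as jnp

    paths = jnp.array([[1, 1, 0], [1, 1, 0], [2, 0, 0]])
    _, unique_inverse = jnp.unique(
        paths, return_inverse=True, size=paths.shape[0], axis=0, fill_value=0
    )
    # Depending on the JAX version, `unique_inverse` has shape (3,) or
    # (3, 1); squeeze the trailing axis only when it is present.
    if len(unique_inverse.shape) >= 2:
        unique_inverse = jnp.squeeze(unique_inverse, axis=1)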
--- keras/src/backend/jax/nn.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index 916ff885d6cd..9b612760db3d 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -782,7 +782,8 @@ def _prune_paths(paths, scores, masked): axis=0, fill_value=_pad, ) - unique_inverse = jnp.squeeze(unique_inverse, axis=1) + if len(unique_inverse.shape) >= 2: + unique_inverse = jnp.squeeze(unique_inverse, axis=1) emit_scores = jnp.where(masked, -jnp.inf, scores) mask_scores = jnp.where(masked, scores, -jnp.inf) @@ -842,7 +843,8 @@ def _decode_batch( axis=0, fill_value=_pad, ) - unique_inverse = jnp.squeeze(unique_inverse, axis=1) + if len(unique_inverse.shape) >= 2: + unique_inverse = jnp.squeeze(unique_inverse, axis=1) scores = _merge_scores(unique_inverse, scores) top_indices = jnp.argsort(scores)[-top_paths:][::-1] From 4919ea1702049a1cc9348f534656908204b3d9f3 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Mon, 22 Apr 2024 10:46:56 -0700 Subject: [PATCH 050/101] Fix loss_weights handling in single output case --- keras/src/trainers/compile_utils.py | 10 ++- keras/src/trainers/trainer_test.py | 114 +++++++++++++++++++++++++++- 2 files changed, 117 insertions(+), 7 deletions(-) diff --git a/keras/src/trainers/compile_utils.py b/keras/src/trainers/compile_utils.py index 729c26921336..5678f8cba151 100644 --- a/keras/src/trainers/compile_utils.py +++ b/keras/src/trainers/compile_utils.py @@ -413,10 +413,14 @@ def __init__( reduction="sum_over_batch_size", output_names=None, ): - if loss_weights and not isinstance(loss_weights, (list, tuple, dict)): + if loss_weights and not isinstance( + loss_weights, (list, tuple, dict, float) + ): raise ValueError( - "Expected `loss_weights` argument to be a list, tuple, or " - f"dict. Received instead: loss_weights={loss_weights} " + "Expected `loss_weights` argument to be a float " + "(single output case) or a list, tuple, or " + "dict (multiple output case). " + f"Received instead: loss_weights={loss_weights} " f"of type {type(loss_weights)}" ) self._user_loss = loss diff --git a/keras/src/trainers/trainer_test.py b/keras/src/trainers/trainer_test.py index 3aed3253ee90..a7ec7b9b59a7 100644 --- a/keras/src/trainers/trainer_test.py +++ b/keras/src/trainers/trainer_test.py @@ -90,7 +90,7 @@ def call(self, x): } -class ListModel(Trainer, layers.Layer): +class ListInputModel(Trainer, layers.Layer): def __init__(self, units): layers.Layer.__init__(self) Trainer.__init__(self) @@ -110,6 +110,25 @@ def call(self, x): return self.dense_1(x[0]) + self.dense_2(x[1]) +class ListOutputModel(Trainer, layers.Layer): + def __init__(self, units): + layers.Layer.__init__(self) + Trainer.__init__(self) + self.dense_1 = layers.Dense( + units, + use_bias=False, + kernel_initializer=initializers.Ones(), + ) + self.dense_2 = layers.Dense( + units, + use_bias=False, + kernel_initializer=initializers.Ones(), + ) + + def call(self, x): + return [self.dense_1(x), self.dense_2(x)] + + class TrainingTestingLayer(Trainer, layers.Layer): def __init__(self, **kwargs): layers.Layer.__init__(self, **kwargs) @@ -265,8 +284,8 @@ def test_fit_flow(self, run_eagerly, jit_compile, use_steps_per_epoch): self.assertIn("mean_squared_error", history) self.assertAllClose( history["mean_squared_error"], - [14.402393, 10.991339, 8.388159], - atol=6.1051628e-1, + [14.5, 11.5, 8.5], + atol=0.6, # TODO: abnormal results for certain configs. 
) @parameterized.named_parameters( @@ -1164,7 +1183,7 @@ def metrics_one(y_true, y_pred): @pytest.mark.requires_trainable_backend def test_nested_inputs(self): - model = ListModel(units=2) + model = ListInputModel(units=2) out = model([np.ones((3, 2)), np.ones((3, 3))]) self.assertEqual(tuple(out.shape), (3, 2)) model.compile(optimizer="sgd", loss="mse", metrics=["mse"]) @@ -1420,6 +1439,93 @@ def compute_loss( history = model.fit(x, y) self.assertGreater(history.history["custom"][0], 0.0) + @pytest.mark.requires_trainable_backend + def test_loss_weights(self): + epochs = 3 + batch_size = 20 + dataset_size = batch_size * 2 + + # Single output case. + model = ExampleModel(units=3) + model.compile( + optimizer=optimizers.SGD(), + loss=losses.MeanSquaredError(), + metrics=[metrics.MeanSquaredError()], + loss_weights=0.2, + ) + x = np.ones((dataset_size, 4)) + y = np.zeros((dataset_size, 3)) + history = model.fit( + x, + y, + batch_size=batch_size, + epochs=epochs, + ) + history = history.history + self.assertIn("loss", history) + self.assertAllClose( + history["loss"], + [3.182979, 3.115617, 3.049681], + atol=1e-3, + ) + + # Dict output case. + model = StructModel(units=3) + model.compile( + optimizer=optimizers.SGD(), + loss={ + "y_one": losses.MeanSquaredError(), + "y_two": losses.MeanSquaredError(), + }, + metrics={ + "y_one": metrics.MeanSquaredError(), + "y_two": metrics.MeanSquaredError(), + }, + loss_weights={"y_one": 0.1, "y_two": 0.2}, + ) + x1 = np.ones((dataset_size, 4)) + x2 = np.ones((dataset_size, 4)) + y1 = np.zeros((dataset_size, 3)) + y2 = np.zeros((dataset_size, 3)) + history = model.fit( + {"x_one": x1, "x_two": x2}, + {"y_one": y1, "y_two": y2}, + batch_size=batch_size, + epochs=epochs, + ) + history = history.history + self.assertIn("loss", history) + self.assertAllClose( + history["loss"], + [4.778718, 4.694403, 4.611693], + atol=1e-3, + ) + + # List output case. + model = ListOutputModel(units=3) + model.compile( + optimizer=optimizers.SGD(), + loss=[losses.MeanSquaredError(), losses.MeanSquaredError()], + metrics=[metrics.MeanSquaredError(), metrics.MeanSquaredError()], + loss_weights=[0.1, 0.2], + ) + x = np.ones((dataset_size, 4)) + y1 = np.zeros((dataset_size, 3)) + y2 = np.zeros((dataset_size, 3)) + history = model.fit( + x, + [y1, y2], + batch_size=batch_size, + epochs=epochs, + ) + history = history.history + self.assertIn("loss", history) + self.assertAllClose( + history["loss"], + [4.778718, 4.694403, 4.611693], + atol=1e-3, + ) + class TrainerDistributeTest(testing.TestCase): @pytest.mark.skipif( From 99fdbf7859ee86f4330f8d353347ddaad14b85e7 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Mon, 22 Apr 2024 10:49:42 -0700 Subject: [PATCH 051/101] Fix JAX vectorize. --- keras/src/backend/jax/numpy.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/keras/src/backend/jax/numpy.py b/keras/src/backend/jax/numpy.py index 9a048b300770..3e69cb0cf02f 100644 --- a/keras/src/backend/jax/numpy.py +++ b/keras/src/backend/jax/numpy.py @@ -1024,6 +1024,8 @@ def vstack(xs): def vectorize(pyfunc, *, excluded=None, signature=None): + if excluded is None: + excluded = set() return jnp.vectorize(pyfunc, excluded=excluded, signature=signature) From 33bf553148dfa9f806b89de0b9eec11497a19bc8 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Mon, 22 Apr 2024 15:43:27 -0700 Subject: [PATCH 052/101] Move _tf_keras directory to the root of the pip package. 
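
The wheel is now assembled from a scratch copy of the source tree:
`build()` stages `keras/` and the setup files into a temporary build
directory, rewrites the legacy `_tf_keras` package there, and always
removes the staging directory when done, so repeated builds start clean.
A usage sketch against the entry points this patch touches (run from the
repo root; `install_whl` is optional):

    import os

    import pip_build

    # Stages sources, hoists `keras/api/_tf_keras` up to `keras/_tf_keras`,
    # runs `python3 -m build`, and cleans up the staging directory.
    whl_path = pip_build.build(os.getcwd())
    pip_build.install_whl(whl_path)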
--- keras/src/version.py | 2 +- pip_build.py | 58 ++++++++++++++++++++++++++++++++++++++------ 2 files changed, 52 insertions(+), 8 deletions(-) diff --git a/keras/src/version.py b/keras/src/version.py index c0168d651dae..36b1de170aef 100644 --- a/keras/src/version.py +++ b/keras/src/version.py @@ -1,7 +1,7 @@ from keras.src.api_export import keras_export # Unique source of truth for the version number. -__version__ = "3.3.0" +__version__ = "3.3.1" @keras_export("keras.version") diff --git a/pip_build.py b/pip_build.py index 887c7119e269..ff3ddf78a6ca 100644 --- a/pip_build.py +++ b/pip_build.py @@ -60,6 +60,57 @@ def export_version_string(version, is_nightly=False, rc_index=None): f.write(init_contents) +def ignore_files(_, filenames): + return [f for f in filenames if f.endswith("_test.py")] + + +def copy_source_to_build_directory(root_path): + # Copy sources (`keras/` directory and setup files) to build + # directory + os.chdir(root_path) + os.mkdir(build_directory) + shutil.copytree( + package, os.path.join(build_directory, package), ignore=ignore_files + ) + for fname in to_copy: + shutil.copy(fname, os.path.join(f"{build_directory}", fname)) + os.chdir(build_directory) + + +def build(root_path, is_nightly=False, rc_index=None): + if os.path.exists(build_directory): + raise ValueError(f"Directory already exists: {build_directory}") + + try: + copy_source_to_build_directory(root_path) + create_legacy_directory() + from keras.src.version import __version__ # noqa: E402 + + export_version_string(__version__, is_nightly, rc_index) + return build_and_save_output(root_path, __version__) + finally: + # Clean up: remove the build directory (no longer needed) + shutil.rmtree(build_directory) + + +def create_legacy_directory(): + shutil.move(os.path.join("keras", "api", "_tf_keras"), "keras") + with open(os.path.join("keras", "api", "__init__.py")) as f: + contents = f.read() + contents = contents.replace("from keras.api import _tf_keras", "") + with open(os.path.join("keras", "api", "__init__.py"), "w") as f: + f.write(contents) + + with open(os.path.join("keras", "_tf_keras", "__init__.py")) as f: + contents = f.read() + contents = contents.replace( + "from keras.api._tf_keras import keras", + "from keras._tf_keras import keras", + ) + with open(os.path.join("keras", "_tf_keras", "__init__.py"), "w") as f: + f.write(contents) + + def build_and_save_output(root_path, __version__): # Build the package os.system("python3 -m build") @@ -85,13 +136,6 @@ def build_and_save_output(root_path, __version__): return whl_path -def build(root_path, is_nightly=False, rc_index=None): - from keras.src.version import __version__ # noqa: E402 - - export_version_string(__version__, is_nightly, rc_index) - return build_and_save_output(root_path, __version__) - - def install_whl(whl_fpath): print(f"Installing wheel file: {whl_fpath}") os.system(f"pip3 install {whl_fpath} --force-reinstall --no-dependencies") From 3ebb36fce26c37b5095853a605f07c5f18b57597 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Mon, 22 Apr 2024 16:36:26 -0700 Subject: [PATCH 053/101] One time fix to _tf_keras API. 
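
This regenerates the `_tf_keras` tree so that the legacy endpoints resolve
again. After the fix, the tf.keras-compatible surface exposes the legacy
symbols, for example (a sketch against the in-repo layout; in the built
wheel the tree lives at `keras._tf_keras` instead of `keras.api._tf_keras`):

    from keras.api._tf_keras.keras import backend as K
    from keras.api._tf_keras.keras.preprocessing.text import Tokenizer

    # Legacy-only symbols, re-exported from `keras.src.legacy`:
    K.batch_dot  # from keras.src.legacy.backend
    Tokenizer    # from keras.src.legacy.preprocessing.text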
--- keras/api/__init__.py | 1 - keras/api/_tf_keras/keras/__init__.py | 21 ++- keras/api/_tf_keras/keras/backend/__init__.py | 131 ++++++++++++++++++ keras/api/_tf_keras/keras/layers/__init__.py | 12 ++ keras/api/_tf_keras/keras/losses/__init__.py | 23 ++- keras/api/_tf_keras/keras/metrics/__init__.py | 20 +++ .../_tf_keras/keras/preprocessing/__init__.py | 15 +- .../keras/preprocessing/image/__init__.py | 21 +++ .../keras/preprocessing/sequence/__init__.py | 11 ++ .../keras/preprocessing/text/__init__.py | 11 ++ keras/src/version.py | 2 +- pip_build.py | 5 +- 12 files changed, 261 insertions(+), 12 deletions(-) create mode 100644 keras/api/_tf_keras/keras/preprocessing/text/__init__.py diff --git a/keras/api/__init__.py b/keras/api/__init__.py index 1750a42e8699..334dc282386a 100644 --- a/keras/api/__init__.py +++ b/keras/api/__init__.py @@ -4,7 +4,6 @@ since your modifications would be overwritten. """ -from keras.api import _tf_keras from keras.api import activations from keras.api import applications from keras.api import backend diff --git a/keras/api/_tf_keras/keras/__init__.py b/keras/api/_tf_keras/keras/__init__.py index 334dc282386a..34744db7e718 100644 --- a/keras/api/_tf_keras/keras/__init__.py +++ b/keras/api/_tf_keras/keras/__init__.py @@ -6,7 +6,6 @@ from keras.api import activations from keras.api import applications -from keras.api import backend from keras.api import callbacks from keras.api import config from keras.api import constraints @@ -15,19 +14,15 @@ from keras.api import dtype_policies from keras.api import export from keras.api import initializers -from keras.api import layers from keras.api import legacy -from keras.api import losses from keras.api import metrics from keras.api import mixed_precision from keras.api import models from keras.api import ops from keras.api import optimizers -from keras.api import preprocessing from keras.api import quantizers from keras.api import random from keras.api import regularizers -from keras.api import saving from keras.api import tree from keras.api import utils from keras.src.backend.common.keras_tensor import KerasTensor @@ -37,6 +32,7 @@ from keras.src.backend.exports import name_scope from keras.src.dtype_policies.dtype_policy import DTypePolicy from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy +from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy from keras.src.initializers.initializer import Initializer from keras.src.layers.core.input_layer import Input from keras.src.layers.input_spec import InputSpec @@ -48,7 +44,20 @@ from keras.src.ops.function import Function from keras.src.ops.operation import Operation from keras.src.optimizers.optimizer import Optimizer +from keras.src.quantizers.quantizers import AbsMaxQuantizer from keras.src.quantizers.quantizers import Quantizer from keras.src.regularizers.regularizers import Regularizer -from keras.src.version import __version__ from keras.src.version import version + +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + + +from keras._tf_keras.keras import backend +from keras._tf_keras.keras import layers +from keras._tf_keras.keras import losses +from keras._tf_keras.keras import metrics +from keras._tf_keras.keras import preprocessing diff --git a/keras/api/_tf_keras/keras/backend/__init__.py b/keras/api/_tf_keras/keras/backend/__init__.py index 840bde6e4ded..8b5ff7ecbfd3 100644 --- a/keras/api/_tf_keras/keras/backend/__init__.py +++ b/keras/api/_tf_keras/keras/backend/__init__.py @@ -18,3 +18,134 @@ from keras.src.backend.config import set_floatx from keras.src.backend.config import set_image_data_format from keras.src.utils.naming import get_uid + +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + + +from keras.src.legacy.backend import abs +from keras.src.legacy.backend import all +from keras.src.legacy.backend import any +from keras.src.legacy.backend import arange +from keras.src.legacy.backend import argmax +from keras.src.legacy.backend import argmin +from keras.src.legacy.backend import batch_dot +from keras.src.legacy.backend import batch_flatten +from keras.src.legacy.backend import batch_get_value +from keras.src.legacy.backend import batch_normalization +from keras.src.legacy.backend import batch_set_value +from keras.src.legacy.backend import bias_add +from keras.src.legacy.backend import binary_crossentropy +from keras.src.legacy.backend import binary_focal_crossentropy +from keras.src.legacy.backend import cast +from keras.src.legacy.backend import cast_to_floatx +from keras.src.legacy.backend import categorical_crossentropy +from keras.src.legacy.backend import categorical_focal_crossentropy +from keras.src.legacy.backend import clip +from keras.src.legacy.backend import concatenate +from keras.src.legacy.backend import constant +from keras.src.legacy.backend import conv1d +from keras.src.legacy.backend import conv2d +from keras.src.legacy.backend import conv2d_transpose +from keras.src.legacy.backend import conv3d +from keras.src.legacy.backend import cos +from keras.src.legacy.backend import count_params +from keras.src.legacy.backend import ctc_batch_cost +from keras.src.legacy.backend import ctc_decode +from keras.src.legacy.backend import ctc_label_dense_to_sparse +from keras.src.legacy.backend import cumprod +from keras.src.legacy.backend import cumsum +from keras.src.legacy.backend import depthwise_conv2d +from keras.src.legacy.backend import dot +from keras.src.legacy.backend import dropout +from keras.src.legacy.backend import dtype +from keras.src.legacy.backend import elu +from keras.src.legacy.backend import equal +from keras.src.legacy.backend import eval +from keras.src.legacy.backend import exp +from keras.src.legacy.backend import expand_dims +from keras.src.legacy.backend import eye +from keras.src.legacy.backend import flatten +from keras.src.legacy.backend import foldl +from keras.src.legacy.backend import foldr +from keras.src.legacy.backend import gather +from keras.src.legacy.backend import get_value +from keras.src.legacy.backend import gradients +from keras.src.legacy.backend import greater +from keras.src.legacy.backend import greater_equal +from keras.src.legacy.backend import hard_sigmoid +from keras.src.legacy.backend import in_top_k +from keras.src.legacy.backend import int_shape +from keras.src.legacy.backend import is_sparse +from keras.src.legacy.backend import l2_normalize +from keras.src.legacy.backend import less +from keras.src.legacy.backend import less_equal +from 
keras.src.legacy.backend import log +from keras.src.legacy.backend import map_fn +from keras.src.legacy.backend import max +from keras.src.legacy.backend import maximum +from keras.src.legacy.backend import mean +from keras.src.legacy.backend import min +from keras.src.legacy.backend import minimum +from keras.src.legacy.backend import moving_average_update +from keras.src.legacy.backend import name_scope +from keras.src.legacy.backend import ndim +from keras.src.legacy.backend import not_equal +from keras.src.legacy.backend import one_hot +from keras.src.legacy.backend import ones +from keras.src.legacy.backend import ones_like +from keras.src.legacy.backend import permute_dimensions +from keras.src.legacy.backend import pool2d +from keras.src.legacy.backend import pool3d +from keras.src.legacy.backend import pow +from keras.src.legacy.backend import prod +from keras.src.legacy.backend import random_bernoulli +from keras.src.legacy.backend import random_normal +from keras.src.legacy.backend import random_normal_variable +from keras.src.legacy.backend import random_uniform +from keras.src.legacy.backend import random_uniform_variable +from keras.src.legacy.backend import relu +from keras.src.legacy.backend import repeat +from keras.src.legacy.backend import repeat_elements +from keras.src.legacy.backend import reshape +from keras.src.legacy.backend import resize_images +from keras.src.legacy.backend import resize_volumes +from keras.src.legacy.backend import reverse +from keras.src.legacy.backend import rnn +from keras.src.legacy.backend import round +from keras.src.legacy.backend import separable_conv2d +from keras.src.legacy.backend import set_value +from keras.src.legacy.backend import shape +from keras.src.legacy.backend import sigmoid +from keras.src.legacy.backend import sign +from keras.src.legacy.backend import sin +from keras.src.legacy.backend import softmax +from keras.src.legacy.backend import softplus +from keras.src.legacy.backend import softsign +from keras.src.legacy.backend import sparse_categorical_crossentropy +from keras.src.legacy.backend import spatial_2d_padding +from keras.src.legacy.backend import spatial_3d_padding +from keras.src.legacy.backend import sqrt +from keras.src.legacy.backend import square +from keras.src.legacy.backend import squeeze +from keras.src.legacy.backend import stack +from keras.src.legacy.backend import std +from keras.src.legacy.backend import stop_gradient +from keras.src.legacy.backend import sum +from keras.src.legacy.backend import switch +from keras.src.legacy.backend import tanh +from keras.src.legacy.backend import temporal_padding +from keras.src.legacy.backend import tile +from keras.src.legacy.backend import to_dense +from keras.src.legacy.backend import transpose +from keras.src.legacy.backend import truncated_normal +from keras.src.legacy.backend import update +from keras.src.legacy.backend import update_add +from keras.src.legacy.backend import update_sub +from keras.src.legacy.backend import var +from keras.src.legacy.backend import variable +from keras.src.legacy.backend import zeros +from keras.src.legacy.backend import zeros_like diff --git a/keras/api/_tf_keras/keras/layers/__init__.py b/keras/api/_tf_keras/keras/layers/__init__.py index a4e1bf9a6bbd..536269ce7862 100644 --- a/keras/api/_tf_keras/keras/layers/__init__.py +++ b/keras/api/_tf_keras/keras/layers/__init__.py @@ -193,3 +193,15 @@ from keras.src.utils.jax_layer import FlaxLayer from keras.src.utils.jax_layer import JaxLayer from keras.src.utils.torch_utils 
import TorchModuleWrapper + +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + + +from keras.src.legacy.layers import AlphaDropout +from keras.src.legacy.layers import RandomHeight +from keras.src.legacy.layers import RandomWidth +from keras.src.legacy.layers import ThresholdedReLU diff --git a/keras/api/_tf_keras/keras/losses/__init__.py b/keras/api/_tf_keras/keras/losses/__init__.py index ecaadddf6b7e..9a134077e032 100644 --- a/keras/api/_tf_keras/keras/losses/__init__.py +++ b/keras/api/_tf_keras/keras/losses/__init__.py @@ -8,9 +8,9 @@ from keras.src.losses import get from keras.src.losses import serialize from keras.src.losses.loss import Loss -from keras.src.losses.losses import CTC from keras.src.losses.losses import BinaryCrossentropy from keras.src.losses.losses import BinaryFocalCrossentropy +from keras.src.losses.losses import CTC from keras.src.losses.losses import CategoricalCrossentropy from keras.src.losses.losses import CategoricalFocalCrossentropy from keras.src.losses.losses import CategoricalHinge @@ -48,3 +48,24 @@ from keras.src.losses.losses import sparse_categorical_crossentropy from keras.src.losses.losses import squared_hinge from keras.src.losses.losses import tversky + +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + + +from keras.src.legacy.losses import Reduction +from keras.src.losses.losses import kl_divergence as KLD +from keras.src.losses.losses import kl_divergence as kld +from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence +from keras.src.losses.losses import log_cosh as logcosh +from keras.src.losses.losses import mean_absolute_error as MAE +from keras.src.losses.losses import mean_absolute_error as mae +from keras.src.losses.losses import mean_absolute_percentage_error as MAPE +from keras.src.losses.losses import mean_absolute_percentage_error as mape +from keras.src.losses.losses import mean_squared_error as MSE +from keras.src.losses.losses import mean_squared_error as mse +from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE +from keras.src.losses.losses import mean_squared_logarithmic_error as msle diff --git a/keras/api/_tf_keras/keras/metrics/__init__.py b/keras/api/_tf_keras/keras/metrics/__init__.py index dc59b32a46c3..9ab8a93de305 100644 --- a/keras/api/_tf_keras/keras/metrics/__init__.py +++ b/keras/api/_tf_keras/keras/metrics/__init__.py @@ -74,3 +74,23 @@ from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError from keras.src.metrics.regression_metrics import R2Score from keras.src.metrics.regression_metrics import RootMeanSquaredError + +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + + +from keras.src.losses.losses import kl_divergence as KLD +from keras.src.losses.losses import kl_divergence as kld +from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence +from keras.src.losses.losses import log_cosh as logcosh +from keras.src.losses.losses import mean_absolute_error as MAE +from keras.src.losses.losses import mean_absolute_error as mae +from keras.src.losses.losses import mean_absolute_percentage_error as MAPE +from keras.src.losses.losses import mean_absolute_percentage_error as mape +from keras.src.losses.losses import mean_squared_error as MSE +from keras.src.losses.losses import mean_squared_error as mse +from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE +from keras.src.losses.losses import mean_squared_logarithmic_error as msle diff --git a/keras/api/_tf_keras/keras/preprocessing/__init__.py b/keras/api/_tf_keras/keras/preprocessing/__init__.py index c9ed7fd664c2..fda4779d0903 100644 --- a/keras/api/_tf_keras/keras/preprocessing/__init__.py +++ b/keras/api/_tf_keras/keras/preprocessing/__init__.py @@ -4,10 +4,21 @@ since your modifications would be overwritten. """ -from keras.api.preprocessing import image -from keras.api.preprocessing import sequence +from keras._tf_keras.keras.preprocessing import image +from keras._tf_keras.keras.preprocessing import sequence from keras.src.utils.image_dataset_utils import image_dataset_from_directory from keras.src.utils.text_dataset_utils import text_dataset_from_directory from keras.src.utils.timeseries_dataset_utils import ( timeseries_dataset_from_array, ) + +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + + +from keras._tf_keras.keras.preprocessing import image +from keras._tf_keras.keras.preprocessing import sequence +from keras._tf_keras.keras.preprocessing import text diff --git a/keras/api/_tf_keras/keras/preprocessing/image/__init__.py b/keras/api/_tf_keras/keras/preprocessing/image/__init__.py index f68afe8789d5..070878f2956a 100644 --- a/keras/api/_tf_keras/keras/preprocessing/image/__init__.py +++ b/keras/api/_tf_keras/keras/preprocessing/image/__init__.py @@ -9,3 +9,24 @@ from keras.src.utils.image_utils import load_img from keras.src.utils.image_utils import save_img from keras.src.utils.image_utils import smart_resize + +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. 
+""" + + +from keras.src.legacy.preprocessing.image import DirectoryIterator +from keras.src.legacy.preprocessing.image import ImageDataGenerator +from keras.src.legacy.preprocessing.image import Iterator +from keras.src.legacy.preprocessing.image import NumpyArrayIterator +from keras.src.legacy.preprocessing.image import apply_affine_transform +from keras.src.legacy.preprocessing.image import apply_brightness_shift +from keras.src.legacy.preprocessing.image import apply_channel_shift +from keras.src.legacy.preprocessing.image import random_brightness +from keras.src.legacy.preprocessing.image import random_channel_shift +from keras.src.legacy.preprocessing.image import random_rotation +from keras.src.legacy.preprocessing.image import random_shear +from keras.src.legacy.preprocessing.image import random_shift +from keras.src.legacy.preprocessing.image import random_zoom diff --git a/keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py b/keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py index 188e01af9c48..fbfdfaffe360 100644 --- a/keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py +++ b/keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py @@ -5,3 +5,14 @@ """ from keras.src.utils.sequence_utils import pad_sequences + +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + + +from keras.src.legacy.preprocessing.sequence import TimeseriesGenerator +from keras.src.legacy.preprocessing.sequence import make_sampling_table +from keras.src.legacy.preprocessing.sequence import skipgrams diff --git a/keras/api/_tf_keras/keras/preprocessing/text/__init__.py b/keras/api/_tf_keras/keras/preprocessing/text/__init__.py new file mode 100644 index 000000000000..2e8799f3d5dd --- /dev/null +++ b/keras/api/_tf_keras/keras/preprocessing/text/__init__.py @@ -0,0 +1,11 @@ +"""DO NOT EDIT. + +This file was autogenerated. Do not edit it by hand, +since your modifications would be overwritten. +""" + +from keras.src.legacy.preprocessing.text import Tokenizer +from keras.src.legacy.preprocessing.text import hashing_trick +from keras.src.legacy.preprocessing.text import one_hot +from keras.src.legacy.preprocessing.text import text_to_word_sequence +from keras.src.legacy.preprocessing.text import tokenizer_from_json diff --git a/keras/src/version.py b/keras/src/version.py index 36b1de170aef..3f3890b17af5 100644 --- a/keras/src/version.py +++ b/keras/src/version.py @@ -1,7 +1,7 @@ from keras.src.api_export import keras_export # Unique source of truth for the version number. -__version__ = "3.3.1" +__version__ = "3.3.2" @keras_export("keras.version") diff --git a/pip_build.py b/pip_build.py index ff3ddf78a6ca..d0c73a27efa9 100644 --- a/pip_build.py +++ b/pip_build.py @@ -84,8 +84,11 @@ def build(root_path, is_nightly=False, rc_index=None): try: copy_source_to_build_directory(root_path) create_legacy_directory() - from keras.src.version import __version__ # noqa: E402 + print(os.getcwd()) + # from keras.src.version import __version__ # noqa: E402 + + __version__ = "3.3.2" export_version_string(__version__, is_nightly, rc_index) return build_and_save_output(root_path, __version__) finally: From a070252632b5c19eb06e7a95b18f2ef0c0550158 Mon Sep 17 00:00:00 2001 From: Surya <116063290+SuryanarayanaY@users.noreply.github.com> Date: Tue, 23 Apr 2024 23:14:31 +0530 Subject: [PATCH 054/101] Convert return type imdb.load_data to nparray (#19598) Convert return type imdb.load_data to Numpy array. 
Currently X_train and X-test returned as list. --- keras/src/datasets/imdb.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/keras/src/datasets/imdb.py b/keras/src/datasets/imdb.py index a8b5537b111f..f38dfaf0a158 100644 --- a/keras/src/datasets/imdb.py +++ b/keras/src/datasets/imdb.py @@ -135,8 +135,8 @@ def load_data( xs = [[w for w in x if skip_top <= w < num_words] for x in xs] idx = len(x_train) - x_train, y_train = xs[:idx], labels[:idx] - x_test, y_test = xs[idx:], labels[idx:] + x_train, y_train = np.array(xs[:idx], dtype="object"), labels[:idx] + x_test, y_test = np.array(xs[idx:], dtype="object"), labels[idx:] return (x_train, y_train), (x_test, y_test) From 87dc083e97f63fcd96fe4285fe759fe9b93a8b94 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Tue, 23 Apr 2024 10:55:39 -0700 Subject: [PATCH 055/101] Fix typo --- SECURITY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SECURITY.md b/SECURITY.md index 90853890d8b3..e2ccb038246c 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -59,7 +59,7 @@ Besides the virtual environment, the hardware (GPUs or TPUs) can also be attacke ## Reporting a Vulnerability -Beware that none of the topics under [Using Keras Securely](#using-Keras-securely) are considered vulnerabilities of Keras. +Beware that none of the topics under [Using Keras Securely](#using-keras-securely) are considered vulnerabilities of Keras. If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you From 1ac17cc143b48bdbb23c4bedb82f752601a4dddb Mon Sep 17 00:00:00 2001 From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com> Date: Tue, 23 Apr 2024 12:56:13 -0500 Subject: [PATCH 056/101] fix api_gen.py for legacy (#19590) * fix api_gen.py for legacy * merge api and legacy for _tf_keras --- api_gen.py | 18 ++++++++- keras/api/__init__.py | 1 + keras/api/_tf_keras/keras/__init__.py | 22 +++-------- keras/api/_tf_keras/keras/backend/__init__.py | 10 +---- keras/api/_tf_keras/keras/layers/__init__.py | 15 ++------ keras/api/_tf_keras/keras/losses/__init__.py | 26 +++---------- keras/api/_tf_keras/keras/metrics/__init__.py | 38 ++++++------------- .../_tf_keras/keras/preprocessing/__init__.py | 16 ++------ .../keras/preprocessing/image/__init__.py | 18 +++------ .../keras/preprocessing/sequence/__init__.py | 10 +---- pip_build.py | 32 +++++++++------- 11 files changed, 72 insertions(+), 134 deletions(-) diff --git a/api_gen.py b/api_gen.py index 28fac8fa4f10..a38ff20fd23a 100644 --- a/api_gen.py +++ b/api_gen.py @@ -7,6 +7,7 @@ """ import os +import re import shutil import namex @@ -78,8 +79,7 @@ def create_legacy_directory(package_dir): for path in os.listdir(os.path.join(src_dir, "legacy")) if os.path.isdir(os.path.join(src_dir, "legacy", path)) ] - - for root, _, fnames in os.walk(os.path.join(package_dir, "_legacy")): + for root, _, fnames in os.walk(os.path.join(api_dir, "_legacy")): for fname in fnames: if fname.endswith(".py"): legacy_fpath = os.path.join(root, fname) @@ -110,6 +110,20 @@ def create_legacy_directory(package_dir): f"keras.api.{legacy_submodule}", f"keras.api._tf_keras.keras.{legacy_submodule}", ) + # Remove duplicate generated comments string. 
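+            # (Why the escape dance below: `.` in the pattern does not
+            # match newlines, so the header docstring cannot be matched
+            # directly. Escaping every newline to a literal "\n" first
+            # flattens the file to one line, the '""".*"""' pattern can
+            # then strip the whole autogenerated header, and the final
+            # substitution restores the real newlines.)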
+ legacy_contents = re.sub(r"\n", r"\\n", legacy_contents) + legacy_contents = re.sub('""".*"""', "", legacy_contents) + legacy_contents = re.sub(r"\\n", r"\n", legacy_contents) + # If the same module is in legacy and core_api, use legacy + legacy_imports = re.findall( + r"import (\w+)", legacy_contents + ) + for import_name in legacy_imports: + core_api_contents = re.sub( + f"\n.* import {import_name}\n", + r"\n", + core_api_contents, + ) legacy_contents = core_api_contents + "\n" + legacy_contents with open(tf_keras_fpath, "w") as f: f.write(legacy_contents) diff --git a/keras/api/__init__.py b/keras/api/__init__.py index 334dc282386a..1750a42e8699 100644 --- a/keras/api/__init__.py +++ b/keras/api/__init__.py @@ -4,6 +4,7 @@ since your modifications would be overwritten. """ +from keras.api import _tf_keras from keras.api import activations from keras.api import applications from keras.api import backend diff --git a/keras/api/_tf_keras/keras/__init__.py b/keras/api/_tf_keras/keras/__init__.py index 34744db7e718..5e0a72294736 100644 --- a/keras/api/_tf_keras/keras/__init__.py +++ b/keras/api/_tf_keras/keras/__init__.py @@ -15,7 +15,6 @@ from keras.api import export from keras.api import initializers from keras.api import legacy -from keras.api import metrics from keras.api import mixed_precision from keras.api import models from keras.api import ops @@ -25,6 +24,11 @@ from keras.api import regularizers from keras.api import tree from keras.api import utils +from keras.api._tf_keras.keras import backend +from keras.api._tf_keras.keras import layers +from keras.api._tf_keras.keras import losses +from keras.api._tf_keras.keras import metrics +from keras.api._tf_keras.keras import preprocessing from keras.src.backend.common.keras_tensor import KerasTensor from keras.src.backend.common.stateless_scope import StatelessScope from keras.src.backend.exports import Variable @@ -32,7 +36,6 @@ from keras.src.backend.exports import name_scope from keras.src.dtype_policies.dtype_policy import DTypePolicy from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy -from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy from keras.src.initializers.initializer import Initializer from keras.src.layers.core.input_layer import Input from keras.src.layers.input_spec import InputSpec @@ -44,20 +47,7 @@ from keras.src.ops.function import Function from keras.src.ops.operation import Operation from keras.src.optimizers.optimizer import Optimizer -from keras.src.quantizers.quantizers import AbsMaxQuantizer from keras.src.quantizers.quantizers import Quantizer from keras.src.regularizers.regularizers import Regularizer +from keras.src.version import __version__ from keras.src.version import version - -"""DO NOT EDIT. - -This file was autogenerated. Do not edit it by hand, -since your modifications would be overwritten. 
-""" - - -from keras._tf_keras.keras import backend -from keras._tf_keras.keras import layers -from keras._tf_keras.keras import losses -from keras._tf_keras.keras import metrics -from keras._tf_keras.keras import preprocessing diff --git a/keras/api/_tf_keras/keras/backend/__init__.py b/keras/api/_tf_keras/keras/backend/__init__.py index 8b5ff7ecbfd3..94ccc4bf3d85 100644 --- a/keras/api/_tf_keras/keras/backend/__init__.py +++ b/keras/api/_tf_keras/keras/backend/__init__.py @@ -17,15 +17,6 @@ from keras.src.backend.config import set_epsilon from keras.src.backend.config import set_floatx from keras.src.backend.config import set_image_data_format -from keras.src.utils.naming import get_uid - -"""DO NOT EDIT. - -This file was autogenerated. Do not edit it by hand, -since your modifications would be overwritten. -""" - - from keras.src.legacy.backend import abs from keras.src.legacy.backend import all from keras.src.legacy.backend import any @@ -149,3 +140,4 @@ from keras.src.legacy.backend import variable from keras.src.legacy.backend import zeros from keras.src.legacy.backend import zeros_like +from keras.src.utils.naming import get_uid diff --git a/keras/api/_tf_keras/keras/layers/__init__.py b/keras/api/_tf_keras/keras/layers/__init__.py index 536269ce7862..3d10d172b19e 100644 --- a/keras/api/_tf_keras/keras/layers/__init__.py +++ b/keras/api/_tf_keras/keras/layers/__init__.py @@ -157,7 +157,6 @@ from keras.src.layers.regularization.activity_regularization import ( ActivityRegularization, ) -from keras.src.layers.regularization.alpha_dropout import AlphaDropout from keras.src.layers.regularization.dropout import Dropout from keras.src.layers.regularization.gaussian_dropout import GaussianDropout from keras.src.layers.regularization.gaussian_noise import GaussianNoise @@ -190,18 +189,10 @@ from keras.src.layers.rnn.simple_rnn import SimpleRNNCell from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells from keras.src.layers.rnn.time_distributed import TimeDistributed -from keras.src.utils.jax_layer import FlaxLayer -from keras.src.utils.jax_layer import JaxLayer -from keras.src.utils.torch_utils import TorchModuleWrapper - -"""DO NOT EDIT. - -This file was autogenerated. Do not edit it by hand, -since your modifications would be overwritten. -""" - - from keras.src.legacy.layers import AlphaDropout from keras.src.legacy.layers import RandomHeight from keras.src.legacy.layers import RandomWidth from keras.src.legacy.layers import ThresholdedReLU +from keras.src.utils.jax_layer import FlaxLayer +from keras.src.utils.jax_layer import JaxLayer +from keras.src.utils.torch_utils import TorchModuleWrapper diff --git a/keras/api/_tf_keras/keras/losses/__init__.py b/keras/api/_tf_keras/keras/losses/__init__.py index 9a134077e032..832d78f5fda0 100644 --- a/keras/api/_tf_keras/keras/losses/__init__.py +++ b/keras/api/_tf_keras/keras/losses/__init__.py @@ -4,13 +4,14 @@ since your modifications would be overwritten. 
""" +from keras.src.legacy.losses import Reduction from keras.src.losses import deserialize from keras.src.losses import get from keras.src.losses import serialize from keras.src.losses.loss import Loss +from keras.src.losses.losses import CTC from keras.src.losses.losses import BinaryCrossentropy from keras.src.losses.losses import BinaryFocalCrossentropy -from keras.src.losses.losses import CTC from keras.src.losses.losses import CategoricalCrossentropy from keras.src.losses.losses import CategoricalFocalCrossentropy from keras.src.losses.losses import CategoricalHinge @@ -38,25 +39,6 @@ from keras.src.losses.losses import dice from keras.src.losses.losses import hinge from keras.src.losses.losses import huber -from keras.src.losses.losses import kl_divergence -from keras.src.losses.losses import log_cosh -from keras.src.losses.losses import mean_absolute_error -from keras.src.losses.losses import mean_absolute_percentage_error -from keras.src.losses.losses import mean_squared_error -from keras.src.losses.losses import mean_squared_logarithmic_error -from keras.src.losses.losses import poisson -from keras.src.losses.losses import sparse_categorical_crossentropy -from keras.src.losses.losses import squared_hinge -from keras.src.losses.losses import tversky - -"""DO NOT EDIT. - -This file was autogenerated. Do not edit it by hand, -since your modifications would be overwritten. -""" - - -from keras.src.legacy.losses import Reduction from keras.src.losses.losses import kl_divergence as KLD from keras.src.losses.losses import kl_divergence as kld from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence @@ -69,3 +51,7 @@ from keras.src.losses.losses import mean_squared_error as mse from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE from keras.src.losses.losses import mean_squared_logarithmic_error as msle +from keras.src.losses.losses import poisson +from keras.src.losses.losses import sparse_categorical_crossentropy +from keras.src.losses.losses import squared_hinge +from keras.src.losses.losses import tversky diff --git a/keras/api/_tf_keras/keras/metrics/__init__.py b/keras/api/_tf_keras/keras/metrics/__init__.py index 9ab8a93de305..9b029f7aecbc 100644 --- a/keras/api/_tf_keras/keras/metrics/__init__.py +++ b/keras/api/_tf_keras/keras/metrics/__init__.py @@ -11,12 +11,18 @@ from keras.src.losses.losses import categorical_hinge from keras.src.losses.losses import hinge from keras.src.losses.losses import huber -from keras.src.losses.losses import kl_divergence -from keras.src.losses.losses import log_cosh -from keras.src.losses.losses import mean_absolute_error -from keras.src.losses.losses import mean_absolute_percentage_error -from keras.src.losses.losses import mean_squared_error -from keras.src.losses.losses import mean_squared_logarithmic_error +from keras.src.losses.losses import kl_divergence as KLD +from keras.src.losses.losses import kl_divergence as kld +from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence +from keras.src.losses.losses import log_cosh as logcosh +from keras.src.losses.losses import mean_absolute_error as MAE +from keras.src.losses.losses import mean_absolute_error as mae +from keras.src.losses.losses import mean_absolute_percentage_error as MAPE +from keras.src.losses.losses import mean_absolute_percentage_error as mape +from keras.src.losses.losses import mean_squared_error as MSE +from keras.src.losses.losses import mean_squared_error as mse +from keras.src.losses.losses import 
mean_squared_logarithmic_error as MSLE +from keras.src.losses.losses import mean_squared_logarithmic_error as msle from keras.src.losses.losses import poisson from keras.src.losses.losses import sparse_categorical_crossentropy from keras.src.losses.losses import squared_hinge @@ -74,23 +80,3 @@ from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError from keras.src.metrics.regression_metrics import R2Score from keras.src.metrics.regression_metrics import RootMeanSquaredError - -"""DO NOT EDIT. - -This file was autogenerated. Do not edit it by hand, -since your modifications would be overwritten. -""" - - -from keras.src.losses.losses import kl_divergence as KLD -from keras.src.losses.losses import kl_divergence as kld -from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence -from keras.src.losses.losses import log_cosh as logcosh -from keras.src.losses.losses import mean_absolute_error as MAE -from keras.src.losses.losses import mean_absolute_error as mae -from keras.src.losses.losses import mean_absolute_percentage_error as MAPE -from keras.src.losses.losses import mean_absolute_percentage_error as mape -from keras.src.losses.losses import mean_squared_error as MSE -from keras.src.losses.losses import mean_squared_error as mse -from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE -from keras.src.losses.losses import mean_squared_logarithmic_error as msle diff --git a/keras/api/_tf_keras/keras/preprocessing/__init__.py b/keras/api/_tf_keras/keras/preprocessing/__init__.py index fda4779d0903..737515c3696c 100644 --- a/keras/api/_tf_keras/keras/preprocessing/__init__.py +++ b/keras/api/_tf_keras/keras/preprocessing/__init__.py @@ -4,21 +4,11 @@ since your modifications would be overwritten. """ -from keras._tf_keras.keras.preprocessing import image -from keras._tf_keras.keras.preprocessing import sequence +from keras.api._tf_keras.keras.preprocessing import image +from keras.api._tf_keras.keras.preprocessing import sequence +from keras.api._tf_keras.keras.preprocessing import text from keras.src.utils.image_dataset_utils import image_dataset_from_directory from keras.src.utils.text_dataset_utils import text_dataset_from_directory from keras.src.utils.timeseries_dataset_utils import ( timeseries_dataset_from_array, ) - -"""DO NOT EDIT. - -This file was autogenerated. Do not edit it by hand, -since your modifications would be overwritten. -""" - - -from keras._tf_keras.keras.preprocessing import image -from keras._tf_keras.keras.preprocessing import sequence -from keras._tf_keras.keras.preprocessing import text diff --git a/keras/api/_tf_keras/keras/preprocessing/image/__init__.py b/keras/api/_tf_keras/keras/preprocessing/image/__init__.py index 070878f2956a..2ca54805acba 100644 --- a/keras/api/_tf_keras/keras/preprocessing/image/__init__.py +++ b/keras/api/_tf_keras/keras/preprocessing/image/__init__.py @@ -4,19 +4,6 @@ since your modifications would be overwritten. """ -from keras.src.utils.image_utils import array_to_img -from keras.src.utils.image_utils import img_to_array -from keras.src.utils.image_utils import load_img -from keras.src.utils.image_utils import save_img -from keras.src.utils.image_utils import smart_resize - -"""DO NOT EDIT. - -This file was autogenerated. Do not edit it by hand, -since your modifications would be overwritten. 
-""" - - from keras.src.legacy.preprocessing.image import DirectoryIterator from keras.src.legacy.preprocessing.image import ImageDataGenerator from keras.src.legacy.preprocessing.image import Iterator @@ -30,3 +17,8 @@ from keras.src.legacy.preprocessing.image import random_shear from keras.src.legacy.preprocessing.image import random_shift from keras.src.legacy.preprocessing.image import random_zoom +from keras.src.utils.image_utils import array_to_img +from keras.src.utils.image_utils import img_to_array +from keras.src.utils.image_utils import load_img +from keras.src.utils.image_utils import save_img +from keras.src.utils.image_utils import smart_resize diff --git a/keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py b/keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py index fbfdfaffe360..1f6388250b60 100644 --- a/keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py +++ b/keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py @@ -4,15 +4,7 @@ since your modifications would be overwritten. """ -from keras.src.utils.sequence_utils import pad_sequences - -"""DO NOT EDIT. - -This file was autogenerated. Do not edit it by hand, -since your modifications would be overwritten. -""" - - from keras.src.legacy.preprocessing.sequence import TimeseriesGenerator from keras.src.legacy.preprocessing.sequence import make_sampling_table from keras.src.legacy.preprocessing.sequence import skipgrams +from keras.src.utils.sequence_utils import pad_sequences diff --git a/pip_build.py b/pip_build.py index d0c73a27efa9..52cc08747436 100644 --- a/pip_build.py +++ b/pip_build.py @@ -83,7 +83,7 @@ def build(root_path, is_nightly=False, rc_index=None): try: copy_source_to_build_directory(root_path) - create_legacy_directory() + move_tf_keras_directory() print(os.getcwd()) # from keras.src.version import __version__ # noqa: E402 @@ -96,22 +96,26 @@ def build(root_path, is_nightly=False, rc_index=None): shutil.rmtree(build_directory) -def create_legacy_directory(): - shutil.move(os.path.join("keras", "api", "_tf_keras"), "keras") - with open(os.path.join("keras", "api", "__init__.py")) as f: +def move_tf_keras_directory(): + """Move `keras/api/_tf_keras` to `keras/_tf_keras`, update references.""" + shutil.move(os.path.join(package, "api", "_tf_keras"), "keras") + with open(os.path.join(package, "api", "__init__.py")) as f: contents = f.read() contents = contents.replace("from keras.api import _tf_keras", "") - with open(os.path.join("keras", "api", "__init__.py"), "w") as f: - f.write(contents) - - with open(os.path.join("keras", "_tf_keras", "__init__.py")) as f: - contents = f.read() - contents = contents.replace( - "from keras.api._tf_keras import keras", - "from keras._tf_keras import keras", - ) - with open(os.path.join("keras", "_tf_keras", "__init__.py"), "w") as f: + with open(os.path.join(package, "api", "__init__.py"), "w") as f: f.write(contents) + # Replace `keras.api._tf_keras` with `keras._tf_keras`. 
+ for root, _, fnames in os.walk(os.path.join(package, "_tf_keras")): + for fname in fnames: + if fname.endswith(".py"): + tf_keras_fpath = os.path.join(root, fname) + with open(tf_keras_fpath) as f: + contents = f.read() + contents = contents.replace( + "keras.api._tf_keras", "keras._tf_keras" + ) + with open(tf_keras_fpath, "w") as f: + f.write(contents) def build_and_save_output(root_path, __version__): From c61857d380d3bf5529421db65cb03a20aca969e0 Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Wed, 24 Apr 2024 04:36:14 +0800 Subject: [PATCH 057/101] Improve int8 for `Embedding` (#19595) --- keras/src/layers/core/embedding.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/keras/src/layers/core/embedding.py b/keras/src/layers/core/embedding.py index 0de116379bd0..bb85c3dd13e9 100644 --- a/keras/src/layers/core/embedding.py +++ b/keras/src/layers/core/embedding.py @@ -315,7 +315,6 @@ def _int8_build( embeddings_initializer="zeros", embeddings_scale_initializer="ones", ): - self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) self._embeddings = self.add_weight( name="embeddings", shape=(self.input_dim, self.output_dim), @@ -323,9 +322,12 @@ def _int8_build( dtype="int8", trainable=False, ) + # We choose to reduce the axis of `output_dim` because, typically, + # `input_dim` is larger than `output_dim`. This reduces quantization + # error. self.embeddings_scale = self.add_weight( name="embeddings_scale", - shape=(self.output_dim,), + shape=(self.input_dim,), initializer=embeddings_scale_initializer, trainable=False, ) @@ -345,11 +347,12 @@ def _int8_call(self, inputs): # not needed if backend.standardize_dtype(inputs.dtype) not in ("int32", "int64"): inputs = ops.cast(inputs, "int32") + embeddings_scale = ops.take(self.embeddings_scale, inputs, axis=0) outputs = ops.take(self._embeddings, inputs, axis=0) # De-scale outputs - outputs = ops.cast(outputs, self.compute_dtype) outputs = ops.divide( - outputs, ops.expand_dims(self.embeddings_scale, axis=0) + ops.cast(outputs, dtype=self.compute_dtype), + ops.expand_dims(embeddings_scale, axis=-1), ) if self.lora_enabled: lora_outputs = ops.take(self.lora_embeddings_a, inputs, axis=0) @@ -379,14 +382,12 @@ def quantize(self, mode): self._tracker.unlock() if mode == "int8": - # Configure `self.inputs_quantizer` - self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) # Quantize `self._embeddings` to int8 and compute corresponding # scale embeddings_value, embeddings_scale = quantizers.abs_max_quantize( - self._embeddings, axis=0 + self._embeddings, axis=-1 ) - embeddings_scale = ops.squeeze(embeddings_scale, axis=0) + embeddings_scale = ops.squeeze(embeddings_scale, axis=-1) self._untrack_variable(self._embeddings) del self._embeddings # Utilize a lambda expression as an initializer to prevent adding a @@ -412,15 +413,15 @@ def _get_embeddings_with_merged_lora(self): # Dequantize & quantize to merge lora weights into embeddings # Note that this is a lossy compression embeddings_value = ops.divide( - embeddings_value, embeddings_scale + embeddings_value, ops.expand_dims(embeddings_scale, axis=-1) ) embeddings_value = ops.add( embeddings_value, ops.matmul(self.lora_embeddings_a, self.lora_embeddings_b), ) embeddings_value, embeddings_scale = ( - quantizers.abs_max_quantize(embeddings_value, axis=0) + quantizers.abs_max_quantize(embeddings_value, axis=-1) ) - embeddings_scale = ops.squeeze(embeddings_scale, axis=0) + embeddings_scale = 
ops.squeeze(embeddings_scale, axis=-1) return embeddings_value, embeddings_scale return self.embeddings, None From d84d5ccd06392dbf224390595f17d78b0d182b19 Mon Sep 17 00:00:00 2001 From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com> Date: Tue, 23 Apr 2024 15:36:35 -0500 Subject: [PATCH 058/101] pin torch < 2.3.0 (#19603) --- requirements-jax-cuda.txt | 2 +- requirements-tensorflow-cuda.txt | 2 +- requirements.txt | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/requirements-jax-cuda.txt b/requirements-jax-cuda.txt index 343026cf6ff4..e21c1cb1c5bc 100644 --- a/requirements-jax-cuda.txt +++ b/requirements-jax-cuda.txt @@ -3,7 +3,7 @@ tensorflow-cpu~=2.16.1 # Pin to TF 2.16 # Torch cpu-only version (needed for testing). --extra-index-url https://download.pytorch.org/whl/cpu -torch>=2.1.0 +torch>=2.1.0, <2.3.0 torchvision>=0.16.0 # Jax with cuda support. diff --git a/requirements-tensorflow-cuda.txt b/requirements-tensorflow-cuda.txt index 69be284f7661..f3b946ddcfee 100644 --- a/requirements-tensorflow-cuda.txt +++ b/requirements-tensorflow-cuda.txt @@ -3,7 +3,7 @@ tensorflow[and-cuda]~=2.16.1 # Pin to TF 2.16 # Torch cpu-only version (needed for testing). --extra-index-url https://download.pytorch.org/whl/cpu -torch>=2.1.0 +torch>=2.1.0, <2.3.0 torchvision>=0.16.0 # Jax cpu-only version (needed for testing). diff --git a/requirements.txt b/requirements.txt index bd34860fe0ce..c759c9d18156 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,8 +2,9 @@ tensorflow-cpu~=2.16.1 # Pin to TF 2.16 # Torch. +# TODO: Pin to < 2.3.0 (GitHub issue #19602) --extra-index-url https://download.pytorch.org/whl/cpu -torch>=2.1.0 +torch>=2.1.0, <2.3.0 torchvision>=0.16.0 # Jax. From 4e0a920ea106afc36a03158b4f1a8f5a70a8e39c Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Wed, 24 Apr 2024 11:53:10 +0800 Subject: [PATCH 059/101] Clean up duplicated `inputs_quantizer` (#19604) * Cleanup duplicated `inputs_quantizer` and add type check for `input_spec` and `supports_masking` * Revert setter --- keras/src/layers/core/dense.py | 2 -- keras/src/layers/core/einsum_dense.py | 4 ---- 2 files changed, 6 deletions(-) diff --git a/keras/src/layers/core/dense.py b/keras/src/layers/core/dense.py index da32e01078b3..8b78a2851543 100644 --- a/keras/src/layers/core/dense.py +++ b/keras/src/layers/core/dense.py @@ -557,8 +557,6 @@ def quantize(self, mode): self._tracker.unlock() if mode == "int8": - # Configure `self.inputs_quantizer` - self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1) # Quantize `self._kernel` to int8 and compute corresponding scale kernel_value, kernel_scale = quantizers.abs_max_quantize( self._kernel, axis=0 diff --git a/keras/src/layers/core/einsum_dense.py b/keras/src/layers/core/einsum_dense.py index bfc7eff4341f..a884171dec48 100644 --- a/keras/src/layers/core/einsum_dense.py +++ b/keras/src/layers/core/einsum_dense.py @@ -684,10 +684,6 @@ def quantize(self, mode): self._custom_gradient_equation, self._kernel_reverse_transpose_axes, ) = _analyze_quantization_info(self.equation, self.input_spec.ndim) - # Configure `self.inputs_quantizer` - self.inputs_quantizer = quantizers.AbsMaxQuantizer( - axis=self._input_reduced_axes - ) # Quantize `self._kernel` to int8 and compute corresponding scale kernel_value, kernel_scale = quantizers.abs_max_quantize( self._kernel, axis=self._kernel_reduced_axes From 8e521f0482d5d97f4e11d21151caf5e63135f438 Mon Sep 17 00:00:00 2001 From: Ramesh Sampath 
<1437573+sampathweb@users.noreply.github.com> Date: Wed, 24 Apr 2024 11:58:36 -0500 Subject: [PATCH 060/101] output format changes and errors in github (#19608) --- .github/workflows/actions.yml | 4 ++-- .github/workflows/nightly.yml | 5 +++-- shell/api_gen.sh | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml index 2d2e17e1e025..1c23197e4d17 100644 --- a/.github/workflows/actions.yml +++ b/.github/workflows/actions.yml @@ -115,6 +115,8 @@ jobs: pip install -r requirements.txt --progress-bar off --upgrade pip uninstall -y keras keras-nightly pip install -e "." --progress-bar off --upgrade + - name: Lint + run: bash shell/lint.sh - name: Check for API changes run: | bash shell/api_gen.sh @@ -124,5 +126,3 @@ jobs: echo "Please run shell/api_gen.sh to generate API." exit 1 fi - - name: Lint - run: bash shell/lint.sh diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 5edfd2c988b1..3c1f279af709 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -81,6 +81,8 @@ jobs: pip install -r requirements.txt --progress-bar off --upgrade pip uninstall -y keras keras-nightly pip install -e "." --progress-bar off --upgrade + - name: Lint + run: bash shell/lint.sh - name: Check for API changes run: | bash shell/api_gen.sh @@ -90,8 +92,7 @@ jobs: echo "Please run shell/api_gen.sh to generate API." exit 1 fi - - name: Lint - run: bash shell/lint.sh + nightly: name: Build Wheel file and upload diff --git a/shell/api_gen.sh b/shell/api_gen.sh index 92cf6c7c2471..389874b890a1 100755 --- a/shell/api_gen.sh +++ b/shell/api_gen.sh @@ -9,4 +9,4 @@ python3 "${base_dir}"/api_gen.py echo "Formatting api directory..." # Format API Files -bash "${base_dir}"/shell/format.sh > /dev/null 2>&1 +bash "${base_dir}"/shell/format.sh From be0681e31a1f8df21968273c1f1b393ecc952d21 Mon Sep 17 00:00:00 2001 From: Shivam Mishra <124146945+shmishra99@users.noreply.github.com> Date: Wed, 24 Apr 2024 22:29:36 +0530 Subject: [PATCH 061/101] Provide write permission to action for cache management. 
(#19606) --- .github/workflows/stale-issue-pr.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/stale-issue-pr.yaml b/.github/workflows/stale-issue-pr.yaml index a5c570dd780e..309760a07512 100644 --- a/.github/workflows/stale-issue-pr.yaml +++ b/.github/workflows/stale-issue-pr.yaml @@ -10,6 +10,7 @@ jobs: permissions: issues: write pull-requests: write + actions: write steps: - name: Awaiting response issues uses: actions/stale@v9 From 2767762688465216ad507c119e404ec8fd79a3e7 Mon Sep 17 00:00:00 2001 From: Luke Wood Date: Wed, 24 Apr 2024 14:01:09 -0400 Subject: [PATCH 062/101] Pickle support for all saveables (#19592) * Pickle support * Add keras pickleable mixin * Reformat * Implement pickle all over * reformat * Reformat * Keras saveable * Keras saveable * Keras saveable * Keras saveable * Keras saveable * obj_type * Update pickleable --- keras/src/layers/layer.py | 6 +++- keras/src/layers/layer_test.py | 8 ++++++ keras/src/losses/loss.py | 6 +++- keras/src/losses/loss_test.py | 7 +++++ keras/src/metrics/metric.py | 6 +++- keras/src/metrics/metric_test.py | 7 +++++ keras/src/models/functional.py | 3 ++ keras/src/models/model.py | 26 ------------------ keras/src/models/sequential.py | 3 ++ keras/src/models/sequential_test.py | 9 ++++++ keras/src/optimizers/base_optimizer.py | 6 +++- keras/src/optimizers/optimizer_test.py | 25 ++++++++++++++++- keras/src/saving/keras_saveable.py | 38 ++++++++++++++++++++++++++ keras/src/saving/saving_lib.py | 35 ++++++++++++------------ 14 files changed, 136 insertions(+), 49 deletions(-) create mode 100644 keras/src/saving/keras_saveable.py diff --git a/keras/src/layers/layer.py b/keras/src/layers/layer.py index 4f72ec4c0e93..7d67339b6321 100644 --- a/keras/src/layers/layer.py +++ b/keras/src/layers/layer.py @@ -36,6 +36,7 @@ from keras.src.layers import input_spec from keras.src.metrics.metric import Metric from keras.src.ops.operation import Operation +from keras.src.saving.keras_saveable import KerasSaveable from keras.src.utils import python_utils from keras.src.utils import summary_utils from keras.src.utils import traceback_utils @@ -56,7 +57,7 @@ @keras_export(["keras.Layer", "keras.layers.Layer"]) -class Layer(BackendLayer, Operation): +class Layer(BackendLayer, Operation, KerasSaveable): """This is the class from which all layers inherit. A layer is a callable object that takes as input one or more tensors and @@ -422,6 +423,9 @@ def build_from_config(self, config): self.build(**config["shapes_dict"]) self.built = True + def _obj_type(self): + return "Layer" + def add_variable( self, shape, diff --git a/keras/src/layers/layer_test.py b/keras/src/layers/layer_test.py index ad274da84af2..6bc93859abc7 100644 --- a/keras/src/layers/layer_test.py +++ b/keras/src/layers/layer_test.py @@ -1,3 +1,5 @@ +import pickle + import numpy as np import pytest @@ -11,6 +13,7 @@ class LayerTest(testing.TestCase): + def test_compute_output_spec(self): # Test that implementing compute_output_shape # is enough to make compute_output_spec work. 
@@ -957,3 +960,8 @@ def test_dtype_policy_setter(self): self.assertEqual(layer.dtype_policy.name, "mixed_float16") self.assertEqual(layer.dtype_policy.compute_dtype, "float16") self.assertEqual(layer.dtype_policy.variable_dtype, "float32") + + def test_pickle_layer(self): + layer = layers.Dense(2) + reloaded = pickle.loads(pickle.dumps(layer)) + self.assertEqual(layer.get_config(), reloaded.get_config()) diff --git a/keras/src/losses/loss.py b/keras/src/losses/loss.py index ba4c78ebc5a5..a35ecf7d48fe 100644 --- a/keras/src/losses/loss.py +++ b/keras/src/losses/loss.py @@ -2,11 +2,12 @@ from keras.src import ops from keras.src import tree from keras.src.api_export import keras_export +from keras.src.saving.keras_saveable import KerasSaveable from keras.src.utils.naming import auto_name @keras_export(["keras.Loss", "keras.losses.Loss"]) -class Loss: +class Loss(KerasSaveable): """Loss base class. To be implemented by subclasses: @@ -69,6 +70,9 @@ def get_config(self): def from_config(cls, config): return cls(**config) + def _obj_type(self): + return "Loss" + def standardize_reduction(reduction): allowed = {"sum_over_batch_size", "sum", None, "none"} diff --git a/keras/src/losses/loss_test.py b/keras/src/losses/loss_test.py index 1d5725ffd3a1..e438f7d882b0 100644 --- a/keras/src/losses/loss_test.py +++ b/keras/src/losses/loss_test.py @@ -1,3 +1,5 @@ +import pickle + import numpy as np import pytest @@ -226,6 +228,11 @@ def test_mixed_dtypes(self): loss, ) + def test_pickle(self): + loss = losses_module.get("mse") + loss = pickle.loads(pickle.dumps(loss)) + self.assertEqual(loss, losses_module.mean_squared_error) + def test_get_method(self): loss = losses_module.get("mse") self.assertEqual(loss, losses_module.mean_squared_error) diff --git a/keras/src/metrics/metric.py b/keras/src/metrics/metric.py index 27f39f94d799..91e50dab8963 100644 --- a/keras/src/metrics/metric.py +++ b/keras/src/metrics/metric.py @@ -2,12 +2,13 @@ from keras.src import initializers from keras.src import ops from keras.src.api_export import keras_export +from keras.src.saving.keras_saveable import KerasSaveable from keras.src.utils.naming import auto_name from keras.src.utils.tracking import Tracker @keras_export(["keras.Metric", "keras.metrics.Metric"]) -class Metric: +class Metric(KerasSaveable): """Encapsulates metric logic and state. Args: @@ -179,6 +180,9 @@ def stateless_reset_state(self): def dtype(self): return self._dtype + def _obj_type(self): + return "Metric" + def add_variable( self, shape, initializer, dtype=None, aggregation="sum", name=None ): diff --git a/keras/src/metrics/metric_test.py b/keras/src/metrics/metric_test.py index 346e6140c89b..90bd1a9b9a0e 100644 --- a/keras/src/metrics/metric_test.py +++ b/keras/src/metrics/metric_test.py @@ -1,3 +1,5 @@ +import pickle + import numpy as np from keras.src import backend @@ -165,6 +167,11 @@ def test_serialization(self): custom_objects={"ExampleMetric": ExampleMetric}, ) + def test_pickle(self): + metric = metrics_module.get("mse") + reloaded = pickle.loads(pickle.dumps(metric)) + self.assertIsInstance(reloaded, metrics_module.MeanSquaredError) + def test_get_method(self): metric = metrics_module.get("mse") self.assertIsInstance(metric, metrics_module.MeanSquaredError) diff --git a/keras/src/models/functional.py b/keras/src/models/functional.py index 20e2266e00e6..8151e5eb0893 100644 --- a/keras/src/models/functional.py +++ b/keras/src/models/functional.py @@ -178,6 +178,9 @@ def _lock_state(self): # functional DAG. 
pass + def _obj_type(self): + return "Functional" + @property def layers(self): layers = [] diff --git a/keras/src/models/model.py b/keras/src/models/model.py index 3ee5abd4c728..ca12459354f8 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -1,10 +1,8 @@ import inspect -import io import json import typing import warnings -import keras.src.saving.saving_lib as saving_lib from keras.src import backend from keras.src import utils from keras.src.api_export import keras_export @@ -350,30 +348,6 @@ def load_weights(self, filepath, skip_mismatch=False, **kwargs): self, filepath, skip_mismatch=skip_mismatch, **kwargs ) - # Note: renaming this function will cause old pickles to be broken. - # This is probably not a huge deal, as pickle should not be a recommended - # saving format -- it should only be supported for use with distributed - # computing frameworks. - @classmethod - def _unpickle_model(cls, bytesio): - # pickle is not safe regardless of what you do. - return saving_lib._load_model_from_fileobj( - bytesio, custom_objects=None, compile=True, safe_mode=False - ) - - def __reduce__(self): - """__reduce__ is used to customize the behavior of `pickle.pickle()`. - - The method returns a tuple of two elements: a function, and a list of - arguments to pass to that function. In this case we just leverage the - keras saving library.""" - buf = io.BytesIO() - saving_lib._save_model_to_fileobj(self, buf, "h5") - return ( - self._unpickle_model, - (buf,), - ) - def quantize(self, mode): """Quantize the weights of the model. diff --git a/keras/src/models/sequential.py b/keras/src/models/sequential.py index ecdaa4058e8d..9d7b8149e967 100644 --- a/keras/src/models/sequential.py +++ b/keras/src/models/sequential.py @@ -142,6 +142,9 @@ def _lock_state(self): # Unlike other layers, Sequential is mutable after build. pass + def _obj_type(self): + return "Sequential" + def build(self, input_shape=None): if not isinstance(input_shape, (tuple, list)): # Do not attempt to build if the model does not have a single diff --git a/keras/src/models/sequential_test.py b/keras/src/models/sequential_test.py index 12c0703ab45a..6ce5ff5f2c36 100644 --- a/keras/src/models/sequential_test.py +++ b/keras/src/models/sequential_test.py @@ -1,3 +1,5 @@ +import pickle + import numpy as np import pytest @@ -246,6 +248,13 @@ def test_functional_properties(self): self.assertEqual(model.input_shape, (None, 2)) self.assertEqual(model.output_shape, (None, 4)) + def test_pickleable(self): + model = Sequential(name="seq") + model.add(layers.Dense(4)) + + result = pickle.loads(pickle.dumps(model)) + assert len(result.layers) == 1 + def test_bad_layer(self): model = Sequential(name="seq") with self.assertRaisesRegex(ValueError, "Only instances of"): diff --git a/keras/src/optimizers/base_optimizer.py b/keras/src/optimizers/base_optimizer.py index 94ca0ea1ceed..6cb63ae97097 100644 --- a/keras/src/optimizers/base_optimizer.py +++ b/keras/src/optimizers/base_optimizer.py @@ -6,11 +6,12 @@ from keras.src import ops from keras.src.optimizers.schedules import learning_rate_schedule from keras.src.saving import serialization_lib +from keras.src.saving.keras_saveable import KerasSaveable from keras.src.utils import tracking from keras.src.utils.naming import auto_name -class BaseOptimizer: +class BaseOptimizer(KerasSaveable): def __init__( self, learning_rate, @@ -814,6 +815,9 @@ def finalize_variable_values(self, var_list): # optimizer. 
self._overwrite_model_variables_with_average_value(var_list) + + def _obj_type(self): + return "Optimizer" + def get_config(self): """Returns the config of the optimizer.
diff --git a/keras/src/optimizers/optimizer_test.py b/keras/src/optimizers/optimizer_test.py index 6ab982d25d2e..5706633a62ad 100644 --- a/keras/src/optimizers/optimizer_test.py +++ b/keras/src/optimizers/optimizer_test.py @@ -1,7 +1,9 @@ import os +import pickle import numpy as np import pytest +from absl.testing import parameterized from keras.src import backend from keras.src import constraints @@ -11,7 +13,7 @@ from keras.src import testing -class OptimizerTest(testing.TestCase): +class OptimizerTest(testing.TestCase, parameterized.TestCase): def test_iterations_counter(self): v = backend.Variable([[1.0, 2.0], [3.0, 4.0]]) grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]]) @@ -318,3 +320,24 @@ def test_setting_lr_to_callable_untracks_lr_var(self): adam.learning_rate, 4 ) self.assertLen(adam.variables, 1) + + @parameterized.parameters( + [ + ("adam",), + ("sgd",), + ("adamw",), + ("adagrad",), + ("rmsprop",), + ("adadelta",), + ("adamax",), + ("lion",), + ("nadam",), + ("ftrl",), + ("adafactor",), + ] + ) + def test_pickleable_optimizers(self, optimizer): + optimizer = optimizers.get(optimizer) + reloaded = pickle.loads(pickle.dumps(optimizer)) + + self.assertEqual(optimizer.get_config(), reloaded.get_config())
diff --git a/keras/src/saving/keras_saveable.py b/keras/src/saving/keras_saveable.py new file mode 100644 index 000000000000..7fc536b470cb --- /dev/null +++ b/keras/src/saving/keras_saveable.py @@ -0,0 +1,38 @@ +import io + + +class KerasSaveable: + # Note: renaming this function will cause old pickles to be broken. + # This is probably not a huge deal, as pickle should not be a recommended + # saving format -- it should only be supported for use with distributed + # computing frameworks. + + def _obj_type(self): + raise NotImplementedError( + "KerasSaveable subclasses must provide an " + "implementation for `_obj_type()`" + ) + + @classmethod + def _unpickle_model(cls, bytesio): + import keras.src.saving.saving_lib as saving_lib + + # pickle is not safe regardless of what you do. + return saving_lib._load_model_from_fileobj( + bytesio, custom_objects=None, compile=True, safe_mode=False + ) + + def __reduce__(self): + """__reduce__ is used to customize the behavior of `pickle.dumps()`. + + The method returns a tuple of two elements: a function, and a list of + arguments to pass to that function.
In this case we just leverage the + keras saving library.""" + import keras.src.saving.saving_lib as saving_lib + + buf = io.BytesIO() + saving_lib._save_model_to_fileobj(self, buf, "h5") + return ( + self._unpickle_model, + (buf,), + ) diff --git a/keras/src/saving/saving_lib.py b/keras/src/saving/saving_lib.py index 7de68802e91c..accd9c02efd4 100644 --- a/keras/src/saving/saving_lib.py +++ b/keras/src/saving/saving_lib.py @@ -319,23 +319,16 @@ def _name_key(name): def _walk_trackable(trackable): - from keras.src.models import Functional - from keras.src.models import Sequential - - if isinstance(trackable, Sequential): - obj_type = "Sequential" - elif isinstance(trackable, Functional): - obj_type = "Functional" - elif isinstance(trackable, Layer): - obj_type = "Layer" - elif isinstance(trackable, Optimizer): - obj_type = "Optimizer" - elif isinstance(trackable, Metric): - obj_type = "Metric" - elif isinstance(trackable, Loss): - obj_type = "Loss" - else: - raise ValueError(f"Invalid obj_type: {obj_type}") + from keras.src.saving.keras_saveable import KerasSaveable + + if not isinstance(trackable, KerasSaveable): + raise ValueError( + "Expected `trackable` to be an " + "instance of `KerasSaveable`, but " + f"got {trackable=}." + ) + + obj_type = trackable._obj_type() attr_skiplist = get_attr_skiplist(obj_type) # Save all layers directly tracked by Sequential and Functional first. @@ -793,7 +786,13 @@ def get_attr_skiplist(obj_type): ref_obj = Loss() skiplist += dir(ref_obj) else: - raise ValueError(f"Invalid obj_type: {obj_type}") + raise ValueError( + f"get_attr_skiplist got invalid {obj_type=}. " + "Accepted values for `obj_type` are " + "['Layer', 'Functional', 'Sequential', 'Metric', " + "'Optimizer', 'Loss']" + ) + global_state.set_global_attribute( f"saving_attr_skiplist_{obj_type}", skiplist ) From 3382b31f114049ccf0d01ef1d1acad051a8d1bbb Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Wed, 24 Apr 2024 12:52:25 -0700 Subject: [PATCH 063/101] Saveable logic touchups --- keras/src/models/model.py | 4 +- keras/src/models/variable_mapping.py | 45 +++--- keras/src/saving/saving_lib.py | 200 +++++++++++++-------------- 3 files changed, 123 insertions(+), 126 deletions(-) diff --git a/keras/src/models/model.py b/keras/src/models/model.py index ca12459354f8..3039275c7500 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -7,7 +7,7 @@ from keras.src import utils from keras.src.api_export import keras_export from keras.src.layers.layer import Layer -from keras.src.models.variable_mapping import map_trackable_variables +from keras.src.models.variable_mapping import map_saveable_variables from keras.src.saving import saving_api from keras.src.trainers import trainer as base_trainer from keras.src.utils import summary_utils @@ -544,7 +544,7 @@ def from_config(cls, config, custom_objects=None): def _get_variable_map(self): store = {} - map_trackable_variables(self, store=store, visited_trackables=set()) + map_saveable_variables(self, store=store, visited_saveables=set()) return store diff --git a/keras/src/models/variable_mapping.py b/keras/src/models/variable_mapping.py index ed9deb7340ec..e06ea5b09395 100644 --- a/keras/src/models/variable_mapping.py +++ b/keras/src/models/variable_mapping.py @@ -2,24 +2,25 @@ from keras.src.metrics.metric import Metric from keras.src.optimizers.optimizer import Optimizer from keras.src.saving import saving_lib +from keras.src.saving.keras_saveable import KerasSaveable -def map_trackable_variables(trackable, store, 
visited_trackables): - # If the trackable has already been saved, skip it. - if id(trackable) in visited_trackables: +def map_saveable_variables(saveable, store, visited_saveables): + # If the saveable has already been seen, skip it. + if id(saveable) in visited_saveables: return - visited_trackables.add(id(trackable)) + visited_saveables.add(id(saveable)) variables = [] - if isinstance(trackable, Layer): + if isinstance(saveable, Layer): variables = ( - trackable._trainable_variables + trackable._non_trainable_variables + saveable._trainable_variables + saveable._non_trainable_variables ) - elif isinstance(trackable, Optimizer): - variables = trackable._variables - elif isinstance(trackable, Metric): - variables = trackable._variables + elif isinstance(saveable, Optimizer): + variables = saveable._variables + elif isinstance(saveable, Metric): + variables = saveable._variables for v in variables: if v.path in store: raise ValueError( @@ -31,30 +32,30 @@ def map_trackable_variables(trackable, store, visited_trackables): ) store[v.path] = v - # Recursively save state of children trackables (layers, optimizers, etc.) - for child_attr, child_obj in saving_lib._walk_trackable(trackable): - if saving_lib._is_keras_trackable(child_obj): - map_trackable_variables( + # Recursively save state of children saveables (layers, optimizers, etc.) + for child_attr, child_obj in saving_lib._walk_saveable(saveable): + if isinstance(child_obj, KerasSaveable): + map_saveable_variables( child_obj, store, - visited_trackables=visited_trackables, + visited_saveables=visited_saveables, ) elif isinstance(child_obj, (list, dict, tuple, set)): map_container_variables( child_obj, store, - visited_trackables=visited_trackables, + visited_saveables=visited_saveables, ) -def map_container_variables(container, store, visited_trackables): +def map_container_variables(container, store, visited_saveables): if isinstance(container, dict): container = list(container.values()) - for trackable in container: - if saving_lib._is_keras_trackable(trackable): - map_trackable_variables( - trackable, + for saveable in container: + if isinstance(saveable, KerasSaveable): + map_saveable_variables( + saveable, store, - visited_trackables=visited_trackables, + visited_saveables=visited_saveables, ) diff --git a/keras/src/saving/saving_lib.py b/keras/src/saving/saving_lib.py index accd9c02efd4..c16d2ffafb4a 100644 --- a/keras/src/saving/saving_lib.py +++ b/keras/src/saving/saving_lib.py @@ -41,16 +41,16 @@ def save_model(model, filepath, weights_format="h5"): The zip-based archive contains the following structure: - JSON-based configuration file (config.json): Records of model, layer, and - other trackables' configuration. - - H5-based trackable state files, found in respective directories, such as + other saveables' configuration. + - H5-based saveable state files, found in respective directories, such as model/states.npz, model/dense_layer/states.npz, etc. - Metadata file. - The states of Keras trackables (layers, optimizers, loss, and metrics) are + The states of Keras saveables (layers, optimizers, loss, and metrics) are automatically saved as long as they can be discovered through the attributes returned by `dir(Model)`. Typically, the state includes the variables - associated with the trackable, but some specially purposed layers may - contain more such as the vocabularies stored in the hashmaps. 
The trackables + associated with the saveable, but some specially purposed layers may + contain more such as the vocabularies stored in the hashmaps. The saveables define how their states are saved by exposing `save_state()` and `load_state()` APIs. @@ -129,7 +129,7 @@ def _save_model_to_fileobj(model, fileobj, weights_format): weights_store=weights_store, assets_store=asset_store, inner_path="", - visited_trackables=set(), + visited_saveables=set(), ) weights_store.close() asset_store.close() @@ -188,22 +188,22 @@ def _load_model_from_fileobj(fileobj, custom_objects, compile, safe_mode): else: asset_store = None - failed_trackables = set() + failed_saveables = set() error_msgs = {} _load_state( model, weights_store=weights_store, assets_store=asset_store, inner_path="", - visited_trackables=set(), - failed_trackables=failed_trackables, + visited_saveables=set(), + failed_saveables=failed_saveables, error_msgs=error_msgs, ) weights_store.close() if asset_store: asset_store.close() - if failed_trackables: + if failed_saveables: _raise_loading_failure(error_msgs) return model @@ -223,15 +223,15 @@ def save_weights_only(model, filepath, objects_to_skip=None): ) weights_store = H5IOStore(filepath, mode="w") if objects_to_skip is not None: - visited_trackables = set(id(o) for o in objects_to_skip) + visited_saveables = set(id(o) for o in objects_to_skip) else: - visited_trackables = set() + visited_saveables = set() _save_state( model, weights_store=weights_store, assets_store=None, inner_path="", - visited_trackables=visited_trackables, + visited_saveables=visited_saveables, ) weights_store.close() @@ -254,11 +254,11 @@ def load_weights_only( _VARS_FNAME + ".h5", archive=archive, mode="r" ) - failed_trackables = set() + failed_saveables = set() if objects_to_skip is not None: - visited_trackables = set(id(o) for o in objects_to_skip) + visited_saveables = set(id(o) for o in objects_to_skip) else: - visited_trackables = set() + visited_saveables = set() error_msgs = {} _load_state( model, @@ -266,25 +266,25 @@ def load_weights_only( assets_store=None, inner_path="", skip_mismatch=skip_mismatch, - visited_trackables=visited_trackables, - failed_trackables=failed_trackables, + visited_saveables=visited_saveables, + failed_saveables=failed_saveables, error_msgs=error_msgs, ) weights_store.close() if archive: archive.close() - if failed_trackables: + if failed_saveables: _raise_loading_failure(error_msgs, warn_only=skip_mismatch) def _raise_loading_failure(error_msgs, warn_only=False): first_key = list(error_msgs.keys())[0] - ex_trackable, ex_error = error_msgs[first_key] + ex_saveable, ex_error = error_msgs[first_key] msg = ( f"A total of {len(error_msgs)} objects could not " "be loaded. Example error message for " - f"object {ex_trackable}:\n\n" + f"object {ex_saveable}:\n\n" f"{ex_error}\n\n" "List of objects that could not be loaded:\n" f"{[x[0] for x in error_msgs.values()]}" @@ -318,30 +318,30 @@ def _name_key(name): return name -def _walk_trackable(trackable): +def _walk_saveable(saveable): from keras.src.saving.keras_saveable import KerasSaveable - if not isinstance(trackable, KerasSaveable): + if not isinstance(saveable, KerasSaveable): raise ValueError( - "Expected `trackable` to be an " + "Expected object to be an " "instance of `KerasSaveable`, but " - f"got {trackable=}." 
+ f"got {saveable} of type {type(saveable)}" ) - obj_type = trackable._obj_type() + obj_type = saveable._obj_type() attr_skiplist = get_attr_skiplist(obj_type) # Save all layers directly tracked by Sequential and Functional first. # This helps avoid ordering concerns for subclassed Sequential or Functional # models with extra attributes--the internal Keras state take precedence. if obj_type in ("Sequential", "Functional"): - yield "layers", trackable.layers + yield "layers", saveable.layers - for child_attr in sorted(dir(trackable), key=lambda x: _name_key(x)): + for child_attr in sorted(dir(saveable), key=lambda x: _name_key(x)): if child_attr.startswith("__") or child_attr in attr_skiplist: continue try: - child_obj = getattr(trackable, child_attr) + child_obj = getattr(saveable, child_attr) except Exception: # Avoid raising the exception when visiting the attributes. continue @@ -349,26 +349,28 @@ def _walk_trackable(trackable): def _save_state( - trackable, + saveable, weights_store, assets_store, inner_path, - visited_trackables, + visited_saveables, ): - # If the trackable has already been saved, skip it. - if id(trackable) in visited_trackables: + from keras.src.saving.keras_saveable import KerasSaveable + + # If the saveable has already been saved, skip it. + if id(saveable) in visited_saveables: return - if hasattr(trackable, "save_own_variables") and weights_store: - trackable.save_own_variables(weights_store.make(inner_path)) - if hasattr(trackable, "save_assets") and assets_store: - trackable.save_assets(assets_store.make(inner_path)) + if hasattr(saveable, "save_own_variables") and weights_store: + saveable.save_own_variables(weights_store.make(inner_path)) + if hasattr(saveable, "save_assets") and assets_store: + saveable.save_assets(assets_store.make(inner_path)) - visited_trackables.add(id(trackable)) + visited_saveables.add(id(saveable)) - # Recursively save state of children trackables (layers, optimizers, etc.) - for child_attr, child_obj in _walk_trackable(trackable): - if _is_keras_trackable(child_obj): + # Recursively save state of children saveables (layers, optimizers, etc.) 
+ for child_attr, child_obj in _walk_saveable(saveable): + if isinstance(child_obj, KerasSaveable): _save_state( child_obj, weights_store, @@ -376,7 +378,7 @@ def _save_state( inner_path=file_utils.join(inner_path, child_attr).replace( "\\", "/" ), - visited_trackables=visited_trackables, + visited_saveables=visited_saveables, ) elif isinstance(child_obj, (list, dict, tuple, set)): _save_container_state( @@ -386,55 +388,57 @@ def _save_state( inner_path=file_utils.join(inner_path, child_attr).replace( "\\", "/" ), - visited_trackables=visited_trackables, + visited_saveables=visited_saveables, ) def _load_state( - trackable, + saveable, weights_store, assets_store, inner_path, skip_mismatch=False, - visited_trackables=None, - failed_trackables=None, + visited_saveables=None, + failed_saveables=None, error_msgs=None, ): - if visited_trackables and id(trackable) in visited_trackables: + from keras.src.saving.keras_saveable import KerasSaveable + + if visited_saveables and id(saveable) in visited_saveables: return failure = False - if hasattr(trackable, "load_own_variables") and weights_store: - if skip_mismatch or failed_trackables is not None: + if hasattr(saveable, "load_own_variables") and weights_store: + if skip_mismatch or failed_saveables is not None: try: - trackable.load_own_variables(weights_store.get(inner_path)) + saveable.load_own_variables(weights_store.get(inner_path)) except Exception as e: - failed_trackables.add(id(trackable)) - error_msgs[id(trackable)] = trackable, e + failed_saveables.add(id(saveable)) + error_msgs[id(saveable)] = saveable, e failure = True else: - trackable.load_own_variables(weights_store.get(inner_path)) + saveable.load_own_variables(weights_store.get(inner_path)) - if hasattr(trackable, "load_assets") and assets_store: - if skip_mismatch or failed_trackables is not None: + if hasattr(saveable, "load_assets") and assets_store: + if skip_mismatch or failed_saveables is not None: try: - trackable.load_assets(assets_store.get(inner_path)) + saveable.load_assets(assets_store.get(inner_path)) except Exception as e: - failed_trackables.add(id(trackable)) - error_msgs[id(trackable)] = trackable, e + failed_saveables.add(id(saveable)) + error_msgs[id(saveable)] = saveable, e failure = True else: - trackable.load_assets(assets_store.get(inner_path)) + saveable.load_assets(assets_store.get(inner_path)) - if failed_trackables is not None: - currently_failed = len(failed_trackables) + if failed_saveables is not None: + currently_failed = len(failed_saveables) else: currently_failed = 0 - # Recursively load states for Keras trackables such as layers/optimizers. - for child_attr, child_obj in _walk_trackable(trackable): - if _is_keras_trackable(child_obj): + # Recursively load states for Keras saveables such as layers/optimizers. 
+ for child_attr, child_obj in _walk_saveable(saveable): + if isinstance(child_obj, KerasSaveable): _load_state( child_obj, weights_store, @@ -443,8 +447,8 @@ def _load_state( "\\", "/" ), skip_mismatch=skip_mismatch, - visited_trackables=visited_trackables, - failed_trackables=failed_trackables, + visited_saveables=visited_saveables, + failed_saveables=failed_saveables, error_msgs=error_msgs, ) elif isinstance(child_obj, (list, dict, tuple, set)): @@ -456,48 +460,50 @@ def _load_state( "\\", "/" ), skip_mismatch=skip_mismatch, - visited_trackables=visited_trackables, - failed_trackables=failed_trackables, + visited_saveables=visited_saveables, + failed_saveables=failed_saveables, error_msgs=error_msgs, ) - if failed_trackables is not None: - newly_failed = len(failed_trackables) - currently_failed + if failed_saveables is not None: + newly_failed = len(failed_saveables) - currently_failed else: newly_failed = 0 if not failure: - if visited_trackables is not None and newly_failed <= 0: - visited_trackables.add(id(trackable)) - if id(trackable) in failed_trackables: - failed_trackables.remove(id(trackable)) - error_msgs.pop(id(trackable)) + if visited_saveables is not None and newly_failed <= 0: + visited_saveables.add(id(saveable)) + if id(saveable) in failed_saveables: + failed_saveables.remove(id(saveable)) + error_msgs.pop(id(saveable)) def _save_container_state( - container, weights_store, assets_store, inner_path, visited_trackables + container, weights_store, assets_store, inner_path, visited_saveables ): + from keras.src.saving.keras_saveable import KerasSaveable + used_names = {} if isinstance(container, dict): container = list(container.values()) - for trackable in container: - if _is_keras_trackable(trackable): - # Do NOT address the trackable via `trackable.name`, since + for saveable in container: + if isinstance(saveable, KerasSaveable): + # Do NOT address the saveable via `saveable.name`, since # names are usually autogenerated and thus not reproducible # (i.e. they may vary across two instances of the same model). 
- name = naming.to_snake_case(trackable.__class__.__name__) + name = naming.to_snake_case(saveable.__class__.__name__) if name in used_names: used_names[name] += 1 name = f"{name}_{used_names[name]}" else: used_names[name] = 0 _save_state( - trackable, + saveable, weights_store, assets_store, inner_path=file_utils.join(inner_path, name).replace("\\", "/"), - visited_trackables=visited_trackables, + visited_saveables=visited_saveables, ) @@ -507,30 +513,32 @@ def _load_container_state( assets_store, inner_path, skip_mismatch, - visited_trackables, - failed_trackables, + visited_saveables, + failed_saveables, error_msgs, ): + from keras.src.saving.keras_saveable import KerasSaveable + used_names = {} if isinstance(container, dict): container = list(container.values()) - for trackable in container: - if _is_keras_trackable(trackable): - name = naming.to_snake_case(trackable.__class__.__name__) + for saveable in container: + if isinstance(saveable, KerasSaveable): + name = naming.to_snake_case(saveable.__class__.__name__) if name in used_names: used_names[name] += 1 name = f"{name}_{used_names[name]}" else: used_names[name] = 0 _load_state( - trackable, + saveable, weights_store, assets_store, inner_path=file_utils.join(inner_path, name).replace("\\", "/"), skip_mismatch=skip_mismatch, - visited_trackables=visited_trackables, - failed_trackables=failed_trackables, + visited_saveables=visited_saveables, + failed_saveables=failed_saveables, error_msgs=error_msgs, ) @@ -797,15 +805,3 @@ def get_attr_skiplist(obj_type): f"saving_attr_skiplist_{obj_type}", skiplist ) return skiplist - - -def _is_keras_trackable(obj): - return isinstance( - obj, - ( - Layer, - Optimizer, - Metric, - Loss, - ), - ) From e123d898349fa768f515544d0b670d0c3d56bcba Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Wed, 24 Apr 2024 13:45:01 -0700 Subject: [PATCH 064/101] Add slogdet op. 
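This patch adds a `slogdet` op with implementations for the JAX, NumPy,
TensorFlow, and torch backends; the public exports under `keras.ops` land in
the follow-up API-update patch. As a rough usage sketch (illustrative only,
not part of the diff below, and assuming the op is exported as
`keras.ops.slogdet`):

    import numpy as np
    from keras import ops

    # diag(2, 3) has determinant 6, so the expected sign is 1.0 and
    # the expected log|det| is log(6), about 1.7918.
    x = np.array([[2.0, 0.0], [0.0, 3.0]])
    sign, logabsdet = ops.slogdet(x)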
--- keras/src/backend/jax/numpy.py | 5 + keras/src/backend/numpy/numpy.py | 4 + keras/src/backend/tensorflow/numpy.py | 5 + keras/src/backend/torch/numpy.py | 5 + keras/src/ops/numpy.py | 179 +++++--------------------- keras/src/ops/numpy_test.py | 11 ++ 6 files changed, 64 insertions(+), 145 deletions(-) diff --git a/keras/src/backend/jax/numpy.py b/keras/src/backend/jax/numpy.py index 3e69cb0cf02f..b202f35e1661 100644 --- a/keras/src/backend/jax/numpy.py +++ b/keras/src/backend/jax/numpy.py @@ -1162,3 +1162,8 @@ def correlate(x1, x2, mode="valid"): def select(condlist, choicelist, default=0): return jnp.select(condlist, choicelist, default=default) + + +def slogdet(x): + x = convert_to_tensor(x) + return tuple(jnp.linalg.slogdet(x)) diff --git a/keras/src/backend/numpy/numpy.py b/keras/src/backend/numpy/numpy.py index f3090a4d6163..939cf6caece3 100644 --- a/keras/src/backend/numpy/numpy.py +++ b/keras/src/backend/numpy/numpy.py @@ -1098,3 +1098,7 @@ def correlate(x1, x2, mode="valid"): def select(condlist, choicelist, default=0): return np.select(condlist, choicelist, default=default) + + +def slogdet(x): + return tuple(np.linalg.slogdet(x)) diff --git a/keras/src/backend/tensorflow/numpy.py b/keras/src/backend/tensorflow/numpy.py index f69b9883af06..60462d3c1997 100644 --- a/keras/src/backend/tensorflow/numpy.py +++ b/keras/src/backend/tensorflow/numpy.py @@ -2430,3 +2430,8 @@ def correlate(x1, x2, mode="valid"): def select(condlist, choicelist, default=0): return tf.experimental.numpy.select(condlist, choicelist, default=default) + + +def slogdet(x): + x = convert_to_tensor(x) + return tuple(tf.linalg.slogdet(x)) diff --git a/keras/src/backend/torch/numpy.py b/keras/src/backend/torch/numpy.py index 9438ed07dac5..53b22a8db54c 100644 --- a/keras/src/backend/torch/numpy.py +++ b/keras/src/backend/torch/numpy.py @@ -1608,3 +1608,8 @@ def select(condlist, choicelist, default=0): for c, v in reversed(list(zip(condlist, choicelist))): out = torch.where(c, v, out) return out + + +def slogdet(x): + x = convert_to_tensor(x) + return tuple(torch.linalg.slogdet(x)) diff --git a/keras/src/ops/numpy.py b/keras/src/ops/numpy.py index 4ad40edc06da..82d99a165c72 100644 --- a/keras/src/ops/numpy.py +++ b/keras/src/ops/numpy.py @@ -1,148 +1,5 @@ -""" -MANIFEST: - -abs -absolute -add -all -amax -amin -append -arange -arccos -arccosh -arcsin -arcsinh -arctan -arctan2 -arctanh -argmax -argmin -argsort -array -average -bincount -broadcast_to -ceil -clip -concatenate -conj -conjugate -copy -correlate -cos -cosh -count_nonzero -cross -cumprod -cumsum -diag -diagonal -diff -digitize -divide -dot -dtype -einsum -empty -equal -exp -expand_dims -expm1 -eye -flip -floor -full -full_like -greater -greater_equal -hstack -identity -imag -interp -isclose -isfinite -isinf -isnan -less -less_equal -linspace -log -log10 -log1p -log2 -logaddexp -logical_and -logical_not -logical_or -logspace -matmul -max -maximum -mean -median -meshgrid -mgrid -min -minimum -mod -moveaxis -multiply -nan_to_num -ndim -nonzero -not_equal -ones -ones_like -outer -pad -percentile -power -prod -quantile -ravel -real -reciprocal -repeat -reshape -roll -round -sign -sin -sinh -size -sort -split -sqrt -square -squeeze -stack -std -subtract -sum -swapaxes -take -take_along_axis -tan -tanh -tensordot -tile -trace -transpose -tri -tril -triu -true_divide -vdot -vstack -where -zeros -zeros_like - - -""" - import builtins +import collections import re import numpy as np @@ -6247,7 +6104,7 @@ def __init__(self): super().__init__() def call(self, 
condlist, choicelist, default=0):
-        return backend.numpy.correlate(condlist, choicelist, default)
+        return backend.numpy.select(condlist, choicelist, default)

     def compute_output_spec(self, condlist, choicelist, default=0):
         first_element = choicelist[0]
@@ -6303,3 +6160,35 @@ def select(condlist, choicelist, default=0):
     if any_symbolic_tensors(condlist + choicelist + [default]):
         return Select().symbolic_call(condlist, choicelist, default)
     return backend.numpy.select(condlist, choicelist, default)
+
+
+class Slogdet(Operation):
+    def __init__(self):
+        super().__init__()
+
+    def call(self, x):
+        return backend.numpy.slogdet(x)
+
+    def compute_output_spec(self, x):
+        sign = KerasTensor((), dtype=x.dtype)
+        logabsdet = KerasTensor((), dtype=x.dtype)
+        return (sign, logabsdet)
+
+
+@keras_export(["keras.ops.slogdet", "keras.ops.numpy.slogdet"])
+def slogdet(x):
+    """Compute the sign and natural logarithm of the determinant of a matrix.
+
+    Args:
+        x: Input matrix. It must be 2D and square.
+
+    Returns:
+        A tuple `(sign, logabsdet)`. `sign` is a number representing
+        the sign of the determinant. For a real matrix, this is 1, 0, or -1.
+        For a complex matrix, this is a complex number with absolute value 1
+        (i.e., it is on the unit circle), or else 0.
+        `logabsdet` is the natural log of the absolute value of the determinant.
+    """
+    if any_symbolic_tensors((x,)):
+        return Slogdet().symbolic_call(x)
+    return backend.numpy.slogdet(x)
diff --git a/keras/src/ops/numpy_test.py b/keras/src/ops/numpy_test.py
index f2046c3907d1..5df2980fe475 100644
--- a/keras/src/ops/numpy_test.py
+++ b/keras/src/ops/numpy_test.py
@@ -4250,6 +4250,17 @@ def test_select(self):
         y = knp.select(condlist, choicelist, 42)
         self.assertEqual(y.shape, (6,))

+    def test_slogdet(self):
+        x = np.ones((4, 4)) * 2.0
+        out = knp.slogdet(x)
+        self.assertAllClose(out[0], 0)
+        self.assertAllClose(out[0], 0)
+
+        x = backend.KerasTensor((3, 3))
+        out = knp.slogdet(x)
+        self.assertEqual(out[0].shape, ())
+        self.assertEqual(out[1].shape, ())
+
     def test_nan_to_num(self):
         x = knp.array([1.0, np.nan, np.inf, -np.inf])
         self.assertAllClose(

From c3c035d5fc72e37deb1d5fe8c20d8ab4608bdf55 Mon Sep 17 00:00:00 2001
From: Francois Chollet
Date: Wed, 24 Apr 2024 14:14:23 -0700
Subject: [PATCH 065/101] Update APIs

---
 keras/api/_tf_keras/keras/ops/__init__.py       | 1 +
 keras/api/_tf_keras/keras/ops/numpy/__init__.py | 1 +
 keras/api/ops/__init__.py                       | 1 +
 keras/api/ops/numpy/__init__.py                 | 1 +
 4 files changed, 4 insertions(+)

diff --git a/keras/api/_tf_keras/keras/ops/__init__.py b/keras/api/_tf_keras/keras/ops/__init__.py
index dec15361c0ae..118c5a692eea 100644
--- a/keras/api/_tf_keras/keras/ops/__init__.py
+++ b/keras/api/_tf_keras/keras/ops/__init__.py
@@ -196,6 +196,7 @@
 from keras.src.ops.numpy import sin
 from keras.src.ops.numpy import sinh
 from keras.src.ops.numpy import size
+from keras.src.ops.numpy import slogdet
 from keras.src.ops.numpy import sort
 from keras.src.ops.numpy import split
 from keras.src.ops.numpy import sqrt
diff --git a/keras/api/_tf_keras/keras/ops/numpy/__init__.py b/keras/api/_tf_keras/keras/ops/numpy/__init__.py
index 7cb19b551198..d0ca72fa9d59 100644
--- a/keras/api/_tf_keras/keras/ops/numpy/__init__.py
+++ b/keras/api/_tf_keras/keras/ops/numpy/__init__.py
@@ -117,6 +117,7 @@
 from keras.src.ops.numpy import sin
 from keras.src.ops.numpy import sinh
 from keras.src.ops.numpy import size
+from keras.src.ops.numpy import slogdet
 from keras.src.ops.numpy import sort
 from keras.src.ops.numpy import split
 from keras.src.ops.numpy import sqrt
diff --git a/keras/api/ops/__init__.py b/keras/api/ops/__init__.py index dec15361c0ae..118c5a692eea 100644 --- a/keras/api/ops/__init__.py +++ b/keras/api/ops/__init__.py @@ -196,6 +196,7 @@ from keras.src.ops.numpy import sin from keras.src.ops.numpy import sinh from keras.src.ops.numpy import size +from keras.src.ops.numpy import slogdet from keras.src.ops.numpy import sort from keras.src.ops.numpy import split from keras.src.ops.numpy import sqrt diff --git a/keras/api/ops/numpy/__init__.py b/keras/api/ops/numpy/__init__.py index 7cb19b551198..d0ca72fa9d59 100644 --- a/keras/api/ops/numpy/__init__.py +++ b/keras/api/ops/numpy/__init__.py @@ -117,6 +117,7 @@ from keras.src.ops.numpy import sin from keras.src.ops.numpy import sinh from keras.src.ops.numpy import size +from keras.src.ops.numpy import slogdet from keras.src.ops.numpy import sort from keras.src.ops.numpy import split from keras.src.ops.numpy import sqrt From 7e22cadb8d75f84dff09e2354496ecae353c7d03 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Wed, 24 Apr 2024 14:29:24 -0700 Subject: [PATCH 066/101] Remove unused import --- keras/src/ops/numpy.py | 1 - 1 file changed, 1 deletion(-) diff --git a/keras/src/ops/numpy.py b/keras/src/ops/numpy.py index 82d99a165c72..62428df7cb9b 100644 --- a/keras/src/ops/numpy.py +++ b/keras/src/ops/numpy.py @@ -1,5 +1,4 @@ import builtins -import collections import re import numpy as np From 63586fa698cad7005f561fcdbb5ce590fb2484b1 Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Fri, 26 Apr 2024 00:57:01 +0800 Subject: [PATCH 067/101] Refactor CTC APIs (#19611) * Add `ctc_loss` and `ctc_decode` for numpy backend, improve imports and tests * Support "beam_search" strategy for torch's `ctc_decode` * Improve `ctc_loss` * Cleanup * Refactor `ctc_decode` * Update docstring * Update docstring * Add `CTCDecode` operation and ensure dtype inference of `ctc_decode` * Fix `name` of `losses.CTC` --- keras/src/backend/jax/nn.py | 250 ++++++++++--------- keras/src/backend/numpy/nn.py | 380 +++++++++++++++++++++++++++-- keras/src/backend/tensorflow/nn.py | 95 ++++---- keras/src/backend/torch/nn.py | 113 +++++++-- keras/src/losses/losses.py | 2 +- keras/src/ops/nn.py | 85 +++++-- keras/src/ops/nn_test.py | 194 ++++++++++++--- 7 files changed, 866 insertions(+), 253 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index 9b612760db3d..7fc623d831cb 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -1,16 +1,16 @@ +import builtins +import math + import jax import jax.experimental.sparse as jax_sparse import jax.numpy as jnp -import numpy as np from jax import lax from jax import nn as jnn -from keras.src.backend import standardize_data_format -from keras.src.backend import standardize_dtype +from keras.src import backend from keras.src.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_jax, ) -from keras.src.backend.config import epsilon from keras.src.backend.jax.core import cast from keras.src.backend.jax.core import convert_to_tensor @@ -157,7 +157,7 @@ def max_pool( padding="valid", data_format=None, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) num_spatial_dims = inputs.ndim - 2 pool_size = _convert_to_spatial_operand( pool_size, num_spatial_dims, data_format @@ -176,7 +176,7 @@ def average_pool( padding, data_format=None, ): - data_format = standardize_data_format(data_format) + data_format 
= backend.standardize_data_format(data_format) num_spatial_dims = inputs.ndim - 2 pool_size = _convert_to_spatial_operand( pool_size, num_spatial_dims, data_format @@ -189,7 +189,7 @@ def average_pool( pooled = _pool(inputs, 0.0, lax.add, pool_size, strides, padding) if padding == "valid": # Avoid the extra reduce_window. - return pooled / np.prod(pool_size) + return pooled / math.prod(pool_size) else: # Count the number of valid entries at each input point, then use that # for computing average. Assumes that any two arrays of same shape will @@ -242,7 +242,7 @@ def conv( data_format=None, dilation_rate=1, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) num_spatial_dims = inputs.ndim - 2 dimension_numbers = _convert_to_lax_conv_dimension_numbers( num_spatial_dims, @@ -292,7 +292,7 @@ def depthwise_conv( data_format=None, dilation_rate=1, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) num_spatial_dims = inputs.ndim - 2 dimension_numbers = _convert_to_lax_conv_dimension_numbers( num_spatial_dims, @@ -338,7 +338,7 @@ def separable_conv( data_format=None, dilation_rate=1, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) depthwise_conv_output = depthwise_conv( inputs, depthwise_kernel, @@ -366,7 +366,7 @@ def conv_transpose( data_format=None, dilation_rate=1, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) num_spatial_dims = inputs.ndim - 2 padding_values = compute_conv_transpose_padding_args_for_jax( input_shape=inputs.shape, @@ -477,7 +477,7 @@ def categorical_crossentropy(target, output, from_logits=False, axis=-1): log_prob = jax.nn.log_softmax(output, axis=axis) else: output = output / jnp.sum(output, axis, keepdims=True) - output = jnp.clip(output, epsilon(), 1.0 - epsilon()) + output = jnp.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) log_prob = jnp.log(output) return -jnp.sum(target * log_prob, axis=axis) @@ -504,7 +504,7 @@ def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): log_prob = jax.nn.log_softmax(output, axis=axis) else: output = output / jnp.sum(output, axis, keepdims=True) - output = jnp.clip(output, epsilon(), 1.0 - epsilon()) + output = jnp.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) log_prob = jnp.log(output) target = jnn.one_hot(target, output.shape[axis], axis=axis) return -jnp.sum(target * log_prob, axis=axis) @@ -526,7 +526,7 @@ def binary_crossentropy(target, output, from_logits=False): log_neg_logits = jax.nn.log_sigmoid(-output) return -1.0 * target * log_logits - (1.0 - target) * log_neg_logits - output = jnp.clip(output, epsilon(), 1.0 - epsilon()) + output = jnp.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) bce = target * jnp.log(output) bce += (1.0 - target) * jnp.log(1.0 - output) return -bce @@ -541,7 +541,7 @@ def moments(x, axes, keepdims=False, synchronized=False): # workaround, we simply perform the operations on float32 and convert back # to float16 need_cast = False - ori_dtype = standardize_dtype(x.dtype) + ori_dtype = backend.standardize_dtype(x.dtype) if ori_dtype in ("float16", "bfloat16"): need_cast = True x = cast(x, "float32") @@ -586,129 +586,159 @@ def batch_normalization( return jnp.add(x * inv, res) -def ctc_loss( - target, - output, - target_length, - output_length, - mask_index=0, -): - batch_size, _, _ = 
output.shape - batch_size, max_target_length = target.shape +def ctc_loss(target, output, target_length, output_length, mask_index=0): + # Ref: https://github.com/google-deepmind/optax + # optax.ctc_loss_with_forward_probs + target = convert_to_tensor(target, dtype="int32") + output = convert_to_tensor(output) + target_length = convert_to_tensor(target_length, "int32") + output_length = convert_to_tensor(output_length, "int32") + batch_size, _, num_classes = output.shape + batch_size, max_label_length = target.shape + log_epsilon = -1e5 + + # Ensure that the dtype promotion behavior matchs that of `tf.nn.ctc_loss` + dtype = backend.result_type(output.dtype, "float32") + output = cast(output, dtype) + + def _lengths_to_paddings(lengths, max_length): + indices = jnp.arange(max_length).reshape( + (1,) * lengths.ndim + (max_length,) + ) + lengths = jnp.expand_dims(lengths, axis=-1) + elem_valid = indices < lengths + return jnp.logical_not(elem_valid) - output = output.transpose((1, 0, 2)) - target = target.transpose((1, 0)).astype("int32") + target_paddings = _lengths_to_paddings(target_length, max_label_length) + output_paddings = _lengths_to_paddings(output_length, max_label_length) + target_paddings = target_paddings.astype(output.dtype) + output_paddings = output_paddings.astype(output.dtype) - logits = jnn.log_softmax(output) - mgrid_t, mgrid_b = jnp.meshgrid( - jnp.arange(max_target_length), jnp.arange(batch_size) + logprobs = jnn.log_softmax(output) + label_lengths = max_label_length - jnp.sum(target_paddings, axis=1).astype( + jnp.int32 ) - logprobs_emit = logits[mgrid_t, mgrid_b, target[:, :, None]] - logprobs_mask = logits[:, :, mask_index] - logit_paddings = jnp.array( - jnp.arange(max_target_length) < output_length[:, None], - dtype=jnp.float32, - ) + # repeat[b, n] == 1.0 when label[b, n] == label[b, n+1]. + repeat = (target[:, :-1] == target[:, 1:]).astype(jnp.float32) + repeat = jnp.pad(repeat, ((0, 0), (0, 1))) - repeat = jnp.array(target[1:] == target[:-1]) - repeat = jnp.pad(repeat, ((0, 1), (0, 0))).transpose((1, 0)) + logprobs_phi = logprobs[:, :, mask_index : mask_index + 1] # [B, T, 1] + logprobs_phi = jnp.transpose(logprobs_phi, (1, 0, 2)) # [T, B, 1] - _logepsilon = -100000.0 + _one_hot = jax.nn.one_hot(target, num_classes=num_classes) # [B, N, K] + logprobs_emit = jnp.einsum("btk,bnk->btn", logprobs, _one_hot) + logprobs_emit = jnp.transpose(logprobs_emit, (1, 0, 2)) # [T, B, N] - def _iterate(prev, x): - prev_mask, prev_emit = prev - logprob_mask, logprob_emit, pad = x + # [B, N] + logalpha_phi_init = ( + jnp.ones((batch_size, max_label_length + 1), dtype=output.dtype) + * log_epsilon + ) + logalpha_phi_init = logalpha_phi_init.at[:, 0].set(0.0) + logalpha_emit_init = ( + jnp.ones((batch_size, max_label_length), dtype=output.dtype) + * log_epsilon + ) - prev_mask_orig = prev_mask - prev_mask = prev_mask.at[:, 1:].set( - jnp.logaddexp(prev_mask[:, 1:], prev_emit + _logepsilon * repeat), - ) - emit = jnp.logaddexp( - prev_mask[:, :-1] + logprob_emit, prev_emit + logprob_emit + def update_phi_score(phi, added_score): + # Update `phi[:, 1:]`` with adding `added_score` in log space. 
+ return jnp.concatenate( + [phi[:, :1], jnp.logaddexp(phi[:, 1:], added_score)], axis=-1 ) - mask = prev_mask + logprob_mask[:, None] - mask = mask.at[:, 1:].set( - jnp.logaddexp( - mask[:, 1:], - prev_emit + logprob_mask[:, None] + _logepsilon * (1 - repeat), - ) - ) + def loop_body(prev, x): + prev_phi, prev_emit = prev + # emit-to-phi epsilon transition, except if the next label is repetition + prev_phi_orig = prev_phi + prev_phi = update_phi_score(prev_phi, prev_emit + log_epsilon * repeat) - pad = pad[:, None] - emit = emit * pad + prev_emit * (1 - pad) - mask = mask * pad + prev_mask_orig * (1 - pad) + logprob_emit, logprob_phi, pad = x - return (mask, emit), (mask, emit) + # phi-to-emit transition + next_emit = jnp.logaddexp( + prev_phi[:, :-1] + logprob_emit, prev_emit + logprob_emit + ) + # self-loop transition + next_phi = prev_phi + logprob_phi + # emit-to-phi blank transition only when the next label is repetition + next_phi = update_phi_score( + next_phi, prev_emit + logprob_phi + log_epsilon * (1.0 - repeat) + ) - mask_init = jnp.full((batch_size, max_target_length + 1), _logepsilon) - mask_init = mask_init.at[:, 0].set(0.0) - emit_init = jnp.full((batch_size, max_target_length), _logepsilon) + pad = pad.reshape((batch_size, 1)) + next_emit = pad * prev_emit + (1.0 - pad) * next_emit + next_phi = pad * prev_phi_orig + (1.0 - pad) * next_phi - _, (alphas_mask, alphas_emit) = lax.scan( - _iterate, - (mask_init, emit_init), - (logprobs_mask, logprobs_emit, logit_paddings.transpose()), - ) + return (next_phi, next_emit), (next_phi, next_emit) - last_alpha_mask = ( - alphas_mask[-1] - .at[:, 1:] - .set(jnp.logaddexp(alphas_mask[-1, :, 1:], alphas_emit[-1])) + xs = (logprobs_emit, logprobs_phi, output_paddings.transpose((1, 0))) + _, (logalpha_phi, logalpha_emit) = jax.lax.scan( + loop_body, (logalpha_phi_init, logalpha_emit_init), xs ) - return -last_alpha_mask[jnp.arange(batch_size), target_length] + # last row needs to be updated with the last epsilon transition + logalpha_phi_last = update_phi_score(logalpha_phi[-1], logalpha_emit[-1]) + logalpha_phi = logalpha_phi.at[-1].set(logalpha_phi_last) + + # extract per_seq_loss + # [B, N+1] + _one_hot = jax.nn.one_hot(label_lengths, num_classes=max_label_length + 1) + per_seq_loss = -jnp.einsum("bn,bn->b", logalpha_phi_last, _one_hot) + return per_seq_loss -def ctc_greedy_decode( +def _ctc_greedy_decode( inputs, sequence_length, merge_repeated=True, mask_index=None, ): - inputs = jnp.array(inputs) - sequence_length = jnp.array(sequence_length, dtype=jnp.int32) + inputs = convert_to_tensor(inputs) + sequence_length = convert_to_tensor(sequence_length, dtype="int32") + batch_size, max_length, num_classes = inputs.shape if mask_index is None: - mask_index = inputs.shape[-1] - 1 + mask_index = num_classes - 1 indices = jnp.argmax(inputs, axis=-1) scores = jnp.max(inputs, axis=-1) - seqlen_mask = jnp.arange(inputs.shape[1])[None, :] + seqlen_mask = jnp.arange(max_length)[None, :] seqlen_mask = seqlen_mask >= sequence_length[:, None] - if merge_repeated: - repeat = indices[:, 1:] == indices[:, :-1] - repeat = jnp.pad(repeat, ((0, 0), (1, 0))) - - indices = jnp.where(repeat, mask_index, indices) - else: - repeat = jnp.zeros_like(indices, dtype=bool) - indices = jnp.where(seqlen_mask, mask_index, indices) - indices = [batch[batch != mask_index] for batch in indices] - max_len = max(len(batch) for batch in indices) - indices = jnp.array( - [jnp.pad(batch, (0, max_len - len(batch))) for batch in indices] - ) - scores = jnp.where(seqlen_mask, 
0.0, scores) - scores = -jnp.sum(scores, axis=1)[:, None] - return [indices], scores + if merge_repeated: + repeat_mask = indices[:, 1:] == indices[:, :-1] + repeat_mask = jnp.pad(repeat_mask, ((0, 0), (1, 0))) + indices = jnp.where(repeat_mask, mask_index, indices) + + # We rearrange the indices by moving `mask_index` to the end of the array + invalid_mask = indices == mask_index + order = jnp.expand_dims(jnp.arange(max_length), axis=0) # [1, N] + order = jnp.tile(order, (batch_size, 1)) # [B, N] + order = jnp.where(invalid_mask, max_length, order) + order = jnp.argsort(order, axis=-1) + indices = jnp.take_along_axis(indices, order, axis=-1) + + # We set to -1 for blank labels + indices = jnp.where(invalid_mask, -1, indices) + scores = -jnp.sum(scores, axis=1)[:, None] + indices = jnp.expand_dims(indices, axis=0) + return indices, scores -def ctc_beam_search_decode( +def _ctc_beam_search_decode( inputs, sequence_length, beam_width=100, top_paths=1, mask_index=None, ): - inputs = jnp.array(inputs) - sequence_length = jnp.array(sequence_length) + inputs = convert_to_tensor(inputs) + sequence_length = convert_to_tensor(sequence_length) batch_size, max_seq_len, num_classes = inputs.shape inputs = jnn.log_softmax(inputs) @@ -730,13 +760,13 @@ def ctc_beam_search_decode( (batch_size, 2 * beam_width, max_seq_len), _pad, dtype=jnp.int32 ) - num_init_paths = jnp.min(jnp.array([num_classes, beam_width])) + num_init_paths = builtins.min(num_classes, beam_width) max_classes = jnp.argsort(inputs[:, 0], axis=1)[:, -num_init_paths:] init_classes = jnp.where(max_classes == mask_index, _pad, max_classes) init_paths = init_paths.at[:, :num_init_paths, 0].set(init_classes) init_scores = ( - jnp.full((batch_size, 2 * beam_width), -jnp.inf) + jnp.full((batch_size, 2 * beam_width), -jnp.inf, dtype=inputs.dtype) .at[:, :num_init_paths] .set(jnp.take_along_axis(inputs[:, 0], max_classes, axis=1)) ) @@ -859,38 +889,40 @@ def _decode_batch( # convert classes back to the correct indices paths = jnp.where(paths == _pad, _pad, num_classes - paths - 1) - - lengths = jnp.argmax(paths == _pad, axis=2) - lengths = jnp.max(lengths, axis=0) - paths = jnp.where(paths == _pad, 0, paths) - - paths = paths.transpose((1, 0, 2)) - paths = [path[:, :length] for path, length in zip(paths, lengths)] - + paths = jnp.transpose(paths, [1, 0, 2]) return paths, scores def ctc_decode( inputs, sequence_length, - strategy, + strategy="greedy", beam_width=100, top_paths=1, merge_repeated=True, mask_index=None, ): + inputs = convert_to_tensor(inputs) + dtype = backend.result_type(inputs.dtype, "float32") + inputs = cast(inputs, dtype) + if strategy == "greedy": - return ctc_greedy_decode( + return _ctc_greedy_decode( inputs, sequence_length, merge_repeated=merge_repeated, mask_index=mask_index, ) - else: - return ctc_beam_search_decode( + elif strategy == "beam_search": + return _ctc_beam_search_decode( inputs, sequence_length, beam_width=beam_width, top_paths=top_paths, mask_index=mask_index, ) + else: + raise ValueError( + f"Invalid strategy {strategy}. Supported values are " + "'greedy' and 'beam_search'." 
+ ) diff --git a/keras/src/backend/numpy/nn.py b/keras/src/backend/numpy/nn.py index 9e42d64536c2..2c27fad23b44 100644 --- a/keras/src/backend/numpy/nn.py +++ b/keras/src/backend/numpy/nn.py @@ -1,14 +1,11 @@ import jax import numpy as np from jax import lax -from jax import numpy as jnp -from keras.src.backend import standardize_data_format -from keras.src.backend import standardize_dtype +from keras.src import backend from keras.src.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_jax, ) -from keras.src.backend.config import epsilon from keras.src.backend.numpy.core import cast from keras.src.backend.numpy.core import convert_to_tensor from keras.src.backend.numpy.core import is_tensor @@ -191,7 +188,7 @@ def max_pool( padding="valid", data_format=None, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) num_spatial_dims = inputs.ndim - 2 pool_size = _convert_to_spatial_operand( pool_size, num_spatial_dims, data_format @@ -200,7 +197,7 @@ def max_pool( strides = _convert_to_spatial_operand( strides, num_spatial_dims, data_format ) - return _pool(inputs, -jnp.inf, lax.max, pool_size, strides, padding) + return _pool(inputs, -np.inf, lax.max, pool_size, strides, padding) def average_pool( @@ -210,7 +207,7 @@ def average_pool( padding, data_format=None, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) num_spatial_dims = inputs.ndim - 2 pool_size = _convert_to_spatial_operand( pool_size, num_spatial_dims, data_format @@ -233,7 +230,7 @@ def average_pool( (a if b != 1 else 1) for (a, b) in zip(inputs.shape, pool_size) ] window_counts = _pool( - jnp.ones(shape, inputs.dtype), + np.ones(shape, inputs.dtype), 0.0, lax.add, pool_size, @@ -276,7 +273,7 @@ def conv( data_format=None, dilation_rate=1, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) num_spatial_dims = inputs.ndim - 2 dimension_numbers = _convert_to_lax_conv_dimension_numbers( num_spatial_dims, @@ -328,7 +325,7 @@ def depthwise_conv( data_format=None, dilation_rate=1, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) num_spatial_dims = inputs.ndim - 2 dimension_numbers = _convert_to_lax_conv_dimension_numbers( num_spatial_dims, @@ -350,7 +347,7 @@ def depthwise_conv( feature_group_count = ( inputs.shape[-1] if data_format == "channels_last" else inputs.shape[1] ) - kernel = jnp.reshape( + kernel = np.reshape( kernel if is_tensor(kernel) else kernel.numpy(), kernel.shape[:-2] + (1, feature_group_count * kernel.shape[-1]), ) @@ -376,7 +373,7 @@ def separable_conv( data_format=None, dilation_rate=1, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) depthwise_conv_output = depthwise_conv( inputs, depthwise_kernel, @@ -404,7 +401,7 @@ def conv_transpose( data_format=None, dilation_rate=1, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) num_spatial_dims = inputs.ndim - 2 padding_values = compute_conv_transpose_padding_args_for_jax( input_shape=inputs.shape, @@ -508,7 +505,7 @@ def categorical_crossentropy(target, output, from_logits=False, axis=-1): log_prob = log_softmax(output, axis=axis) else: output = output / np.sum(output, axis, keepdims=True) - output = np.clip(output, epsilon(), 1.0 - epsilon()) + output = 
np.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) log_prob = np.log(output) return -np.sum(target * log_prob, axis=axis) @@ -535,7 +532,7 @@ def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): log_prob = log_softmax(output, axis=axis) else: output = output / np.sum(output, axis, keepdims=True) - output = np.clip(output, epsilon(), 1.0 - epsilon()) + output = np.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) log_prob = np.log(output) target = one_hot(target, output.shape[axis], axis=axis) return -np.sum(target * log_prob, axis=axis) @@ -555,7 +552,7 @@ def binary_crossentropy(target, output, from_logits=False): if from_logits: output = sigmoid(output) - output = np.clip(output, epsilon(), 1.0 - epsilon()) + output = np.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) bce = target * np.log(output) bce += (1.0 - target) * np.log(1.0 - output) return -bce @@ -571,7 +568,7 @@ def moments(x, axes, keepdims=False, synchronized=False): # workaround, we simply perform the operations on float32 and convert back # to float16 need_cast = False - ori_dtype = standardize_dtype(x.dtype) + ori_dtype = backend.standardize_dtype(x.dtype) if ori_dtype == "float16": need_cast = True x = cast(x, "float32") @@ -617,15 +614,356 @@ def batch_normalization( return x * inv + res +def ctc_loss(target, output, target_length, output_length, mask_index=0): + # Ref: https://github.com/google-deepmind/optax + # optax.ctc_loss_with_forward_probs + target = convert_to_tensor(target, dtype="int32") + output = convert_to_tensor(output) + target_length = convert_to_tensor(target_length, "int32") + output_length = convert_to_tensor(output_length, "int32") + batch_size, _, num_classes = output.shape + batch_size, max_label_length = target.shape + log_epsilon = -1e5 + + # Ensure that the dtype promotion behavior matchs that of `tf.nn.ctc_loss` + dtype = backend.result_type(output.dtype, "float32") + output = output.astype(dtype) + + def _lengths_to_paddings(lengths, max_length): + indices = np.arange(max_length).reshape( + (1,) * lengths.ndim + (max_length,) + ) + lengths = np.expand_dims(lengths, axis=-1) + elem_valid = indices < lengths + return np.logical_not(elem_valid) + + target_paddings = _lengths_to_paddings(target_length, max_label_length) + output_paddings = _lengths_to_paddings(output_length, max_label_length) + target_paddings = target_paddings.astype(output.dtype) + output_paddings = output_paddings.astype(output.dtype) + + logprobs = log_softmax(output, axis=-1) + label_lengths = max_label_length - np.sum(target_paddings, axis=1).astype( + np.int32 + ) + + # repeat[b, n] == 1.0 when label[b, n] == label[b, n+1]. + repeat = (target[:, :-1] == target[:, 1:]).astype(np.float32) + repeat = np.pad(repeat, ((0, 0), (0, 1))) + + logprobs_phi = logprobs[:, :, mask_index : mask_index + 1] # [B, T, 1] + logprobs_phi = np.transpose(logprobs_phi, (1, 0, 2)) # [T, B, 1] + + _one_hot = one_hot(target, num_classes=num_classes) # [B, N, K] + logprobs_emit = np.einsum("btk,bnk->btn", logprobs, _one_hot) + logprobs_emit = np.transpose(logprobs_emit, (1, 0, 2)) # [T, B, N] + + # [B, N] + logalpha_phi_init = ( + np.ones((batch_size, max_label_length + 1), dtype=output.dtype) + * log_epsilon + ) + logalpha_phi_init[:, 0] = 0.0 + logalpha_emit_init = ( + np.ones((batch_size, max_label_length), dtype=output.dtype) + * log_epsilon + ) + + def update_phi_score(phi, added_score): + # Update `phi[:, 1:]`` with adding `added_score` in log space. 
+ return np.concatenate( + [phi[:, :1], np.logaddexp(phi[:, 1:], added_score)], axis=-1 + ) + + def loop_body(prev, x): + prev_phi, prev_emit = prev + # emit-to-phi epsilon transition, except if the next label is repetition + prev_phi_orig = prev_phi + prev_phi = update_phi_score(prev_phi, prev_emit + log_epsilon * repeat) + + logprob_emit, logprob_phi, pad = x + + # phi-to-emit transition + next_emit = np.logaddexp( + prev_phi[:, :-1] + logprob_emit, prev_emit + logprob_emit + ) + # self-loop transition + next_phi = prev_phi + logprob_phi + # emit-to-phi blank transition only when the next label is repetition + next_phi = update_phi_score( + next_phi, prev_emit + logprob_phi + log_epsilon * (1.0 - repeat) + ) + + pad = pad.reshape((batch_size, 1)) + next_emit = pad * prev_emit + (1.0 - pad) * next_emit + next_phi = pad * prev_phi_orig + (1.0 - pad) * next_phi + + return (next_phi, next_emit), (next_phi, next_emit) + + def np_scan(f, init, xs): + carry = init + ys = [] + for x in zip(*xs): + carry, y = f(carry, x) + ys.append(y) + result = [] + for i in range(len(ys[0])): + result.append(np.stack([y[i] for y in ys])) + return carry, result + + xs = (logprobs_emit, logprobs_phi, output_paddings.transpose((1, 0))) + _, (logalpha_phi, logalpha_emit) = np_scan( + loop_body, (logalpha_phi_init, logalpha_emit_init), xs + ) + + # last row needs to be updated with the last epsilon transition + logalpha_phi_last = update_phi_score(logalpha_phi[-1], logalpha_emit[-1]) + logalpha_phi[-1] = logalpha_phi_last + + # extract per_seq_loss + # [B, N+1] + _one_hot = one_hot(label_lengths, num_classes=max_label_length + 1) + per_seq_loss = -np.einsum("bn,bn->b", logalpha_phi_last, _one_hot) + return per_seq_loss + + +def _ctc_greedy_decode( + inputs, + sequence_length, + merge_repeated=True, + mask_index=None, +): + inputs = convert_to_tensor(inputs) + sequence_length = convert_to_tensor(sequence_length, dtype="int32") + batch_size, max_length, num_classes = inputs.shape + + if mask_index is None: + mask_index = num_classes - 1 + + indices = np.argmax(inputs, axis=-1).astype("int32") + scores = np.max(inputs, axis=-1) + + seqlen_mask = np.arange(max_length)[None, :] + seqlen_mask = seqlen_mask >= sequence_length[:, None] + + indices = np.where(seqlen_mask, mask_index, indices) + scores = np.where(seqlen_mask, 0.0, scores) + + if merge_repeated: + repeat_mask = indices[:, 1:] == indices[:, :-1] + repeat_mask = np.pad(repeat_mask, ((0, 0), (1, 0))) + indices = np.where(repeat_mask, mask_index, indices) + + # We rearrange the indices by moving `mask_index` to the end of the array + invalid_mask = indices == mask_index + order = np.expand_dims(np.arange(max_length), axis=0) # [1, N] + order = np.tile(order, (batch_size, 1)) # [B, N] + order = np.where(invalid_mask, max_length, order) + order = np.argsort(order, axis=-1) + indices = np.take_along_axis(indices, order, axis=-1) + + # We set to -1 for blank labels + indices = np.where(invalid_mask, -1, indices) + scores = -np.sum(scores, axis=1)[:, None] + indices = np.expand_dims(indices, axis=0) + return indices, scores + + +def _ctc_beam_search_decode( + inputs, + sequence_length, + beam_width=100, + top_paths=1, + mask_index=None, +): + inputs = convert_to_tensor(inputs) + sequence_length = convert_to_tensor(sequence_length) + + batch_size, max_seq_len, num_classes = inputs.shape + inputs = log_softmax(inputs, axis=-1) + seqlen_mask = np.arange(max_seq_len)[None, :] >= sequence_length[:, None] + + if mask_index is None: + mask_index = num_classes - 1 + + # This 
is a workaround for the fact that np.argsort does not support + # the order parameter which is used to break ties when scores are equal. + # For compatibility with the tensorflow implementation, we flip the inputs + # and the mask_index, and then flip the classes back to the correct indices + inputs = np.flip(inputs, axis=2) + mask_index = num_classes - mask_index - 1 + + _pad = -1 + + init_paths = np.full( + (batch_size, 2 * beam_width, max_seq_len), _pad, dtype=np.int32 + ) + + num_init_paths = np.min(np.array([num_classes, beam_width])) + max_classes = np.argsort(inputs[:, 0], axis=1)[:, -num_init_paths:] + init_classes = np.where(max_classes == mask_index, _pad, max_classes) + init_paths[:, :num_init_paths, 0] = init_classes + + init_scores = np.full( + (batch_size, 2 * beam_width), -np.inf, dtype=inputs.dtype + ) + init_scores[:, :num_init_paths] = np.take_along_axis( + inputs[:, 0], max_classes, axis=1 + ) + init_masked = init_paths[:, :, 0] == _pad + + def _extend_paths(paths, scores, masked, x): + paths = np.repeat(paths, num_classes, axis=0) + scores = np.repeat(scores, num_classes) + masked = np.repeat(masked, num_classes) + + path_tail_index = np.argmax(paths == _pad, axis=1) + paths_arange = np.arange(2 * beam_width * num_classes) + path_tails = paths[paths_arange, path_tail_index - 1] + path_tails = np.where(path_tail_index == 0, _pad, path_tails) + + classes = np.arange(num_classes) + classes[mask_index] = _pad + classes = np.tile(classes, 2 * beam_width) + + prev_masked = masked + masked = classes == _pad + + masked_repeat = ~prev_masked & (path_tails == classes) + classes = np.where(masked_repeat, _pad, classes) + paths[paths_arange, path_tail_index] = classes + + x = np.tile(x, 2 * beam_width) + scores = scores + x + + return paths, scores, masked + + def _merge_scores(unique_inverse, scores): + scores_max = np.max(scores) + scores_exp = np.exp(scores - scores_max) + scores = np.zeros_like(scores) + for i, u in enumerate(unique_inverse): + scores[u] += scores_exp[i] + scores = np.log(scores) + scores_max + return scores + + def _prune_paths(paths, scores, masked): + paths, unique_inverse = np.unique(paths, return_inverse=True, axis=0) + pad_size = (2 * num_classes * beam_width) - len(paths) + if pad_size > 0: + paths = np.pad(paths, [[0, pad_size], [0, 0]], constant_values=_pad) + paths = paths[: 2 * num_classes * beam_width] + if len(unique_inverse.shape) >= 2: + unique_inverse = np.squeeze(unique_inverse, axis=1) + + emit_scores = np.where(masked, -np.inf, scores) + mask_scores = np.where(masked, scores, -np.inf) + + emit_scores = _merge_scores(unique_inverse, emit_scores) + mask_scores = _merge_scores(unique_inverse, mask_scores) + + total_scores = np.logaddexp(emit_scores, mask_scores) + top_indices = np.argsort(total_scores, kind="stable")[-beam_width:] + + paths = paths[top_indices] + emit_scores = emit_scores[top_indices] + mask_scores = mask_scores[top_indices] + + paths = np.tile(paths, (2, 1)) + scores = np.concatenate([emit_scores, mask_scores]) + masked = np.concatenate( + [np.zeros(beam_width, bool), np.ones(beam_width, bool)] + ) + + return paths, scores, masked + + def _decode_step(paths, scores, masked, x): + paths, scores, masked = _extend_paths(paths, scores, masked, x) + paths, scores, masked = _prune_paths(paths, scores, masked) + return paths, scores, masked + + def _step(prev, x): + paths, scores, masked = prev + x, seqlen_mask = x + if not seqlen_mask: + paths, scores, masked = _decode_step(paths, scores, masked, x) + return (paths, scores, masked), 
None + + def _decode_batch( + init_paths, init_scores, init_masked, inputs, seqlen_mask + ): + def np_scan_only_carry(f, init, xs): + carry = init + for x in zip(*xs): + carry, y = f(carry, x) + return carry, None + + (paths, scores, masked), _ = np_scan_only_carry( + _step, + (init_paths, init_scores, init_masked), + (inputs[1:], seqlen_mask[1:]), + ) + + paths, unique_inverse = np.unique(paths, return_inverse=True, axis=0) + pad_size = (2 * num_classes * beam_width) - len(paths) + if pad_size > 0: + paths = np.pad(paths, [[0, pad_size], [0, 0]], constant_values=_pad) + paths = paths[: 2 * num_classes * beam_width] + if len(unique_inverse.shape) >= 2: + unique_inverse = np.squeeze(unique_inverse, axis=1) + scores = _merge_scores(unique_inverse, scores) + + top_indices = np.argsort(scores)[-top_paths:][::-1] + paths = paths[top_indices] + scores = scores[top_indices] + + return paths, scores + + results = [ + _decode_batch(p, s, m, i, sm) + for p, s, m, i, sm in zip( + init_paths, init_scores, init_masked, inputs, seqlen_mask + ) + ] + paths = np.stack([r[0] for r in results]) + scores = np.stack([r[1] for r in results]) + + # convert classes back to the correct indices + paths = np.where(paths == _pad, _pad, num_classes - paths - 1) + paths = np.transpose(paths, [1, 0, 2]) + return paths, scores + + def ctc_decode( inputs, sequence_length, - strategy, + strategy="greedy", beam_width=100, top_paths=1, merge_repeated=True, mask_index=None, ): - raise NotImplementedError( - "NumPy backend does not yet support CTC decoding." - ) + inputs = convert_to_tensor(inputs) + dtype = backend.result_type(inputs.dtype, "float32") + inputs = cast(inputs, dtype) + + if strategy == "greedy": + return _ctc_greedy_decode( + inputs, + sequence_length, + merge_repeated=merge_repeated, + mask_index=mask_index, + ) + elif strategy == "beam_search": + return _ctc_beam_search_decode( + inputs, + sequence_length, + beam_width=beam_width, + top_paths=top_paths, + mask_index=mask_index, + ) + else: + raise ValueError( + f"Invalid strategy {strategy}. Supported values are " + "'greedy' and 'beam_search'." 
+ ) diff --git a/keras/src/backend/tensorflow/nn.py b/keras/src/backend/tensorflow/nn.py index 454ce5f7c275..2a53c25f1f98 100644 --- a/keras/src/backend/tensorflow/nn.py +++ b/keras/src/backend/tensorflow/nn.py @@ -3,12 +3,10 @@ import tensorflow as tf -from keras.src.backend import standardize_data_format -from keras.src.backend import standardize_dtype +from keras.src import backend from keras.src.backend.common.backend_utils import ( compute_conv_transpose_output_shape, ) -from keras.src.backend.config import epsilon from keras.src.backend.tensorflow.core import cast from keras.src.backend.tensorflow.core import convert_to_tensor @@ -75,30 +73,7 @@ def selu(x): def gelu(x, approximate=True): x = convert_to_tensor(x) - # we need to explicitly implement gelu because bfloat16 will trigger - # DTypePromotionError when using enable_numpy_behavior() - if approximate: - coeff = tf.constant(0.044715, x.dtype) - return ( - tf.constant(0.5, x.dtype) - * x - * ( - tf.constant(1.0, x.dtype) - + tf.math.tanh( - tf.constant(0.7978845608028654, x.dtype) - * (x + coeff * tf.pow(x, 3)) - ) - ) - ) - else: - return ( - tf.constant(0.5, x.dtype) - * x - * ( - tf.constant(1.0, x.dtype) - + tf.math.erf(x / tf.constant(1.4142135623730951, x.dtype)) - ) - ) + return tf.nn.gelu(x, approximate=approximate) def softmax(x, axis=-1): @@ -162,7 +137,7 @@ def max_pool( padding="valid", data_format=None, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) strides = pool_size if strides is None else strides padding = padding.upper() tf_data_format = _convert_data_format("channels_last", len(inputs.shape)) @@ -190,7 +165,7 @@ def average_pool( padding="valid", data_format=None, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) strides = pool_size if strides is None else strides padding = padding.upper() tf_data_format = _convert_data_format("channels_last", len(inputs.shape)) @@ -268,7 +243,7 @@ def _conv(): def _conv_xla(): return _conv() - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) if data_format == "channels_last": channels = inputs.shape[-1] else: @@ -288,7 +263,7 @@ def depthwise_conv( data_format=None, dilation_rate=1, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) num_spatial_dims = len(inputs.shape) - 2 if num_spatial_dims > 2: raise ValueError( @@ -351,7 +326,7 @@ def separable_conv( data_format=None, dilation_rate=1, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) num_spatial_dims = len(inputs.shape) - 2 if num_spatial_dims > 2: raise ValueError( @@ -414,7 +389,7 @@ def conv_transpose( data_format=None, dilation_rate=1, ): - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) tf_data_format = _convert_data_format(data_format, len(inputs.shape)) kernel_size = kernel.shape[:-2] filters = kernel.shape[-2] @@ -597,7 +572,9 @@ def categorical_crossentropy(target, output, from_logits=False, axis=-1): output = output / tf.reduce_sum(output, axis, keepdims=True) # Compute cross entropy from probabilities. 
- output = tf.clip_by_value(output, epsilon(), 1.0 - epsilon()) + output = tf.clip_by_value( + output, backend.epsilon(), 1.0 - backend.epsilon() + ) return -tf.reduce_sum(target * tf.math.log(output), axis) @@ -653,7 +630,9 @@ def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): ) if not from_logits: - output = tf.clip_by_value(output, epsilon(), 1 - epsilon()) + output = tf.clip_by_value( + output, backend.epsilon(), 1 - backend.epsilon() + ) output = tf.math.log(output) result = tf.nn.sparse_softmax_cross_entropy_with_logits( @@ -702,7 +681,9 @@ def binary_crossentropy(target, output, from_logits=False): ) # Compute cross entropy from probabilities. - output = tf.clip_by_value(output, epsilon(), 1.0 - epsilon()) + output = tf.clip_by_value( + output, backend.epsilon(), 1.0 - backend.epsilon() + ) bce = target * tf.math.log(output) bce += (1 - target) * tf.math.log(1 - output) return -bce @@ -713,7 +694,7 @@ def moments(x, axes, keepdims=False, synchronized=False): # workaround, we simply perform the operations on float32 and convert back # to float16 need_cast = False - ori_dtype = standardize_dtype(x.dtype) + ori_dtype = backend.standardize_dtype(x.dtype) if ori_dtype in ("float16", "bfloat16"): need_cast = True x = cast(x, "float32") @@ -797,11 +778,18 @@ def ctc_loss( output_length, mask_index=0, ): - target = tf.convert_to_tensor(target) + target = convert_to_tensor(target) + output = convert_to_tensor(output) target = tf.cast(target, dtype="int32") - output = tf.convert_to_tensor(output) - output = tf.cast(output, dtype="float32") - return tf.nn.ctc_loss( + + # `tf.nn.ctc_loss` will internally cast to float32 when the input is float16 + # or bfloat16. Additionally, it will raise an error when the input is + # float64. As a result, we perform the casting externally and add support + # for float64. + result_dtype = backend.result_type(output.dtype, "float32") + compute_dtype = "float32" if result_dtype == "float64" else result_dtype + output = tf.cast(output, compute_dtype) + loss = tf.nn.ctc_loss( labels=target, logits=output, label_length=target_length, @@ -809,30 +797,36 @@ def ctc_loss( blank_index=mask_index, logits_time_major=False, ) + return tf.cast(loss, result_dtype) def ctc_decode( inputs, sequence_length, - strategy, + strategy="greedy", beam_width=100, top_paths=1, merge_repeated=True, mask_index=None, ): - inputs = tf.convert_to_tensor(inputs) + inputs = convert_to_tensor(inputs) + input_shape = tf.shape(inputs) + num_samples, num_steps = input_shape[0], input_shape[1] inputs = tf.transpose(inputs, (1, 0, 2)) - sequence_length = tf.convert_to_tensor(sequence_length, dtype="int32") + dtype = backend.result_type(inputs.dtype, "float32") + inputs = tf.cast(inputs, dtype) + + sequence_length = convert_to_tensor(sequence_length, dtype="int32") if strategy == "greedy": - return tf.nn.ctc_greedy_decoder( + (decoded, scores) = tf.nn.ctc_greedy_decoder( inputs=inputs, sequence_length=sequence_length, merge_repeated=merge_repeated, blank_index=mask_index, ) elif strategy == "beam_search": - return tf.nn.ctc_beam_search_decoder( + (decoded, scores) = tf.nn.ctc_beam_search_decoder( inputs=inputs, sequence_length=sequence_length, beam_width=beam_width, @@ -843,3 +837,12 @@ def ctc_decode( f"Invalid strategy {strategy}. Supported values are " "'greedy' and 'beam_search'." 
) + + # Postprocess sparse tensor + decoded_dense = [] + for st in decoded: + st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps)) + decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1)) + decoded_dense = tf.stack(decoded_dense, axis=0) + decoded_dense = tf.cast(decoded_dense, "int32") + return decoded_dense, scores diff --git a/keras/src/backend/torch/nn.py b/keras/src/backend/torch/nn.py index d20f73b95ac2..4eef2c18977e 100644 --- a/keras/src/backend/torch/nn.py +++ b/keras/src/backend/torch/nn.py @@ -1,13 +1,11 @@ import torch import torch.nn.functional as tnn +from keras.src import backend from keras.src import tree -from keras.src.backend import standardize_data_format -from keras.src.backend import standardize_dtype from keras.src.backend.common.backend_utils import ( compute_conv_transpose_padding_args_for_torch, ) -from keras.src.backend.config import epsilon from keras.src.backend.torch.core import cast from keras.src.backend.torch.core import convert_to_tensor from keras.src.backend.torch.core import get_device @@ -92,9 +90,12 @@ def gelu(x, approximate=True): def softmax(x, axis=-1): x = convert_to_tensor(x) - dtype = standardize_dtype(x.dtype) + dtype = backend.standardize_dtype(x.dtype) # TODO: tnn.softmax doesn't support float16 using cpu - if get_device() == "cpu" and standardize_dtype(x.dtype) == "float16": + if ( + get_device() == "cpu" + and backend.standardize_dtype(x.dtype) == "float16" + ): x = cast(x, "float32") if axis is None: # Unlike numpy, PyTorch will handle axis=None as axis=-1. @@ -109,9 +110,12 @@ def softmax(x, axis=-1): def log_softmax(x, axis=-1): x = convert_to_tensor(x) - dtype = standardize_dtype(x.dtype) + dtype = backend.standardize_dtype(x.dtype) # TODO: tnn.log_softmax doesn't support float16 using cpu - if get_device() == "cpu" and standardize_dtype(x.dtype) == "float16": + if ( + get_device() == "cpu" + and backend.standardize_dtype(x.dtype) == "float16" + ): x = cast(x, "float32") if axis is None: # Unlike numpy, PyTorch will handle axis=None as axis=-1. @@ -240,7 +244,7 @@ def max_pool( else: strides = standardize_tuple(strides, num_spatial_dims, "strides") - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) if data_format == "channels_last": inputs = _transpose_spatial_inputs(inputs) @@ -301,7 +305,7 @@ def average_pool( else: strides = standardize_tuple(strides, num_spatial_dims, "strides") - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) if data_format == "channels_last": inputs = _transpose_spatial_inputs(inputs) padding_value = 0 @@ -375,7 +379,7 @@ def conv( num_spatial_dims = inputs.ndim - 2 strides = standardize_tuple(strides, num_spatial_dims, "strides") - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) if data_format == "channels_last": inputs = _transpose_spatial_inputs(inputs) # Transpose kernel from keras format to torch format. 
@@ -494,7 +498,7 @@ def conv_transpose( num_spatial_dims = inputs.ndim - 2 strides = standardize_tuple(strides, num_spatial_dims, "strides") - data_format = standardize_data_format(data_format) + data_format = backend.standardize_data_format(data_format) ( torch_padding, torch_output_padding, @@ -610,7 +614,7 @@ def categorical_crossentropy(target, output, from_logits=False, axis=-1): log_prob = tnn.log_softmax(output, dim=axis) else: output = output / torch.sum(output, dim=axis, keepdim=True) - output = torch.clip(output, epsilon(), 1.0 - epsilon()) + output = torch.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) log_prob = torch.log(output) return -torch.sum(target * log_prob, dim=axis) @@ -638,7 +642,7 @@ def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): log_prob = tnn.log_softmax(output, dim=axis) else: output = output / torch.sum(output, dim=axis, keepdim=True) - output = torch.clip(output, epsilon(), 1.0 - epsilon()) + output = torch.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) log_prob = torch.log(output) target = one_hot(target, output.shape[axis], axis=axis) return -torch.sum(target * log_prob, dim=axis) @@ -661,7 +665,7 @@ def binary_crossentropy(target, output, from_logits=False): output, target, reduction="none" ) else: - output = torch.clip(output, epsilon(), 1.0 - epsilon()) + output = torch.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) return tnn.binary_cross_entropy(output, target, reduction="none") @@ -675,7 +679,7 @@ def moments(x, axes, keepdims=False, synchronized=False): # workaround, we simply perform the operations on float32 and convert back # to float16 need_cast = False - ori_dtype = standardize_dtype(x.dtype) + ori_dtype = backend.standardize_dtype(x.dtype) if ori_dtype == "float16": need_cast = True x = cast(x, "float32") @@ -752,10 +756,13 @@ def ctc_loss( target_length = convert_to_tensor(target_length) output_length = convert_to_tensor(output_length) + # Ensure that the dtype promotion behavior matchs that of `tf.nn.ctc_loss` + dtype = backend.result_type(output.dtype, "float32") + output = cast(output, dtype) + output = torch.transpose(output, 1, 0) logits = tnn.log_softmax(output, dim=-1) - - return tnn.ctc_loss( + loss = tnn.ctc_loss( logits, target, output_length, @@ -763,17 +770,81 @@ def ctc_loss( blank=mask_index, reduction="none", ) + return loss + + +def _ctc_greedy_decode( + inputs, + sequence_length, + merge_repeated=True, + mask_index=None, +): + inputs = convert_to_tensor(inputs) + sequence_length = convert_to_tensor(sequence_length, dtype="int32") + batch_size, max_length, num_classes = inputs.shape + + if mask_index is None: + mask_index = num_classes - 1 + + indices = torch.argmax(inputs, axis=-1) + indices = cast(indices, "int32") + scores = torch.max(inputs, axis=-1)[0] + + seqlen_mask = torch.arange(max_length, device=indices.device)[None, :] + seqlen_mask = seqlen_mask >= sequence_length[:, None] + + indices = torch.where(seqlen_mask, mask_index, indices) + scores = torch.where(seqlen_mask, 0.0, scores) + + if merge_repeated: + repeat = indices[:, 1:] == indices[:, :-1] + repeat = tnn.pad(repeat, (1, 0, 0, 0)) + indices = torch.where(repeat, mask_index, indices) + + # We rearrange the indices by moving `mask_index` to the end of the array + invalid_mask = indices == mask_index + order = torch.unsqueeze( + torch.arange(max_length, device=indices.device), dim=0 + ) # [1, N] + order = torch.tile(order, (batch_size, 1)) # [B, N] + order = torch.where(invalid_mask, max_length, 
order) + order = torch.argsort(order, dim=-1) + indices = torch.take_along_dim(indices, order, dim=-1) + + # We set to -1 for blank labels + indices = torch.where(invalid_mask, -1, indices) + scores = -torch.sum(scores, axis=1)[:, None] + indices = torch.unsqueeze(indices, dim=0) + return indices, scores def ctc_decode( inputs, sequence_length, - strategy, + strategy="greedy", beam_width=100, top_paths=1, merge_repeated=True, mask_index=None, ): - raise NotImplementedError( - "Torch backend does not yet support CTC decoding." - ) + inputs = convert_to_tensor(inputs) + dtype = backend.result_type(inputs.dtype, "float32") + inputs = cast(inputs, dtype) + + if strategy == "greedy": + return _ctc_greedy_decode( + inputs, + sequence_length, + merge_repeated=merge_repeated, + mask_index=mask_index, + ) + elif strategy == "beam_search": + raise NotImplementedError( + "Torch backend doesn't yet support the beam search strategy for CTC" + "decoding." + ) + else: + raise ValueError( + f"Invalid strategy {strategy}. Supported values are " + "'greedy' and 'beam_search'." + ) diff --git a/keras/src/losses/losses.py b/keras/src/losses/losses.py index f3f997616a00..0df1f9542dfd 100644 --- a/keras/src/losses/losses.py +++ b/keras/src/losses/losses.py @@ -1893,7 +1893,7 @@ class CTC(LossFunctionWrapper): def __init__( self, reduction="sum_over_batch_size", - name="sparse_categorical_crossentropy", + name="ctc", ): super().__init__( ctc, diff --git a/keras/src/ops/nn.py b/keras/src/ops/nn.py index 71af40e82384..b2b248154e30 100644 --- a/keras/src/ops/nn.py +++ b/keras/src/ops/nn.py @@ -1810,8 +1810,8 @@ def batch_normalization( ) -class CtcLoss(Operation): - def __init__(self, mask_index): +class CTCLoss(Operation): + def __init__(self, mask_index=0): super().__init__() self.mask_index = mask_index @@ -1838,8 +1838,8 @@ def compute_output_spec(self, target, output, target_length, output_length): self._check_shape_first_dim( "output_length", output_length.shape, "output", output.shape ) - - return KerasTensor((target.shape[0],), dtype=target.dtype) + dtype = backend.result_type(output.dtype, "float32") + return KerasTensor((target.shape[0],), dtype=dtype) @keras_export( @@ -1865,7 +1865,7 @@ def ctc_loss(target, output, target_length, output_length, mask_index=0): """ if any_symbolic_tensors((target, output, target_length, output_length)): - return CtcLoss(mask_index).symbolic_call( + return CTCLoss(mask_index).symbolic_call( target, output, target_length, output_length ) return backend.nn.ctc_loss( @@ -1873,6 +1873,48 @@ def ctc_loss(target, output, target_length, output_length, mask_index=0): ) +class CTCDecode(Operation): + def __init__( + self, + strategy="greedy", + beam_width=100, + top_paths=1, + merge_repeated=True, + mask_index=None, + ): + super().__init__() + self.strategy = strategy + self.beam_width = beam_width + self.top_paths = top_paths + self.merge_repeated = merge_repeated + self.mask_index = mask_index + + def call(self, inputs, sequence_lengths): + return backend.nn.ctc_decode( + inputs, + sequence_lengths, + strategy=self.strategy, + beam_width=self.beam_width, + top_paths=self.top_paths, + merge_repeated=self.merge_repeated, + mask_index=self.mask_index, + ) + + def compute_output_spec(self, inputs, sequence_lengths): + inputs_shape = inputs.shape + if self.strategy == "greedy": + top_paths = 1 + else: + top_paths = self.top_paths + dtype = backend.result_type(inputs.dtype, "float32") + return ( + KerasTensor( + (top_paths, inputs_shape[0], inputs_shape[1]), dtype="int32" + ), + 
KerasTensor((inputs_shape[0], top_paths), dtype=dtype),
+        )
+
+
 @keras_export(
     [
         "keras.ops.ctc_decode",
         "keras.ops.nn.ctc_decode",
     ]
 )
 def ctc_decode(
     inputs,
     sequence_lengths,
-    strategy,
+    strategy="greedy",
     beam_width=100,
     top_paths=1,
     merge_repeated=True,
@@ -1892,7 +1934,8 @@ def ctc_decode(

     Args:
         inputs: A tensor of shape `(batch_size, max_length, num_classes)`
-            containing the logits (output of the model).
+            containing the logits (the output of the model).
+            They should *not* be normalized via softmax.
         sequence_lengths: A tensor of shape `(batch_size,)` containing the
             sequence lengths for the batch.
         strategy: A string for the decoding strategy. Supported values are
@@ -1908,20 +1951,26 @@ def ctc_decode(

     Returns:
         A tuple containing:
-
-        - A list of decoded sequences.
-        - A list of the negative of the sum of the probability logits
-          (if strategy is `"greedy"`) or the log probability (if strategy is
-          `"beam_search"`) for each sequence.
+        - The tensor representing the list of decoded sequences. If
+          `strategy="greedy"`, the shape is `(1, batch_size, max_length)`. If
+          `strategy="beam_search"`, the shape is
+          `(top_paths, batch_size, max_length)`. Note that `-1` indicates the
+          blank label.
+        - If `strategy="greedy"`, a tensor of shape `(batch_size, 1)`
+          representing the negative of the sum of the probability logits for
+          each sequence. If `strategy="beam_search"`, a tensor of shape
+          `(batch_size, top_paths)` representing the log probability for each
+          sequence.
     """
     if any_symbolic_tensors((inputs, sequence_lengths)):
-        raise NotImplementedError(
-            "CTC decoding is not supported with KerasTensors. Use it "
-            "inside the call() method of a Layer or the predict_step "
-            "method of a model."
- ) - + return CTCDecode( + strategy=strategy, + beam_width=beam_width, + top_paths=top_paths, + merge_repeated=merge_repeated, + mask_index=mask_index, + ).symbolic_call(inputs, sequence_lengths) return backend.nn.ctc_decode( inputs=inputs, sequence_length=sequence_lengths, diff --git a/keras/src/ops/nn_test.py b/keras/src/ops/nn_test.py index 3d155e2c973f..5e52a6c263f3 100644 --- a/keras/src/ops/nn_test.py +++ b/keras/src/ops/nn_test.py @@ -633,6 +633,23 @@ def test_batch_normalization(self): scale=KerasTensor([3]), ) + def test_ctc_decode(self): + # Test strategy="greedy" + inputs = KerasTensor([None, 2, 3]) + sequence_lengths = KerasTensor([None]) + decoded, scores = knn.ctc_decode(inputs, sequence_lengths) + self.assertEqual(decoded.shape, (1, None, 2)) + self.assertEqual(scores.shape, (None, 1)) + + # Test strategy="beam_search" + inputs = KerasTensor([None, 2, 3]) + sequence_lengths = KerasTensor([None]) + decoded, scores = knn.ctc_decode( + inputs, sequence_lengths, strategy="beam_search", top_paths=2 + ) + self.assertEqual(decoded.shape, (2, None, 2)) + self.assertEqual(scores.shape, (None, 2)) + def test_normalize(self): x = KerasTensor([None, 2, 3]) self.assertEqual(knn.normalize(x).shape, (None, 2, 3)) @@ -1069,10 +1086,6 @@ def test_batch_normalization(self): (10, 3, 4, 5), ) - @pytest.mark.skipif( - backend.backend() == "numpy", - reason="Numpy does not support CTC loss", - ) def test_ctc_loss(self): x = KerasTensor([10, 3, 4]) y = KerasTensor([10, 3], dtype="int32") @@ -1080,6 +1093,23 @@ def test_ctc_loss(self): y_lengths = KerasTensor([10], dtype="int32") self.assertEqual(knn.ctc_loss(x, y, x_lengths, y_lengths).shape, (10,)) + def test_ctc_decode(self): + # Test strategy="greedy" + inputs = KerasTensor([10, 2, 3]) + sequence_lengths = KerasTensor([10]) + decoded, scores = knn.ctc_decode(inputs, sequence_lengths) + self.assertEqual(decoded.shape, (1, 10, 2)) + self.assertEqual(scores.shape, (10, 1)) + + # Test strategy="beam_search" + inputs = KerasTensor([10, 2, 3]) + sequence_lengths = KerasTensor([10]) + decoded, scores = knn.ctc_decode( + inputs, sequence_lengths, strategy="beam_search", top_paths=2 + ) + self.assertEqual(decoded.shape, (2, 10, 2)) + self.assertEqual(scores.shape, (10, 2)) + def test_normalize(self): x = KerasTensor([1, 2, 3]) self.assertEqual(knn.normalize(x).shape, (1, 2, 3)) @@ -1884,10 +1914,6 @@ def test_batch_normalization(self): ) self.assertEqual(tuple(output.shape), (2, 3, 3, 5)) - @pytest.mark.skipif( - backend.backend() == "numpy", - reason="Numpy does not support CTC loss", - ) def test_ctc_loss(self): labels = np.array([[1, 2, 1], [1, 2, 2]]) outputs = np.array( @@ -1903,10 +1929,6 @@ def test_ctc_loss(self): result = knn.ctc_loss(labels, outputs, label_length, output_length) self.assertAllClose(result, np.array([3.4411672, 1.91680186])) - @pytest.mark.skipif( - backend.backend() not in ["tensorflow", "jax"], - reason="CTC decode only supported for TF and JAX backends", - ) def test_ctc_decode(self): inputs = np.array( [ @@ -1927,19 +1949,35 @@ def test_ctc_decode(self): ], ] ) - labels = np.array([[1, 2], [2, 0], [0, 0]]) + labels = np.array([[1, 2, -1], [2, -1, -1], [-1, -1, -1]]) score_labels = np.array([[-1.2], [-1.6], [-0.7]]) + repeated_labels = np.array([[1, 2, 2], [2, 2, -1], [-1, -1, -1]]) + # Test strategy="greedy" and merge_repeated=True (decoded,), scores = knn.ctc_decode( - inputs, sequence_lengths=[3, 3, 1], strategy="greedy" + inputs, + sequence_lengths=[3, 3, 1], + strategy="greedy", ) - self.assertAllClose(decoded, labels) 
self.assertAllClose(scores, score_labels) + # Test strategy="greedy" and merge_repeated=False + (decoded,), scores = knn.ctc_decode( + inputs, + sequence_lengths=[3, 3, 1], + strategy="greedy", + merge_repeated=False, + ) + self.assertAllClose(decoded, repeated_labels) + self.assertAllClose(scores, score_labels) + + if backend.backend() == "torch": + self.skipTest("torch doesn't support 'beam_search' strategy") + labels = [ - np.array([[1, 2], [2, 0], [0, 0]]), - np.array([[2, 0], [2, 0], [1, 0]]), + np.array([[1, 2, -1], [2, -1, -1], [-1, -1, -1]]), + np.array([[2, -1, -1], [2, 0, -1], [1, -1, -1]]), ] score_labels = np.array( [ @@ -1948,10 +1986,10 @@ def test_ctc_decode(self): [-1.0633859, -1.3633859], ] ) - beam_width = 4 top_paths = 2 + # Test strategy="beam_search" decoded, scores = knn.ctc_decode( inputs, sequence_lengths=[3, 3, 1], @@ -1959,7 +1997,6 @@ def test_ctc_decode(self): beam_width=beam_width, top_paths=top_paths, ) - for i in range(top_paths): self.assertAllClose(decoded[i], labels[i]) self.assertAllClose(scores, score_labels) @@ -1996,23 +2033,6 @@ def test_normalize(self): ) -class TestLogitRecovery(testing.TestCase): - def test_logit_recovery_binary_crossentropy(self): - layer = layers.Dense( - 4, activation="sigmoid", use_bias=False, kernel_initializer="ones" - ) - loss = losses.BinaryCrossentropy() - x = np.array([[1.4, 1.6, 0.8]]) - y = np.array([[0.2, 0.6, 0.1, 0.3]]) - loss_value = loss(y, layer(x)) - self.assertAllClose(loss_value, 2.682124) - - model = models.Sequential([layer]) - model.compile(loss="binary_crossentropy", optimizer="sgd") - out = model.evaluate(x, y) - self.assertAllClose(out, 2.682124) - - class NNOpsDtypeTest(testing.TestCase, parameterized.TestCase): """Test the dtype to verify that the behavior matches JAX.""" @@ -2312,6 +2332,85 @@ def test_softsign(self, dtype): expected_dtype, ) + @parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES)) + def test_ctc_loss(self, dtype): + labels = knp.array([[1, 2, 1]], dtype="int32") + outputs = knp.array( + [[[0.4, 0.8, 0.4], [0.2, 0.8, 0.3], [0.9, 0.4, 0.5]]], dtype=dtype + ) + label_length = knp.array([3]) + output_length = knp.array([3]) + expected_dtype = ( + "float32" if dtype in ("float16", "bfloat16") else dtype + ) + + self.assertEqual( + standardize_dtype( + knn.ctc_loss(labels, outputs, label_length, output_length).dtype + ), + expected_dtype, + ) + self.assertEqual( + standardize_dtype( + knn.CTCLoss() + .symbolic_call(labels, outputs, label_length, output_length) + .dtype + ), + expected_dtype, + ) + + @parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES)) + def test_ctc_decode(self, dtype): + inputs = knp.array( + [[[0.4, 0.8, 0.4], [0.2, 0.8, 0.3], [0.9, 0.4, 0.5]]], dtype=dtype + ) + sequence_length = knp.array([3]) + expected_dtype = backend.result_type(dtype, "float32") + + # Test strategy="greedy" + decoded, scores = knn.ctc_decode( + inputs, sequence_length, strategy="greedy" + ) + self.assertEqual(standardize_dtype(decoded.dtype), "int32") + self.assertEqual(standardize_dtype(scores.dtype), expected_dtype) + decoded, scores = knn.CTCDecode(strategy="greedy").symbolic_call( + inputs, sequence_length + ) + self.assertEqual(standardize_dtype(decoded.dtype), "int32") + self.assertEqual(standardize_dtype(scores.dtype), expected_dtype) + + if backend.backend() == "torch": + self.skipTest("torch doesn't support 'beam_search' strategy") + + # Test strategy="beam_search" + decoded, scores = knn.ctc_decode( + inputs, sequence_length, strategy="beam_search" + ) + 
self.assertEqual(standardize_dtype(decoded.dtype), "int32") + self.assertEqual(standardize_dtype(scores.dtype), expected_dtype) + decoded, scores = knn.CTCDecode(strategy="beam_search").symbolic_call( + inputs, sequence_length + ) + self.assertEqual(standardize_dtype(decoded.dtype), "int32") + self.assertEqual(standardize_dtype(scores.dtype), expected_dtype) + + +class NNOpsBehaviorTest(testing.TestCase, parameterized.TestCase): + def test_logit_recovery_binary_crossentropy(self): + layer = layers.Dense( + 4, activation="sigmoid", use_bias=False, kernel_initializer="ones" + ) + loss = losses.BinaryCrossentropy() + x = np.array([[1.4, 1.6, 0.8]]) + y = np.array([[0.2, 0.6, 0.1, 0.3]]) + loss_value = loss(y, layer(x)) + self.assertAllClose(loss_value, 2.682124) + + model = models.Sequential([layer]) + model.compile(loss="binary_crossentropy", optimizer="sgd") + out = model.evaluate(x, y) + self.assertAllClose(out, 2.682124) + def test_softmax_on_axis_with_size_one_warns(self): x = np.array([[1.0]]) # Applying softmax on the second axis, which has size 1 @@ -2357,10 +2456,31 @@ def test_normalize_order_validation(self): def test_check_shape_first_dim_mismatch(self): name1, shape1 = "labels", (2, 3) name2, shape2 = "logits", (3, 4, 5) - ctc_loss_instance = knn.CtcLoss(mask_index=-1) + ctc_loss_instance = knn.CTCLoss(mask_index=-1) with self.assertRaisesRegex( ValueError, "must have the same first dimension" ): ctc_loss_instance._check_shape_first_dim( name1, shape1, name2, shape2 ) + + def test_invalid_strategy_ctc_decode(self): + inputs = np.array( + [ + [ + [0.1, 0.4, 0.2, 0.4], + [0.3, 0.3, 0.4, 0.2], + [0.3, 0.2, 0.4, 0.3], + ] + ] + ) + beam_width = 4 + top_paths = 2 + with self.assertRaisesRegex(ValueError, "Invalid strategy"): + knn.ctc_decode( + inputs, + sequence_lengths=[3, 3, 1], + strategy="invalid", + beam_width=beam_width, + top_paths=top_paths, + ) From 6524242fb6220eee532d96df2b659f8de741b049 Mon Sep 17 00:00:00 2001 From: Haifeng Jin <5476582+haifeng-jin@users.noreply.github.com> Date: Thu, 25 Apr 2024 19:48:34 +0000 Subject: [PATCH 068/101] update the namex version requirements (#19617) --- requirements-common.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-common.txt b/requirements-common.txt index 5d15f7ac6151..f645c7ba9409 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -1,4 +1,4 @@ -namex +namex>=0.0.8 black>=22 flake8 isort From 74df92625eb5dc5a403b820a855fa50ede6ad921 Mon Sep 17 00:00:00 2001 From: IMvision12 <88665786+IMvision12@users.noreply.github.com> Date: Fri, 26 Apr 2024 01:54:13 +0530 Subject: [PATCH 069/101] Add `PSNR` API (#19616) * PSNR * Fix --- keras/api/_tf_keras/keras/ops/__init__.py | 1 + keras/api/_tf_keras/keras/ops/nn/__init__.py | 1 + keras/api/ops/__init__.py | 1 + keras/api/ops/nn/__init__.py | 1 + keras/src/backend/jax/nn.py | 13 ++++ keras/src/backend/numpy/nn.py | 13 ++++ keras/src/backend/tensorflow/nn.py | 15 ++++ keras/src/backend/torch/nn.py | 17 +++++ keras/src/ops/nn.py | 74 ++++++++++++++++++++ keras/src/ops/nn_test.py | 31 ++++++++ 10 files changed, 167 insertions(+) diff --git a/keras/api/_tf_keras/keras/ops/__init__.py b/keras/api/_tf_keras/keras/ops/__init__.py index 118c5a692eea..386730deb33f 100644 --- a/keras/api/_tf_keras/keras/ops/__init__.py +++ b/keras/api/_tf_keras/keras/ops/__init__.py @@ -72,6 +72,7 @@ from keras.src.ops.nn import multi_hot from keras.src.ops.nn import normalize from keras.src.ops.nn import one_hot +from keras.src.ops.nn import psnr from 
keras.src.ops.nn import relu from keras.src.ops.nn import relu6 from keras.src.ops.nn import selu diff --git a/keras/api/_tf_keras/keras/ops/nn/__init__.py b/keras/api/_tf_keras/keras/ops/nn/__init__.py index 61efc22a5701..8c7e3d921b3b 100644 --- a/keras/api/_tf_keras/keras/ops/nn/__init__.py +++ b/keras/api/_tf_keras/keras/ops/nn/__init__.py @@ -26,6 +26,7 @@ from keras.src.ops.nn import multi_hot from keras.src.ops.nn import normalize from keras.src.ops.nn import one_hot +from keras.src.ops.nn import psnr from keras.src.ops.nn import relu from keras.src.ops.nn import relu6 from keras.src.ops.nn import selu diff --git a/keras/api/ops/__init__.py b/keras/api/ops/__init__.py index 118c5a692eea..386730deb33f 100644 --- a/keras/api/ops/__init__.py +++ b/keras/api/ops/__init__.py @@ -72,6 +72,7 @@ from keras.src.ops.nn import multi_hot from keras.src.ops.nn import normalize from keras.src.ops.nn import one_hot +from keras.src.ops.nn import psnr from keras.src.ops.nn import relu from keras.src.ops.nn import relu6 from keras.src.ops.nn import selu diff --git a/keras/api/ops/nn/__init__.py b/keras/api/ops/nn/__init__.py index 61efc22a5701..8c7e3d921b3b 100644 --- a/keras/api/ops/nn/__init__.py +++ b/keras/api/ops/nn/__init__.py @@ -26,6 +26,7 @@ from keras.src.ops.nn import multi_hot from keras.src.ops.nn import normalize from keras.src.ops.nn import one_hot +from keras.src.ops.nn import psnr from keras.src.ops.nn import relu from keras.src.ops.nn import relu6 from keras.src.ops.nn import selu diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index 7fc623d831cb..3cbc61126fc8 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -926,3 +926,16 @@ def ctc_decode( f"Invalid strategy {strategy}. Supported values are " "'greedy' and 'beam_search'." ) + + +def psnr(x1, x2, max_val): + if x1.shape != x2.shape: + raise ValueError( + f"Input shapes {x1.shape} and {x2.shape} must " + "match for PSNR calculation. " + ) + + max_val = convert_to_tensor(max_val, dtype=x2.dtype) + mse = jnp.mean(jnp.square(x1 - x2)) + psnr = 20 * jnp.log10(max_val) - 10 * jnp.log10(mse) + return psnr diff --git a/keras/src/backend/numpy/nn.py b/keras/src/backend/numpy/nn.py index 2c27fad23b44..a2d89c323e67 100644 --- a/keras/src/backend/numpy/nn.py +++ b/keras/src/backend/numpy/nn.py @@ -967,3 +967,16 @@ def ctc_decode( f"Invalid strategy {strategy}. Supported values are " "'greedy' and 'beam_search'." ) + + +def psnr(x1, x2, max_val): + if x1.shape != x2.shape: + raise ValueError( + f"Input shapes {x1.shape} and {x2.shape} must " + "match for PSNR calculation. " + ) + + max_val = convert_to_tensor(max_val, dtype=x2.dtype) + mse = np.mean(np.square(x1 - x2)) + psnr = 20 * np.log10(max_val) - 10 * np.log10(mse) + return psnr diff --git a/keras/src/backend/tensorflow/nn.py b/keras/src/backend/tensorflow/nn.py index 2a53c25f1f98..e7317599af0e 100644 --- a/keras/src/backend/tensorflow/nn.py +++ b/keras/src/backend/tensorflow/nn.py @@ -846,3 +846,18 @@ def ctc_decode( decoded_dense = tf.stack(decoded_dense, axis=0) decoded_dense = tf.cast(decoded_dense, "int32") return decoded_dense, scores + + +def psnr(x1, x2, max_val): + from keras.src.backend.tensorflow.numpy import log10 + + if x1.shape != x2.shape: + raise ValueError( + f"Input shapes {x1.shape} and {x2.shape} must " + "match for PSNR calculation. 
" + ) + + max_val = convert_to_tensor(max_val, dtype=x2.dtype) + mse = tf.reduce_mean(tf.square(x1 - x2)) + psnr = 20 * log10(max_val) - 10 * log10(mse) + return psnr diff --git a/keras/src/backend/torch/nn.py b/keras/src/backend/torch/nn.py index 4eef2c18977e..62749bc163b4 100644 --- a/keras/src/backend/torch/nn.py +++ b/keras/src/backend/torch/nn.py @@ -848,3 +848,20 @@ def ctc_decode( f"Invalid strategy {strategy}. Supported values are " "'greedy' and 'beam_search'." ) + + +def psnr(x1, x2, max_val): + if x1.shape != x2.shape: + raise ValueError( + f"Input shapes {x1.shape} and {x2.shape} must " + "match for PSNR calculation. " + ) + + x1, x2 = ( + convert_to_tensor(x1), + convert_to_tensor(x2), + ) + max_val = convert_to_tensor(max_val, dtype=x1.dtype) + mse = torch.mean((x1 - x2) ** 2) + psnr = 20 * torch.log10(max_val) - 10 * torch.log10(mse) + return psnr diff --git a/keras/src/ops/nn.py b/keras/src/ops/nn.py index b2b248154e30..84d1bbd077ea 100644 --- a/keras/src/ops/nn.py +++ b/keras/src/ops/nn.py @@ -2042,3 +2042,77 @@ def _normalize(x, axis=-1, order=2): norm = backend.linalg.norm(x, ord=order, axis=axis, keepdims=True) denom = backend.numpy.maximum(norm, epsilon) return backend.numpy.divide(x, denom) + + +class PSNR(Operation): + def __init__( + self, + max_val, + ): + super().__init__() + self.max_val = max_val + + def call(self, x1, x2): + return backend.nn.psnr( + x1=x1, + x2=x2, + max_val=self.max_val, + ) + + def compute_output_spec(self, x1, x2): + if len(x1.shape) != len(x2.shape): + raise ValueError("Inputs must have the same rank") + + return KerasTensor(shape=()) + + +@keras_export( + [ + "keras.ops.psnr", + "keras.ops.nn.psnr", + ] +) +def psnr( + x1, + x2, + max_val, +): + """Peak Signal-to-Noise Ratio (PSNR) calculation. + + This function calculates the Peak Signal-to-Noise Ratio between two signals, + `x1` and `x2`. PSNR is a measure of the quality of a reconstructed signal. + The higher the PSNR, the closer the reconstructed signal is to the original + signal. + + Args: + x1: The first input signal. + x2: The second input signal. Must have the same shape as `x1`. + max_val: The maximum possible value in the signals. + + Returns: + float: The PSNR value between `x1` and `x2`. 
+ + Examples: + >>> import numpy as np + >>> from keras import ops + >>> x = np.random.random((2, 4, 4, 3)) + >>> y = np.random.random((2, 4, 4, 3)) + >>> max_val = 1.0 + >>> psnr_value = ops.nn.psnr(x, y, max_val) + >>> psnr_value + 20.0 + """ + if any_symbolic_tensors( + ( + x1, + x2, + ) + ): + return PSNR( + max_val, + ).symbolic_call(x1, x2) + return backend.nn.psnr( + x1, + x2, + max_val, + ) diff --git a/keras/src/ops/nn_test.py b/keras/src/ops/nn_test.py index 5e52a6c263f3..433bb0f46afa 100644 --- a/keras/src/ops/nn_test.py +++ b/keras/src/ops/nn_test.py @@ -654,6 +654,12 @@ def test_normalize(self): x = KerasTensor([None, 2, 3]) self.assertEqual(knn.normalize(x).shape, (None, 2, 3)) + def test_psnr(self): + x1 = KerasTensor([None, 2, 3]) + x2 = KerasTensor([None, 5, 6]) + out = knn.psnr(x1, x2, max_val=224) + self.assertEqual(out.shape, ()) + class NNOpsStaticShapeTest(testing.TestCase): def test_relu(self): @@ -1114,6 +1120,12 @@ def test_normalize(self): x = KerasTensor([1, 2, 3]) self.assertEqual(knn.normalize(x).shape, (1, 2, 3)) + def test_psnr(self): + x1 = KerasTensor([1, 2, 3]) + x2 = KerasTensor([5, 6, 7]) + out = knn.psnr(x1, x2, max_val=224) + self.assertEqual(out.shape, ()) + class NNOpsCorrectnessTest(testing.TestCase, parameterized.TestCase): def test_relu(self): @@ -2032,6 +2044,25 @@ def test_normalize(self): ], ) + def test_psnr(self): + x1 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]) + x2 = np.array([[0.2, 0.2, 0.3], [0.4, 0.6, 0.6]]) + max_val = 1.0 + expected_psnr_1 = 20 * np.log10(max_val) - 10 * np.log10( + np.mean(np.square(x1 - x2)) + ) + psnr_1 = knn.psnr(x1, x2, max_val) + self.assertAlmostEqual(psnr_1, expected_psnr_1) + + x3 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]) + x4 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]) + max_val = 1.0 + expected_psnr_2 = 20 * np.log10(max_val) - 10 * np.log10( + np.mean(np.square(x3 - x4)) + ) + psnr_2 = knn.psnr(x3, x4, max_val) + self.assertAlmostEqual(psnr_2, expected_psnr_2) + class NNOpsDtypeTest(testing.TestCase, parameterized.TestCase): """Test the dtype to verify that the behavior matches JAX.""" From a3fe639d4554e31fb916f4a77b9ea2ee3a9ce29c Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Thu, 25 Apr 2024 13:59:54 -0700 Subject: [PATCH 070/101] Docstring format --- keras/src/callbacks/early_stopping.py | 1 - keras/src/ops/nn.py | 21 ++++++++++----------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/keras/src/callbacks/early_stopping.py b/keras/src/callbacks/early_stopping.py index e7c1fe9c0dc0..5571cf606de7 100644 --- a/keras/src/callbacks/early_stopping.py +++ b/keras/src/callbacks/early_stopping.py @@ -50,7 +50,6 @@ class EarlyStopping(Callback): improvement is expected and thus training will not be stopped. Defaults to `0`. - Example: >>> callback = keras.callbacks.EarlyStopping(monitor='loss', diff --git a/keras/src/ops/nn.py b/keras/src/ops/nn.py index 84d1bbd077ea..1d900c8ccc4f 100644 --- a/keras/src/ops/nn.py +++ b/keras/src/ops/nn.py @@ -2077,12 +2077,13 @@ def psnr( x2, max_val, ): - """Peak Signal-to-Noise Ratio (PSNR) calculation. + """Peak Signal-to-Noise Ratio (PSNR) function. - This function calculates the Peak Signal-to-Noise Ratio between two signals, + This function computes the Peak Signal-to-Noise Ratio between two signals, `x1` and `x2`. PSNR is a measure of the quality of a reconstructed signal. The higher the PSNR, the closer the reconstructed signal is to the original - signal. + signal. 
Note that it can become negative when the signal power is + smaller that the noise power. Args: x1: The first input signal. @@ -2093,14 +2094,12 @@ def psnr( float: The PSNR value between `x1` and `x2`. Examples: - >>> import numpy as np - >>> from keras import ops - >>> x = np.random.random((2, 4, 4, 3)) - >>> y = np.random.random((2, 4, 4, 3)) - >>> max_val = 1.0 - >>> psnr_value = ops.nn.psnr(x, y, max_val) - >>> psnr_value - 20.0 + + >>> x1 = keras.random.normal((2, 4, 4, 3)) + >>> x2 = keras.random.normal((2, 4, 4, 3)) + >>> max_val = 1.0 + >>> keras.ops.nn.psnr(x1, x2, max_val) + -3.1697404 """ if any_symbolic_tensors( ( From d0ca6bc65db4fb84c5838e7b7e7e8bb4044b1d7b Mon Sep 17 00:00:00 2001 From: Gabriel Rasskin <43894452+grasskin@users.noreply.github.com> Date: Thu, 25 Apr 2024 17:41:51 -0400 Subject: [PATCH 071/101] Remove `PYTORCH_ENABLE_MPS_FALLBACK` flag requirement for mps (#19618) * Remove `PYTORCH_ENABLE_MPS_FALLBACK` flag requirement for mps * Formatting --- keras/src/backend/torch/core.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/keras/src/backend/torch/core.py b/keras/src/backend/torch/core.py index 257afeeec699..8cc6c5b5b564 100644 --- a/keras/src/backend/torch/core.py +++ b/keras/src/backend/torch/core.py @@ -1,5 +1,4 @@ import contextlib -import os import ml_dtypes import numpy as np @@ -19,10 +18,7 @@ # Some operators such as 'aten::_foreach_mul_.Scalar' # are not currently implemented for the MPS device. # check https://github.com/pytorch/pytorch/issues/77764. -if ( - torch.backends.mps.is_available() - and os.getenv("PYTORCH_ENABLE_MPS_FALLBACK") == "1" -): +if torch.backends.mps.is_available(): DEFAULT_DEVICE = "mps" elif torch.cuda.is_available(): DEFAULT_DEVICE = "cuda" From 90e0eccbfa832fbff5fd67012608d210f6cd861e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Chollet?= Date: Thu, 25 Apr 2024 15:51:18 -0700 Subject: [PATCH 072/101] Implement custom layer insertion in clone_model. (#19610) * Implement custom layer insertion in clone_model. * Add recursive arg and tests. * Add nested sequential cloning test --- keras/src/models/cloning.py | 169 ++++++++++++++++++++++++++----- keras/src/models/cloning_test.py | 102 ++++++++++++++++++- keras/src/ops/function.py | 8 +- 3 files changed, 247 insertions(+), 32 deletions(-) diff --git a/keras/src/models/cloning.py b/keras/src/models/cloning.py index 3875b3522a6e..7e28e4438c3c 100644 --- a/keras/src/models/cloning.py +++ b/keras/src/models/cloning.py @@ -11,7 +11,14 @@ @keras_export("keras.models.clone_model") -def clone_model(model, input_tensors=None, clone_function=None): +def clone_model( + model, + input_tensors=None, + clone_function=None, + call_function=None, + recursive=False, + **kwargs, +): """Clone a Functional or Sequential `Model` instance. Model cloning is similar to calling a model on new inputs, @@ -29,24 +36,44 @@ def clone_model(model, input_tensors=None, clone_function=None): input_tensors: optional list of input tensors or InputLayer objects to build the model upon. If not provided, new `Input` objects will be created. - clone_function: Callable to be used to clone each layer in the target + clone_function: Callable with signature `fn(layer)` + to be used to clone each layer in the target model (except `Input` instances). It takes as argument the layer instance to be cloned, and returns the corresponding layer instance to be used in the model copy. 
If unspecified, this callable - becomes the following serialization/deserialization function: + defaults to the following serialization/deserialization function: `lambda layer: layer.__class__.from_config(layer.get_config())`. By passing a custom callable, you can customize your copy of the model, e.g. by wrapping certain layers of interest (you might want to replace all `LSTM` instances with equivalent `Bidirectional(LSTM(...))` instances, for example). Defaults to `None`. + call_function: Callable with signature + `fn(layer, *args, **kwargs)` to be used to call each + cloned layer and a set of inputs. It takes the layer instance, + the call arguments and keyword arguments, and returns the + call outputs. If unspecified, this callable defaults to + the regular `__call__()` method: + `def fn(layer, *args, **kwargs): return layer(*args, **kwargs)`. + By passing a custom callable, you can insert new layers before or + after a given layer. Note: this argument can only be used with + Functional models. + recursive: Boolean. Whether to recursively clone any Sequential + or Functional models encountered in the original + Sequential/Functional model. If `False`, + then inner models are cloned by calling `clone_function()`. + If `True`, then inner models are cloned by calling `clone_model()` + with the same `clone_function`, `call_function`, and `recursive` + arguments. Note that in this case, `call_function` + will not be propagated to any Sequential model + (since it is not applicable to Sequential models). Returns: An instance of `Model` reproducing the behavior of the original model, on top of new inputs tensors, using newly instantiated weights. The cloned model may behave differently from the original model if a custom `clone_function` - modifies the layer. + or `call_function` modifies a layer or layer call. Example: @@ -74,6 +101,23 @@ def clone_function(layer): new_model = clone_model(model) ``` + Using a `call_function` to add a `Dropout` layer after each `Dense` layer + (without recreating new layers): + + ```python + def call_function(layer, *args, **kwargs): + out = layer(*args, **kwargs) + if isinstance(layer, keras.layers.Dense): + out = keras.layers.Dropout(0.5)(out) + return out + + new_model = clone_model( + model, + clone_function=lambda x: x, # Reuse the same layers. + call_function=call_function, + ) + ``` + Note that subclassed models cannot be cloned by default, since their internal layer structure is not known. To achieve equivalent functionality @@ -88,11 +132,44 @@ def clone_function(layer): In the case of a subclassed model, you cannot using a custom `clone_function`. """ + cache = kwargs.pop("cache", None) + if kwargs: + raise ValueError( + f"Unexpected keyword argument(s): {tuple(kwargs.keys())}" + ) + if isinstance(model, Sequential): + # Wrap clone_function to handle recursiveness and layer sharing. + clone_function = _wrap_clone_function( + clone_function, + call_function=call_function, + recursive=recursive, + cache=cache, + ) + if call_function is not None: + raise ValueError( + "`call_function` argument is not supported with Sequential " + "models. In a Sequential model, layers aren't called " + "at model-construction time (they're merely listed). " + "Use `call_function` with Functional models only. 
" + "Received model of " + f"type '{model.__class__.__name__}', with " + f"call_function={clone_function}" + ) return _clone_sequential_model( - model, input_tensors=input_tensors, clone_function=clone_function + model, + clone_function=clone_function, + input_tensors=input_tensors, ) if isinstance(model, Functional): + # Wrap clone_function to handle recursiveness and layer sharing. + clone_function = _wrap_clone_function( + clone_function, + call_function=call_function, + recursive=recursive, + cache=cache, + ) + # If the get_config() method is the same as a regular Functional # model, we're safe to use _clone_functional_model (which relies # on a Functional constructor). In the case where the get_config @@ -104,27 +181,78 @@ def clone_function(layer): ): return _clone_functional_model( model, - input_tensors=input_tensors, clone_function=clone_function, + call_function=call_function, + input_tensors=input_tensors, ) # Case of a custom model class if clone_function or input_tensors: raise ValueError( - "Arguments clone_function and input_tensors " + "Arguments `clone_function` and `input_tensors` " "are only supported for Sequential models " "or Functional models. Received model of " f"type '{model.__class__.__name__}', with " f"clone_function={clone_function} and " f"input_tensors={input_tensors}" ) + if call_function is not None: + raise ValueError( + "Argument `call_function` is only supported " + "for Functional models. Received model of " + f"type '{model.__class__.__name__}', with " + f"call_function={clone_function}" + ) config = serialization_lib.serialize_keras_object(model) return serialization_lib.deserialize_keras_object( config, custom_objects={model.__class__.__name__: model.__class__} ) -def _clone_sequential_model(model, input_tensors=None, clone_function=None): +def _wrap_clone_function( + clone_function, call_function=None, recursive=False, cache=None +): + """Wrapper to handle recursiveness and layer sharing.""" + if clone_function is None: + + def _clone_layer(layer): + return layer.__class__.from_config(layer.get_config()) + + clone_function = _clone_layer + + if cache is None: + cache = {} + + def wrapped_clone_function(layer): + if id(layer) in cache: + return cache[id(layer)] + if recursive: + if isinstance(layer, Sequential): + # Note: Sequential doens't support call_function. + clone = clone_model( + layer, + clone_function=clone_function, + cache=cache, + ) + cache[id(layer)] = clone + return clone + elif isinstance(layer, Functional): + clone = clone_model( + layer, + clone_function=clone_function, + call_function=call_function, + cache=cache, + ) + cache[id(layer)] = clone + return clone + clone = clone_function(layer) + cache[id(layer)] = clone + return clone + + return wrapped_clone_function + + +def _clone_sequential_model(model, clone_function, input_tensors=None): """Clone a `Sequential` model instance. Model cloning is similar to calling a model on new inputs, @@ -144,12 +272,6 @@ def _clone_sequential_model(model, input_tensors=None, clone_function=None): of the original model, on top of new inputs tensors, using newly instantiated weights. 
""" - if clone_function is None: - - def _clone_layer(layer): - return layer.__class__.from_config(layer.get_config()) - - clone_function = _clone_layer if not isinstance(model, Sequential): raise ValueError( @@ -202,7 +324,9 @@ def _clone_layer(layer): return Sequential(new_layers, name=model.name, trainable=model.trainable) -def _clone_functional_model(model, input_tensors=None, clone_function=None): +def _clone_functional_model( + model, clone_function, input_tensors=None, call_function=None +): """Clone a `Functional` model instance. Model cloning is similar to calling a model on new inputs, @@ -224,17 +348,6 @@ def _clone_functional_model(model, input_tensors=None, clone_function=None): of the original model, on top of new inputs tensors, using newly instantiated weights. """ - if clone_function is None: - seen = {} - - def _clone_layer(layer): - if layer in seen: - return seen[layer] - new_layer = layer.__class__.from_config(layer.get_config()) - seen[layer] = new_layer - return new_layer - - clone_function = _clone_layer if not callable(clone_function): raise ValueError( @@ -276,7 +389,9 @@ def operation_fn(layer): return new_layer output_tensors = model._run_through_graph( - input_tensors, operation_fn=operation_fn + input_tensors, + operation_fn=operation_fn, + call_fn=call_function, ) if functional_like_constructor(model.__class__): diff --git a/keras/src/models/cloning_test.py b/keras/src/models/cloning_test.py index d9a46ac29cfa..b77122b28a2d 100644 --- a/keras/src/models/cloning_test.py +++ b/keras/src/models/cloning_test.py @@ -4,6 +4,7 @@ from keras.src import layers from keras.src import models +from keras.src import ops from keras.src import testing from keras.src import tree from keras.src.models.cloning import clone_model @@ -21,6 +22,24 @@ def get_mlp_functional_model(shared_layers=False): return model +def get_nested_functional_model(): + inputs = layers.Input(shape=(4,)) + x = layers.Dense(3)(inputs) + mlp = get_mlp_functional_model() + x = mlp(x) + outputs = layers.Dense(2)(x) + model = models.Model(inputs, outputs) + return model + + +def get_nested_sequential_model(): + model = models.Sequential() + model.add(layers.Dense(2)) + model.add(get_sequential_model(explicit_input=False)) + model.add(layers.Dense(2)) + return model + + def get_cnn_functional_model(shared_layers=False): inputs = layers.Input(shape=(7, 3)) x = layers.Conv1D(2, 2, padding="same")(inputs) @@ -57,6 +76,19 @@ def call(self, x): @pytest.mark.requires_trainable_backend class CloneModelTest(testing.TestCase, parameterized.TestCase): + + def assert_models_equal(self, model1, model2, ref_input): + result1 = model1(ref_input) + result2 = model2(ref_input) + for r1, r2 in zip(tree.flatten(result1), tree.flatten(result2)): + self.assertAllClose( + ops.convert_to_numpy(r1), ops.convert_to_numpy(r2) + ) + + def assert_weights_equal(self, model1, model2): + for a, b in zip(model1.weights, model2.weights): + self.assertAllClose(a.numpy(), b.numpy()) + @parameterized.named_parameters( ("mlp_functional", get_mlp_functional_model), ("cnn_functional", get_cnn_functional_model, True), @@ -71,11 +103,10 @@ def test_cloning_correctness(self, model_fn, is_conv=False): ref_input = np.random.random((2, 7, 3) if is_conv else (2, 3)) model = model_fn() new_model = clone_model(model) - ref_output = model(ref_input) + model(ref_input) # Maybe needed to build the model new_model(ref_input) # Maybe needed to build the model new_model.set_weights(model.get_weights()) - output = new_model(ref_input) - 
self.assertAllClose(ref_output, output) + self.assert_models_equal(model, new_model, ref_input) @parameterized.named_parameters( ("mlp_functional", get_mlp_functional_model), @@ -121,3 +152,68 @@ def test_structured_io_cloning(self): "`input_tensors` must have the same structure as model.input", ): model = clone_model(model0, input_tensors=(x, y)) + + def test_call_fn(self): + model = get_mlp_functional_model(shared_layers=False) + + def call_function(layer, *args, **kwargs): + out = layer(*args, **kwargs) + if isinstance(layer, layers.Dense): + out = layers.Dropout(0.5)(out) + return out + + new_model = clone_model( + model, + clone_function=lambda x: x, # Reuse the same layers. + call_function=call_function, + ) + self.assertLen(model.layers, 3) + self.assertLen(new_model.layers, 5) + self.assertIsInstance(new_model.layers[2], layers.Dropout) + self.assertIsInstance(new_model.layers[4], layers.Dropout) + ref_input = np.random.random((2, 3)) + self.assert_models_equal(model, new_model, ref_input) + + def test_recursive(self): + model = get_nested_functional_model() + + def call_function(layer, *args, **kwargs): + out = layer(*args, **kwargs) + if isinstance(layer, layers.Dense): + out = layers.Dropout(0.5)(out) + return out + + new_model = clone_model( + model, + clone_function=lambda x: x, # Reuse the same layers. + call_function=call_function, + recursive=True, + ) + self.assertLen(model._flatten_layers(), 8) + self.assertLen(new_model._flatten_layers(), 12) + self.assertIsInstance(new_model.layers[3].layers[2], layers.Dropout) + self.assertIsInstance(new_model.layers[3].layers[4], layers.Dropout) + ref_input = np.random.random((2, 4)) + self.assert_models_equal(model, new_model, ref_input) + + # Sequential. + def clone_function(layer): + layer = layer.__class__.from_config(layer.get_config()) + layer.flag = True + return layer + + model = get_nested_sequential_model() + new_model = clone_model( + model, + clone_function=clone_function, + recursive=True, + ) + ref_input = np.random.random((2, 3)) + model(ref_input) # Maybe needed to build the model + new_model(ref_input) # Maybe needed to build the model + new_model.set_weights(model.get_weights()) + self.assert_models_equal(model, new_model, ref_input) + for l1, l2 in zip(model._flatten_layers(), new_model._flatten_layers()): + if isinstance(l2, layers.Dense): + self.assertFalse(hasattr(l1, "flag")) + self.assertTrue(hasattr(l2, "flag")) diff --git a/keras/src/ops/function.py b/keras/src/ops/function.py index 8b6930ac12ab..04f2b409614e 100644 --- a/keras/src/ops/function.py +++ b/keras/src/ops/function.py @@ -121,7 +121,7 @@ def call(self, inputs): self._assert_input_compatibility(inputs) return self._run_through_graph(inputs, operation_fn=lambda op: op) - def _run_through_graph(self, inputs, operation_fn): + def _run_through_graph(self, inputs, operation_fn, call_fn=None): """Execute the graph. At each node we compute outputs via @@ -148,7 +148,11 @@ def _run_through_graph(self, inputs, operation_fn): continue # Node is not computable, try skipping. args, kwargs = node.arguments.fill_in(tensor_dict) - outputs = operation_fn(node.operation)(*args, **kwargs) + op = operation_fn(node.operation) + if call_fn is not None: + outputs = call_fn(op, *args, **kwargs) + else: + outputs = op(*args, **kwargs) # Update tensor_dict. 
for x, y in zip(node.outputs, tree.flatten(outputs)): From 8200a78d85a9371afaf78563c5f78744720be4e6 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Thu, 25 Apr 2024 16:57:24 -0700 Subject: [PATCH 073/101] Fix bidir lstm saving issue. --- keras/src/layers/rnn/bidirectional.py | 8 ++++++-- keras/src/saving/saving_lib_test.py | 12 ++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/keras/src/layers/rnn/bidirectional.py b/keras/src/layers/rnn/bidirectional.py index 9d9d29d24602..6d912d5fc330 100644 --- a/keras/src/layers/rnn/bidirectional.py +++ b/keras/src/layers/rnn/bidirectional.py @@ -272,8 +272,12 @@ def states(self): return None def build(self, sequences_shape, initial_state_shape=None): - self.forward_layer.build(sequences_shape) - self.backward_layer.build(sequences_shape) + if not self.layer.built: + self.layer.build(sequences_shape) + if not self.forward_layer.built: + self.forward_layer.build(sequences_shape) + if not self.backward_layer.built: + self.backward_layer.build(sequences_shape) self.built = True def compute_mask(self, _, mask): diff --git a/keras/src/saving/saving_lib_test.py b/keras/src/saving/saving_lib_test.py index 6b2d483d7591..23ac2a52e737 100644 --- a/keras/src/saving/saving_lib_test.py +++ b/keras/src/saving/saving_lib_test.py @@ -906,3 +906,15 @@ def func(in_size=4, out_size=2, name=None): out = new_model(x) self.assertAllClose(ref_out[0], out[0]) self.assertAllClose(ref_out[1], out[1]) + + def test_bidirectional_lstm_saving(self): + inputs = keras.Input((3, 2)) + outputs = keras.layers.Bidirectional(keras.layers.LSTM(64))(inputs) + model = keras.Model(inputs, outputs) + temp_filepath = os.path.join(self.get_temp_dir(), "bidir_lstm.keras") + model.save(temp_filepath) + new_model = keras.saving.load_model(temp_filepath) + x = np.random.random((1, 3, 2)) + ref_out = model(x) + out = new_model(x) + self.assertAllClose(ref_out, out) From 6b3d96057ff45541dc5ca7dd6ef5c8062536bce3 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Thu, 25 Apr 2024 21:46:27 -0700 Subject: [PATCH 074/101] Fix CI --- keras/src/layers/rnn/bidirectional_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/keras/src/layers/rnn/bidirectional_test.py b/keras/src/layers/rnn/bidirectional_test.py index 476965f935f6..f50758dc1280 100644 --- a/keras/src/layers/rnn/bidirectional_test.py +++ b/keras/src/layers/rnn/bidirectional_test.py @@ -14,7 +14,7 @@ def test_basics(self): init_kwargs={"layer": layers.SimpleRNN(4)}, input_shape=(3, 2, 4), expected_output_shape=(3, 8), - expected_num_trainable_weights=6, + expected_num_trainable_weights=9, expected_num_non_trainable_weights=0, supports_masking=True, ) @@ -27,7 +27,7 @@ def test_basics(self): }, input_shape=(3, 2, 4), expected_output_shape=(3, 4), - expected_num_trainable_weights=6, + expected_num_trainable_weights=9, expected_num_non_trainable_weights=0, supports_masking=True, ) From 504bb958903afc84b2810c27b88775e8f0d40bfb Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Fri, 26 Apr 2024 08:28:55 -0700 Subject: [PATCH 075/101] Fix cholesky tracing with jax --- keras/src/backend/jax/linalg.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/keras/src/backend/jax/linalg.py b/keras/src/backend/jax/linalg.py index 7984a734e9d0..1e1c1cedf9b6 100644 --- a/keras/src/backend/jax/linalg.py +++ b/keras/src/backend/jax/linalg.py @@ -11,11 +11,18 @@ def cholesky(a): out = jnp.linalg.cholesky(a) - if jnp.any(jnp.isnan(out)): - raise ValueError( - "Cholesky 
decomposition failed. " - "The input might not be a valid positive definite matrix." - ) + try: + # In eager mode, raise for nan to + # achieve behavior consistency with numpy + if jnp.any(jnp.isnan(out)): + raise ValueError( + "Cholesky decomposition failed. " + "The input might not be a valid " + "positive definite matrix." + ) + except jax.errors.TracerBoolConversionError: + # Cannot raise for nan in tracing mode + pass return out From d7824ac5f476acea4145d6ebe7256c24d5e463c0 Mon Sep 17 00:00:00 2001 From: Vachan V Y <109357590+VachanVY@users.noreply.github.com> Date: Fri, 26 Apr 2024 21:57:38 +0530 Subject: [PATCH 076/101] made extract_patches dtype agnostic (#19621) --- keras/src/ops/image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras/src/ops/image.py b/keras/src/ops/image.py index bb817ec4abe2..a9971b2aba4d 100644 --- a/keras/src/ops/image.py +++ b/keras/src/ops/image.py @@ -548,7 +548,7 @@ def _extract_patches( if not strides: strides = size out_dim = patch_h * patch_w * channels_in - kernel = backend.numpy.eye(out_dim) + kernel = backend.numpy.eye(out_dim, dtype=image.dtype) kernel = backend.numpy.reshape( kernel, (patch_h, patch_w, channels_in, out_dim) ) From 67419fb2986ebc11110d20f8e555298d26cb106d Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Fri, 26 Apr 2024 11:39:33 -0700 Subject: [PATCH 077/101] Simplify Bidirectional implementation --- keras/src/layers/rnn/bidirectional.py | 7 ++----- keras/src/layers/rnn/bidirectional_test.py | 4 ++-- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/keras/src/layers/rnn/bidirectional.py b/keras/src/layers/rnn/bidirectional.py index 6d912d5fc330..a89c30f9a4ee 100644 --- a/keras/src/layers/rnn/bidirectional.py +++ b/keras/src/layers/rnn/bidirectional.py @@ -3,13 +3,12 @@ from keras.src import ops from keras.src import utils from keras.src.api_export import keras_export -from keras.src.layers.core.wrapper import Wrapper from keras.src.layers.layer import Layer from keras.src.saving import serialization_lib @keras_export("keras.layers.Bidirectional") -class Bidirectional(Wrapper): +class Bidirectional(Layer): """Bidirectional wrapper for RNNs. Args: @@ -105,7 +104,7 @@ def __init__( "Merge mode should be one of " '{"sum", "mul", "ave", "concat", None}' ) - super().__init__(layer, **kwargs) + super().__init__(**kwargs) # Recreate the forward layer from the original layer config, so that it # will not carry over any state from the layer. 
@@ -272,8 +271,6 @@ def states(self): return None def build(self, sequences_shape, initial_state_shape=None): - if not self.layer.built: - self.layer.build(sequences_shape) if not self.forward_layer.built: self.forward_layer.build(sequences_shape) if not self.backward_layer.built: diff --git a/keras/src/layers/rnn/bidirectional_test.py b/keras/src/layers/rnn/bidirectional_test.py index f50758dc1280..476965f935f6 100644 --- a/keras/src/layers/rnn/bidirectional_test.py +++ b/keras/src/layers/rnn/bidirectional_test.py @@ -14,7 +14,7 @@ def test_basics(self): init_kwargs={"layer": layers.SimpleRNN(4)}, input_shape=(3, 2, 4), expected_output_shape=(3, 8), - expected_num_trainable_weights=9, + expected_num_trainable_weights=6, expected_num_non_trainable_weights=0, supports_masking=True, ) @@ -27,7 +27,7 @@ def test_basics(self): }, input_shape=(3, 2, 4), expected_output_shape=(3, 4), - expected_num_trainable_weights=9, + expected_num_trainable_weights=6, expected_num_non_trainable_weights=0, supports_masking=True, ) From f063002cccea182d43ed64328d0b5df7157e3fba Mon Sep 17 00:00:00 2001 From: hertschuh <1091026+hertschuh@users.noreply.github.com> Date: Fri, 26 Apr 2024 13:29:20 -0700 Subject: [PATCH 078/101] Add support for infinite `PyDataset`s. (#19624) `PyDataset` now uses the `num_batches` property instead of `__len__` to support `None`, which is how one indicates the dataset is infinite. Note that infinite datasets are not shuffled. Fixes https://github.com/keras-team/keras/issues/19528 Also added exception reporting when using multithreading / multiprocessing. Previously, the program would just hang with no error reported. --- keras/src/trainers/data_adapters/__init__.py | 7 ++ .../data_adapters/py_dataset_adapter.py | 114 +++++++++++------- .../data_adapters/py_dataset_adapter_test.py | 104 ++++++++++++++-- 3 files changed, 174 insertions(+), 51 deletions(-) diff --git a/keras/src/trainers/data_adapters/__init__.py b/keras/src/trainers/data_adapters/__init__.py index 41f2a91f11a0..3dc04b754981 100644 --- a/keras/src/trainers/data_adapters/__init__.py +++ b/keras/src/trainers/data_adapters/__init__.py @@ -71,6 +71,13 @@ def get_data_adapter( "sample_weights", "the sample weights", "PyDataset" ) return PyDatasetAdapter(x, class_weight=class_weight, shuffle=shuffle) + # TODO: should we warn or not? + # if x.num_batches is None and shuffle: + # warnings.warn( + # "`shuffle=True` was passed, but will be ignored since the " + # "data `x` was provided as a infinite PyDataset. The " + # "PyDataset is expected to already be shuffled." + # ) elif is_torch_dataloader(x): if y is not None: raise_unsupported_arg("y", "the targets", "torch DataLoader") diff --git a/keras/src/trainers/data_adapters/py_dataset_adapter.py b/keras/src/trainers/data_adapters/py_dataset_adapter.py index 71ab2a67736a..daa56a1313f4 100644 --- a/keras/src/trainers/data_adapters/py_dataset_adapter.py +++ b/keras/src/trainers/data_adapters/py_dataset_adapter.py @@ -1,3 +1,4 @@ +import itertools import multiprocessing.dummy import queue import random @@ -153,23 +154,26 @@ def __getitem__(self, index): """ raise NotImplementedError - def __len__(self): - """Number of batch in the PyDataset. + @property + def num_batches(self): + """Number of batches in the PyDataset. Returns: - The number of batches in the PyDataset. + The number of batches in the PyDataset or `None` to indicate that + the dataset is infinite. """ - raise NotImplementedError + # For backwards compatibility, support `__len__`. 
+ if hasattr(self, "__len__"): + return len(self) + raise NotImplementedError( + "You need to implement the `num_batches` property:\n\n" + "@property\ndef num_batches(self):\n return ..." + ) def on_epoch_end(self): """Method called at the end of every epoch.""" pass - def __iter__(self): - """Create a generator that iterate over the PyDataset.""" - for i in range(len(self)): - yield self[i] - class PyDatasetAdapter(DataAdapter): """Adapter for `keras.utils.PyDataset` instances.""" @@ -234,23 +238,33 @@ def generator_fn(): else: def generator_fn(): - order = range(len(self.py_dataset)) - if self.shuffle: + num_batches = self.py_dataset.num_batches + indices = ( + range(num_batches) + if num_batches is not None + else itertools.count() + ) + if self.shuffle and num_batches is not None: # Match the shuffle convention in OrderedEnqueuer. - order = list(order) - random.shuffle(order) + indices = list(indices) + random.shuffle(indices) - for i in order: + for i in indices: yield self.py_dataset[i] return generator_fn def _get_iterator(self): + num_batches = self.py_dataset.num_batches gen_fn = self._make_multiprocessed_generator_fn() for i, batch in enumerate(gen_fn()): batch = self._standardize_batch(batch) yield batch - if i >= len(self.py_dataset) - 1 and self.enqueuer: + if ( + self.enqueuer + and num_batches is not None + and i >= num_batches - 1 + ): self.enqueuer.stop() def get_numpy_iterator(self): @@ -262,11 +276,11 @@ def get_jax_iterator(self): def get_tf_dataset(self): from keras.src.utils.module_utils import tensorflow as tf + num_batches = self.py_dataset.num_batches if self._output_signature is None: - num_samples = min( - data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC, - len(self.py_dataset), - ) + num_samples = data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC + if num_batches is not None: + num_samples = min(num_samples, num_batches) batches = [ self._standardize_batch(self.py_dataset[i]) for i in range(num_samples) @@ -277,7 +291,7 @@ def get_tf_dataset(self): self._get_iterator, output_signature=self._output_signature, ) - if self.shuffle: + if self.shuffle and num_batches is not None: ds = ds.shuffle(8) ds = ds.prefetch(tf.data.AUTOTUNE) return ds @@ -292,7 +306,7 @@ def on_epoch_end(self): @property def num_batches(self): - return len(self.py_dataset) + return self.py_dataset.num_batches @property def batch_size(self): @@ -520,31 +534,40 @@ def _wait_queue(self): def _run(self): """Submits request to the executor and queue the `Future` objects.""" - indices = list(range(len(self.py_dataset))) - if self.shuffle: - random.shuffle(indices) - self._send_py_dataset() # Share the initial py_dataset - while True: - with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor: - for i in indices: + try: + num_batches = self.py_dataset.num_batches + indices = ( + range(num_batches) + if num_batches is not None + else itertools.count() + ) + if self.shuffle and num_batches is not None: + indices = list(indices) + random.shuffle(indices) + self._send_py_dataset() # Share the initial py_dataset + while True: + with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor: + for i in indices: + if self.stop_signal.is_set(): + return + + self.queue.put( + executor.apply_async(get_index, (self.uid, i)), + block=True, + ) + + # Done with the current epoch, waiting for the final batches + self._wait_queue() + if self.stop_signal.is_set(): + # We're done return - self.queue.put( - executor.apply_async(get_index, (self.uid, i)), - block=True, - ) - - # Done with the current epoch, waiting for 
the final batches - self._wait_queue() - - if self.stop_signal.is_set(): - # We're done - return - - # Call the internal on epoch end. - self.py_dataset.on_epoch_end() - self._send_py_dataset() # Update the pool + # Call the internal on epoch end. + self.py_dataset.on_epoch_end() + self._send_py_dataset() # Update the pool + except Exception as e: + self.queue.put(e) # Report exception def get(self): """Creates a generator to extract data from the queue. @@ -558,7 +581,10 @@ def get(self): """ while self.is_running(): try: - inputs = self.queue.get(block=True, timeout=5).get() + value = self.queue.get(block=True, timeout=5) + if isinstance(value, Exception): + raise value # Propagate exception from other thread + inputs = value.get() if self.is_running(): self.queue.task_done() if inputs is not None: diff --git a/keras/src/trainers/data_adapters/py_dataset_adapter_test.py b/keras/src/trainers/data_adapters/py_dataset_adapter_test.py index b1be7002ac54..7c41971db561 100644 --- a/keras/src/trainers/data_adapters/py_dataset_adapter_test.py +++ b/keras/src/trainers/data_adapters/py_dataset_adapter_test.py @@ -15,21 +15,34 @@ class ExamplePyDataset(py_dataset_adapter.PyDataset): def __init__( - self, x_set, y_set, sample_weight=None, batch_size=32, delay=0, **kwargs + self, + x_set, + y_set, + sample_weight=None, + batch_size=32, + delay=0, + infinite=False, + **kwargs ): super().__init__(**kwargs) self.x, self.y = x_set, y_set self.batch_size = batch_size self.sample_weight = sample_weight self.delay = delay + self.infinite = infinite - def __len__(self): + @property + def num_batches(self): + if self.infinite: + return None return math.ceil(len(self.x) / self.batch_size) def __getitem__(self, idx): # Create artificial delay to test multiprocessing time.sleep(self.delay) + if self.infinite: + idx = idx % math.ceil(len(self.x) / self.batch_size) # Return x, y for batch idx. low = idx * self.batch_size # Cap upper bound at array length; the last batch may be smaller @@ -48,7 +61,8 @@ def __init__(self, inputs, batch_size=32, **kwargs): self.inputs = inputs self.batch_size = batch_size - def __len__(self): + @property + def num_batches(self): return math.ceil(len(self.inputs["x"]) / self.batch_size) def __getitem__(self, idx): @@ -98,6 +112,7 @@ class PyDatasetAdapterTest(testing.TestCase, parameterized.TestCase): "dataset_type": "torch", }, ], + infinite=[True, False], iterator_type=["np", "tf", "jax", "torch"], shuffle=[True, False], ) @@ -106,6 +121,7 @@ def test_basic_flow( self, shuffle, dataset_type, + infinite, iterator_type, workers=0, use_multiprocessing=False, @@ -127,6 +143,7 @@ def test_basic_flow( workers=workers, use_multiprocessing=use_multiprocessing, max_queue_size=max_queue_size, + infinite=infinite, ) adapter = py_dataset_adapter.PyDatasetAdapter( py_dataset, shuffle=shuffle @@ -157,10 +174,16 @@ def test_basic_flow( self.assertEqual(by.shape, (16, 2)) for i in range(by.shape[0]): sample_order.append(by[i, 0]) - if shuffle: - self.assertNotAllClose(sample_order, list(range(64))) + if infinite and len(sample_order) >= 128: + break + expected_order = list(range(64)) + if infinite: + # When the dataset is infinite, we cycle through the data twice. 
+ expected_order = expected_order + expected_order + if shuffle and not infinite: + self.assertNotAllClose(sample_order, expected_order) else: - self.assertAllClose(sample_order, list(range(64))) + self.assertAllClose(sample_order, expected_order) # TODO: test class_weight # TODO: test sample weights @@ -240,7 +263,8 @@ def test_dict_inputs(self): def test_with_different_shapes(self, iterator_type): class TestPyDataset(py_dataset_adapter.PyDataset): - def __len__(self): + @property + def num_batches(self): return 3 def __getitem__(self, idx): @@ -284,3 +308,69 @@ def __getitem__(self, idx): else: self.assertEqual(bx.shape, (2, 6)) self.assertEqual(by.shape, (2, 2)) + + @parameterized.named_parameters( + named_product( + [ + { + "testcase_name": "multiprocessing", + "workers": 2, + "use_multiprocessing": True, + "max_queue_size": 10, + }, + { + "testcase_name": "multithreading", + "workers": 2, + "max_queue_size": 10, + }, + { + "testcase_name": "single", + }, + ], + iterator_type=["np", "tf", "jax", "torch"], + ) + ) + def test_exception_reported( + self, + iterator_type, + workers=0, + use_multiprocessing=False, + max_queue_size=0, + ): + class ExceptionPyDataset(py_dataset_adapter.PyDataset): + + @property + def num_batches(self): + return 4 + + def __getitem__(self, index): + if index < 2: + return ( + np.random.random((64, 4)).astype("float32"), + np.random.random((64, 2)).astype("float32"), + ) + raise ValueError("Excepted exception") + + adapter = py_dataset_adapter.PyDatasetAdapter( + ExceptionPyDataset(), shuffle=False + ) + + expected_exception_class = ValueError + if iterator_type == "np": + it = adapter.get_numpy_iterator() + elif iterator_type == "tf": + it = adapter.get_tf_dataset() + # tf.data wraps the exception + expected_exception_class = tf.errors.InvalidArgumentError + elif iterator_type == "jax": + it = adapter.get_jax_iterator() + elif iterator_type == "torch": + it = adapter.get_torch_dataloader() + + it = iter(it) + next(it) + next(it) + with self.assertRaisesRegex( + expected_exception_class, "Excepted exception" + ): + next(it) From 5883a25f1b7c6eacc3f21f1821751a4109700796 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Fri, 26 Apr 2024 14:48:39 -0700 Subject: [PATCH 079/101] Fix dataset shuffling issue. 
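
The `paths_and_labels_to_dataset` helpers in the audio, image, and text
dataset utilities used to shuffle the file-path dataset *before* zipping
it with the label dataset, so the paths were reshuffled while the labels
kept their original order, and the (path, label) pairs could end up
misaligned. The fix in the diffs below zips first and then shuffles the
combined dataset. A toy sketch of the difference (illustrative only, not
the actual Keras code):

```python
import tensorflow as tf

paths = tf.data.Dataset.from_tensor_slices(["a.jpg", "b.jpg", "c.jpg"])
labels = tf.data.Dataset.from_tensor_slices([0, 1, 2])

# Buggy ordering: only `paths` is shuffled, so pairs no longer match.
misaligned = tf.data.Dataset.zip((paths.shuffle(3), labels))

# Fixed ordering: zip first, then shuffle, so pairs stay aligned.
aligned = tf.data.Dataset.zip((paths, labels)).shuffle(3)
```
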
--- keras/src/utils/audio_dataset_utils.py | 51 +++++++++++++++++--------- keras/src/utils/image_dataset_utils.py | 28 ++++++++------ keras/src/utils/text_dataset_utils.py | 29 +++++++++------ 3 files changed, 69 insertions(+), 39 deletions(-) diff --git a/keras/src/utils/audio_dataset_utils.py b/keras/src/utils/audio_dataset_utils.py index ac1bab223b82..7e320188225d 100644 --- a/keras/src/utils/audio_dataset_utils.py +++ b/keras/src/utils/audio_dataset_utils.py @@ -409,27 +409,44 @@ def paths_and_labels_to_dataset( ): """Constructs a fixed-size dataset of audio and labels.""" path_ds = tf.data.Dataset.from_tensor_slices(file_paths) - if shuffle: - path_ds = path_ds.shuffle( - buffer_size=shuffle_buffer_size or 1024, seed=seed + if label_mode: + label_ds = dataset_utils.labels_to_dataset( + labels, label_mode, num_classes ) + ds = tf.data.Dataset.zip((path_ds, label_ds)) + else: + ds = path_ds - audio_ds = path_ds.map( - lambda x: read_and_decode_audio( - x, sampling_rate, output_sequence_length - ), - num_parallel_calls=tf.data.AUTOTUNE, - ) + if shuffle: + ds = ds.shuffle(buffer_size=shuffle_buffer_size or 1024, seed=seed) - if ragged: - audio_ds = audio_ds.map( - lambda x: tf.RaggedTensor.from_tensor(x), + if label_mode: + ds = ds.map( + lambda x, y: ( + read_and_decode_audio(x, sampling_rate, output_sequence_length), + y, + ), num_parallel_calls=tf.data.AUTOTUNE, ) - if label_mode: - label_ds = dataset_utils.labels_to_dataset( - labels, label_mode, num_classes + if ragged: + ds = ds.map( + lambda x, y: (tf.RaggedTensor.from_tensor(x), y), + num_parallel_calls=tf.data.AUTOTUNE, + ) + + else: + ds = ds.map( + lambda x: read_and_decode_audio( + x, sampling_rate, output_sequence_length + ), + num_parallel_calls=tf.data.AUTOTUNE, ) - audio_ds = tf.data.Dataset.zip((audio_ds, label_ds)) - return audio_ds + + if ragged: + ds = ds.map( + lambda x: tf.RaggedTensor.from_tensor(x), + num_parallel_calls=tf.data.AUTOTUNE, + ) + + return ds diff --git a/keras/src/utils/image_dataset_utils.py b/keras/src/utils/image_dataset_utils.py index 30317c96780b..380b4337973f 100755 --- a/keras/src/utils/image_dataset_utils.py +++ b/keras/src/utils/image_dataset_utils.py @@ -367,12 +367,17 @@ def paths_and_labels_to_dataset( seed=None, ): """Constructs a dataset of images and labels.""" - # TODO(fchollet): consider making num_parallel_calls settable path_ds = tf.data.Dataset.from_tensor_slices(image_paths) - if shuffle: - path_ds = path_ds.shuffle( - buffer_size=shuffle_buffer_size or 1024, seed=seed + if label_mode: + label_ds = dataset_utils.labels_to_dataset( + labels, label_mode, num_classes ) + ds = tf.data.Dataset.zip((path_ds, label_ds)) + else: + ds = path_ds + + if shuffle: + ds = ds.shuffle(buffer_size=shuffle_buffer_size or 1024, seed=seed) args = ( image_size, @@ -382,15 +387,16 @@ def paths_and_labels_to_dataset( crop_to_aspect_ratio, pad_to_aspect_ratio, ) - img_ds = path_ds.map( - lambda x: load_image(x, *args), num_parallel_calls=tf.data.AUTOTUNE - ) if label_mode: - label_ds = dataset_utils.labels_to_dataset( - labels, label_mode, num_classes + ds = ds.map( + lambda x, y: (load_image(x, *args), y), + num_parallel_calls=tf.data.AUTOTUNE, + ) + else: + ds = ds.map( + lambda x: load_image(x, *args), num_parallel_calls=tf.data.AUTOTUNE ) - img_ds = tf.data.Dataset.zip((img_ds, label_ds)) - return img_ds + return ds def load_image( diff --git a/keras/src/utils/text_dataset_utils.py b/keras/src/utils/text_dataset_utils.py index d8e5ece971c5..ab1272bf190d 100644 --- 
a/keras/src/utils/text_dataset_utils.py +++ b/keras/src/utils/text_dataset_utils.py @@ -258,21 +258,28 @@ def paths_and_labels_to_dataset( ): """Constructs a dataset of text strings and labels.""" path_ds = tf.data.Dataset.from_tensor_slices(file_paths) - if shuffle: - path_ds = path_ds.shuffle( - buffer_size=shuffle_buffer_size or 1024, seed=seed - ) - - string_ds = path_ds.map( - lambda x: path_to_string_content(x, max_length), - num_parallel_calls=tf.data.AUTOTUNE, - ) if label_mode: label_ds = dataset_utils.labels_to_dataset( labels, label_mode, num_classes ) - string_ds = tf.data.Dataset.zip((string_ds, label_ds)) - return string_ds + ds = tf.data.Dataset.zip((path_ds, label_ds)) + else: + ds = path_ds + + if shuffle: + ds = ds.shuffle(buffer_size=shuffle_buffer_size or 1024, seed=seed) + + if label_mode: + ds = ds.map( + lambda x, y: (path_to_string_content(x, max_length), y), + num_parallel_calls=tf.data.AUTOTUNE, + ) + else: + ds = ds.map( + lambda x: path_to_string_content(x, max_length), + num_parallel_calls=tf.data.AUTOTUNE, + ) + return ds def path_to_string_content(path, max_length): From 61e37bd18bc37163702eeb0d39a1cac1115bdfc6 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Fri, 26 Apr 2024 15:46:11 -0700 Subject: [PATCH 080/101] Update version string. --- keras/src/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras/src/version.py b/keras/src/version.py index 3f3890b17af5..11e49a3b9267 100644 --- a/keras/src/version.py +++ b/keras/src/version.py @@ -1,7 +1,7 @@ from keras.src.api_export import keras_export # Unique source of truth for the version number. -__version__ = "3.3.2" +__version__ = "3.3.3" @keras_export("keras.version") From 533bd4342a3f35e162797ba6d62cfdc5477ef109 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Fri, 26 Apr 2024 16:10:14 -0700 Subject: [PATCH 081/101] Minor fix --- keras/src/trainers/compile_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras/src/trainers/compile_utils.py b/keras/src/trainers/compile_utils.py index 5678f8cba151..afb31ed716b6 100644 --- a/keras/src/trainers/compile_utils.py +++ b/keras/src/trainers/compile_utils.py @@ -164,7 +164,7 @@ def variables(self): if not self.built: return [] vars = [] - for m in self._flat_metrics + self._flat_weighted_metrics: + for m in self.metrics: if m is not None: vars.extend(m.variables) return vars From f6c4ac55692c132cd16211f4877fac6dbeead749 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Fri, 26 Apr 2024 16:19:54 -0700 Subject: [PATCH 082/101] Restore version string resolution in pip_build. --- pip_build.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pip_build.py b/pip_build.py index 52cc08747436..6570ab8eb228 100644 --- a/pip_build.py +++ b/pip_build.py @@ -86,9 +86,8 @@ def build(root_path, is_nightly=False, rc_index=None): move_tf_keras_directory() print(os.getcwd()) - # from keras.src.version import __version__ # noqa: E402 + from keras.src.version import __version__ # noqa: E402 - __version__ = "3.3.2" export_version_string(__version__, is_nightly, rc_index) return build_and_save_output(root_path, __version__) finally: From fe03ca5974c970627b9289ec95689c44c7176540 Mon Sep 17 00:00:00 2001 From: hertschuh <1091026+hertschuh@users.noreply.github.com> Date: Fri, 26 Apr 2024 17:53:05 -0700 Subject: [PATCH 083/101] Speed up `DataAdapter` tests by testing only the current backend. (#19625) There is no use case for using an iterator for a different backend than the current backend. 
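
Each test now builds only the iterator type that matches the running
backend. A condensed sketch of the dispatch used throughout the updated
tests below (the `get_iterator` helper name is illustrative, not part of
the change; `adapter` is the `DataAdapter` under test):

```python
from keras.src import backend

def get_iterator(adapter):
    # Pick the single iterator type that matches the current backend.
    if backend.backend() == "numpy":
        return adapter.get_numpy_iterator()
    elif backend.backend() == "tensorflow":
        return adapter.get_tf_dataset()
    elif backend.backend() == "jax":
        return adapter.get_jax_iterator()
    elif backend.backend() == "torch":
        return adapter.get_torch_dataloader()
```
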
Also: - limited the number of tests using multiprocessing; the threading tests give us good coverage. - fixed the `test_exception_reported` test, which was not actually exercising the multiprocessing / multithreading cases. - removed the unused `init_pool` method. --- .../data_adapters/array_data_adapter_test.py | 11 +- .../generator_data_adapter_test.py | 40 +++---- .../data_adapters/py_dataset_adapter.py | 5 - .../data_adapters/py_dataset_adapter_test.py | 110 +++++++++--------- .../data_adapters/tf_dataset_adapter_test.py | 29 +++-- .../torch_data_loader_adapter_test.py | 45 +++---- 6 files changed, 111 insertions(+), 129 deletions(-) diff --git a/keras/src/trainers/data_adapters/array_data_adapter_test.py b/keras/src/trainers/data_adapters/array_data_adapter_test.py index 46eb4fcc194e..80b4462e407b 100644 --- a/keras/src/trainers/data_adapters/array_data_adapter_test.py +++ b/keras/src/trainers/data_adapters/array_data_adapter_test.py @@ -52,11 +52,10 @@ def make_array(self, array_type, shape, dtype): "scipy_sparse", ], array_dtype=["float32", "float64"], - iterator_type=["np", "tf", "jax", "torch"], shuffle=[False, "batch", True], ) ) - def test_basic_flow(self, array_type, array_dtype, iterator_type, shuffle): + def test_basic_flow(self, array_type, array_dtype, shuffle): x = self.make_array(array_type, (34, 4), array_dtype) y = self.make_array(array_type, (34, 2), "int32") xdim1 = 1 if array_type == "pandas_series" else 4 @@ -75,10 +74,10 @@ def test_basic_flow(self, array_type, array_dtype, iterator_type, shuffle): self.assertEqual(adapter.has_partial_batch, True) self.assertEqual(adapter.partial_batch_size, 2) - if iterator_type == "np": + if backend.backend() == "numpy": it = adapter.get_numpy_iterator() expected_class = np.ndarray - elif iterator_type == "tf": + elif backend.backend() == "tensorflow": it = adapter.get_tf_dataset() if array_type == "tf_ragged": expected_class = tf.RaggedTensor @@ -88,13 +87,13 @@ def test_basic_flow(self, array_type, array_dtype, iterator_type, shuffle): expected_class = tf.SparseTensor else: expected_class = tf.Tensor - elif iterator_type == "jax": + elif backend.backend() == "jax": it = adapter.get_jax_iterator() if array_type in ("tf_sparse", "jax_sparse", "scipy_sparse"): expected_class = jax_sparse.JAXSparse else: expected_class = jax.Array - elif iterator_type == "torch": + elif backend.backend() == "torch": it = adapter.get_torch_dataloader() expected_class = torch.Tensor diff --git a/keras/src/trainers/data_adapters/generator_data_adapter_test.py b/keras/src/trainers/data_adapters/generator_data_adapter_test.py index 4d6ebdc5597c..76839308c7f6 100644 --- a/keras/src/trainers/data_adapters/generator_data_adapter_test.py +++ b/keras/src/trainers/data_adapters/generator_data_adapter_test.py @@ -3,12 +3,14 @@ import jax import jax.experimental.sparse as jax_sparse import numpy as np +import pytest import scipy import tensorflow as tf import torch from absl.testing import parameterized from jax import numpy as jnp +from keras.src import backend from keras.src import testing from keras.src.testing.test_utils import named_product from keras.src.trainers.data_adapters import generator_data_adapter @@ -37,10 +39,9 @@ class GeneratorDataAdapterTest(testing.TestCase, parameterized.TestCase): {"testcase_name": "no_weight", "use_sample_weight": False}, ], generator_type=["np", "tf", "jax", "torch"], - iterator_type=["np", "tf", "jax", "torch"], ) ) - def test_basic_flow(self, use_sample_weight, generator_type, iterator_type): + def test_basic_flow(self,
use_sample_weight, generator_type): x = np.random.random((34, 4)).astype("float32") y = np.array([[i, i] for i in range(34)], dtype="float32") sw = np.random.random((34,)).astype("float32") @@ -64,16 +65,16 @@ def test_basic_flow(self, use_sample_weight, generator_type, iterator_type): ) adapter = generator_data_adapter.GeneratorDataAdapter(make_generator()) - if iterator_type == "np": + if backend.backend() == "numpy": it = adapter.get_numpy_iterator() expected_class = np.ndarray - elif iterator_type == "tf": + elif backend.backend() == "tensorflow": it = adapter.get_tf_dataset() expected_class = tf.Tensor - elif iterator_type == "jax": + elif backend.backend() == "jax": it = adapter.get_jax_iterator() expected_class = jax.Array - elif iterator_type == "torch": + elif backend.backend() == "torch": it = adapter.get_torch_dataloader() expected_class = torch.Tensor @@ -101,10 +102,7 @@ def test_basic_flow(self, use_sample_weight, generator_type, iterator_type): sample_order.append(by[i, 0]) self.assertAllClose(sample_order, list(range(34))) - @parameterized.named_parameters( - named_product(iterator_type=["np", "tf", "jax", "torch"]) - ) - def test_with_different_shapes(self, iterator_type): + def test_with_different_shapes(self): def generator(): yield np.ones([16, 4], "float32"), np.ones([16, 2], "float32") yield np.ones([16, 5], "float32"), np.ones([16, 2], "float32") @@ -112,13 +110,13 @@ def generator(): adapter = generator_data_adapter.GeneratorDataAdapter(generator()) - if iterator_type == "np": + if backend.backend() == "numpy": it = adapter.get_numpy_iterator() - elif iterator_type == "tf": + elif backend.backend() == "tensorflow": it = adapter.get_tf_dataset() - elif iterator_type == "jax": + elif backend.backend() == "jax": it = adapter.get_jax_iterator() - elif iterator_type == "torch": + elif backend.backend() == "torch": it = adapter.get_torch_dataloader() for i, batch in enumerate(it): @@ -137,11 +135,13 @@ def generator(): self.assertEqual(by.shape, (2, 2)) @parameterized.named_parameters( - named_product( - generator_type=["tf", "jax", "scipy"], iterator_type=["tf", "jax"] - ) + named_product(generator_type=["tf", "jax", "scipy"]) + ) + @pytest.mark.skipif( + not backend.SUPPORTS_SPARSE_TENSORS, + reason="Backend does not support sparse tensors", ) - def test_scipy_sparse_tensors(self, generator_type, iterator_type): + def test_scipy_sparse_tensors(self, generator_type): if generator_type == "tf": x = tf.SparseTensor([[0, 0], [1, 2]], [1.0, 2.0], (2, 4)) y = tf.SparseTensor([[0, 0], [1, 1]], [3.0, 4.0], (2, 2)) @@ -158,10 +158,10 @@ def generate(): adapter = generator_data_adapter.GeneratorDataAdapter(generate()) - if iterator_type == "tf": + if backend.backend() == "tensorflow": it = adapter.get_tf_dataset() expected_class = tf.SparseTensor - elif iterator_type == "jax": + elif backend.backend() == "jax": it = adapter.get_jax_iterator() expected_class = jax_sparse.BCOO diff --git a/keras/src/trainers/data_adapters/py_dataset_adapter.py b/keras/src/trainers/data_adapters/py_dataset_adapter.py index daa56a1313f4..19b485705778 100644 --- a/keras/src/trainers/data_adapters/py_dataset_adapter.py +++ b/keras/src/trainers/data_adapters/py_dataset_adapter.py @@ -342,11 +342,6 @@ def get_worker_id_queue(): return _WORKER_ID_QUEUE -def init_pool(seqs): - global _SHARED_SEQUENCES - _SHARED_SEQUENCES = seqs - - def get_index(uid, i): """Get the value from the PyDataset `uid` at index `i`. 
diff --git a/keras/src/trainers/data_adapters/py_dataset_adapter_test.py b/keras/src/trainers/data_adapters/py_dataset_adapter_test.py index 7c41971db561..ac661c2047a4 100644 --- a/keras/src/trainers/data_adapters/py_dataset_adapter_test.py +++ b/keras/src/trainers/data_adapters/py_dataset_adapter_test.py @@ -3,10 +3,12 @@ import jax import numpy as np +import pytest import tensorflow as tf import torch from absl.testing import parameterized +from keras.src import backend from keras.src import testing from keras.src.testing.test_utils import named_product from keras.src.trainers.data_adapters import py_dataset_adapter @@ -77,6 +79,21 @@ def __getitem__(self, idx): return batch +class ExceptionPyDataset(py_dataset_adapter.PyDataset): + + @property + def num_batches(self): + return 4 + + def __getitem__(self, index): + if index < 2: + return ( + np.random.random((64, 4)).astype("float32"), + np.random.random((64, 2)).astype("float32"), + ) + raise ValueError("Expected exception") + + class PyDatasetAdapterTest(testing.TestCase, parameterized.TestCase): @parameterized.named_parameters( named_product( @@ -113,7 +130,6 @@ class PyDatasetAdapterTest(testing.TestCase, parameterized.TestCase): }, ], infinite=[True, False], - iterator_type=["np", "tf", "jax", "torch"], shuffle=[True, False], ) ) @@ -122,11 +138,13 @@ def test_basic_flow( shuffle, dataset_type, infinite, - iterator_type, workers=0, use_multiprocessing=False, max_queue_size=0, ): + if use_multiprocessing and (infinite or shuffle): + pytest.skip("Starting processes is slow, only test one variant") + set_random_seed(1337) x = np.random.random((64, 4)).astype("float32") y = np.array([[i, i] for i in range(64)], dtype="float32") @@ -149,16 +167,16 @@ def test_basic_flow( py_dataset, shuffle=shuffle ) - if iterator_type == "np": + if backend.backend() == "numpy": it = adapter.get_numpy_iterator() expected_class = np.ndarray - elif iterator_type == "tf": + elif backend.backend() == "tensorflow": it = adapter.get_tf_dataset() expected_class = tf.Tensor - elif iterator_type == "jax": + elif backend.backend() == "jax": it = adapter.get_jax_iterator() expected_class = jax.Array - elif iterator_type == "torch": + elif backend.backend() == "torch": it = adapter.get_torch_dataloader() expected_class = torch.Tensor @@ -257,10 +275,7 @@ def test_dict_inputs(self): self.assertEqual(tuple(bx.shape), (4, 4)) self.assertEqual(tuple(by.shape), (4, 2)) - @parameterized.named_parameters( - named_product(iterator_type=["np", "tf", "jax", "torch"]) - ) - def test_with_different_shapes(self, iterator_type): + def test_with_different_shapes(self): class TestPyDataset(py_dataset_adapter.PyDataset): @property @@ -285,13 +300,13 @@ def __getitem__(self, idx): TestPyDataset(), shuffle=False ) - if iterator_type == "np": + if backend.backend() == "numpy": it = adapter.get_numpy_iterator() - elif iterator_type == "tf": + elif backend.backend() == "tensorflow": it = adapter.get_tf_dataset() - elif iterator_type == "jax": + elif backend.backend() == "jax": it = adapter.get_jax_iterator() - elif iterator_type == "torch": + elif backend.backend() == "torch": it = adapter.get_torch_dataloader() for i, batch in enumerate(it): @@ -310,67 +325,52 @@ def __getitem__(self, idx): self.assertEqual(by.shape, (2, 2)) @parameterized.named_parameters( - named_product( - [ - { - "testcase_name": "multiprocessing", - "workers": 2, - "use_multiprocessing": True, - "max_queue_size": 10, - }, - { - "testcase_name": "multithreading", - "workers": 2, - "max_queue_size": 10, - }, - { - 
"testcase_name": "single", - }, - ], - iterator_type=["np", "tf", "jax", "torch"], - ) + [ + { + "testcase_name": "multiprocessing", + "workers": 2, + "use_multiprocessing": True, + "max_queue_size": 10, + }, + { + "testcase_name": "multithreading", + "workers": 2, + "max_queue_size": 10, + }, + { + "testcase_name": "single", + }, + ] ) def test_exception_reported( self, - iterator_type, workers=0, use_multiprocessing=False, max_queue_size=0, ): - class ExceptionPyDataset(py_dataset_adapter.PyDataset): - - @property - def num_batches(self): - return 4 - - def __getitem__(self, index): - if index < 2: - return ( - np.random.random((64, 4)).astype("float32"), - np.random.random((64, 2)).astype("float32"), - ) - raise ValueError("Excepted exception") - - adapter = py_dataset_adapter.PyDatasetAdapter( - ExceptionPyDataset(), shuffle=False + dataset = ExceptionPyDataset( + workers=workers, + use_multiprocessing=use_multiprocessing, + max_queue_size=max_queue_size, ) + adapter = py_dataset_adapter.PyDatasetAdapter(dataset, shuffle=False) expected_exception_class = ValueError - if iterator_type == "np": + if backend.backend() == "numpy": it = adapter.get_numpy_iterator() - elif iterator_type == "tf": + elif backend.backend() == "tensorflow": it = adapter.get_tf_dataset() # tf.data wraps the exception expected_exception_class = tf.errors.InvalidArgumentError - elif iterator_type == "jax": + elif backend.backend() == "jax": it = adapter.get_jax_iterator() - elif iterator_type == "torch": + elif backend.backend() == "torch": it = adapter.get_torch_dataloader() it = iter(it) next(it) next(it) with self.assertRaisesRegex( - expected_exception_class, "Excepted exception" + expected_exception_class, "Expected exception" ): next(it) diff --git a/keras/src/trainers/data_adapters/tf_dataset_adapter_test.py b/keras/src/trainers/data_adapters/tf_dataset_adapter_test.py index ad48c2d3c241..2535e505d619 100644 --- a/keras/src/trainers/data_adapters/tf_dataset_adapter_test.py +++ b/keras/src/trainers/data_adapters/tf_dataset_adapter_test.py @@ -2,20 +2,18 @@ import jax import numpy as np +import pytest import tensorflow as tf import torch from absl.testing import parameterized +from keras.src import backend from keras.src import testing -from keras.src.testing.test_utils import named_product from keras.src.trainers.data_adapters import tf_dataset_adapter class TestTFDatasetAdapter(testing.TestCase, parameterized.TestCase): - @parameterized.named_parameters( - named_product(iterator_type=["np", "tf", "jax", "torch"]) - ) - def test_basic_flow(self, iterator_type): + def test_basic_flow(self): x = tf.random.normal((34, 4)) y = tf.random.normal((34, 2)) base_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16) @@ -26,16 +24,16 @@ def test_basic_flow(self, iterator_type): self.assertEqual(adapter.has_partial_batch, None) self.assertEqual(adapter.partial_batch_size, None) - if iterator_type == "np": + if backend.backend() == "numpy": it = adapter.get_numpy_iterator() expected_class = np.ndarray - elif iterator_type == "tf": + elif backend.backend() == "tensorflow": it = adapter.get_tf_dataset() expected_class = tf.Tensor - elif iterator_type == "jax": + elif backend.backend() == "jax": it = adapter.get_jax_iterator() expected_class = jax.Array - elif iterator_type == "torch": + elif backend.backend() == "torch": it = adapter.get_torch_dataloader() expected_class = torch.Tensor @@ -258,10 +256,11 @@ def test_distribute_dataset(self): self.assertEqual(tuple(bx.shape), (2, 4)) self.assertEqual(tuple(by.shape), (2, 
2)) - @parameterized.named_parameters( - named_product(iterator_type=["np", "tf", "jax"]) + @pytest.mark.skipif( + not backend.SUPPORTS_SPARSE_TENSORS and backend.backend() != "numpy", + reason="Backend does not support sparse tensors", ) - def test_tf_sparse_tensors(self, iterator_type): + def test_tf_sparse_tensors(self): x = tf.SparseTensor( indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=(2, 4) ) @@ -271,13 +270,13 @@ def test_tf_sparse_tensors(self, iterator_type): base_ds = tf.data.Dataset.from_tensors((x, y)) adapter = tf_dataset_adapter.TFDatasetAdapter(base_ds) - if iterator_type == "np": + if backend.backend() == "numpy": it = adapter.get_numpy_iterator() expected_class = np.ndarray - elif iterator_type == "tf": + elif backend.backend() == "tensorflow": it = adapter.get_tf_dataset() expected_class = tf.SparseTensor - elif iterator_type == "jax": + elif backend.backend() == "jax": it = adapter.get_jax_iterator() expected_class = jax.experimental.sparse.BCOO diff --git a/keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py b/keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py index e86f570d6925..4d02f5592f67 100644 --- a/keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py +++ b/keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py @@ -6,6 +6,7 @@ import torch from absl.testing import parameterized +from keras.src import backend from keras.src import testing from keras.src.testing.test_utils import named_product from keras.src.trainers.data_adapters.torch_data_loader_adapter import ( @@ -14,10 +15,7 @@ class TestTorchDataLoaderAdapter(testing.TestCase, parameterized.TestCase): - @parameterized.named_parameters( - named_product(iterator_type=["np", "tf", "jax", "torch"]) - ) - def test_basic_dataloader(self, iterator_type): + def test_basic_dataloader(self): x = torch.normal(2, 3, size=(34, 4)) y = torch.normal(1, 3, size=(34, 2)) ds = torch.utils.data.TensorDataset(x, y) @@ -29,16 +27,16 @@ def test_basic_dataloader(self, iterator_type): self.assertEqual(adapter.has_partial_batch, True) self.assertEqual(adapter.partial_batch_size, 2) - if iterator_type == "np": + if backend.backend() == "numpy": it = adapter.get_numpy_iterator() expected_class = np.ndarray - elif iterator_type == "tf": + elif backend.backend() == "tensorflow": it = adapter.get_tf_dataset() expected_class = tf.Tensor - elif iterator_type == "jax": + elif backend.backend() == "jax": it = adapter.get_jax_iterator() expected_class = jax.Array - elif iterator_type == "torch": + elif backend.backend() == "torch": it = adapter.get_torch_dataloader() expected_class = torch.Tensor @@ -57,15 +55,9 @@ def test_basic_dataloader(self, iterator_type): self.assertEqual(by.shape, (2, 2)) @parameterized.named_parameters( - named_product( - batch_size=[None, 3], - implements_len=[True, False], - iterator_type=["np", "tf", "jax", "torch"], - ) + named_product(batch_size=[None, 3], implements_len=[True, False]) ) - def test_dataloader_iterable_dataset( - self, batch_size, implements_len, iterator_type - ): + def test_dataloader_iterable_dataset(self, batch_size, implements_len): class TestIterableDataset(torch.utils.data.IterableDataset): def __init__(self): @@ -104,16 +96,16 @@ def __len__(self): self.assertIsNone(adapter.has_partial_batch) self.assertIsNone(adapter.partial_batch_size) - if iterator_type == "np": + if backend.backend() == "numpy": it = adapter.get_numpy_iterator() expected_class = np.ndarray - elif iterator_type == "tf": + elif backend.backend() == 
"tensorflow": it = adapter.get_tf_dataset() expected_class = tf.Tensor - elif iterator_type == "jax": + elif backend.backend() == "jax": it = adapter.get_jax_iterator() expected_class = jax.Array - elif iterator_type == "torch": + elif backend.backend() == "torch": it = adapter.get_torch_dataloader() expected_class = torch.Tensor @@ -142,10 +134,7 @@ def __len__(self): else: self.assertEqual(batch_count, 10) - @parameterized.named_parameters( - named_product(iterator_type=["np", "tf", "jax", "torch"]) - ) - def test_with_different_shapes(self, iterator_type): + def test_with_different_shapes(self): x = ( [np.ones([4], "float32")] * 16 + [np.ones([5], "float32")] * 16 @@ -161,13 +150,13 @@ def test_with_different_shapes(self, iterator_type): self.assertEqual(adapter.has_partial_batch, True) self.assertEqual(adapter.partial_batch_size, 2) - if iterator_type == "np": + if backend.backend() == "numpy": it = adapter.get_numpy_iterator() - elif iterator_type == "tf": + elif backend.backend() == "tensorflow": it = adapter.get_tf_dataset() - elif iterator_type == "jax": + elif backend.backend() == "jax": it = adapter.get_jax_iterator() - elif iterator_type == "torch": + elif backend.backend() == "torch": it = adapter.get_torch_dataloader() for i, batch in enumerate(it): From 688daa5a1c0c750442997bbadfe0962b657f7b5a Mon Sep 17 00:00:00 2001 From: Luca Pizzini Date: Sat, 27 Apr 2024 18:44:04 +0200 Subject: [PATCH 084/101] feat(ops): support np.argpartition (#19588) * feat(ops): support np.argpartition * updated documentation, type-casting, and tf implementation * fixed tf implementation * added torch cast to int32 * updated torch type and API generated files * added torch output type cast --- keras/api/_tf_keras/keras/ops/__init__.py | 1 + .../api/_tf_keras/keras/ops/numpy/__init__.py | 1 + keras/api/ops/__init__.py | 1 + keras/api/ops/numpy/__init__.py | 1 + keras/src/backend/jax/numpy.py | 4 ++ keras/src/backend/numpy/numpy.py | 4 ++ keras/src/backend/tensorflow/numpy.py | 21 ++++++++ keras/src/backend/torch/numpy.py | 20 ++++++++ keras/src/ops/numpy.py | 42 ++++++++++++++++ keras/src/ops/numpy_test.py | 48 +++++++++++++++++++ 10 files changed, 143 insertions(+) diff --git a/keras/api/_tf_keras/keras/ops/__init__.py b/keras/api/_tf_keras/keras/ops/__init__.py index 386730deb33f..be8f00acb559 100644 --- a/keras/api/_tf_keras/keras/ops/__init__.py +++ b/keras/api/_tf_keras/keras/ops/__init__.py @@ -102,6 +102,7 @@ from keras.src.ops.numpy import arctanh from keras.src.ops.numpy import argmax from keras.src.ops.numpy import argmin +from keras.src.ops.numpy import argpartition from keras.src.ops.numpy import argsort from keras.src.ops.numpy import array from keras.src.ops.numpy import average diff --git a/keras/api/_tf_keras/keras/ops/numpy/__init__.py b/keras/api/_tf_keras/keras/ops/numpy/__init__.py index d0ca72fa9d59..05c8f93fd736 100644 --- a/keras/api/_tf_keras/keras/ops/numpy/__init__.py +++ b/keras/api/_tf_keras/keras/ops/numpy/__init__.py @@ -22,6 +22,7 @@ from keras.src.ops.numpy import arctanh from keras.src.ops.numpy import argmax from keras.src.ops.numpy import argmin +from keras.src.ops.numpy import argpartition from keras.src.ops.numpy import argsort from keras.src.ops.numpy import array from keras.src.ops.numpy import average diff --git a/keras/api/ops/__init__.py b/keras/api/ops/__init__.py index 386730deb33f..be8f00acb559 100644 --- a/keras/api/ops/__init__.py +++ b/keras/api/ops/__init__.py @@ -102,6 +102,7 @@ from keras.src.ops.numpy import arctanh from keras.src.ops.numpy import 
argmax from keras.src.ops.numpy import argmin +from keras.src.ops.numpy import argpartition from keras.src.ops.numpy import argsort from keras.src.ops.numpy import array from keras.src.ops.numpy import average diff --git a/keras/api/ops/numpy/__init__.py b/keras/api/ops/numpy/__init__.py index d0ca72fa9d59..05c8f93fd736 100644 --- a/keras/api/ops/numpy/__init__.py +++ b/keras/api/ops/numpy/__init__.py @@ -22,6 +22,7 @@ from keras.src.ops.numpy import arctanh from keras.src.ops.numpy import argmax from keras.src.ops.numpy import argmin +from keras.src.ops.numpy import argpartition from keras.src.ops.numpy import argsort from keras.src.ops.numpy import array from keras.src.ops.numpy import average diff --git a/keras/src/backend/jax/numpy.py b/keras/src/backend/jax/numpy.py index b202f35e1661..60dec64420cd 100644 --- a/keras/src/backend/jax/numpy.py +++ b/keras/src/backend/jax/numpy.py @@ -1167,3 +1167,7 @@ def select(condlist, choicelist, default=0): def slogdet(x): x = convert_to_tensor(x) return tuple(jnp.linalg.slogdet(x)) + + +def argpartition(x, kth, axis=-1): + return jnp.argpartition(x, kth, axis) diff --git a/keras/src/backend/numpy/numpy.py b/keras/src/backend/numpy/numpy.py index 939cf6caece3..b820bc91d504 100644 --- a/keras/src/backend/numpy/numpy.py +++ b/keras/src/backend/numpy/numpy.py @@ -1102,3 +1102,7 @@ def select(condlist, choicelist, default=0): def slogdet(x): return tuple(np.linalg.slogdet(x)) + + +def argpartition(x, kth, axis=-1): + return np.argpartition(x, kth, axis).astype("int32") diff --git a/keras/src/backend/tensorflow/numpy.py b/keras/src/backend/tensorflow/numpy.py index 60462d3c1997..a464af2fcf8d 100644 --- a/keras/src/backend/tensorflow/numpy.py +++ b/keras/src/backend/tensorflow/numpy.py @@ -2435,3 +2435,24 @@ def select(condlist, choicelist, default=0): def slogdet(x): x = convert_to_tensor(x) return tuple(tf.linalg.slogdet(x)) + + +def argpartition(x, kth, axis=-1): + x = convert_to_tensor(x, tf.int32) + + x = swapaxes(x, axis, -1) + bottom_ind = tf.math.top_k(-x, kth + 1).indices + + n = tf.shape(x)[-1] + + mask = tf.reduce_sum(tf.one_hot(bottom_ind, n, dtype=tf.int32), axis=0) + + indices = tf.where(mask) + updates = tf.squeeze(tf.zeros(tf.shape(indices)[0], dtype=tf.int32)) + + final_mask = tf.tensor_scatter_nd_update(x, indices, updates) + + top_ind = tf.math.top_k(final_mask, tf.shape(x)[-1] - kth - 1).indices + + out = tf.concat([bottom_ind, top_ind], axis=x.ndim - 1) + return swapaxes(out, -1, axis) diff --git a/keras/src/backend/torch/numpy.py b/keras/src/backend/torch/numpy.py index 53b22a8db54c..34d08525dc29 100644 --- a/keras/src/backend/torch/numpy.py +++ b/keras/src/backend/torch/numpy.py @@ -1613,3 +1613,23 @@ def select(condlist, choicelist, default=0): def slogdet(x): x = convert_to_tensor(x) return tuple(torch.linalg.slogdet(x)) + + +def argpartition(x, kth, axis=-1): + x = convert_to_tensor(x, "int32") + + x = torch.transpose(x, axis, -1) + bottom_ind = torch.topk(-x, kth + 1)[1] + + def set_to_zero(a, i): + a[i] = 0 + return a + + for _ in range(x.dim() - 1): + set_to_zero = torch.vmap(set_to_zero) + proxy = set_to_zero(torch.ones(x.shape, dtype=torch.int32), bottom_ind) + + top_ind = torch.topk(proxy, x.shape[-1] - kth - 1)[1] + + out = torch.cat([bottom_ind, top_ind], dim=x.dim() - 1) + return cast(torch.transpose(out, -1, axis), "int32") diff --git a/keras/src/ops/numpy.py b/keras/src/ops/numpy.py index 62428df7cb9b..4588c446770b 100644 --- a/keras/src/ops/numpy.py +++ b/keras/src/ops/numpy.py @@ -6191,3 +6191,45 @@ def slogdet(x): 
if any_symbolic_tensors((x,)): return Slogdet().symbolic_call(x) return backend.numpy.slogdet(x) + + +class Argpartition(Operation): + def __init__(self, kth, axis=-1): + super().__init__() + if not isinstance(kth, int): + raise ValueError("kth must be an integer. Received: " f"kth = {kth}") + self.kth = kth + self.axis = axis + + def call(self, x): + return backend.numpy.argpartition(x, kth=self.kth, axis=self.axis) + + def compute_output_spec(self, x): + return KerasTensor(x.shape, dtype="int32") + + +@keras_export(["keras.ops.argpartition", "keras.ops.numpy.argpartition"]) +def argpartition(x, kth, axis=-1): + """Performs an indirect partition along the given axis. + + It returns an array + of indices of the same shape as `x` that index data along the given axis + in partitioned order. + + Args: + x: Array to sort. + kth: Element index to partition by. + The k-th element will be in its final sorted position and all + smaller elements will be moved before it and all larger elements + behind it. The order of all elements in the partitions is undefined. + Note that unlike `np.argpartition`, only a single integer `kth` + is supported here; a sequence of values raises a `ValueError`. + axis: Axis along which to sort. The default is -1 (the last axis). + If `None`, the flattened array is used. + + Returns: + Array of indices that partition `x` along the specified `axis`. + """ + if any_symbolic_tensors((x,)): + return Argpartition(kth, axis).symbolic_call(x) + return backend.numpy.argpartition(x, kth, axis) diff --git a/keras/src/ops/numpy_test.py b/keras/src/ops/numpy_test.py index 5df2980fe475..768f5ab392ef 100644 --- a/keras/src/ops/numpy_test.py +++ b/keras/src/ops/numpy_test.py @@ -1475,6 +1475,14 @@ def test_vstack(self): y = KerasTensor((None, None)) self.assertEqual(knp.vstack([x, y]).shape, (None, 3)) + def test_argpartition(self): + x = KerasTensor((None, 3)) + self.assertEqual(knp.argpartition(x, 3).shape, (None, 3)) + self.assertEqual(knp.argpartition(x, 1, axis=1).shape, (None, 3)) + + with self.assertRaises(ValueError): + knp.argpartition(x, (1, 3)) + class NumpyOneInputOpsStaticShapeTest(testing.TestCase): def test_mean(self): @@ -1981,6 +1989,14 @@ def test_vstack(self): y = KerasTensor((2, 3)) self.assertEqual(knp.vstack([x, y]).shape, (4, 3)) + def test_argpartition(self): + x = KerasTensor((2, 3)) + self.assertEqual(knp.argpartition(x, 3).shape, (2, 3)) + self.assertEqual(knp.argpartition(x, 1, axis=1).shape, (2, 3)) + + with self.assertRaises(ValueError): + knp.argpartition(x, (1, 3)) + class NumpyTwoInputOpsCorretnessTest(testing.TestCase, parameterized.TestCase): def test_add(self): @@ -4303,6 +4319,19 @@ def myfunc(a, b): out, np.vectorize(np.diag, signature="(d,d)->(d)")(np.eye(4)) ) + def test_argpartition(self): + x = np.array([3, 4, 2, 1]) + self.assertAllClose(knp.argpartition(x, 2), np.argpartition(x, 2)) + self.assertAllClose(knp.Argpartition(2)(x), np.argpartition(x, 2)) + + x = np.array([[3, 4, 2], [1, 3, 1]]) + self.assertAllClose(knp.argpartition(x, 1), np.argpartition(x, 1)) + self.assertAllClose(knp.Argpartition(1)(x), np.argpartition(x, 1)) + + x = np.array([[[3, 4], [2, 3]], [[1, 2], [0, 1]]]) + self.assertAllClose(knp.argpartition(x, 1), np.argpartition(x, 1)) + self.assertAllClose(knp.Argpartition(1)(x), np.argpartition(x, 1)) + class NumpyArrayCreateOpsCorrectnessTest(testing.TestCase): def test_ones(self): @@ -5402,6 +5431,25 @@ def test_argmin(self, dtype): expected_dtype, ) + @parameterized.named_parameters(named_product(dtype=ALL_DTYPES)) + def test_argpartition(self,
dtype): + import jax.numpy as jnp + + if dtype == "bool": + self.skipTest("argpartition doesn't support bool dtype") + + x = knp.array([1, 2, 3], dtype=dtype) + x_jax = jnp.array([1, 2, 3], dtype=dtype) + expected_dtype = standardize_dtype(jnp.argpartition(x_jax, 1).dtype) + + self.assertEqual( + standardize_dtype(knp.argpartition(x, 1).dtype), expected_dtype + ) + self.assertEqual( + standardize_dtype(knp.Argpartition(1).symbolic_call(x).dtype), + expected_dtype, + ) + @parameterized.named_parameters(named_product(dtype=ALL_DTYPES)) def test_argsort(self, dtype): import jax.numpy as jnp From 32fe905f89326a753ba51cd7d8f6df12da2bfe2a Mon Sep 17 00:00:00 2001 From: Luca Pizzini Date: Sat, 27 Apr 2024 18:48:04 +0200 Subject: [PATCH 085/101] test(trainers): add test_errors implementation for ArrayDataAdapter class (#19626) --- .../data_adapters/array_data_adapter_test.py | 57 ++++++++++++++++++- 1 file changed, 55 insertions(+), 2 deletions(-) diff --git a/keras/src/trainers/data_adapters/array_data_adapter_test.py b/keras/src/trainers/data_adapters/array_data_adapter_test.py index 80b4462e407b..a61a904240e5 100644 --- a/keras/src/trainers/data_adapters/array_data_adapter_test.py +++ b/keras/src/trainers/data_adapters/array_data_adapter_test.py @@ -244,5 +244,58 @@ def test_class_weights(self, target_encoding): self.assertAllClose(bw, [0.1, 0.2, 0.3, 0.4]) def test_errors(self): - # TODO - pass + x = np.random.random((34, 1)) + y = np.random.random((34, 3)) + sw = np.random.random((34,)) + cw = { + 0: 0.1, + 1: 0.2, + 2: 0.3, + 3: 0.4, + } + + with self.assertRaisesRegex( + ValueError, "Expected all elements of `x` to be array-like" + ): + array_data_adapter.ArrayDataAdapter(x="Invalid") + with self.assertRaisesRegex( + ValueError, "Expected all elements of `x` to be array-like" + ): + array_data_adapter.ArrayDataAdapter(x=x, y="Invalid") + with self.assertRaisesRegex( + ValueError, "Expected all elements of `x` to be array-like" + ): + array_data_adapter.ArrayDataAdapter( + x=x, y=y, sample_weight="Invalid" + ) + + with self.assertRaisesRegex( + ValueError, "You cannot `class_weight` and `sample_weight`" + ): + array_data_adapter.ArrayDataAdapter( + x=x, y=y, sample_weight=sw, class_weight=cw + ) + + nested_y = ({"x": x, "y": y},) + with self.assertRaisesRegex( + ValueError, "You should provide one `sample_weight` array per" + ): + array_data_adapter.ArrayDataAdapter( + x=x, y=nested_y, sample_weight=[] + ) + + tensor_sw = self.make_array("tf", (34, 2), "int32") + with self.assertRaisesRegex( + ValueError, "For a model with multiple outputs, when providing" + ): + array_data_adapter.ArrayDataAdapter( + x=x, y=nested_y, sample_weight=tensor_sw + ) + + with self.assertRaisesRegex( + ValueError, + "`class_weight` is only supported for Models with a single", + ): + array_data_adapter.ArrayDataAdapter( + x=x, y=nested_y, class_weight=cw + ) From 0f3bd5222bb46ac6da3ed96187e7ec908aa851fa Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Sat, 27 Apr 2024 11:45:38 -0700 Subject: [PATCH 086/101] Fix torch GPU CI --- keras/src/backend/torch/numpy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras/src/backend/torch/numpy.py b/keras/src/backend/torch/numpy.py index 34d08525dc29..6e361d9e3379 100644 --- a/keras/src/backend/torch/numpy.py +++ b/keras/src/backend/torch/numpy.py @@ -1627,7 +1627,7 @@ def set_to_zero(a, i): for _ in range(x.dim() - 1): set_to_zero = torch.vmap(set_to_zero) - proxy = set_to_zero(torch.ones(x.shape, dtype=torch.int32), bottom_ind) + proxy = 
set_to_zero(ones(x.shape, dtype=torch.int32), bottom_ind) top_ind = torch.topk(proxy, x.shape[-1] - kth - 1)[1] From b41f687d7b0760ce42c158566e4f6793962db5ba Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Sat, 27 Apr 2024 17:41:25 -0700 Subject: [PATCH 087/101] Fix argmax/argmin keepdims with defined axis in TF --- keras/src/backend/tensorflow/numpy.py | 3 ++- keras/src/ops/numpy_test.py | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/keras/src/backend/tensorflow/numpy.py b/keras/src/backend/tensorflow/numpy.py index a464af2fcf8d..5a49c22949a5 100644 --- a/keras/src/backend/tensorflow/numpy.py +++ b/keras/src/backend/tensorflow/numpy.py @@ -19,6 +19,7 @@ from keras.src.backend.tensorflow import sparse from keras.src.backend.tensorflow.core import cast from keras.src.backend.tensorflow.core import convert_to_tensor +from keras.src.backend.tensorflow.core import shape as shape_op @sparse.elementwise_binary_union(tf.sparse.add) @@ -756,7 +757,7 @@ def _keepdims(x, y, axis): if axis is None: shape = [1 for _ in range(len(x.shape))] else: - shape = [tf.shape[i] for i in range(len(x.shape))] + shape = list(shape_op(x)) for axis in tree.flatten(axis): shape[axis] = 1 y = tf.reshape(y, shape) diff --git a/keras/src/ops/numpy_test.py b/keras/src/ops/numpy_test.py index 768f5ab392ef..3960c6f02263 100644 --- a/keras/src/ops/numpy_test.py +++ b/keras/src/ops/numpy_test.py @@ -3105,9 +3105,13 @@ def test_arctanh(self): self.assertAllClose(knp.Arctanh()(x), np.arctanh(x)) def test_argmax(self): - x = np.array([[1, 2, 3], [3, 2, 1]]) + x = np.array([[1, 2, 3], [3, 2, 1], [4, 5, 6]]) self.assertAllClose(knp.argmax(x), np.argmax(x)) self.assertAllClose(knp.argmax(x, axis=1), np.argmax(x, axis=1)) + self.assertAllClose( + knp.argmax(x, axis=1, keepdims=True), + np.argmax(x, axis=1, keepdims=True), + ) self.assertAllClose( knp.argmax(x, keepdims=True), np.argmax(x, keepdims=True) ) From 54e15eb3eecea8a743e1138a93bbc87703da96e2 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Sat, 27 Apr 2024 21:42:10 -0700 Subject: [PATCH 088/101] Misc fixes in TF backend ops. --- keras/src/backend/tensorflow/numpy.py | 76 +++++++++++++++------------ 1 file changed, 42 insertions(+), 34 deletions(-) diff --git a/keras/src/backend/tensorflow/numpy.py b/keras/src/backend/tensorflow/numpy.py index 5a49c22949a5..51ee5833cad2 100644 --- a/keras/src/backend/tensorflow/numpy.py +++ b/keras/src/backend/tensorflow/numpy.py @@ -828,15 +828,10 @@ def _rank_not_equal_case(): if axis is None: avg = _rank_equal_case() else: - # We condition on rank rather than shape equality, because if we do - # the latter, when the shapes are partially unknown but the ranks - # are known and different, np_utils.cond will run shape checking on - # the true branch, which will raise a shape-checking error. 
- avg = tf.cond( - tf.equal(tf.rank(x), tf.rank(weights)), - _rank_equal_case, - _rank_not_equal_case, - ) + if len(x.shape) == len(weights.shape): + avg = _rank_equal_case() + else: + avg = _rank_not_equal_case() return avg @@ -929,6 +924,10 @@ def count_nonzero(x, axis=None): def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None): x1 = convert_to_tensor(x1) x2 = convert_to_tensor(x2) + dtype = dtypes.result_type(x1.dtype, x2.dtype) + x1 = tf.cast(x1, dtype) + x2 = tf.cast(x2, dtype) + if axis is not None: axisa = axis axisb = axis @@ -936,10 +935,6 @@ def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None): x1 = moveaxis(x1, axisa, -1) x2 = moveaxis(x2, axisb, -1) - dtype = dtypes.result_type(x1.dtype, x2.dtype) - x1 = tf.cast(x1, dtype) - x2 = tf.cast(x2, dtype) - def maybe_pad_zeros(x, size_of_last_dim): def pad_zeros(x): return tf.pad( @@ -953,28 +948,40 @@ def pad_zeros(x): ), ) + if isinstance(size_of_last_dim, int): + if size_of_last_dim == 2: + return pad_zeros(x) + return x + return tf.cond( tf.equal(size_of_last_dim, 2), lambda: pad_zeros(x), lambda: x ) - x1_dim = tf.shape(x1)[-1] - x2_dim = tf.shape(x2)[-1] + x1_dim = shape_op(x1)[-1] + x2_dim = shape_op(x2)[-1] + x1 = maybe_pad_zeros(x1, x1_dim) x2 = maybe_pad_zeros(x2, x2_dim) # Broadcast each other - shape = tf.shape(x1) - shape = tf.broadcast_dynamic_shape(shape, tf.shape(x2)) + shape = shape_op(x1) + + shape = tf.broadcast_dynamic_shape(shape, shape_op(x2)) x1 = tf.broadcast_to(x1, shape) x2 = tf.broadcast_to(x2, shape) c = tf.linalg.cross(x1, x2) - c = tf.cond( + + if isinstance(x1_dim, int) and isinstance(x2_dim, int): + if (x1_dim == 2) & (x2_dim == 2): + return c[..., 2] + return moveaxis(c, -1, axisc) + + return tf.cond( (x1_dim == 2) & (x2_dim == 2), lambda: c[..., 2], lambda: moveaxis(c, -1, axisc), ) - return c def cumprod(x, axis=None, dtype=None): @@ -1024,19 +1031,23 @@ def diagonal(x, offset=0, axis1=0, axis2=1): return tf.linalg.diag_part(x) x = moveaxis(x, (axis1, axis2), (-2, -1)) - x_shape = tf.shape(x) + x_shape = shape_op(x) def _zeros(): return tf.zeros(tf.concat([x_shape[:-1], [0]], 0), dtype=x.dtype) - x, offset = tf.cond( - tf.logical_or( - tf.less_equal(offset, -1 * x_shape[-2]), - tf.greater_equal(offset, x_shape[-1]), - ), - _zeros, - lambda: (x, offset), - ) + if isinstance(x_shape[-1], int) and isinstance(x_shape[-2], int): + if offset <= -1 * x_shape[-2] or offset >= x_shape[-1]: + x = _zeros() + else: + x = tf.cond( + tf.logical_or( + tf.less_equal(offset, -1 * x_shape[-2]), + tf.greater_equal(offset, x_shape[-1]), + ), + lambda: _zeros(), + lambda: x, + ) return tf.linalg.diag_part(x, k=offset) @@ -1225,12 +1236,9 @@ def hstack(xs): if len(dtype_set) > 1: dtype = dtypes.result_type(*dtype_set) xs = tree.map_structure(lambda x: convert_to_tensor(x, dtype), xs) - rank = tf.rank(xs[0]) - return tf.cond( - tf.equal(rank, 1), - lambda: tf.concat(xs, axis=0), - lambda: tf.concat(xs, axis=1), - ) + if len(xs[0].shape) == 1: + return tf.concat(xs, axis=0) + return tf.concat(xs, axis=1) def identity(n, dtype=None): From 81c004708a8ea9aa135edebf08ba17ba3f95e0d1 Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Mon, 29 Apr 2024 02:38:54 +0800 Subject: [PATCH 089/101] Fix `argpartition` cuda bug in torch (#19634) --- keras/src/backend/torch/numpy.py | 7 ++----- keras/src/ops/numpy_test.py | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/keras/src/backend/torch/numpy.py b/keras/src/backend/torch/numpy.py index 
6e361d9e3379..8bf95df70c7b 100644 --- a/keras/src/backend/torch/numpy.py +++ b/keras/src/backend/torch/numpy.py @@ -1617,19 +1617,16 @@ def slogdet(x): def argpartition(x, kth, axis=-1): x = convert_to_tensor(x, "int32") - x = torch.transpose(x, axis, -1) bottom_ind = torch.topk(-x, kth + 1)[1] def set_to_zero(a, i): - a[i] = 0 + a[i] = torch.zeros(1, dtype=a.dtype, device=a.device) return a for _ in range(x.dim() - 1): set_to_zero = torch.vmap(set_to_zero) - proxy = set_to_zero(ones(x.shape, dtype=torch.int32), bottom_ind) - + proxy = set_to_zero(torch.ones_like(x, dtype=torch.int32), bottom_ind) top_ind = torch.topk(proxy, x.shape[-1] - kth - 1)[1] - out = torch.cat([bottom_ind, top_ind], dim=x.dim() - 1) return cast(torch.transpose(out, -1, axis), "int32") diff --git a/keras/src/ops/numpy_test.py b/keras/src/ops/numpy_test.py index 3960c6f02263..c8d57eebfe06 100644 --- a/keras/src/ops/numpy_test.py +++ b/keras/src/ops/numpy_test.py @@ -4328,7 +4328,7 @@ def test_argpartition(self): self.assertAllClose(knp.argpartition(x, 2), np.argpartition(x, 2)) self.assertAllClose(knp.Argpartition(2)(x), np.argpartition(x, 2)) - x = np.array([[3, 4, 2], [1, 3, 1]]) + x = np.array([[3, 4, 2], [1, 3, 4]]) self.assertAllClose(knp.argpartition(x, 1), np.argpartition(x, 1)) self.assertAllClose(knp.Argpartition(1)(x), np.argpartition(x, 1)) From d5c95408ca6d35fc4ef12bd844ed42288894cf27 Mon Sep 17 00:00:00 2001 From: Luca Pizzini Date: Sun, 28 Apr 2024 20:39:41 +0200 Subject: [PATCH 090/101] fix(ops): specify NonZero output dtype and add test coverage (#19635) --- keras/src/ops/numpy.py | 2 +- keras/src/ops/numpy_test.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/keras/src/ops/numpy.py b/keras/src/ops/numpy.py index 4588c446770b..c11899720aec 100644 --- a/keras/src/ops/numpy.py +++ b/keras/src/ops/numpy.py @@ -3881,7 +3881,7 @@ def call(self, x): return backend.numpy.nonzero(x) def compute_output_spec(self, x): - return KerasTensor([None] * len(x.shape)) + return KerasTensor([None] * len(x.shape), dtype="int32") @keras_export(["keras.ops.nonzero", "keras.ops.numpy.nonzero"]) diff --git a/keras/src/ops/numpy_test.py b/keras/src/ops/numpy_test.py index c8d57eebfe06..106d27c8be19 100644 --- a/keras/src/ops/numpy_test.py +++ b/keras/src/ops/numpy_test.py @@ -7102,7 +7102,10 @@ def test_nonzero(self, dtype): self.assertEqual( standardize_dtype(knp.nonzero(x)[0].dtype), expected_dtype ) - # TODO: verify Nonzero + self.assertEqual( + standardize_dtype(knp.Nonzero().symbolic_call(x)[0].dtype), + expected_dtype, + ) @parameterized.named_parameters( named_product(dtypes=itertools.combinations(ALL_DTYPES, 2)) From 880f0cdd67591474d8ed98a6b192655322b7ecfc Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Mon, 29 Apr 2024 02:43:08 +0800 Subject: [PATCH 091/101] Fix `ops.ctc_decode` (#19633) * Fix greedy ctc decode * Remove print * Fix `tf.nn.ctc_beam_search_decoder` * Change default `mask_index` to `0` * Fix losses test * Update --- keras/src/backend/jax/nn.py | 31 ++++++++++++------------- keras/src/backend/numpy/nn.py | 31 ++++++++++++------------- keras/src/backend/tensorflow/nn.py | 27 +++++++++++++++++----- keras/src/backend/torch/nn.py | 19 ++++++++-------- keras/src/losses/losses.py | 12 +++++----- keras/src/losses/losses_test.py | 2 +- keras/src/ops/nn.py | 8 +++---- keras/src/ops/nn_test.py | 36 +++++++++++++++++------------- 8 files changed, 96 insertions(+), 70 deletions(-) diff --git a/keras/src/backend/jax/nn.py 
b/keras/src/backend/jax/nn.py index 3cbc61126fc8..863ced6b005b 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -593,7 +593,7 @@ def ctc_loss(target, output, target_length, output_length, mask_index=0): output = convert_to_tensor(output) target_length = convert_to_tensor(target_length, "int32") output_length = convert_to_tensor(output_length, "int32") - batch_size, _, num_classes = output.shape + batch_size, max_input_length, num_classes = output.shape batch_size, max_label_length = target.shape log_epsilon = -1e5 @@ -610,7 +610,7 @@ def _lengths_to_paddings(lengths, max_length): return jnp.logical_not(elem_valid) target_paddings = _lengths_to_paddings(target_length, max_label_length) - output_paddings = _lengths_to_paddings(output_length, max_label_length) + output_paddings = _lengths_to_paddings(output_length, max_input_length) target_paddings = target_paddings.astype(output.dtype) output_paddings = output_paddings.astype(output.dtype) @@ -690,12 +690,12 @@ def loop_body(prev, x): def _ctc_greedy_decode( inputs, - sequence_length, + sequence_lengths, merge_repeated=True, mask_index=None, ): inputs = convert_to_tensor(inputs) - sequence_length = convert_to_tensor(sequence_length, dtype="int32") + sequence_lengths = convert_to_tensor(sequence_lengths, dtype="int32") batch_size, max_length, num_classes = inputs.shape if mask_index is None: @@ -705,7 +705,7 @@ def _ctc_greedy_decode( scores = jnp.max(inputs, axis=-1) seqlen_mask = jnp.arange(max_length)[None, :] - seqlen_mask = seqlen_mask >= sequence_length[:, None] + seqlen_mask = seqlen_mask >= sequence_lengths[:, None] indices = jnp.where(seqlen_mask, mask_index, indices) scores = jnp.where(seqlen_mask, 0.0, scores) @@ -715,16 +715,17 @@ def _ctc_greedy_decode( repeat_mask = jnp.pad(repeat_mask, ((0, 0), (1, 0))) indices = jnp.where(repeat_mask, mask_index, indices) - # We rearrange the indices by moving `mask_index` to the end of the array + # We set to -1 for blank labels invalid_mask = indices == mask_index + indices = jnp.where(invalid_mask, -1, indices) + + # We rearrange the indices by moving `mask_index` to the end of the array order = jnp.expand_dims(jnp.arange(max_length), axis=0) # [1, N] order = jnp.tile(order, (batch_size, 1)) # [B, N] order = jnp.where(invalid_mask, max_length, order) order = jnp.argsort(order, axis=-1) indices = jnp.take_along_axis(indices, order, axis=-1) - # We set to -1 for blank labels - indices = jnp.where(invalid_mask, -1, indices) scores = -jnp.sum(scores, axis=1)[:, None] indices = jnp.expand_dims(indices, axis=0) return indices, scores @@ -732,17 +733,17 @@ def _ctc_greedy_decode( def _ctc_beam_search_decode( inputs, - sequence_length, + sequence_lengths, beam_width=100, top_paths=1, mask_index=None, ): inputs = convert_to_tensor(inputs) - sequence_length = convert_to_tensor(sequence_length) + sequence_lengths = convert_to_tensor(sequence_lengths) batch_size, max_seq_len, num_classes = inputs.shape inputs = jnn.log_softmax(inputs) - seqlen_mask = jnp.arange(max_seq_len)[None, :] >= sequence_length[:, None] + seqlen_mask = jnp.arange(max_seq_len)[None, :] >= sequence_lengths[:, None] if mask_index is None: mask_index = num_classes - 1 @@ -895,12 +896,12 @@ def _decode_batch( def ctc_decode( inputs, - sequence_length, + sequence_lengths, strategy="greedy", beam_width=100, top_paths=1, merge_repeated=True, - mask_index=None, + mask_index=0, ): inputs = convert_to_tensor(inputs) dtype = backend.result_type(inputs.dtype, "float32") @@ -909,14 +910,14 @@ def ctc_decode( if 
strategy == "greedy": return _ctc_greedy_decode( inputs, - sequence_length, + sequence_lengths, merge_repeated=merge_repeated, mask_index=mask_index, ) elif strategy == "beam_search": return _ctc_beam_search_decode( inputs, - sequence_length, + sequence_lengths, beam_width=beam_width, top_paths=top_paths, mask_index=mask_index, diff --git a/keras/src/backend/numpy/nn.py b/keras/src/backend/numpy/nn.py index a2d89c323e67..bcac274c2455 100644 --- a/keras/src/backend/numpy/nn.py +++ b/keras/src/backend/numpy/nn.py @@ -621,7 +621,7 @@ def ctc_loss(target, output, target_length, output_length, mask_index=0): output = convert_to_tensor(output) target_length = convert_to_tensor(target_length, "int32") output_length = convert_to_tensor(output_length, "int32") - batch_size, _, num_classes = output.shape + batch_size, max_input_length, num_classes = output.shape batch_size, max_label_length = target.shape log_epsilon = -1e5 @@ -638,7 +638,7 @@ def _lengths_to_paddings(lengths, max_length): return np.logical_not(elem_valid) target_paddings = _lengths_to_paddings(target_length, max_label_length) - output_paddings = _lengths_to_paddings(output_length, max_label_length) + output_paddings = _lengths_to_paddings(output_length, max_input_length) target_paddings = target_paddings.astype(output.dtype) output_paddings = output_paddings.astype(output.dtype) @@ -729,12 +729,12 @@ def np_scan(f, init, xs): def _ctc_greedy_decode( inputs, - sequence_length, + sequence_lengths, merge_repeated=True, mask_index=None, ): inputs = convert_to_tensor(inputs) - sequence_length = convert_to_tensor(sequence_length, dtype="int32") + sequence_lengths = convert_to_tensor(sequence_lengths, dtype="int32") batch_size, max_length, num_classes = inputs.shape if mask_index is None: @@ -744,7 +744,7 @@ def _ctc_greedy_decode( scores = np.max(inputs, axis=-1) seqlen_mask = np.arange(max_length)[None, :] - seqlen_mask = seqlen_mask >= sequence_length[:, None] + seqlen_mask = seqlen_mask >= sequence_lengths[:, None] indices = np.where(seqlen_mask, mask_index, indices) scores = np.where(seqlen_mask, 0.0, scores) @@ -754,16 +754,17 @@ def _ctc_greedy_decode( repeat_mask = np.pad(repeat_mask, ((0, 0), (1, 0))) indices = np.where(repeat_mask, mask_index, indices) - # We rearrange the indices by moving `mask_index` to the end of the array + # We set to -1 for blank labels invalid_mask = indices == mask_index + indices = np.where(invalid_mask, -1, indices) + + # We rearrange the indices by moving `mask_index` to the end of the array order = np.expand_dims(np.arange(max_length), axis=0) # [1, N] order = np.tile(order, (batch_size, 1)) # [B, N] order = np.where(invalid_mask, max_length, order) order = np.argsort(order, axis=-1) indices = np.take_along_axis(indices, order, axis=-1) - # We set to -1 for blank labels - indices = np.where(invalid_mask, -1, indices) scores = -np.sum(scores, axis=1)[:, None] indices = np.expand_dims(indices, axis=0) return indices, scores @@ -771,17 +772,17 @@ def _ctc_greedy_decode( def _ctc_beam_search_decode( inputs, - sequence_length, + sequence_lengths, beam_width=100, top_paths=1, mask_index=None, ): inputs = convert_to_tensor(inputs) - sequence_length = convert_to_tensor(sequence_length) + sequence_lengths = convert_to_tensor(sequence_lengths) batch_size, max_seq_len, num_classes = inputs.shape inputs = log_softmax(inputs, axis=-1) - seqlen_mask = np.arange(max_seq_len)[None, :] >= sequence_length[:, None] + seqlen_mask = np.arange(max_seq_len)[None, :] >= sequence_lengths[:, None] if mask_index is None: 
mask_index = num_classes - 1 @@ -936,12 +937,12 @@ def np_scan_only_carry(f, init, xs): def ctc_decode( inputs, - sequence_length, + sequence_lengths, strategy="greedy", beam_width=100, top_paths=1, merge_repeated=True, - mask_index=None, + mask_index=0, ): inputs = convert_to_tensor(inputs) dtype = backend.result_type(inputs.dtype, "float32") @@ -950,14 +951,14 @@ def ctc_decode( if strategy == "greedy": return _ctc_greedy_decode( inputs, - sequence_length, + sequence_lengths, merge_repeated=merge_repeated, mask_index=mask_index, ) elif strategy == "beam_search": return _ctc_beam_search_decode( inputs, - sequence_length, + sequence_lengths, beam_width=beam_width, top_paths=top_paths, mask_index=mask_index, diff --git a/keras/src/backend/tensorflow/nn.py b/keras/src/backend/tensorflow/nn.py index e7317599af0e..cb6bff3b4441 100644 --- a/keras/src/backend/tensorflow/nn.py +++ b/keras/src/backend/tensorflow/nn.py @@ -802,12 +802,12 @@ def ctc_loss( def ctc_decode( inputs, - sequence_length, + sequence_lengths, strategy="greedy", beam_width=100, top_paths=1, merge_repeated=True, - mask_index=None, + mask_index=0, ): inputs = convert_to_tensor(inputs) input_shape = tf.shape(inputs) @@ -817,18 +817,27 @@ def ctc_decode( dtype = backend.result_type(inputs.dtype, "float32") inputs = tf.cast(inputs, dtype) - sequence_length = convert_to_tensor(sequence_length, dtype="int32") + sequence_lengths = convert_to_tensor(sequence_lengths, dtype="int32") if strategy == "greedy": (decoded, scores) = tf.nn.ctc_greedy_decoder( inputs=inputs, - sequence_length=sequence_length, + sequence_length=sequence_lengths, merge_repeated=merge_repeated, blank_index=mask_index, ) elif strategy == "beam_search": + # Move `mask_index` column to the last position since this is the + # default for `tf.nn.ctc_beam_search_decoder` + if mask_index is not None: + inputs_before = inputs[..., :mask_index] + inputs_mask = inputs[..., mask_index : mask_index + 1] + inputs_after = inputs[..., mask_index + 1 :] + inputs = tf.concat( + [inputs_before, inputs_after, inputs_mask], axis=-1 + ) (decoded, scores) = tf.nn.ctc_beam_search_decoder( inputs=inputs, - sequence_length=sequence_length, + sequence_length=sequence_lengths, beam_width=beam_width, top_paths=top_paths, ) @@ -845,6 +854,14 @@ def ctc_decode( decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1)) decoded_dense = tf.stack(decoded_dense, axis=0) decoded_dense = tf.cast(decoded_dense, "int32") + + # We need to recover the labels because we swapped the indices earlier + if strategy == "beam_search" and mask_index is not None: + if mask_index < 0: + mask_index = mask_index + input_shape[-1] + decoded_dense = tf.where( + decoded_dense >= mask_index, decoded_dense + 1, decoded_dense + ) return decoded_dense, scores diff --git a/keras/src/backend/torch/nn.py b/keras/src/backend/torch/nn.py index 62749bc163b4..97dd04c1b5ec 100644 --- a/keras/src/backend/torch/nn.py +++ b/keras/src/backend/torch/nn.py @@ -775,12 +775,12 @@ def ctc_loss( def _ctc_greedy_decode( inputs, - sequence_length, + sequence_lengths, merge_repeated=True, mask_index=None, ): inputs = convert_to_tensor(inputs) - sequence_length = convert_to_tensor(sequence_length, dtype="int32") + sequence_lengths = convert_to_tensor(sequence_lengths, dtype="int32") batch_size, max_length, num_classes = inputs.shape if mask_index is None: @@ -791,7 +791,7 @@ def _ctc_greedy_decode( scores = torch.max(inputs, axis=-1)[0] seqlen_mask = torch.arange(max_length, device=indices.device)[None, :] - seqlen_mask = 
seqlen_mask >= sequence_length[:, None] + seqlen_mask = seqlen_mask >= sequence_lengths[:, None] indices = torch.where(seqlen_mask, mask_index, indices) scores = torch.where(seqlen_mask, 0.0, scores) @@ -801,8 +801,11 @@ def _ctc_greedy_decode( repeat = tnn.pad(repeat, (1, 0, 0, 0)) indices = torch.where(repeat, mask_index, indices) - # We rearrange the indices by moving `mask_index` to the end of the array + # We set to -1 for blank labels invalid_mask = indices == mask_index + indices = torch.where(invalid_mask, -1, indices) + + # We rearrange the indices by moving `mask_index` to the end of the array order = torch.unsqueeze( torch.arange(max_length, device=indices.device), dim=0 ) # [1, N] @@ -811,8 +814,6 @@ def _ctc_greedy_decode( order = torch.argsort(order, dim=-1) indices = torch.take_along_dim(indices, order, dim=-1) - # We set to -1 for blank labels - indices = torch.where(invalid_mask, -1, indices) scores = -torch.sum(scores, axis=1)[:, None] indices = torch.unsqueeze(indices, dim=0) return indices, scores @@ -820,12 +821,12 @@ def _ctc_greedy_decode( def ctc_decode( inputs, - sequence_length, + sequence_lengths, strategy="greedy", beam_width=100, top_paths=1, merge_repeated=True, - mask_index=None, + mask_index=0, ): inputs = convert_to_tensor(inputs) dtype = backend.result_type(inputs.dtype, "float32") @@ -834,7 +835,7 @@ def ctc_decode( if strategy == "greedy": return _ctc_greedy_decode( inputs, - sequence_length, + sequence_lengths, merge_repeated=merge_repeated, mask_index=mask_index, ) diff --git a/keras/src/losses/losses.py b/keras/src/losses/losses.py index 0df1f9542dfd..b91d15e87e70 100644 --- a/keras/src/losses/losses.py +++ b/keras/src/losses/losses.py @@ -1933,14 +1933,16 @@ def ctc(y_true, y_pred): f"Received: y_pred.shape={ops.shape(y_pred)}" ) - batch_length = ops.cast(ops.shape(y_true)[0], dtype="int32") - input_length = ops.cast(ops.shape(y_pred)[1], dtype="int32") - label_length = ops.cast(ops.shape(y_true)[1], dtype="int32") + mask_index = 0 + batch_length = ops.shape(y_pred)[0] + input_length = ops.shape(y_pred)[1] input_length = input_length * ops.ones((batch_length,), dtype="int32") - label_length = label_length * ops.ones((batch_length,), dtype="int32") + label_length = ops.cast( + ops.sum(y_true != mask_index, axis=-1), dtype="int32" + ) return ops.ctc_loss( - y_true, y_pred, label_length, input_length, mask_index=0 + y_true, y_pred, label_length, input_length, mask_index=mask_index ) diff --git a/keras/src/losses/losses_test.py b/keras/src/losses/losses_test.py index b97a8a253c3e..07b74fa3739d 100644 --- a/keras/src/losses/losses_test.py +++ b/keras/src/losses/losses_test.py @@ -1387,7 +1387,7 @@ def test_correctness(self): logits = (np.arange(24).reshape((2, 4, 3)).astype("float32") - 12) / 100 y_true = np.array(([[1, 2, 1, 0], [1, 2, 0, 2]])) output = losses.CTC()(y_true, logits) - self.assertAllClose(output, 4.389582) + self.assertAllClose(output, 2.448645) class DiceTest(testing.TestCase): diff --git a/keras/src/ops/nn.py b/keras/src/ops/nn.py index 1d900c8ccc4f..189f46bee0d6 100644 --- a/keras/src/ops/nn.py +++ b/keras/src/ops/nn.py @@ -1880,7 +1880,7 @@ def __init__( beam_width=100, top_paths=1, merge_repeated=True, - mask_index=None, + mask_index=0, ): super().__init__() self.strategy = strategy @@ -1928,7 +1928,7 @@ def ctc_decode( beam_width=100, top_paths=1, merge_repeated=True, - mask_index=None, + mask_index=0, ): """Decodes the output of a CTC model. 
@@ -1947,7 +1947,7 @@ def ctc_decode( merge_repeated: A boolean scalar, whether to merge repeated labels in the output. Defaults to `True`. mask_index: An integer scalar, the index of the mask character in - the vocabulary. Defaults to `None`. + the vocabulary. Defaults to `0`. Returns: A tuple containing: @@ -1973,7 +1973,7 @@ def ctc_decode( ).symbolic_call(inputs, sequence_lengths) return backend.nn.ctc_decode( inputs=inputs, - sequence_length=sequence_lengths, + sequence_lengths=sequence_lengths, strategy=strategy, beam_width=beam_width, top_paths=top_paths, diff --git a/keras/src/ops/nn_test.py b/keras/src/ops/nn_test.py index 433bb0f46afa..2c825c4f4166 100644 --- a/keras/src/ops/nn_test.py +++ b/keras/src/ops/nn_test.py @@ -1946,30 +1946,31 @@ def test_ctc_decode(self): [ [ [0.1, 0.4, 0.2, 0.4], - [0.3, 0.3, 0.4, 0.2], + [0.3, -0.3, 0.4, 0.2], [0.3, 0.2, 0.4, 0.3], ], [ - [0.1, 0.4, 0.7, 0.2], + [0.7, 0.4, 0.3, 0.2], [0.3, 0.3, 0.4, 0.1], - [0.2, 0.1, 0.1, 0.5], + [0.6, -0.1, 0.1, 0.5], ], [ [0.1, 0.4, 0.2, 0.7], - [0.3, 0.3, 0.2, 0.7], + [0.3, 0.3, -0.2, 0.7], [0.3, 0.2, 0.4, 0.1], ], ] ) - labels = np.array([[1, 2, -1], [2, -1, -1], [-1, -1, -1]]) - score_labels = np.array([[-1.2], [-1.6], [-0.7]]) - repeated_labels = np.array([[1, 2, 2], [2, 2, -1], [-1, -1, -1]]) + labels = np.array([[1, 2, -1], [2, -1, -1], [3, -1, -1]]) + score_labels = np.array([[-1.2], [-1.7], [-0.7]]) + repeated_labels = np.array([[1, 2, 2], [2, -1, -1], [3, -1, -1]]) # Test strategy="greedy" and merge_repeated=True (decoded,), scores = knn.ctc_decode( inputs, sequence_lengths=[3, 3, 1], strategy="greedy", + mask_index=0, ) self.assertAllClose(decoded, labels) self.assertAllClose(scores, score_labels) @@ -1980,6 +1981,7 @@ def test_ctc_decode(self): sequence_lengths=[3, 3, 1], strategy="greedy", merge_repeated=False, + mask_index=0, ) self.assertAllClose(decoded, repeated_labels) self.assertAllClose(scores, score_labels) @@ -1987,15 +1989,17 @@ def test_ctc_decode(self): if backend.backend() == "torch": self.skipTest("torch doesn't support 'beam_search' strategy") - labels = [ - np.array([[1, 2, -1], [2, -1, -1], [-1, -1, -1]]), - np.array([[2, -1, -1], [2, 0, -1], [1, -1, -1]]), - ] + labels = np.array( + [ + [[1, 2, -1], [2, -1, -1], [3, -1, -1]], + [[2, -1, -1], [3, -1, -1], [1, -1, -1]], + ] + ) score_labels = np.array( [ - [-2.33578291, -2.44335217], - [-2.22499622, -2.25768432], - [-1.0633859, -1.3633859], + [-2.426537, -2.435596], + [-2.127681, -2.182338], + [-1.063386, -1.363386], ] ) beam_width = 4 @@ -2008,9 +2012,9 @@ def test_ctc_decode(self): strategy="beam_search", beam_width=beam_width, top_paths=top_paths, + mask_index=0, ) - for i in range(top_paths): - self.assertAllClose(decoded[i], labels[i]) + self.assertAllClose(decoded, labels) self.assertAllClose(scores, score_labels) def test_normalize(self): From 4cb5671e9aed82433de33455de285783fa81f8ee Mon Sep 17 00:00:00 2001 From: james77777778 <20734616+james77777778@users.noreply.github.com> Date: Mon, 29 Apr 2024 23:35:07 +0800 Subject: [PATCH 092/101] Ensure the same rule applies for np arrays in autocasting (#19636) * Ensure the same rule applies for np arrays in autocasting * Trigger CI by adding docstring * Update * Update docstring --- keras/src/dtype_policies/dtype_policy.py | 32 ++++++++++++------- keras/src/layers/layer_test.py | 29 +++++++++++++---- .../spectral_normalization_test.py | 2 +- keras/src/testing/test_case.py | 14 ++++++++ 4 files changed, 57 insertions(+), 20 deletions(-) diff --git 
a/keras/src/dtype_policies/dtype_policy.py b/keras/src/dtype_policies/dtype_policy.py index a55eaa4c0659..e9bf91aaab9d 100644 --- a/keras/src/dtype_policies/dtype_policy.py +++ b/keras/src/dtype_policies/dtype_policy.py @@ -1,5 +1,4 @@ from keras.src import backend -from keras.src import ops from keras.src.api_export import keras_export from keras.src.backend.common import global_state @@ -135,25 +134,27 @@ def name(self): return self._name def convert_input(self, x, autocast, dtype): + """Converts the input dtype based on `autocast` and `dtype`. + + Note that `x` can be a tensor, symbolic tensor or numpy array, and this + method will keep integer inputs untouched and only apply casting to + floats. + """ + dtype = backend.standardize_dtype(dtype) if backend.is_tensor(x): - if ( - autocast - and backend.is_float_dtype(x.dtype) - and x.dtype != dtype - ): + if self._should_cast(x, autocast, dtype): x = backend.cast(x, dtype=dtype) return x elif backend.is_keras_tensor(x): - if ( - autocast - and backend.is_float_dtype(x.dtype) - and x.dtype != dtype - ): + if self._should_cast(x, autocast, dtype): x.dtype = dtype return x elif hasattr(x, "__array__"): - return ops.convert_to_tensor(x, dtype=dtype) + x = backend.convert_to_tensor(x) + if self._should_cast(x, autocast, dtype): + x = backend.cast(x, dtype=dtype) + return x return x def get_config(self): @@ -163,6 +164,13 @@ def get_config(self): def from_config(cls, config): return cls(**config) + def _should_cast(self, x, autocast, dtype): + x_dtype = backend.standardize_dtype(x.dtype) + if autocast and backend.is_float_dtype(x_dtype) and x_dtype != dtype: + return True + else: + return False + @keras_export( ["keras.FloatDTypePolicy", "keras.dtype_policies.FloatDTypePolicy"] diff --git a/keras/src/layers/layer_test.py b/keras/src/layers/layer_test.py index 6bc93859abc7..e0c71a0cf7fc 100644 --- a/keras/src/layers/layer_test.py +++ b/keras/src/layers/layer_test.py @@ -437,13 +437,13 @@ def test_mixed_precision(self): y = layer(x) self.assertEqual(layer.compute_dtype, "float16") self.assertEqual(layer.variable_dtype, "float16") - self.assertEqual(backend.standardize_dtype(y.dtype), "float16") + self.assertDType(y, "float16") layer = layers.Dense(2, dtype="mixed_float16") y = layer(x) self.assertEqual(layer.compute_dtype, "float16") self.assertEqual(layer.variable_dtype, "float32") - self.assertEqual(backend.standardize_dtype(y.dtype), "float16") + self.assertDType(y, "float16") self.assertEqual(layer.kernel.dtype, "float32") @pytest.mark.skipif( @@ -451,7 +451,7 @@ def test_mixed_precision(self): reason="Some torch ops not implemented for float16 on CPU.", ) def test_autocast(self): - assertEqual = self.assertEqual + assertDType = self.assertDType # A layer with a int dtype (some preprocessing layers do this). class InnerLayerOne(layers.Layer): @@ -467,7 +467,7 @@ def __init__(self): def call(self, x): # Should not autocast. - assertEqual(backend.standardize_dtype(self.v.dtype), "float32") + assertDType(self.v, "float32") return ops.cast(x, "float32") + self.v # A layer that is explicitly full precision. @@ -483,7 +483,7 @@ def __init__(self): def call(self, x): # Should not autocast. - assertEqual(backend.standardize_dtype(self.v.dtype), "float32") + assertDType(self.v, "float32") return x + self.v # A layer that is explicitly mixed precision but with autocast=False @@ -501,7 +501,7 @@ def __init__(self): def call(self, x): # Should not autocast `self.v`. 
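# [Editor's note — minimal sketch, not part of the patch] The rule that
# `_should_cast` above now applies uniformly to backend tensors, symbolic
# tensors and NumPy arrays, restated with NumPy stand-ins for the backend
# dtype helpers: only float inputs are cast, and only when their dtype
# differs from the policy's compute dtype; integers pass through untouched.
import numpy as np

def should_cast(x, autocast, compute_dtype):
    x_dtype = np.dtype(x.dtype).name
    return (
        autocast
        and np.issubdtype(x.dtype, np.floating)
        and x_dtype != compute_dtype
    )

print(should_cast(np.zeros(1, "float64"), True, "float32"))  # True (cast)
print(should_cast(np.zeros(1, "int32"), True, "float32"))    # False (untouched)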
- assertEqual(backend.standardize_dtype(self.v.dtype), "float32") + assertDType(self.v, "float32") return ops.add(x, self.v) # A layer that is explicitly mixed precision with inner layers. @@ -520,7 +520,7 @@ def __init__(self): def call(self, x): # Should autocast. - assertEqual(backend.standardize_dtype(self.v.dtype), "float16") + assertDType(self.v, "float16") return self.inner_three( self.inner_two(self.inner_one(x + self.v)) ) @@ -529,6 +529,21 @@ def call(self, x): y = layer(np.array(0.0)) self.assertEqual(y, 4.0) + def test_autocast_with_np_array(self): + assertDType = self.assertDType + + class CustomLayer(layers.Layer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def call(self, x): + # Here are the assertions. + assertDType(x[0], "float32") # Cast to compute_dtype + assertDType(x[1], "int32") # Untouched + + x = [np.zeros(1, dtype="float64"), np.zeros(1, dtype="int32")] + CustomLayer()(x) + @pytest.mark.skipif( backend.backend() == "numpy", reason="Numpy backend does not support masking.", diff --git a/keras/src/layers/normalization/spectral_normalization_test.py b/keras/src/layers/normalization/spectral_normalization_test.py index b3cc47d8d9f0..f9a34b4626d9 100644 --- a/keras/src/layers/normalization/spectral_normalization_test.py +++ b/keras/src/layers/normalization/spectral_normalization_test.py @@ -25,7 +25,7 @@ def test_basic_spectralnorm(self): self.run_layer_test( layers.SpectralNormalization, init_kwargs={"layer": layers.Embedding(10, 4)}, - input_data=np.random.randint(10, size=(10,)), + input_data=np.random.randint(10, size=(10,)).astype("float32"), expected_output_shape=(10, 4), expected_num_trainable_weights=1, expected_num_non_trainable_weights=1, diff --git a/keras/src/testing/test_case.py b/keras/src/testing/test_case.py index 0b6fd9d40f3f..4b46d1d9b255 100644 --- a/keras/src/testing/test_case.py +++ b/keras/src/testing/test_case.py @@ -99,6 +99,20 @@ def assertSparse(self, x, sparse=True): f"Backend {backend.backend()} does not support sparse tensors", ) + def assertDType(self, x, dtype, msg=None): + if hasattr(x, "dtype"): + x_dtype = backend.standardize_dtype(x.dtype) + else: + # If x is a python number + x_dtype = backend.standardize_dtype(type(x)) + standardized_dtype = backend.standardize_dtype(dtype) + default_msg = ( + "The dtype of x does not match the expected one. 
" + f"Received: x.dtype={x_dtype} and dtype={dtype}" + ) + msg = msg or default_msg + self.assertEqual(x_dtype, standardized_dtype, msg=msg) + def run_class_serialization_test(self, instance, custom_objects=None): from keras.src.saving import custom_object_scope from keras.src.saving import deserialize_keras_object From 61d85f33c42cc898536bb8ee04cadbca11019a4f Mon Sep 17 00:00:00 2001 From: Faisal Alsrheed <47912291+Faisal-Alsrheed@users.noreply.github.com> Date: Mon, 29 Apr 2024 22:25:29 +0300 Subject: [PATCH 093/101] Fix `istft` and add class `TestMathErrors` in `ops/math_test.py` (#19594) * Fix and test math functions for jax backend * run /workspaces/keras/shell/format.sh * refix * fix * fix _get_complex_tensor_from_tuple * fix * refix * Fix istft function to handle inputs with less than 2 dimensions * fix * Fix ValueError in istft function for inputs with less than 2 dimensions --- keras/src/backend/jax/math.py | 6 +++ keras/src/ops/linalg_test.py | 9 ++++ keras/src/ops/math_test.py | 88 +++++++++++++++++++++++++++++++++++ 3 files changed, 103 insertions(+) diff --git a/keras/src/backend/jax/math.py b/keras/src/backend/jax/math.py index 361eeee89173..4119f744e1a3 100644 --- a/keras/src/backend/jax/math.py +++ b/keras/src/backend/jax/math.py @@ -204,6 +204,12 @@ def istft( x = _get_complex_tensor_from_tuple(x) dtype = jnp.real(x).dtype + if len(x.shape) < 2: + raise ValueError( + f"Input `x` must have at least 2 dimensions. " + f"Received shape: {x.shape}" + ) + expected_output_len = fft_length + sequence_stride * (x.shape[-2] - 1) l_pad = (fft_length - sequence_length) // 2 r_pad = fft_length - sequence_length - l_pad diff --git a/keras/src/ops/linalg_test.py b/keras/src/ops/linalg_test.py index e1f0decf64b0..a2fd0c61aad0 100644 --- a/keras/src/ops/linalg_test.py +++ b/keras/src/ops/linalg_test.py @@ -101,6 +101,15 @@ def test_qr(self): self.assertEqual(q.shape, qref_shape) self.assertEqual(r.shape, rref_shape) + def test_qr_invalid_mode(self): + # backend agnostic error message + x = np.array([[1, 2], [3, 4]]) + invalid_mode = "invalid_mode" + with self.assertRaisesRegex( + ValueError, "Expected one of {'reduced', 'complete'}." 
+ ): + linalg.qr(x, mode=invalid_mode) + def test_solve(self): a = KerasTensor([None, 20, 20]) b = KerasTensor([None, 20, 5]) diff --git a/keras/src/ops/math_test.py b/keras/src/ops/math_test.py index 60db9fc70f6c..86e3c70a78ee 100644 --- a/keras/src/ops/math_test.py +++ b/keras/src/ops/math_test.py @@ -1,5 +1,6 @@ import math +import jax.numpy as jnp import numpy as np import pytest import scipy.signal @@ -1256,3 +1257,90 @@ def test_undefined_fft_length_and_last_dimension(self): expected_shape = real_part.shape[:-1] + (None,) self.assertEqual(output_spec.shape, expected_shape) + + +class TestMathErrors(testing.TestCase): + + @pytest.mark.skipif( + backend.backend() != "jax", reason="Testing Jax errors only" + ) + def test_segment_sum_no_num_segments(self): + data = jnp.array([1, 2, 3, 4]) + segment_ids = jnp.array([0, 0, 1, 1]) + with self.assertRaisesRegex( + ValueError, + "Argument `num_segments` must be set when using the JAX backend.", + ): + kmath.segment_sum(data, segment_ids) + + @pytest.mark.skipif( + backend.backend() != "jax", reason="Testing Jax errors only" + ) + def test_segment_max_no_num_segments(self): + data = jnp.array([1, 2, 3, 4]) + segment_ids = jnp.array([0, 0, 1, 1]) + with self.assertRaisesRegex( + ValueError, + "Argument `num_segments` must be set when using the JAX backend.", + ): + kmath.segment_max(data, segment_ids) + + def test_stft_invalid_input_type(self): + # backend agnostic error message + x = np.array([1, 2, 3, 4]) + sequence_length = 2 + sequence_stride = 1 + fft_length = 4 + with self.assertRaisesRegex(TypeError, "`float32` or `float64`"): + kmath.stft(x, sequence_length, sequence_stride, fft_length) + + def test_invalid_fft_length(self): + # backend agnostic error message + x = np.array([1.0, 2.0, 3.0, 4.0]) + sequence_length = 4 + sequence_stride = 1 + fft_length = 2 + with self.assertRaisesRegex(ValueError, "`fft_length` must equal or"): + kmath.stft(x, sequence_length, sequence_stride, fft_length) + + def test_stft_invalid_window(self): + # backend agnostic error message + x = np.array([1.0, 2.0, 3.0, 4.0]) + sequence_length = 2 + sequence_stride = 1 + fft_length = 4 + window = "invalid_window" + with self.assertRaisesRegex(ValueError, "If a string is passed to"): + kmath.stft( + x, sequence_length, sequence_stride, fft_length, window=window + ) + + def test_stft_invalid_window_shape(self): + # backend agnostic error message + x = np.array([1.0, 2.0, 3.0, 4.0]) + sequence_length = 2 + sequence_stride = 1 + fft_length = 4 + window = np.ones((sequence_length + 1)) + with self.assertRaisesRegex(ValueError, "The shape of `window` must"): + kmath.stft( + x, sequence_length, sequence_stride, fft_length, window=window + ) + + def test_istft_invalid_window_shape_2D_inputs(self): + # backend agnostic error message + x = (np.array([[1.0, 2.0]]), np.array([[3.0, 4.0]])) + sequence_length = 2 + sequence_stride = 1 + fft_length = 4 + incorrect_window = np.ones((sequence_length + 1,)) + with self.assertRaisesRegex( + ValueError, "The shape of `window` must be equal to" + ): + kmath.istft( + x, + sequence_length, + sequence_stride, + fft_length, + window=incorrect_window, + ) From 9f4da5159a098256dfbccd2c926107953a6812e5 Mon Sep 17 00:00:00 2001 From: hertschuh <1091026+hertschuh@users.noreply.github.com> Date: Mon, 29 Apr 2024 13:20:28 -0700 Subject: [PATCH 094/101] Return a tuple from `ops.shape` with the Torch backend. (#19640) With Torch, `x.shape` returns a `torch.Size`, which is a subclass of `tuple` but can cause different behaviors. 
In particular `convert_to_tensor` does not work on `torch.Size`. This fixes https://github.com/keras-team/keras/issues/18900 --- keras/src/backend/torch/core.py | 3 ++- keras/src/ops/core_test.py | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/keras/src/backend/torch/core.py b/keras/src/backend/torch/core.py index 8cc6c5b5b564..68453255b1f5 100644 --- a/keras/src/backend/torch/core.py +++ b/keras/src/backend/torch/core.py @@ -234,7 +234,8 @@ def is_tensor(x): def shape(x): - return x.shape + # Convert from `torch.Size` to plain tuple. + return tuple(x.shape) def cast(x, dtype): diff --git a/keras/src/ops/core_test.py b/keras/src/ops/core_test.py index 31d553853b63..29526c20ad4a 100644 --- a/keras/src/ops/core_test.py +++ b/keras/src/ops/core_test.py @@ -338,10 +338,12 @@ def test_stop_gradient_return(self): self.assertAllClose(x, y) def test_shape(self): - x = np.ones((2, 3, 7, 1)) + x = ops.ones((2, 3, 7, 1)) + self.assertEqual(core.shape(x).__class__, tuple) self.assertAllEqual(core.shape(x), (2, 3, 7, 1)) x = KerasTensor((None, 3, None, 1)) + self.assertEqual(core.shape(x).__class__, tuple) self.assertAllEqual(core.shape(x), (None, 3, None, 1)) @pytest.mark.skipif( From f01e99a6b6b728187373ef0b57a485a0059fcd05 Mon Sep 17 00:00:00 2001 From: Haifeng Jin <5476582+haifeng-jin@users.noreply.github.com> Date: Tue, 30 Apr 2024 03:23:34 +0000 Subject: [PATCH 095/101] support conv3d on cpu for TF (#19641) --- keras/src/backend/tensorflow/nn.py | 6 ++++++ keras/src/ops/nn_test.py | 16 +++++++++++----- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/keras/src/backend/tensorflow/nn.py b/keras/src/backend/tensorflow/nn.py index cb6bff3b4441..e590d10e2d3e 100644 --- a/keras/src/backend/tensorflow/nn.py +++ b/keras/src/backend/tensorflow/nn.py @@ -252,6 +252,12 @@ def _conv_xla(): # If kernel's in_channel does not match input's channels, it indicates # convolution is broken down into groups. 
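# [Editor's note — illustrative sketch, not part of the patch] The hunk
# below works around the missing channels-first conv3d kernel on CPU by
# transposing NCDHW -> NDHWC, convolving as channels_last, and transposing
# the result back. The two permutations, with NumPy standing in for the
# convolution itself:
import numpy as np

x = np.zeros((2, 3, 8, 8, 8))                    # (N, C, D, H, W)
x_last = np.transpose(x, (0, 2, 3, 4, 1))        # -> (N, D, H, W, C)
print(x_last.shape)                              # (2, 8, 8, 8, 3)
# ... the channels_last convolution would run here ...
y_first = np.transpose(x_last, (0, 4, 1, 2, 3))  # back to channels_first
print(y_first.shape)                             # (2, 3, 8, 8, 8)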
return _conv_xla() + if data_format == "channels_first" and len(inputs.shape) == 5: + inputs = convert_to_tensor(inputs) + if inputs.device.split(":")[-2] == "CPU": + inputs = tf.transpose(inputs, perm=(0, 2, 3, 4, 1)) + data_format = "channels_last" + return tf.transpose(_conv(), perm=(0, 4, 1, 2, 3)) return _conv() diff --git a/keras/src/ops/nn_test.py b/keras/src/ops/nn_test.py index 2c825c4f4166..4f85e96cd8cc 100644 --- a/keras/src/ops/nn_test.py +++ b/keras/src/ops/nn_test.py @@ -1445,23 +1445,29 @@ def test_conv_2d_group_2(self, strides, dilation_rate): ) self.assertAllClose(outputs, expected) - @parameterized.product(strides=(1, (1, 1, 1), 2), padding=("valid", "same")) - def test_conv_3d(self, strides, padding): - if backend.config.image_data_format() == "channels_last": + @parameterized.product( + strides=(1, (1, 1, 1), 2), + padding=("valid", "same"), + data_format=("channels_first", "channels_last"), + ) + def test_conv_3d(self, strides, padding, data_format): + if data_format == "channels_last": input_shape = (2, 8, 8, 8, 3) else: input_shape = (2, 3, 8, 8, 8) inputs_3d = np.arange(3072, dtype=float).reshape(input_shape) kernel = np.arange(162, dtype=float).reshape([3, 3, 3, 3, 2]) - outputs = knn.conv(inputs_3d, kernel, strides, padding=padding) + outputs = knn.conv( + inputs_3d, kernel, strides, padding=padding, data_format=data_format + ) expected = np_conv3d( inputs_3d, kernel, bias_weights=np.zeros((2,)), strides=strides, padding=padding, - data_format=backend.config.image_data_format(), + data_format=data_format, dilation_rate=1, groups=1, ) From 1b8f7b7bffdfb7b66fd0bc98b46e5ce1582fb811 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Chollet?= Date: Tue, 30 Apr 2024 10:37:03 -0700 Subject: [PATCH 096/101] Enable cudnn rnns when dropout is set (#19645) * Enable cudnn rnns when dropout is set * Fix --- keras/src/layers/rnn/gru.py | 13 +++++++++++-- keras/src/layers/rnn/lstm.py | 14 ++++++++++++-- pip_build.py | 1 - 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/keras/src/layers/rnn/gru.py b/keras/src/layers/rnn/gru.py index f489bd6638fc..8a516d0b4406 100644 --- a/keras/src/layers/rnn/gru.py +++ b/keras/src/layers/rnn/gru.py @@ -538,14 +538,23 @@ def inner_loop(self, sequences, initial_state, mask, training=False): if tree.is_nested(mask): mask = mask[0] if self.use_cudnn in ("auto", True): - if not self.dropout and not self.recurrent_dropout: + if not self.recurrent_dropout: try: + if self.dropout: + dp_mask = self.cell.get_dropout_mask(sequences[:, 0, :]) + dp_mask = ops.expand_dims(dp_mask, axis=1) + dp_mask = ops.broadcast_to( + dp_mask, ops.shape(sequences) + ) + dp_sequences = sequences * dp_mask + else: + dp_sequences = sequences # Backends are allowed to specify (optionally) optimized # implementation of the inner GRU loop. In the case of # TF for instance, it will leverage cuDNN when feasible, and # it will raise NotImplementedError otherwise. 
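# [Editor's note — sketch, not part of the patch] Why the masking above
# preserves the cuDNN path: one (already rescaled) dropout mask is sampled
# from the first timestep and broadcast across time, so the mask is
# constant per sequence and can be folded into the inputs before the fused
# kernel runs. NumPy stand-in, assuming inverted-dropout scaling:
import numpy as np

rng = np.random.default_rng(0)
sequences = rng.normal(size=(4, 10, 8))             # (batch, time, features)
rate = 0.5
dp_mask = (rng.random((4, 8)) > rate) / (1 - rate)  # one mask per sample
dp_sequences = sequences * dp_mask[:, None, :]      # broadcast over time
print(dp_sequences.shape)                           # (4, 10, 8)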
out = backend.gru( - sequences, + dp_sequences, initial_state, mask, kernel=self.cell.kernel, diff --git a/keras/src/layers/rnn/lstm.py b/keras/src/layers/rnn/lstm.py index 33055fd197ec..f4903655bb8f 100644 --- a/keras/src/layers/rnn/lstm.py +++ b/keras/src/layers/rnn/lstm.py @@ -518,14 +518,24 @@ def inner_loop(self, sequences, initial_state, mask, training=False): mask = mask[0] if self.use_cudnn in ("auto", True): - if not self.dropout and not self.recurrent_dropout: + if not self.recurrent_dropout: try: + if self.dropout: + dp_mask = self.cell.get_dropout_mask(sequences[:, 0, :]) + dp_mask = ops.expand_dims(dp_mask, axis=1) + dp_mask = ops.broadcast_to( + dp_mask, ops.shape(sequences) + ) + dp_sequences = sequences * dp_mask + else: + dp_sequences = sequences + # Backends are allowed to specify (optionally) optimized # implementation of the inner LSTM loop. In the case of # TF for instance, it will leverage cuDNN when feasible, and # it will raise NotImplementedError otherwise. out = backend.lstm( - sequences, + dp_sequences, initial_state[0], initial_state[1], mask, diff --git a/pip_build.py b/pip_build.py index 6570ab8eb228..66e7578eee25 100644 --- a/pip_build.py +++ b/pip_build.py @@ -84,7 +84,6 @@ def build(root_path, is_nightly=False, rc_index=None): try: copy_source_to_build_directory(root_path) move_tf_keras_directory() - print(os.getcwd()) from keras.src.version import __version__ # noqa: E402 From e85c12b394a8ccef21fb78ff46d6fea77ccca74e Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Tue, 30 Apr 2024 16:06:22 -0700 Subject: [PATCH 097/101] Fix plot_model for input dicts. --- keras/src/utils/model_visualization.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/keras/src/utils/model_visualization.py b/keras/src/utils/model_visualization.py index 2784a83e3ca2..1fd180339b14 100644 --- a/keras/src/utils/model_visualization.py +++ b/keras/src/utils/model_visualization.py @@ -112,18 +112,32 @@ def make_layer_label(layer, **kwargs): output_shape = tree.map_structure(lambda x: x.shape, layer.output) except (ValueError, AttributeError): pass + + def format_shape(shape): + if shape is not None: + if isinstance(shape, dict): + shape_str = ", ".join( + [f"{k}: {v}" for k, v in shape.items()] + ) + else: + shape_str = f"{shape}" + shape_str = shape_str.replace("}", "").replace("{", "") + else: + shape_str = "?" + return shape_str + if class_name != "InputLayer": cols.append( ( '" ) ) cols.append( ( '" ) ) From 9a69ecefe1f671ee8a051d88dac221b8f7c9124f Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Tue, 30 Apr 2024 20:44:31 -0700 Subject: [PATCH 098/101] Fix deprecation warning in torch --- keras/src/backend/torch/linalg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras/src/backend/torch/linalg.py b/keras/src/backend/torch/linalg.py index 81041782a1b8..9a15f24786ef 100644 --- a/keras/src/backend/torch/linalg.py +++ b/keras/src/backend/torch/linalg.py @@ -8,7 +8,7 @@ def cholesky(x): - return torch.cholesky(x) + return torch.linalg.cholesky(x) def det(x): From c0b4d8661bea1775ad5762c3a6c0b12cf2a62843 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 May 2024 10:02:20 -0700 Subject: [PATCH 099/101] Bump the github-actions group with 2 updates (#19653) Bumps the github-actions group with 2 updates: [actions/upload-artifact](https://github.com/actions/upload-artifact) and [github/codeql-action](https://github.com/github/codeql-action). 
Updates `actions/upload-artifact` from 4.3.1 to 4.3.3 - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/5d5d22a31266ced268874388b861e4b58bb5c2f3...65462800fd760344b1a7b4382951275a0abb4808) Updates `github/codeql-action` from 3.24.9 to 3.25.3 - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/1b1aada464948af03b950897e5eb522f92603cc2...d39d31e687223d841ef683f52467bd88e9b21c14) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch dependency-group: github-actions - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-minor dependency-group: github-actions ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 1286b78b5274..2fe3b5cdca81 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -48,7 +48,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: SARIF file path: results.sarif @@ -56,6 +56,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 + uses: github/codeql-action/upload-sarif@d39d31e687223d841ef683f52467bd88e9b21c14 # v3.25.3 with: sarif_file: results.sarif From 09133f459d4158d35ca582433c2577a02696f62e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 May 2024 10:02:42 -0700 Subject: [PATCH 100/101] Bump the python group with 2 updates (#19654) Bumps the python group with 2 updates: torch and torchvision. Updates `torch` from 2.2.1+cu121 to 2.3.0+cu121 Updates `torchvision` from 0.17.1+cu121 to 0.18.0+cu121 --- updated-dependencies: - dependency-name: torch dependency-type: direct:production update-type: version-update:semver-minor dependency-group: python - dependency-name: torchvision dependency-type: direct:production update-type: version-update:semver-minor dependency-group: python ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements-jax-cuda.txt | 2 +- requirements-tensorflow-cuda.txt | 2 +- requirements-torch-cuda.txt | 4 ++-- requirements.txt | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements-jax-cuda.txt b/requirements-jax-cuda.txt index e21c1cb1c5bc..3e432a1fb174 100644 --- a/requirements-jax-cuda.txt +++ b/requirements-jax-cuda.txt @@ -3,7 +3,7 @@ tensorflow-cpu~=2.16.1 # Pin to TF 2.16 # Torch cpu-only version (needed for testing). --extra-index-url https://download.pytorch.org/whl/cpu -torch>=2.1.0, <2.3.0 +torch>=2.1.0, <2.4.0 torchvision>=0.16.0 # Jax with cuda support. 
diff --git a/requirements-tensorflow-cuda.txt b/requirements-tensorflow-cuda.txt index f3b946ddcfee..1ce18a3b3c9f 100644 --- a/requirements-tensorflow-cuda.txt +++ b/requirements-tensorflow-cuda.txt @@ -3,7 +3,7 @@ tensorflow[and-cuda]~=2.16.1 # Pin to TF 2.16 # Torch cpu-only version (needed for testing). --extra-index-url https://download.pytorch.org/whl/cpu -torch>=2.1.0, <2.3.0 +torch>=2.1.0, <2.4.0 torchvision>=0.16.0 # Jax cpu-only version (needed for testing). diff --git a/requirements-torch-cuda.txt b/requirements-torch-cuda.txt index e0a71cc4e6a3..88b6d6617389 100644 --- a/requirements-torch-cuda.txt +++ b/requirements-torch-cuda.txt @@ -3,8 +3,8 @@ tensorflow-cpu~=2.16.1 # Pin to TF 2.16 # Torch with cuda support. --extra-index-url https://download.pytorch.org/whl/cu121 -torch==2.2.1+cu121 -torchvision==0.17.1+cu121 +torch==2.3.0+cu121 +torchvision==0.18.0+cu121 # Jax cpu-only version (needed for testing). jax[cpu] diff --git a/requirements.txt b/requirements.txt index c759c9d18156..af1e84cf91f4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ tensorflow-cpu~=2.16.1 # Pin to TF 2.16 # Torch. # TODO: Pin to < 2.3.0 (GitHub issue #19602) --extra-index-url https://download.pytorch.org/whl/cpu -torch>=2.1.0, <2.3.0 +torch>=2.1.0, <2.4.0 torchvision>=0.16.0 # Jax. From 1941c3089e837b91a0a71a817b98d156ac2518be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Chollet?= Date: Wed, 1 May 2024 10:03:00 -0700 Subject: [PATCH 101/101] Revert "Bump the python group with 2 updates (#19654)" (#19655) This reverts commit 09133f459d4158d35ca582433c2577a02696f62e. --- requirements-jax-cuda.txt | 2 +- requirements-tensorflow-cuda.txt | 2 +- requirements-torch-cuda.txt | 4 ++-- requirements.txt | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements-jax-cuda.txt b/requirements-jax-cuda.txt index 3e432a1fb174..e21c1cb1c5bc 100644 --- a/requirements-jax-cuda.txt +++ b/requirements-jax-cuda.txt @@ -3,7 +3,7 @@ tensorflow-cpu~=2.16.1 # Pin to TF 2.16 # Torch cpu-only version (needed for testing). --extra-index-url https://download.pytorch.org/whl/cpu -torch>=2.1.0, <2.4.0 +torch>=2.1.0, <2.3.0 torchvision>=0.16.0 # Jax with cuda support. diff --git a/requirements-tensorflow-cuda.txt b/requirements-tensorflow-cuda.txt index 1ce18a3b3c9f..f3b946ddcfee 100644 --- a/requirements-tensorflow-cuda.txt +++ b/requirements-tensorflow-cuda.txt @@ -3,7 +3,7 @@ tensorflow[and-cuda]~=2.16.1 # Pin to TF 2.16 # Torch cpu-only version (needed for testing). --extra-index-url https://download.pytorch.org/whl/cpu -torch>=2.1.0, <2.4.0 +torch>=2.1.0, <2.3.0 torchvision>=0.16.0 # Jax cpu-only version (needed for testing). diff --git a/requirements-torch-cuda.txt b/requirements-torch-cuda.txt index 88b6d6617389..e0a71cc4e6a3 100644 --- a/requirements-torch-cuda.txt +++ b/requirements-torch-cuda.txt @@ -3,8 +3,8 @@ tensorflow-cpu~=2.16.1 # Pin to TF 2.16 # Torch with cuda support. --extra-index-url https://download.pytorch.org/whl/cu121 -torch==2.3.0+cu121 -torchvision==0.18.0+cu121 +torch==2.2.1+cu121 +torchvision==0.17.1+cu121 # Jax cpu-only version (needed for testing). jax[cpu] diff --git a/requirements.txt b/requirements.txt index af1e84cf91f4..c759c9d18156 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ tensorflow-cpu~=2.16.1 # Pin to TF 2.16 # Torch. # TODO: Pin to < 2.3.0 (GitHub issue #19602) --extra-index-url https://download.pytorch.org/whl/cpu -torch>=2.1.0, <2.4.0 +torch>=2.1.0, <2.3.0 torchvision>=0.16.0 # Jax.
' + f'Input shape: {input_shape or "?"}' + "' - f'Output shape: {shape or "?"}' + f'Output shape: {output_shape or "?"}' "' - f'Input shape: {input_shape or "?"}' + f"Input shape: {format_shape(input_shape)}" "' - f'Output shape: {output_shape or "?"}' + f"Output shape: {format_shape(output_shape)}" "