diff --git a/returnn/tf/layers/base.py b/returnn/tf/layers/base.py
index 08394c2a4a..ef58a9e0f3 100644
--- a/returnn/tf/layers/base.py
+++ b/returnn/tf/layers/base.py
@@ -1380,7 +1380,7 @@ def get_constraints_value(self):
       c += self.spatial_smoothing * self.get_output_spatial_smoothing_energy()
     if self.darc1:
       c += self.darc1 * self.get_darc1()
-    if c is 0:
+    if c == 0:
       return None
     return c
 
diff --git a/returnn/tf/network.py b/returnn/tf/network.py
index 96e888f278..e4051363ad 100644
--- a/returnn/tf/network.py
+++ b/returnn/tf/network.py
@@ -1618,7 +1618,7 @@ def inv_reduce_sum(x, name):
     if should_train or should_eval:
       # These values are cached internally and the graph nodes are created on the first call.
       loss = self.get_objective()
-      if loss is 0:
+      if loss == 0:
         loss = tf_util.global_tensor(lambda: tf.constant(0.0), name="zero_loss")
       else:  # non-constant-zero loss
         assert self.losses_dict
diff --git a/returnn/tf/util/basic.py b/returnn/tf/util/basic.py
index 150289800c..3faf7d555e 100644
--- a/returnn/tf/util/basic.py
+++ b/returnn/tf/util/basic.py
@@ -2021,7 +2021,7 @@ def expand_dims_unbroadcast(x, axis, dim, name="expand_dims_unbroadcast"):
   with tf.name_scope(name):
     x = tf.convert_to_tensor(x)
     x = tf.expand_dims(x, axis)
-    if dim is not 1:
+    if dim != 1:
       new_ndim = x.get_shape().ndims
       assert new_ndim is not None, "not implemented otherwise yet"
       assert isinstance(axis, int), "not implemented otherwise yet"
@@ -5547,7 +5547,7 @@ def tensor_array_stack(ta, start=0, stop=None, name="TensorArrayStack"):
   :param str name:
   :rtype: tf.Tensor
   """
-  if start is 0 and stop is None:
+  if start == 0 and stop is None:
     return ta.stack(name=name)
   with tf_compat.v1.colocate_with(_tensor_array_ref(ta)):
     with tf.name_scope(name):
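
Note on the change: `is` and `is not` compare object identity, not value. Checks like `x is 0` only appear to work because CPython happens to cache small integers, and CPython 3.8+ emits a SyntaxWarning for identity comparisons with literals. A minimal standalone sketch (not part of the diff) showing the difference:

# Sketch: identity ("is") vs. equality ("==") on Python ints.
# CPython interns small ints (-5..256), so "x is 0" can work by accident.
a = int("1000")  # constructed at runtime to avoid compile-time constant folding
b = int("1000")
print(a == b)    # True: "==" compares values
print(a is b)    # False in CPython: two distinct int objects outside the cache
print(0.0 == 0)  # True: numeric equality also holds across int/float

For the network.py hunk, `self.get_objective()` presumably returns the Python int 0 when no losses are defined (the `else:  # non-constant-zero loss` branch handles the tensor case), so comparing by value is the intended check.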