Remove broken logger warning #781

Merged
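This PR removes two rewrite-time log warnings from `aesara/tensor/math_opt.py`, along with the module's now-unused logger setup. The "CANONIZE FAILED" warning was broken outright: it built its message by calling `" ".join(...)` on a tuple mixing strings with `Variable` and `Type` objects, and `str.join` rejects non-string items. A minimal stand-in in plain Python (`FakeVariable` is made up for illustration; no Aesara needed):

```python
class FakeVariable:
    """Stand-in for an Aesara `Variable` (illustration only)."""

# Reproduces the failure mode of the removed warning: str.join only
# accepts strings, so this raises before anything is ever logged.
" ".join(("CANONIZE FAILED: new, out = ", FakeVariable(), ","))
# TypeError: sequence item 1: expected str instance, FakeVariable found
```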
34 changes: 1 addition & 33 deletions aesara/tensor/math_opt.py
@@ -1,7 +1,6 @@
""" Tensor optimizations addressing the ops in math.py."""

import itertools
import logging
import operator
from functools import partial, reduce

@@ -93,11 +92,6 @@
     values_eq_approx_remove_nan,
 )
 from aesara.tensor.var import TensorConstant, get_unique_value
-from aesara.utils import NoDuplicateOptWarningFilter
-
-
-_logger = logging.getLogger("aesara.tensor.math_opt")
-_logger.addFilter(NoDuplicateOptWarningFilter())


 def scalarconsts_rest(inputs, elemwise=True, only_process_constants=False):
@@ -186,14 +180,6 @@ def local_0_dot_x(fgraph, node):
     elif x.ndim == 1 and y.ndim == 1:
         constant_zero = assert_op(constant_zero, eq(x.shape[0], y.shape[0]))
         return [constant_zero]
-    else:
-        _logger.warning(
-            "Optimization Warning: "
-            "Optimization aesara/opt.py:local_0_dot_x Found "
-            "that it could apply, but was not implemented "
-            "for dot product with these input types:\n"
-            f"({x.type}, {y.type})"
-        )


 @register_canonicalize
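In `local_0_dot_x`, the deleted `else` branch only logged that the rewrite did not cover a case. Falling through and returning `None` is already the conventional way for a local rewrite to decline, so no replacement code is needed. A minimal sketch of that protocol, assuming the `local_optimizer` decorator from `aesara.graph.opt` and the `Dot` op from `aesara.tensor.math` (the rewrite below is hypothetical and does nothing):

```python
from aesara.graph.opt import local_optimizer
from aesara.tensor.math import Dot

@local_optimizer([Dot])
def local_decline_example(fgraph, node):
    # Returning None (or False) tells the rewriter "not applicable
    # here"; the graph is left untouched and nothing is logged.
    return None
```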
@@ -1024,28 +1010,10 @@ def same(x, y):
         new = fill_chain(new, node.inputs)[0]

         if new.type == out.type:
-            # This happen with test
-            # aesara/tensor/tests/test_opt.py:T_local_switch_sink
             new.tag.values_eq_approx = values_eq_approx_remove_inf_nan

             # We need to implement the copy over of the stacktrace.
             # See issue #5104.
             copy_stack_trace(out, new)
             return [new]
-        else:
-            _logger.warning(
-                " ".join(
-                    (
-                        "CANONIZE FAILED: new, out = ",
-                        new,
-                        ",",
-                        out,
-                        "types",
-                        new.type,
-                        ",",
-                        out.type,
-                    )
-                )
-            )
-            return False
+        return False

     def __str__(self):
70 changes: 48 additions & 22 deletions tests/tensor/test_math_opt.py
@@ -16,9 +16,15 @@
 from aesara.compile.mode import Mode, get_default_mode, get_mode
 from aesara.compile.ops import DeepCopyOp, deep_copy_op
 from aesara.configdefaults import config
-from aesara.graph.basic import Constant
+from aesara.graph.basic import Apply, Constant, equal_computations
 from aesara.graph.fg import FunctionGraph
-from aesara.graph.opt import LocalOptGroup, TopoOptimizer, check_stack_trace, out2in
+from aesara.graph.opt import (
+    LocalOptGroup,
+    TopoOptimizer,
+    check_stack_trace,
+    in2out,
+    out2in,
+)
 from aesara.graph.opt_utils import is_same_graph, optimize_graph
 from aesara.graph.optdb import OptimizationQuery
 from aesara.misc.safe_asarray import _asarray
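The new imports support the tests below: `Apply` lets a test construct a node by hand, `equal_computations` compares two graphs structurally, and `in2out` joins the existing `out2in` for wrapping local rewrites into a graph-wide rewriter. As I read these helpers, the names only indicate traversal direction over the graph's nodes; a hedged sketch:

```python
# Assumption: both helpers wrap a local rewrite into a graph-wide
# rewriter; `in2out` visits nodes from inputs toward outputs, `out2in`
# from outputs toward inputs.
from aesara.graph.opt import in2out, out2in
from aesara.tensor.math_opt import local_mul_canonizer

forward = in2out(local_mul_canonizer)
backward = out2in(local_mul_canonizer)
```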
@@ -79,6 +85,7 @@
     is_1pexp,
     local_grad_log_erfc_neg,
     local_greedy_distributor,
+    local_mul_canonizer,
     mul_canonizer,
     parse_mul_tree,
     perform_sigm_times_exp,
@@ -220,23 +227,31 @@ def test_kording_bug(self):
         assert np.all(r0 == r2)


-class TestAlgebraicCanonize:
-    def test_muldiv(self):
-        x, y, z = matrices("xyz")
-        a, b, c, d = matrices("abcd")
-        # e = (2.0 * x) / (2.0 * y)
-        # e = (2.0 * x) / (4.0 * y)
-        # e = x / (y / z)
-        # e = (x * y) / x
-        # e = (x / y) * (y / z) * (z / x)
-        # e = (a / b) * (b / c) * (c / d)
-        # e = (a * b) / (b * c) / (c * d)
-        # e = 2 * x / 2
-        # e = x / y / x
-        # e = (x / x) * (y / y)
-        e = (-1 * x) / y / (-2 * z)
-        g = FunctionGraph([x, y, z, a, b, c, d], [e])
-        mul_canonizer.optimize(g)
+class TestAlgebraicCanonizer:
+    x, y, z = matrices("xyz")
+
+    @pytest.mark.parametrize(
+        "e, exp_g",
+        [
+            # ((2.0 * x) / (2.0 * y), None),
+            # ((2.0 * x) / (4.0 * y), None),
+            # (x / (y / z), None),
+            # ((x * y) / x, None),
+            # ((x / y) * (y / z) * (z / x), None),
+            # ((a / b) * (b / c) * (c / d), None),
+            # ((a * b) / (b * c) / (c * d), None),
+            # (2 * x / 2, None),
+            # (x / y / x, None),
+            # ((x / x) * (y / y), None),
+            (
+                (-1 * x) / y / (-2 * z),
+                (at.as_tensor([[0.5]], dtype="floatX") * x) / (y * z),
+            ),
+        ],
+    )
+    def test_muldiv(self, e, exp_g):
+        g_opt = optimize_graph(e, custom_opt=mul_canonizer)
+        assert equal_computations([g_opt], [exp_g])

     def test_elemwise_multiple_inputs_optimisation(self):
         # verify that the AlgebraicCanonizer merge sequential Elemwise({mul,add}) part 1
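The rewritten `test_muldiv` no longer mutates a `FunctionGraph` in place without checking anything; it rewrites each expression through `optimize_graph` and asserts structural equality with `equal_computations`. A small sketch of that comparison, assuming Aesara's public tensor API:

```python
import aesara.tensor as at
from aesara.graph.basic import equal_computations

x, y = at.matrices("xy")
assert equal_computations([x + y], [x + y])
# Distinct variables must appear as the identical objects in the same
# positions, so swapping the arguments is not considered equal.
assert not equal_computations([x + y], [y + x])
```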
@@ -245,7 +260,6 @@ def test_elemwise_multiple_inputs_optimisation(self):
         # that are not implemented but are supposed to be.
         #
         # Test with and without DimShuffle
-
         shp = (5, 5)
         fx, fy, fz = fmatrices("xyz")
         dx, dy, dz = dmatrices("xyz")
@@ -369,8 +383,7 @@ def test_elemwise_multiple_inputs_optimisation(self):
         assert out_dtype == out.dtype

     @pytest.mark.skip(
-        reason="Current implementation of AlgebraicCanonizer does not "
-        "implement all cases. Skip the corresponding test."
+        reason="Current implementation of AlgebraicCanonizer does not implement all cases."
     )
     def test_elemwise_multiple_inputs_optimisation2(self):
         # verify that the AlgebraicCanonizer merge sequential Elemwise({mul,add}) part 2.
@@ -951,6 +964,19 @@ def test_canonicalize_nan(self):
         # at all.
         assert not sio.getvalue()

+    def test_mismatching_types(self):
+        a = at.as_tensor([[0.0]], dtype=np.float64)
+        b = tensor("float64", (None,)).dimshuffle("x", 0)
+        z = add(a, b)
+        # Construct a node with the wrong output `Type`
+        z = Apply(
+            z.owner.op, z.owner.inputs, [tensor("float64", (None, None))]
+        ).outputs[0]
+
+        z_opt = optimize_graph(z, custom_opt=in2out(local_mul_canonizer, name="blah"))
+        # No rewrite was applied
+        assert z_opt is z
+

 def test_local_merge_abs():
     x, y, z = matrices("xyz")
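The new `test_mismatching_types` exercises exactly the code path whose warning was deleted: when the canonicalizer's replacement cannot match the node's declared output `Type`, the rewrite now declines silently, and `optimize_graph` hands back the very same variable, which is what `z_opt is z` asserts. A sketch of that identity check, reusing this test module's imports (`was_rewritten` is a hypothetical helper):

```python
from aesara.graph.opt import in2out
from aesara.graph.opt_utils import optimize_graph
from aesara.tensor.math_opt import local_mul_canonizer

def was_rewritten(z):
    """Sketch: report whether the canonicalization changed `z` at all."""
    z_opt = optimize_graph(z, custom_opt=in2out(local_mul_canonizer))
    return z_opt is not z
```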