Revert "[Relay] add test for second order ad (apache#2754)" (apache#2926
Browse files Browse the repository at this point in the history
)

This reverts commit f5ca991.
tqchen authored and wweic committed Apr 8, 2019
1 parent 430336f commit 52bce33
Showing 2 changed files with 27 additions and 66 deletions.
19 changes: 1 addition & 18 deletions python/tvm/relay/op/_tensor_grad.py
@@ -3,7 +3,7 @@
 from __future__ import absolute_import
 from ..expr import const
 from .op import register_gradient
-from .transform import collapse_sum_like, broadcast_to_like, where
+from .transform import collapse_sum_like, where
 from .tensor import exp, negative, power, less
 from .tensor import zeros_like, ones_like

@@ -77,20 +77,3 @@ def divide_grad(orig, grad):
     x, y = orig.args
     return [collapse_sum_like(grad / y, x),
             collapse_sum_like(- (grad * orig / y), y)]
-
-
-@register_gradient("zeros_like")
-def zeros_like_grad(orig, grad):
-    """Returns [0]"""
-    return [orig]
-
-@register_gradient("ones_like")
-def ones_like_grad(orig, grad):
-    """Returns [0]"""
-    return [zeros_like(orig.args[0])]
-
-@register_gradient("collapse_sum_like")
-def collapse_sum_like_grad(orig, grad):
-    """Returns [broadcast_to_like(grad, x), 0]"""
-    x, y = orig.args
-    return [broadcast_to_like(grad, x), zeros_like(y)]

74 changes: 26 additions & 48 deletions tests/python/relay/test_pass_gradient.py
@@ -20,8 +20,8 @@ def test_id():
     ex = create_executor()
     x = rand(dtype, *shape)
     forward, (grad,) = ex.evaluate(back_func)(x)
-    tvm.testing.assert_allclose(forward.asnumpy(), x.asnumpy())
-    tvm.testing.assert_allclose(grad.asnumpy(), np.ones_like(x.asnumpy()))
+    np.testing.assert_allclose(forward.asnumpy(), x.asnumpy())
+    np.testing.assert_allclose(grad.asnumpy(), np.ones_like(x.asnumpy()))
 
 
 def test_add():
@@ -35,8 +35,8 @@ def test_add():
     ex = create_executor()
     x = rand(dtype, *shape)
     forward, (grad,) = ex.evaluate(back_func)(x)
-    tvm.testing.assert_allclose(forward.asnumpy(), 2 * x.asnumpy())
-    tvm.testing.assert_allclose(grad.asnumpy(), 2 * np.ones_like(x.asnumpy()))
+    np.testing.assert_allclose(forward.asnumpy(), 2 * x.asnumpy())
+    np.testing.assert_allclose(grad.asnumpy(), 2 * np.ones_like(x.asnumpy()))
 
 
 def test_temp_add():
@@ -51,8 +51,8 @@ def test_temp_add():
     ex = create_executor()
     x = rand(dtype, *shape)
     forward, (grad,) = ex.evaluate(back_func)(x)
-    tvm.testing.assert_allclose(forward.asnumpy(), 4 * x.asnumpy())
-    tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy()))
+    np.testing.assert_allclose(forward.asnumpy(), 4 * x.asnumpy())
+    np.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy()))
 
 
 def test_sub():
@@ -66,8 +66,8 @@ def test_sub():
     ex = create_executor()
     x = rand(dtype, *shape)
     forward, (grad,) = ex.evaluate(back_func)(x)
-    tvm.testing.assert_allclose(forward.asnumpy(), np.zeros_like(x.asnumpy()))
-    tvm.testing.assert_allclose(grad.asnumpy(), np.zeros_like(x.asnumpy()))
+    np.testing.assert_allclose(forward.asnumpy(), np.zeros_like(x.asnumpy()))
+    np.testing.assert_allclose(grad.asnumpy(), np.zeros_like(x.asnumpy()))
 
 
 def test_broadcast_add():
@@ -90,11 +90,11 @@ def test_broadcast_add():
                                                      relay.TupleType([t1, t2])]))
     ex = create_executor()
     forward, (grad_x, grad_y) = ex.evaluate(full_func)(x_nd, y_nd)
-    tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
-    tvm.testing.assert_allclose(grad_x.asnumpy(),
-                                np.ones_like(expected_forward).sum(axis=2, keepdims=True))
-    tvm.testing.assert_allclose(grad_y.asnumpy(),
-                                np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0))
+    np.testing.assert_allclose(forward.asnumpy(), expected_forward)
+    np.testing.assert_allclose(grad_x.asnumpy(),
+                               np.ones_like(expected_forward).sum(axis=2, keepdims=True))
+    np.testing.assert_allclose(grad_y.asnumpy(),
+                               np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0))
 
 
 def test_broadcast_subtract():
@@ -117,11 +117,11 @@ def test_broadcast_subtract():
                                                      relay.TupleType([t1, t2])]))
     ex = create_executor()
     forward, (grad_x, grad_y) = ex.evaluate(full_func)(x_nd, y_nd)
-    tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
-    tvm.testing.assert_allclose(grad_x.asnumpy(),
-                                np.ones_like(expected_forward).sum(axis=2, keepdims=True))
-    tvm.testing.assert_allclose(grad_y.asnumpy(),
-                                -np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0))
+    np.testing.assert_allclose(forward.asnumpy(), expected_forward)
+    np.testing.assert_allclose(grad_x.asnumpy(),
+                               np.ones_like(expected_forward).sum(axis=2, keepdims=True))
+    np.testing.assert_allclose(grad_y.asnumpy(),
+                               -np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0))
 
 
 def test_tuple():
@@ -147,10 +147,10 @@ def test_tuple():
     expected_forward = x_np + y_np - z_np
     ex = create_executor()
     forward, (grad_x, grad_y, grad_z) = ex.evaluate(back_func)(x_nd, y_nd, z_nd)
-    tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
-    tvm.testing.assert_allclose(grad_x.asnumpy(), np.ones_like(grad_x.asnumpy()))
-    tvm.testing.assert_allclose(grad_y.asnumpy(), np.ones_like(grad_y.asnumpy()))
-    tvm.testing.assert_allclose(grad_z.asnumpy(), -1 * np.ones_like(grad_z.asnumpy()))
+    np.testing.assert_allclose(forward.asnumpy(), expected_forward)
+    np.testing.assert_allclose(grad_x.asnumpy(), np.ones_like(grad_x.asnumpy()))
+    np.testing.assert_allclose(grad_y.asnumpy(), np.ones_like(grad_y.asnumpy()))
+    np.testing.assert_allclose(grad_z.asnumpy(), -1 * np.ones_like(grad_z.asnumpy()))
 
 
 def test_pow():
@@ -168,9 +168,8 @@ def test_pow():
     i_nd = rand(dtype, *shape)
     ex = create_executor(mod=mod)
     forward, (grad_i,) = ex.evaluate(back_func)(i_nd)
-    tvm.testing.assert_allclose(forward.asnumpy(), 8 * i_nd.asnumpy())
-    tvm.testing.assert_allclose(grad_i.asnumpy(), 8 * np.ones_like(grad_i.asnumpy()))
-
+    np.testing.assert_allclose(forward.asnumpy(), 8 * i_nd.asnumpy())
+    np.testing.assert_allclose(grad_i.asnumpy(), 8 * np.ones_like(grad_i.asnumpy()))
 
 def test_ref():
     shape = (10, 10)
@@ -188,28 +188,8 @@ def test_ref():
     x_nd = rand(dtype, *shape)
     ex = create_executor()
     forward, (grad_x,) = ex.evaluate(back_func)(x_nd)
-    tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
-    tvm.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy()))
-
-
-def test_square_second_order():
-    shape = (10, 10)
-    dtype = 'float32'
-    t = relay.TensorType(shape, dtype)
-    x = relay.var("x", t)
-    func = relay.Function([x], x * x)
-    back_func = relay.ir_pass.infer_type(gradient(func))
-    y = relay.var("y", t)
-    back_func_adjusted = relay.Function([y], relay.TupleGetItem(relay.TupleGetItem(back_func(y), 1), 0))
-    back_func_adjusted = relay.ir_pass.infer_type(back_func_adjusted)
-    back_back_func = relay.ir_pass.infer_type(gradient(back_func_adjusted))
-    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
-    x_nd = rand(dtype, *shape)
-    ex = create_executor()
-    forward, (grad_x,) = ex.evaluate(back_back_func)(x_nd)
-    tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
-    tvm.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy()))
-
+    np.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
+    np.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy()))
 
 if __name__ == "__main__":
     test_id()
@@ -221,4 +200,3 @@ def test_square_second_order():
     test_tuple()
     test_pow()
     test_ref()
-    test_square_second_order()
