Skip to content

Commit

Permalink
* Add test case for the ONNX Shape operator (replacing the reshape_like test)
Browse files Browse the repository at this point in the history
  • Loading branch information
srkreddy1238 committed Mar 30, 2019
1 parent 1c4f076 commit 49de0f2
Show file tree
Hide file tree
Showing 2 changed files with 18 additions and 30 deletions.
17 changes: 2 additions & 15 deletions python/tvm/relay/frontend/onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -344,7 +344,7 @@ def _impl_v1(cls, inputs, attr, params):
m.run()
params_new = m.get_output(0)
inputs.pop(1)
out = _op.reshape(inputs[0], tuple(params_new.asnumpy().flatten()))
out = _op.reshape(inputs[0], tuple(params_new.asnumpy().astype('int32').flatten()))

return out

Expand Down Expand Up @@ -483,20 +483,7 @@ class Shape(OnnxOpConverter):

@classmethod
def _impl_v1(cls, inputs, attr, params):
from topi.util import get_const_tuple
try:
out_type = ir_pass.infer_type(inputs[0])
out_shape = get_const_tuple(out_type.checked_type.shape)
except ValueError as e:
raise ImportError(
"Please pass graph level shapes to compute shape node properly {}".format(e))

node_name = attr['tvm_custom']['name']
params[node_name] = _nd.array(np.asarray(out_shape, dtype='int64'))

return _expr.var(node_name,
shape=params[node_name].shape,
dtype=params[node_name].dtype)
return _op.shape_of(inputs[0])

class Cast(OnnxOpConverter):
""" Operator converter for Cast.
Expand Down
31 changes: 16 additions & 15 deletions tests/python/frontend/onnx/test_forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,35 +113,36 @@ def test_reshape():

tvm.testing.assert_allclose(ref_shape, tvm_out.shape)

def test_reshape_like():
def test_shape():
in_shape = (4, 3, 3, 4)
ref_shape = (3, 4, 4, 3)
ref_shape = (6, 2, 4, 3)

ref_array = np.random.uniform(size=ref_shape).astype('float32')
ref_array = np.array(ref_shape)
ref_node = onnx.helper.make_node('Constant',
inputs=[],
outputs=['ref_in'],
value=onnx.helper.make_tensor(name = 'const_tensor',
data_type = onnx.TensorProto.FLOAT,
data_type = onnx.TensorProto.INT32,
dims = ref_array.shape,
vals = ref_array.flatten().astype(float)))
copy_node = helper.make_node("Identity", ["ref_in"], ["copy_in"])
reshape_node = helper.make_node("Reshape", ["in", "copy_in"], ["out"])
vals = ref_array.flatten().astype(int)))
reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])

shape_node = helper.make_node("Shape", ['out'], ['final_out'])

graph = helper.make_graph([ref_node, copy_node, reshape_node],
"reshape_like_test",
graph = helper.make_graph([ref_node, reshape_node, shape_node],
"shape_test",
inputs = [helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(in_shape))],
outputs = [helper.make_tensor_value_info("out",
outputs = [helper.make_tensor_value_info("final_out",
TensorProto.FLOAT, list(ref_shape))])

model = helper.make_model(graph, producer_name='reshape_like_test')
model = helper.make_model(graph, producer_name='shape_test')

for target, ctx in ctx_list():
x = np.random.uniform(size=in_shape).astype('float32')
tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32')
x = np.random.uniform(size=in_shape).astype('int32')
tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'int32')

tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
tvm.testing.assert_allclose(ref_shape, tvm_out)

def _test_power_iteration(x_shape, y_shape):
if isinstance(y_shape, int):
Expand Down Expand Up @@ -995,7 +996,7 @@ def test_LogSoftmax():

if __name__ == '__main__':
test_reshape()
test_reshape_like()
test_shape()
test_power()
test_squeeze()
test_unsqueeze()
Expand Down

0 comments on commit 49de0f2

Please sign in to comment.