[RELAY][MXNET][FRONTEND] add support for MXNET numpy operators #6054
Merged

Changes from 13 commits (19 commits in total):
- b04dad4  [RELAY][MXNET][FRONTEND] add supports for OPs in numpy from mxnet
- 7f1e329  Merge branch 'master' into relay_mx_np
- 851f47a  Update test_forward.py (sandyhu533)
- fb1d50b  Update mxnet.py (sandyhu533)
- 5188efa  Update mxnet.py (sandyhu533)
- 781dd73  Update test_forward.py (sandyhu533)
- 45fdde4  update and bugfix (sandyhu533)
- e8f25bc  test for multiple dtypes
- bca8e36  Update test_forward.py (sandyhu533)
- 04e5552  add data type and optimize coding style (sandyhu533)
- 84ab57e  replace pytest.skip with @pytest.mark.skipif (sandyhu533)
- 31630ad  Merge branch 'master' into relay_mx_np (sandyhu533)
- 9151019  Update test_forward.py (sandyhu533)
- cb08135  update pytest style (sandyhu533)
- bbb9c33  Update test_forward.py (sandyhu533)
- 7fee8c0  Update test_forward.py (sandyhu533)
- 77a6553  Update test_forward.py (sandyhu533)
- 89f6937  Merge branch 'relay_mx_np' of https://github.com/sandyhu533/incubator… (sandyhu533)
- 45e3e55  Update test_forward.py (sandyhu533)
@@ -27,7 +27,8 @@
from mxnet import gluon
from mxnet.gluon.model_zoo import vision
import model_zoo

import random
import pytest


def verify_mxnet_frontend_impl(mx_symbol,
                               data_shape=(1, 3, 224, 224),
@@ -1410,6 +1411,221 @@ def verify(data_shape, axis, use_length, length):
    verify((2, 3, 4), 2, True, np.array([[3, 4, 2], [1, 2, 1]]).astype('int32'))

@pytest.mark.skipif(not hasattr(mx.sym.np, 'pad'), reason="mx.sym.np.pad hasn't been published yet")
@pytest.mark.parametrize(
    "data_shape, pad_width",
    [((1, 1, 3, 5), (0, 0, 0, 0, 1, 2, 3, 4)), ((1, 1, 3, 5, 7), (0, 0, 0, 0, 1, 2, 3, 4, 5, 6))]
)
@pytest.mark.parametrize("mode", ["constant", "edge", "reflect"])
@pytest.mark.parametrize("dtype", ['float64', 'float32', 'int64', 'int32'])
@pytest.mark.parametrize("constant_value", [0.0, 3.0])
def test_forward_npi_pad(data_shape, pad_width, mode, dtype, constant_value):
    data_np = np.random.uniform(size=data_shape).astype(dtype)
    data = mx.sym.var('data')
    # the legacy mx.ndarray.pad serves as the reference implementation here
    if mode == 'constant':
        ref_res = mx.ndarray.pad(mx.nd.array(data_np), mode=mode, pad_width=pad_width, constant_value=constant_value)
        mx_sym = mx.sym.np.pad(data.as_np_ndarray(), mode=mode, pad_width=pad_width, constant_values=constant_value)
    else:
        ref_res = mx.ndarray.pad(mx.nd.array(data_np), mode=mode, pad_width=pad_width)
        mx_sym = mx.sym.np.pad(data.as_np_ndarray(), mode=mode, pad_width=pad_width)
    mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype)
    for target, ctx in ctx_list():
        for kind in ["debug"]:
            intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
            op_res = intrp.evaluate()(data_np)
            tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5)


@pytest.mark.parametrize("data_shape", [(2,2,2),(2,7,2)]) | ||
@pytest.mark.parametrize("dtype", ['float64', 'float32', 'int64', 'int32', 'bool']) | ||
@pytest.mark.parametrize("axes", [(1,0,2),None]) | ||
def test_forward_npi_transpose(data_shape, axes, dtype): | ||
def verify(data_shape, axes=None): | ||
data_np = np.random.uniform(size=data_shape).astype(dtype) | ||
data = mx.sym.var('data') | ||
ref_res = mx.np.transpose(mx.np.array(data_np), axes=axes) | ||
mx_sym = mx.sym.np.transpose(data.as_np_ndarray(), axes=axes) | ||
mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype) | ||
for target, ctx in ctx_list(): | ||
for kind in ["graph", "vm", "debug"]: | ||
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) | ||
op_res = intrp.evaluate()(data_np) | ||
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) | ||
|
||
|
||
@pytest.mark.parametrize(
    "data_shape1, data_shape2, axis",
    [((2, 2), (2, 2), 1), ((2, 4), (2, 3), 1), ((1, 3, 2), (1, 3, 5), 2), ((1, 3, 3), (1, 3, 3), 1), ((1, 3), (1, 3), 0)]
)
@pytest.mark.parametrize("dtype", ['float64', 'float32', 'int64', 'int32'])
def test_forward_npi_concatenate(data_shape1, data_shape2, axis, dtype):
    data_np1 = np.random.uniform(size=data_shape1).astype(dtype)
    data_np2 = np.random.uniform(size=data_shape2).astype(dtype)
    data1 = mx.sym.var('data1')
    data2 = mx.sym.var('data2')
    ref_res = mx.np.concatenate([mx.np.array(data_np1), mx.np.array(data_np2)], axis=axis)
    mx_sym = mx.sym.np.concatenate([data1.as_np_ndarray(), data2.as_np_ndarray()], axis=axis)
    mod, _ = relay.frontend.from_mxnet(mx_sym, shape={"data1": data_shape1, "data2": data_shape2}, dtype=dtype)
    for target, ctx in ctx_list():
        for kind in ["graph", "vm", "debug"]:
            intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
            op_res = intrp.evaluate()(data_np1, data_np2)
            tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5)


@pytest.mark.parametrize("data_shape", [(2,2,2),(2,7,2),(2,2,2,1,2,3,1),(1,8)]) | ||
@pytest.mark.parametrize("dtype", ['float64', 'float32', 'int64', 'int32', 'bool']) | ||
def test_forward_np_copy(data_shape,dtype): | ||
data_np = np.random.uniform(size=data_shape).astype(dtype) | ||
data = mx.sym.var('data') | ||
ref_res = mx.np.copy(mx.np.array(data_np)) | ||
mx_sym = mx.sym.np.copy(data.as_np_ndarray()) | ||
mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype) | ||
for target, ctx in ctx_list(): | ||
for kind in ["graph", "vm", "debug"]: | ||
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) | ||
op_res = intrp.evaluate()(data_np) | ||
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) | ||
|
||
|
||
@pytest.mark.parametrize("dtype", ['float64', 'float32', 'int64', 'int32', 'bool']) | ||
def test_forward_npx_reshape(dtype): | ||
def verify(data_shape,out_shape,reverse=False): | ||
data_np = np.random.uniform(size=data_shape).astype(dtype) | ||
data = mx.sym.var('data') | ||
ref_res = mx.npx.reshape(mx.np.array(data_np), newshape=out_shape, reverse=reverse) | ||
mx_sym = mx.sym.npx.reshape(data.as_np_ndarray(), newshape=out_shape, reverse=reverse) | ||
mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype) | ||
for target, ctx in ctx_list(): | ||
for kind in ["graph", "vm", "debug"]: | ||
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) | ||
op_res = intrp.evaluate()(data_np) | ||
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) | ||
|
||
verify(data_shape=(2, 3, 8), out_shape=(-2, -2, 2, -1)) | ||
verify(data_shape=(8, 3, 3, 3, 4, 4), out_shape=(-6, 2, -1, -4)) | ||
verify(data_shape=(8, 3, 3, 3, 4, 4), out_shape=(-5, -4)) | ||
verify(data_shape=(8, 3, 3, 3, 3, 8), out_shape=(-4, -5), reverse=True) | ||
verify(data_shape=(8, 3, 2, 4, 8), out_shape=(-4, -1, 2, -6), reverse=True) | ||
|
||
|
||
@pytest.mark.parametrize("data_shape", [(2,2,2),(2,7,2),(2,2,2,1,2,3,1),(1,8),(2,2),(1,3)]) | ||
@pytest.mark.parametrize("dtype", ['float64', 'float32', 'int64', 'int32']) | ||
def test_forward_npi_binary(data_shape,dtype): | ||
ref_ops = [mx.np.power, mx.np.multiply, mx.np.add, mx.np.less] | ||
mx_ops = [mx.sym.np.power, mx.sym.np.multiply, mx.sym.np.add, mx.sym.np.less] | ||
for i in range(len(ref_ops)): | ||
ref_op = ref_ops[i] | ||
mx_op = mx_ops[i] | ||
# mx.np.power only support float type | ||
if ref_op == mx.np.power and dtype not in ['float64', 'float32']: | ||
continue | ||
data_np1 = np.random.uniform(size=data_shape).astype(dtype) | ||
data_np2 = np.random.uniform(size=data_shape).astype(dtype) | ||
data1 = mx.sym.var('lhs') | ||
data2 = mx.sym.var('rhs') | ||
ref_res = ref_op(mx.np.array(data_np1), mx.np.array(data_np2)) | ||
mx_sym = mx_op(data1.as_np_ndarray(), data2.as_np_ndarray()) | ||
mod, _ = relay.frontend.from_mxnet(mx_sym, shape={"lhs": data_shape, "rhs": data_shape}, dtype=dtype) | ||
for target, ctx in ctx_list(): | ||
for kind in ["graph", "vm", "debug"]: | ||
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) | ||
op_res = intrp.evaluate()(data_np1, data_np2) | ||
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) | ||
|
||
|
||
@pytest.mark.parametrize("data_shape", [(2,2,2),(2,7,2),(2,2,2,1,2,3,1),(1,8),(2,2),(1,3)]) | ||
@pytest.mark.parametrize("dtype", ['float64', 'float32', 'int64', 'int32']) | ||
@pytest.mark.parametrize("scalar", [1.0,2.0,3.0,4.0]) | ||
def test_forward_npi_binary_scalar(data_shape,dtype,scalar): | ||
ref_ops = [mx.np.power, mx.np.multiply, mx.np.add, mx.np.true_divide] | ||
mx_ops = [mx.sym.np.power, mx.sym.np.multiply, mx.sym.np.add, mx.sym.np.true_divide] | ||
for i in range(len(ref_ops)): | ||
ref_op = ref_ops[i] | ||
mx_op = mx_ops[i] | ||
# mx.np.power only support float type | ||
if ref_op == mx.np.power and dtype not in ['float64', 'float32']: | ||
continue | ||
data_np1 = np.random.uniform(size=data_shape).astype(dtype) | ||
data1 = mx.sym.var('lhs') | ||
ref_res = ref_op(mx.np.array(data_np1), scalar) | ||
mx_sym = mx_op(data1.as_np_ndarray(), scalar) | ||
mod, _ = relay.frontend.from_mxnet(mx_sym, shape={"lhs": data_shape}, dtype=dtype) | ||
for target, ctx in ctx_list(): | ||
for kind in ["graph", "vm", "debug"]: | ||
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) | ||
op_res = intrp.evaluate()(data_np1) | ||
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) | ||
|
||
|
||
@pytest.mark.parametrize("data_shape", [(2,2,2),(2,7,2),(2,2,2,1,2,3,1),(1,8),(2,2),(1,3)]) | ||
@pytest.mark.parametrize("dtype", ['float64', 'float32']) | ||
def test_forward_npi_tanh(data_shape,dtype): | ||
data_np1 = np.random.uniform(size=data_shape).astype(dtype) | ||
data1 = mx.sym.var('data') | ||
ref_res = mx.np.tanh(mx.np.array(data_np1)) | ||
mx_sym = mx.sym.np.tanh(data1.as_np_ndarray()) | ||
mod, _ = relay.frontend.from_mxnet(mx_sym, shape={"data": data_shape}, dtype=dtype) | ||
for target, ctx in ctx_list(): | ||
for kind in ["graph", "vm", "debug"]: | ||
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) | ||
op_res = intrp.evaluate()(data_np1) | ||
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) | ||
|
||
|
||
@pytest.mark.skipif(not hasattr(mx.np, 'where'), reason="mx.np.where hasn't been published yet")
@pytest.mark.parametrize("data_shape", [(2, 2, 2), (2, 7, 2), (1, 8), (2, 2), (1, 3)])
@pytest.mark.parametrize("cond_dtype", ['float64', 'float32', 'int64', 'int32', 'bool'])
@pytest.mark.parametrize("data_dtype", ['float64', 'float32', 'int64', 'int32', 'bool'])
@pytest.mark.parametrize("scalar", [1.0, 2.0])
def test_forward_npi_where_rscalar(data_shape, cond_dtype, data_dtype, scalar):
    # when the data is bool, the scalar operand must be bool as well
    if data_dtype == 'bool':
        scalar = scalar == 0.0
    cond_np = np.random.uniform(size=data_shape).astype(cond_dtype)
    data_np = np.random.uniform(size=data_shape).astype(data_dtype)
    cond = mx.sym.var('condition')
    data = mx.sym.var('x')
    ref_res = mx.np.where(mx.np.array(cond_np), mx.np.array(data_np), scalar)
    mx_sym = mx.sym.np.where(cond.as_np_ndarray(), data.as_np_ndarray(), scalar)
    dtype_dict = {"condition": cond_dtype, "x": data_dtype}
    mod, _ = relay.frontend.from_mxnet(
        mx_sym, shape={"condition": data_shape, "x": data_shape},
        dtype=dtype_dict)
    for target, ctx in ctx_list():
        for kind in ["graph", "vm", "debug"]:
            intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
            op_res = intrp.evaluate()(cond_np, data_np)
            tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5)


@pytest.mark.parametrize("dtype", ['float64', 'float32', 'int64', 'int32', 'bool']) | ||
def test_forward_split_v2(dtype): | ||
def verify(data_shape, axis=0, indices_or_sections=1, squeeze_axis=False): | ||
data_np = np.random.uniform(size=data_shape).astype(dtype) | ||
data = mx.sym.var('data') | ||
ref_res = mx.ndarray.split_v2(mx.nd.array(data_np), indices_or_sections, axis=axis, squeeze_axis=squeeze_axis) | ||
mx_sym = mx.sym.split_v2(data.as_nd_ndarray(), indices_or_sections, axis=axis, squeeze_axis=squeeze_axis) | ||
mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype) | ||
for target, ctx in ctx_list(): | ||
for kind in ["graph", "vm", "debug"]: | ||
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) | ||
op_res = intrp.evaluate()(data_np) | ||
op_res_ = [] | ||
for arr in op_res: | ||
op_res_.append(arr.asnumpy().tolist()) | ||
ref_res_ = [] | ||
for arr in ref_res: | ||
ref_res_.append(arr.asnumpy().tolist()) | ||
tvm.testing.assert_allclose(op_res_, ref_res_, rtol=1e-5) | ||
|
||
verify((3, 2, 1), axis=1, indices_or_sections=2) | ||
verify((3, 2, 1), axis=0, indices_or_sections=3) | ||
verify((3, 2, 1), axis=0, indices_or_sections=3, squeeze_axis=True) | ||
verify((3, 2, 1), axis=0, indices_or_sections=(1, 2)) | ||
|
||
|
||
if __name__ == '__main__':
    test_forward_mlp()
    test_forward_vgg()

Review comment on this block: remove all the test_xxx function calls and add …
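The suggested replacement is cut off above, so the following is only a guess at the usual pattern: a minimal sketch, assuming the reviewer meant delegating discovery to pytest instead of calling each test by hand.

import sys
import pytest

if __name__ == '__main__':
    # Let pytest discover and run every test_* function in this file,
    # forwarding any extra command-line arguments to it.
    sys.exit(pytest.main([__file__] + sys.argv[1:]))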
Review comment (on test_forward_npi_pad, which runs only the "debug" executor while the other tests cover "graph", "vm", and "debug"): why is this one using debug only?
Review comment: maybe we can also use @pytest.mark.parametrize for ctx_list and kind.