[Relay to onnx conversion][New ops] (#8436)
* [Relay to Onnx conversion][Sigmoid]

* added support for Sigmoid op
* added unit test

* [Relay to Onnx conversion][Copy]

* added support for Copy op
* added unit test

* [Relay to Onnx conversion][Round]

* added support for Round op
* added unit test

* [Relay to Onnx conversion][Cast]

* added support for Cast op
* added unit test

* [Relay to Onnx testing]

* Fixed formatting
* Fixed formatting issues
* Fixed formatting issue in onnx.py

* [Relay to Onnx conversion][Conv2d Transpose]

* Added support for conv2d transpose operator
* Added unit test case. The unit test is similar to the conv2d unit test.

* Fixed formatting errors
schilkunda-amba authored Jul 12, 2021
1 parent c3558a1 commit 3424005
Showing 2 changed files with 156 additions and 0 deletions.
29 changes: 29 additions & 0 deletions python/tvm/contrib/target/onnx.py
@@ -24,6 +24,7 @@
import onnx
import onnx.utils
from onnx import numpy_helper, OperatorSetIdProto, defs
from onnx import TensorProto
import tvm
from tvm import relay
import tvm._ffi
@@ -138,6 +139,21 @@ def convert_attributes(cls, attrs):
}


class ConvTranspose(OpConverter):
"""Operator converter for ConvTranspose."""

@classmethod
def convert_attributes(cls, attrs):
return {
"group": attrs.get_int("groups"),
"pads": attrs.get_int_tuple("padding"),
"strides": attrs.get_int_tuple("strides"),
"dilations": attrs.get_int_tuple("dilation"),
"kernel_shape": attrs.get_int_tuple("kernel_size"),
"output_padding": attrs.get_int_tuple("output_padding"),
}
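
For a quick sanity check of this mapping, a sketch along the lines below should work once the converter lands; it assumes the converter classes in python/tvm/contrib/target/onnx.py are importable, and it simply prints the ONNX ConvTranspose attributes derived from a Relay conv2d_transpose call.

import tvm
from tvm import relay
from tvm.contrib.target.onnx import ConvTranspose  # assumes the module-level class is importable

x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")
w = relay.var("w", shape=(3, 10, 3, 3), dtype="float32")
call = relay.nn.conv2d_transpose(x, w, channels=10, kernel_size=(3, 3), padding=(1, 1))
# Prints the ONNX attribute dict (group, pads, strides, dilations,
# kernel_shape, output_padding) built from the Relay Conv2DTransposeAttrs.
print(ConvTranspose.convert_attributes(call.attrs))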


class MaxPool(OpConverter):
"""Operator converter for MaxPool."""

@@ -638,9 +654,18 @@ def convert_attributes(cls, attrs):
return {"alpha": attrs.alpha, "beta": attrs.beta, "bias": attrs.bias, "size": attrs.size}


class Cast(OpConverter):
""" Operator converter for Cast."""

@classmethod
def convert_attributes(cls, attrs):
return {"to": getattr(TensorProto, attrs.dtype.upper())}


relay_to_onnx_op_mapping = {
"reshape": Reshape,
"nn.conv2d": Conv,
"nn.conv2d_transpose": ConvTranspose,
"add": rename("Add"),
"nn.relu": rename("Relu"),
"transpose": Transpose,
@@ -672,6 +697,10 @@ def convert_attributes(cls, attrs):
"clip": Clip,
"expand_dims": Expand,
"nn.lrn": LRN,
"sigmoid": rename("Sigmoid"),
"copy": rename("Identity"),
"round": rename("Round"),
"cast": Cast,
}


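For context, an end-to-end use of these new mapping entries goes through this module's conversion entry point; the sketch below converts a single Relay sigmoid into an ONNX model. It is a minimal sketch, assuming the module exposes a to_onnx(relay_ir, params, name, ...) helper; the shapes and the model name are illustrative.

import tvm
from tvm import relay
from tvm.contrib.target.onnx import to_onnx  # assumed signature: to_onnx(relay_ir, params, name, ...)

x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")
func = relay.Function([x], relay.sigmoid(x))
mod = tvm.IRModule.from_expr(func)
# Converts the Relay module to an onnx.ModelProto via the "sigmoid" -> "Sigmoid" entry above.
onnx_model = to_onnx(mod, {}, "relay_sigmoid_example")
print(onnx_model.graph.node)
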
127 changes: 127 additions & 0 deletions tests/python/contrib/test_onnx.py
@@ -175,6 +175,60 @@ def verify_conv2d(
verify_conv2d("float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(4, 4))


def test_conv2d_transpose():
"""Conv2d_Transpose unit tests."""

def verify_conv2d_transpose(
dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs
):
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", shape=kshape, dtype=dtype)
y = relay.nn.conv2d_transpose(
x, w, padding=padding, dilation=dilation, groups=groups, **attrs
)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
verify_results(func, [data, kernel], "test_conv2d_transpose", rtol=1e-5, atol=1e-5)

dshape = (1, 3, 224, 224)
kshape = (3, 10, 3, 3)
verify_conv2d_transpose(
"float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3, 3)
)

dshape = (1, 3, 224, 224)
kshape = (3, 10, 3, 3)
verify_conv2d_transpose(
"float32", 1, dshape, kshape, padding=(2, 2), channels=10, kernel_size=(3, 3)
)

dshape = (1, 3, 18, 18)
kshape = (3, 10, 2, 2)
verify_conv2d_transpose(
"float32",
1,
dshape,
kshape,
padding=(2, 2),
channels=10,
kernel_size=(2, 2),
dilation=(1, 1),
)

dshape = (1, 3, 18, 18)
kshape = (3, 10, 4, 4)
verify_conv2d_transpose(
"float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(4, 4)
)


def test_reshape():
def verify_reshape(shape, newshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
@@ -519,6 +573,8 @@ def verify_expand_dims(dshape, axis, num_newaxis, dtype="float32"):


def test_lrn():
"""LRN unit test."""

def verify_lrn(xshape, size, dtype="float32"):
x = relay.var("x", relay.ty.TensorType(xshape, dtype))
y = relay.nn.lrn(x, size=size, axis=1, alpha=1.0, beta=1.0, bias=1.0)
@@ -533,10 +589,77 @@ def verify_lrn(xshape, size, dtype="float32"):
verify_lrn(i, s)


def test_sigmoid():
"""Sigmoid unit test."""

def verify_sigmoid(dshape, dtype="float32"):
x = relay.var("x", relay.ty.TensorType(dshape, dtype))
y = relay.sigmoid(x)
func = relay.Function([x], y)
x_data = np.random.uniform(size=dshape).astype(dtype)
verify_results(func, [x_data], "test_sigmoid", rtol=1e-4, atol=1e-4)

isize = [(1, 3, 480, 640), (1, 3, 224, 224)]

for i in isize:
verify_sigmoid(i)


def test_copy():
"""Copy unit test."""

def verify_copy(dshape, dtype="float32"):
x = relay.var("x", relay.ty.TensorType(dshape, dtype))
y = relay.copy(x)
func = relay.Function([x], y)
x_data = np.random.uniform(size=dshape).astype(dtype)
verify_results(func, [x_data], "test_copy", rtol=1e-4, atol=1e-4)

isize = [(1, 3, 480, 640), (1, 3, 224, 224)]

for i in isize:
verify_copy(i)


def test_round():
"""Round unit test."""

def verify_round(dshape, dtype="float32"):
x = relay.var("x", relay.ty.TensorType(dshape, dtype))
y = relay.round(x)
func = relay.Function([x], y)
x_data = np.random.uniform(size=dshape).astype(dtype)
verify_results(func, [x_data], "test_round", rtol=1e-4, atol=1e-4)

isize = [(1, 3, 480, 640), (1, 3, 224, 224)]

for i in isize:
verify_round(i)


def test_cast():
"""Cast unit test."""

def verify_cast(dshape, dtype):
x = relay.var("x", relay.ty.TensorType(dshape, "float32"))
y = relay.cast(x, dtype)
func = relay.Function([x], y)
x_data = np.random.uniform(size=dshape).astype("float32")
verify_results(func, [x_data], "test_cast", rtol=1e-4, atol=1e-4)

isize = [(1, 3, 480, 640), (1, 3, 224, 224)]
out_dtypes = ["int8", "int16", "uint8", "uint16"]

for i in isize:
for o_dtype in out_dtypes:
verify_cast(i, o_dtype)


if __name__ == "__main__":
test_add()
test_bias_add()
test_conv2d()
test_conv2d_transpose()
test_reshape()
test_transpose()
test_dense()
@@ -557,3 +680,7 @@ def verify_lrn(xshape, size, dtype="float32"):
test_clip()
test_expand_dims()
test_lrn()
test_sigmoid()
test_copy()
test_round()
test_cast()
