Dynamic ONNX importer: Upsampling and Pad (#2)
Lily Orth-Smith authored and Matthew Brookhart committed Sep 3, 2020
1 parent 569ed7f commit 66ba19a
Showing 2 changed files with 54 additions and 42 deletions.
79 changes: 46 additions & 33 deletions python/tvm/relay/frontend/onnx.py
@@ -41,7 +41,6 @@
 
 __all__ = ['from_onnx']
 
-
 class onnx_input():
     """ Dual purpose list or dictionary access object."""
     def __init__(self):
@@ -127,7 +126,6 @@ def revert_caffe2_pad(pads):
         raise tvm.error.OpAttributeInvalid('Number of pads must be either 2 or 4.')
     return pads
 
-
 def get_pad_pair(input1d, kernel1d, stride1d):
     """infer pad size"""
     if input1d % stride1d == 0:
@@ -641,26 +639,22 @@ def _impl_v2(cls, inputs, attr, params):
 
     @classmethod
     def _impl_v11(cls, inputs, attr, params):
-        pad_width = []
-        pads = infer_value_simulated(inputs[1], params).asnumpy()
+        pads = inputs[1]
         if len(inputs) == 3:
-            value = infer_value_simulated(inputs[2], params).asnumpy().item()
+            value = _op.take(inputs[2], _op.const(0))
         else:
             value = 0
-        attr["pad_value"] = value
-        dims = int(len(pads) / 2)
-        for i in range(dims):
-            pad_width.append((pads[i], pads[i + dims]))
-        attr['pad_width'] = pad_width
+
+        pads_shape = infer_shape(pads)
+        dims = int(pads_shape[0] / 2)
+        pad_width_expr = _op.transpose(_op.reshape(pads, (2, dims)))
         pad_mode = attr.get('mode', b'constant').decode('utf-8')
-        if pad_mode in ['constant', 'edge', 'reflect']:
-            attr['pad_mode'] = pad_mode
-            attr.pop('mode', None)
-        else:
+
+        if not pad_mode in ['constant', 'edge', 'reflect']:
             raise tvm.error.OpAttributeInvalid('Value ' + pad_mode +
                                                ' in attribute "mode" is invalid for operator Pad.')
 
-        return AttrCvt('pad')(inputs[:1], attr, params)
+        return _op.nn.pad(inputs[0], pad_width_expr, value, pad_mode=pad_mode)
 
 
 class ParametricSoftPlus(OnnxOpConverter):
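
The reshape/transpose pair above reproduces what the deleted Python loop computed, but as a Relay expression that still works when pads is only known at runtime. A minimal numpy sketch of the same index shuffle (values illustrative):

import numpy as np

# ONNX Pad lays pads out as all begin values, then all end values:
# [x1_begin, x2_begin, ..., x1_end, x2_end, ...]
pads = np.array([1, 2, 3, 4])
dims = len(pads) // 2

# reshape to (2, dims): row 0 holds begins, row 1 holds ends;
# transposing pairs them up per axis, which is the layout nn.pad expects
pad_width = np.transpose(np.reshape(pads, (2, dims)))
print(pad_width.tolist())  # [[1, 3], [2, 4]]

# identical to the loop this commit removes
assert pad_width.tolist() == [[pads[i], pads[i + dims]] for i in range(dims)]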
@@ -868,17 +862,24 @@ class Upsample(OnnxOpConverter):
     @classmethod
     def _impl_v9(cls, inputs, attr, params):
         scales = attr.get('scales')
+
+        input_shape = infer_shape(inputs[0])
+        dims = len(input_shape)
+
         if not scales:
             #Here we are going to higher OPSET version.
-            assert len(inputs) == 2, "Upsample op take 2 inputs, {} given".format(len(inputs))
+            assert len(inputs) == 2, "Upsample op takes 2 inputs, {} given".format(len(inputs))
+
             if get_name(inputs[1]) in params:
                 scales = params[inputs[1].name_hint].asnumpy()
-            else:
+            elif dims == 5:
                 scales = infer_value_simulated(inputs[1], params).asnumpy()
-            inputs = inputs[:1]
-        assert scales[0] == 1.0 and scales[1] == 1.0
-        input_shape = infer_shape(inputs[0])
-        dims = len(input_shape)
+            else:
+                scales = inputs[1]
+
+        if not isinstance(scales, Call):
+            assert scales[0] == 1.0 and scales[1] == 1.0
+
         mode = attr.get('mode')
         if mode == b'nearest':
             method = "nearest_neighbor"
@@ -887,21 +888,31 @@ def _impl_v9(cls, inputs, attr, params):
         else:
             raise tvm.error.OpAttributeInvalid(
                 'Value {} in attribute "mode" of operator Upsample is not valid.'.format(mode))
-        attr = {'scale_h': scales[-2], 'scale_w': scales[-1], 'method': method}
+
+        if method == 'nearest_neighbor':
+            align_corners=False
+        else:
+            align_corners=True
+        # in 3d case, we use the purely static op
         if dims == 5:
             assert len(scales) == 5
-            attr['scale_d'] = scales[-3]
-            attr['layout'] = 'NCDHW'
-            op_name = 'upsampling3d'
+            scale_h = scales[-2]
+            scale_w = scales[-1]
+            scale_d = scales[-3]
+            layout = 'NCDHW'
+            return _op.nn.upsampling3d(inputs[0], scale_d, scale_h, scale_w,
+                                       layout=layout, method=method)
+        # in 2d case, use dynamic op
         else:
-            assert len(scales) == 4
-            attr['layout'] = 'NCHW'
-            if method == 'nearest_neighbor':
-                attr['align_corners'] = False
+            if isinstance(scales, Call):
+                scale_h = _op.take(scales, _op.const(3))
+                scale_w = _op.take(scales, _op.const(4))
             else:
-                attr['align_corners'] = True
-            op_name = 'upsampling'
-        return AttrCvt(op_name)(inputs, attr)
+                assert len(scales) == 4
+                scale_h = scales[-2]
+                scale_w = scales[-1]
+            layout = 'NCHW'
+
+            return _op.nn.upsampling(inputs[0], scale_h, scale_w, layout=layout, method=method, align_corners=align_corners)
-
 
 class Shape(OnnxOpConverter):
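
A note on the scales handling: opset-9 Upsample carries one scale per input axis, so for NCHW input scales looks like [1.0, 1.0, scale_h, scale_w]. That is why the importer asserts the first two entries are 1.0 and reads scales[-2] and scales[-1] on the static path, and why it indexes the scales tensor with _op.take when scales only materializes at runtime. A small numpy sketch of the static layout (values illustrative):

import numpy as np

# one scale per axis of the NCHW input; batch and channel stay unscaled
scales = np.array([1.0, 1.0, 2.0, 3.0])
assert scales[0] == 1.0 and scales[1] == 1.0

scale_h, scale_w = scales[-2], scales[-1]
in_h, in_w = 4, 4
out_h, out_w = int(in_h * scale_h), int(in_w * scale_w)
print(out_h, out_w)  # a 4x4 image upsamples to 8x12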
@@ -2289,3 +2300,5 @@ def from_onnx(model, shape=None, dtype="float32", opset=None, freeze_params=False):
         opset = 1
     mod, params = g.from_onnx(graph, opset, freeze_params)
     return mod, params
+
+
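Taken together, the point of the dynamic Pad and Upsample converters is that models whose pads or scales are computed inside the graph can now be imported and executed through the Relay VM rather than the static graph executor. A minimal usage sketch, assuming a TVM build with this change (the model path and input shape are illustrative):

import numpy as np
import onnx
import tvm
from tvm import relay

onnx_model = onnx.load("model_with_dynamic_pad.onnx")  # illustrative path

# freeze_params=True folds ONNX initializers into the graph as constants,
# letting shape-producing subgraphs be resolved at compile time
mod, params = relay.frontend.from_onnx(onnx_model, freeze_params=True)

# dynamic ops need the Relay VM, mirroring get_tvm_output_with_vm
# in the test changes below
ex = relay.create_executor("vm", mod=mod, ctx=tvm.cpu(), target="llvm")
data = np.random.randn(1, 3, 32, 32).astype("float32")
result = ex.evaluate()(data).asnumpy()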
17 changes: 8 additions & 9 deletions tests/python/frontend/onnx/test_forward.py
@@ -988,11 +988,9 @@ def _test_upsample_bilinear_opset9():
                               graph, producer_name='upsample_bilinear_opset9_test')
 
     for target, ctx in tvm.testing.enabled_targets():
-        tvm_out = get_tvm_output(
-            model, in_array, target, ctx, out_shape, 'float32')
+        tvm_out = get_tvm_output_with_vm(model, [in_array], target, ctx, opset=9, freeze_params=True)
         tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
 
-
 def _test_upsample3d_trilinear():
     scale = 2
     in_shape = (1, 1, 3, 3, 3)
@@ -1026,7 +1024,8 @@ def _test_upsample3d_trilinear():
             model, in_array, target, ctx, out_shape, 'float32')
         tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
 
-@tvm.testing.uses_gpu
+# TODO(mbrookhart): enable once VM supports heterogenous execution
+# @tvm.testing.uses_gpu
 def test_upsample():
     _test_upsample_nearest()
     _test_upsample_bilinear()
@@ -1419,7 +1418,7 @@ def verify_pad_v11(indata, pads, mode='constant', value=0.0):
                     outputs=[helper.make_tensor_value_info("output",
                                                            TensorProto.FLOAT, list(outdata.shape))])
     else:
-        inputs = [indata, pads, np.array([value])]
+        inputs = [indata, pads, np.array([value]).astype("float32")]
         outdata = np.pad(indata, pad_width=np_pads,
                          mode='constant', constant_values=value)
         node = helper.make_node(
@@ -1435,7 +1434,7 @@ def verify_pad_v11(indata, pads, mode='constant', value=0.0):
                 helper.make_tensor_value_info("pads",
                                               TensorProto.INT64,(len(pads),)),
                 helper.make_tensor_value_info("constant_value",
-                                              TensorProto.INT64,(1,)),
+                                              TensorProto.FLOAT,(1,)),
             ],
             initializer=[helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads),
                          helper.make_tensor("constant_value", TensorProto.FLOAT, (1,), [value])],
@@ -1444,12 +1443,12 @@ def verify_pad_v11(indata, pads, mode='constant', value=0.0):
     model = helper.make_model(graph, producer_name='pad_test')
     # tvm result
     for target, ctx in tvm.testing.enabled_targets():
-        tvm_out = get_tvm_output(
-            model, inputs, target, ctx, outdata.shape, 'float32', opset=11)
+        tvm_out = get_tvm_output_with_vm(model, inputs, target, ctx, opset=11, freeze_params=False)
         tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
 
 
-@tvm.testing.uses_gpu
+# TODO(mbrookhart): enable once VM supports heterogenous execution
+# @tvm.testing.uses_gpu
 def test_pad():
     verify_pad(np.random.randn(2, 2).astype(
         np.float32), [0, 1, 0, 0], 'constant', 0.0)
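
For context on what verify_pad_v11 exercises: in opset 11, pads and the optional constant_value are inputs rather than node attributes, which is what sends the importer down the dynamic path whenever they are not initializers. A minimal sketch of such a model, assuming pads = [1, 0, 0, 1] so a (2, 2) input pads to (3, 3) (names illustrative):

from onnx import helper, TensorProto

# Opset-11 Pad: pads and constant_value arrive as inputs, not attributes
node = helper.make_node("Pad", inputs=["input", "pads", "constant_value"],
                        outputs=["output"], mode="constant")
graph = helper.make_graph(
    [node], "pad_v11_sketch",
    inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, (2, 2)),
            helper.make_tensor_value_info("pads", TensorProto.INT64, (4,)),
            helper.make_tensor_value_info("constant_value", TensorProto.FLOAT, (1,))],
    # with pads = [1, 0, 0, 1], each axis gains one element: (2, 2) -> (3, 3)
    outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, (3, 3))])
model = helper.make_model(graph, producer_name="pad_v11_sketch")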
