Implement 1d deconvolution (apache#4476)
alexgl-github authored and zhiics committed Dec 31, 2019
1 parent 73fc1ac commit 93c8d5b
Showing 19 changed files with 853 additions and 14 deletions.
58 changes: 58 additions & 0 deletions include/tvm/relay/attrs/nn.h
@@ -315,6 +315,64 @@ struct Conv2DTransposeAttrs : public tvm::AttrsNode<Conv2DTransposeAttrs> {
}
};

/*! \brief Attributes used in 1D transposed convolution operator */
struct Conv1DTransposeAttrs : public tvm::AttrsNode<Conv1DTransposeAttrs> {
IndexExpr channels;
Array<IndexExpr> kernel_size;
Array<IndexExpr> strides;
Array<IndexExpr> padding;
Array<IndexExpr> output_padding;
Array<IndexExpr> dilation;
int groups;
std::string data_layout;
std::string kernel_layout;
std::string out_layout;
DataType out_dtype;

TVM_DECLARE_ATTRS(Conv1DTransposeAttrs, "relay.attrs.Conv1DTransposeAttrs") {
TVM_ATTR_FIELD(channels)
.set_default(NullValue<IndexExpr>())
.describe("The dimensionality of the output space"
"i.e. the number of output channels in the convolution.");
TVM_ATTR_FIELD(kernel_size)
.describe("The dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
TVM_ATTR_FIELD(strides).set_default(Array<IndexExpr>({1}))
.describe("The strides of the convolution.");
TVM_ATTR_FIELD(output_padding).set_default(Array<IndexExpr>({0}))
.describe("Zero-padding added to one side of the output.");
TVM_ATTR_FIELD(padding).set_default(Array<IndexExpr>({0}))
.describe("Symmetric or asymmetric padding."
"Single value: the input is implicitly zero-padded on both sides."
"Two values: padding[0] is used for left input padding, "
"padding[1] is used for right input padding,");
TVM_ATTR_FIELD(dilation).set_default(Array<IndexExpr>({1}))
.describe("Specifies the dilation rate to use for dilated convolution.");
TVM_ATTR_FIELD(groups).set_default(1)
.describe("Controls the connections between inputs and outputs."
"At groups=1, all inputs are convolved to all outputs."
"At groups=2, the operation becomes equivalent to having two convolution"
"layers side by side, each seeing half the input channels, and producing"
"half the output channels, and both subsequently concatenated.");
TVM_ATTR_FIELD(data_layout).set_default("NCW")
.describe("Dimension ordering of data. Can be 'NCW', 'NWC', etc."
"'N', 'C', 'W' stands for batch, channel, and width"
"dimensions respectively. Convolution is applied on the"
"'W' dimension.");
TVM_ATTR_FIELD(kernel_layout).set_default("OIW")
.describe("Dimension ordering of data and weight. Can be 'OIW', 'OIW16o16i', etc."
"'O', 'I', 'W' stands for num_filter, input_channel, and width"
"dimensions respectively.");
TVM_ATTR_FIELD(out_layout).set_default("")
.describe("Dimension ordering of output. Can be 'NCW', 'NWC', etc."
"'N', 'C', 'W' stands for batch, channel, and width"
"dimensions respectively. Default to be same as input layout.");
TVM_ATTR_FIELD(out_dtype)
.set_default(NullValue<DataType>())
.describe("Output data type, set to explicit type under mixed precision setting");
}
};

/*! \brief Attributes for max pool operator */
struct MaxPool2DAttrs : public tvm::AttrsNode<MaxPool2DAttrs> {
Array<IndexExpr> pool_size;
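For reference, the attributes above determine the output width through the standard transposed-convolution size formula. A minimal Python sketch, assuming symmetric padding and dilation 1; the helper name is illustrative, not part of this patch:

# Output width of a 1D transposed convolution (NCW layout),
# assuming symmetric padding and dilation == 1.
def conv1d_transpose_out_width(in_w, kernel_w, stride=1, pad=0, output_padding=0):
    return (in_w - 1) * stride - 2 * pad + kernel_w + output_padding

# Example: in_w=32, kernel_w=3, stride=2, pad=1 -> (32-1)*2 - 2 + 3 = 63
assert conv1d_transpose_out_width(32, 3, stride=2, pad=1) == 63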
1 change: 1 addition & 0 deletions python/tvm/autotvm/task/relay_integration.py
@@ -128,6 +128,7 @@ def extract_from_multiple_program(funcs, params, ops, target, target_host=None,
tvm.relay.op.nn.dense: [topi.nn.dense],
tvm.relay.op.nn.batch_matmul: [topi.nn.batch_matmul],
tvm.relay.op.nn.deformable_conv2d: [topi.nn.deformable_conv2d_nchw],
tvm.relay.op.nn.conv1d_transpose: [topi.nn.conv1d_transpose_ncw],
}

topi_funcs = []
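With this entry in place, autotvm task extraction can pick up conv1d_transpose workloads. A rough usage sketch, assuming the TVM 0.6-era API, where mod and params come from a frontend import:

from tvm import autotvm, relay

# Extract tunable conv1d_transpose tasks from a relay module.
tasks = autotvm.task.extract_from_program(mod["main"], params=params,
                                          ops=(relay.op.nn.conv1d_transpose,),
                                          target='cuda')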
12 changes: 12 additions & 0 deletions python/tvm/autotvm/task/topi_integration.py
@@ -92,6 +92,7 @@ def __init__(self, allow_duplicate=False):
topi.nn.bitserial_conv2d_nhwc: "topi_nn_bitserial_conv2d_nhwc",
topi.nn.bitserial_dense: "topi_nn_bitserial_dense",
topi.nn.deformable_conv2d_nchw: "topi_nn_deformable_conv2d_nchw",
topi.nn.conv1d_transpose_ncw: "topi_nn_conv1d_transpose_ncw",
}

self.topi_to_schedule = {
@@ -109,6 +110,7 @@ def __init__(self, allow_duplicate=False):
topi.nn.bitserial_conv2d_nhwc: [topi.generic.schedule_bitserial_conv2d_nhwc],
topi.nn.bitserial_dense: [topi.generic.schedule_bitserial_dense],
topi.nn.deformable_conv2d_nchw: [topi.generic.schedule_deformable_conv2d_nchw],
topi.nn.conv1d_transpose_ncw: [topi.generic.schedule_conv1d_transpose_ncw],
}

# function reflection for tracing
@@ -125,6 +127,7 @@ def __init__(self, allow_duplicate=False):
topi.nn.bitserial_conv2d_nhwc: lambda x: setattr(topi.nn, 'bitserial_conv2d_nhwc', x),
topi.nn.bitserial_dense: lambda x: setattr(topi.nn, 'bitserial_dense', x),
topi.nn.deformable_conv2d_nchw: lambda x: setattr(topi.nn, 'deformable_conv2d_nchw', x),
topi.nn.conv1d_transpose_ncw: lambda x: setattr(topi.nn, 'conv1d_transpose_ncw', x),
}

self.allow_duplicate = allow_duplicate
@@ -214,6 +217,15 @@ def _topi_nn_conv2d_transpose_nchw(*args, **kwargs):
s = topi.generic.schedule_conv2d_transpose_nchw([C])
return s, [A, W, C]

@register("topi_nn_conv1d_transpose_ncw")
def _topi_nn_conv1d_transpose_ncw(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
A, W = args[:2]
C = topi.nn.conv1d_transpose_ncw(*args, **kwargs)
s = topi.generic.schedule_conv1d_transpose_ncw([C])
return s, [A, W, C]

@register("topi_nn_dense")
def _topi_nn_dense(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
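For reference, the registered template can also be exercised by hand outside of autotvm. A minimal sketch assuming the TVM 0.6-era APIs; shapes are illustrative:

import tvm
import topi

A = tvm.placeholder((1, 16, 32), name='A')   # data in NCW layout
W = tvm.placeholder((16, 8, 3), name='W')    # kernel laid out (in_channels, out_channels, width)
# Compute, schedule, and build on the CPU via the generic fallback.
C = topi.nn.conv1d_transpose_ncw(A, W, (2,), (1,), 'float32')
with tvm.target.create('llvm'):
    s = topi.generic.schedule_conv1d_transpose_ncw([C])
f = tvm.build(s, [A, W, C], 'llvm')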
1 change: 1 addition & 0 deletions python/tvm/relay/_parser.py
@@ -141,6 +141,7 @@ def __call__(self, args, attrs, type_args):
"nn.softmax": op.nn.softmax,
"reshape": op.reshape,
"nn.conv2d_transpose": op.nn.conv2d_transpose,
"nn.conv1d_transpose": op.nn.conv1d_transpose,
"concatenate": op.concatenate,
"nn.dropout": op.nn.dropout_raw,
"zeros": op.zeros,
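A rough sketch of what this mapping enables, assuming the 0.6-era text parser; the exact text-format syntax here is an assumption:

from tvm import relay

# Parse relay source text that calls the new op.
mod = relay.fromtext('''
v0.0.4
fn (%data: Tensor[(1, 16, 32), float32],
    %weight: Tensor[(16, 8, 3), float32]) {
  nn.conv1d_transpose(%data, %weight, channels=8, kernel_size=[3])
}
''')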
20 changes: 7 additions & 13 deletions python/tvm/relay/frontend/mxnet.py
@@ -207,29 +207,23 @@ def _mx_conv1d_transpose(inputs, attrs):
if data_layout != "NCW":
raise tvm.error.OpAttributeInvalid(
'Only "NCW" data layout is supported for 1D Convolution')
data_layout = "NCHW"
channel_axis = 1
kernel_layout = "OIHW"

kernel_layout = "OIW"
new_attrs = {}
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["kernel_size"] = (1,) + attrs.get_int_tuple("kernel")
new_attrs["strides"] = (1,) + attrs.get_int_tuple("stride", (1,))
new_attrs["output_padding"] = (0,) + attrs.get_int_tuple("adj", (0,))
new_attrs["padding"] = (0,) + attrs.get_int_tuple("pad", (0,))
new_attrs["dilation"] = (1,) + attrs.get_int_tuple("dilate", (1,))
new_attrs["kernel_size"] = attrs.get_int_tuple("kernel")
new_attrs["strides"] = attrs.get_int_tuple("stride", (1,))
new_attrs["output_padding"] = attrs.get_int_tuple("adj", (0,))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0,))
new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1,))
new_attrs["groups"] = attrs.get_int("num_group", 1)
new_attrs["data_layout"] = data_layout
new_attrs["kernel_layout"] = kernel_layout
use_bias = not attrs.get_bool("no_bias", True)
data = _op.expand_dims(inputs[0], axis=2)
kernel = _op.expand_dims(inputs[1], axis=2)
res = _op.nn.conv2d_transpose(data, kernel, **new_attrs)

res = _op.nn.conv1d_transpose(inputs[0], inputs[1], **new_attrs)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
res = _op.squeeze(res, axis=[2])
return res


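The converter previously lifted the 1D case into conv2d_transpose by expanding and then squeezing a dummy height axis; it now maps directly onto the new op. A rough end-to-end sketch, assuming the Gluon and 0.6-era Relay frontend APIs; shapes are illustrative:

import mxnet as mx
from tvm import relay

# A 1D deconvolution layer in Gluon.
net = mx.gluon.nn.Conv1DTranspose(channels=8, kernel_size=3, strides=2,
                                  padding=1, in_channels=16)
net.initialize()
# The imported module should now contain nn.conv1d_transpose directly,
# instead of the old expand_dims -> conv2d_transpose -> squeeze pattern.
mod, params = relay.frontend.from_mxnet(net, shape={'data': (1, 16, 32)})
print(mod)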
31 changes: 31 additions & 0 deletions python/tvm/relay/op/nn/_nn.py
@@ -348,6 +348,37 @@ def legalize_conv2d_transpose(attrs, inputs, types):

reg.register_pattern("nn.conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)

# conv1d_transpose
@reg.register_compute("nn.conv1d_transpose")
def compute_conv1d_transpose(attrs, inputs, out_dtype, target):
"""Compute definition of conv1d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
assert layout == "NCW", "conv1d_transpose ncw only supported"
assert dilation == (1,), "conv1d_transpose dilation is not supported"
assert groups == 1, "conv1d_transpose groups == 1 only supported"
out = topi.nn.conv1d_transpose_ncw(
inputs[0], inputs[1], strides, padding, out_dtype)
output_padding = get_const_tuple(attrs.output_padding)
out = topi.nn.pad(out,
[0, 0, 0], [0, 0, output_padding[0]])
return [out]


@reg.register_schedule("nn.conv1d_transpose")
def schedule_conv1d_transpose(attrs, outs, target):
"""Schedule definition of conv1d_transpose"""
with target:
return topi.generic.schedule_conv1d_transpose_ncw(outs)

reg.register_pattern("nn.conv1d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)

# bias_add
reg.register_schedule("nn.bias_add", schedule_injective)
reg.register_pattern("nn.bias_add", OpPattern.BROADCAST)
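As a reference for what this compute produces, a NumPy sketch of 1D transposed convolution in NCW layout under the same restrictions (dilation 1, groups 1); the kernel is laid out (in_channels, out_channels, width), and output_padding extends the right edge exactly as the pad call above does:

import numpy as np

def conv1d_transpose_ncw_ref(data, kernel, stride, pad, output_padding):
    batch, in_c, in_w = data.shape
    _, out_c, k_w = kernel.shape
    out_w = (in_w - 1) * stride - 2 * pad + k_w + output_padding
    out = np.zeros((batch, out_c, out_w), dtype=data.dtype)
    for n in range(batch):
        for c in range(in_c):
            for w in range(in_w):
                # Each input element scatters a scaled copy of the kernel;
                # positions cropped by the padding fall outside [0, out_w).
                for k in range(k_w):
                    ow = w * stride - pad + k
                    if 0 <= ow < out_w:
                        out[n, :, ow] += data[n, c, w] * kernel[c, :, k]
    return out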
66 changes: 66 additions & 0 deletions python/tvm/relay/op/nn/nn.py
@@ -257,6 +257,72 @@ def conv2d_transpose(data,
kernel_layout, out_layout, output_padding, out_dtype)


def conv1d_transpose(data,
weight,
strides=(1,),
padding=(0,),
dilation=(1,),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCW",
kernel_layout="OIW",
out_layout="",
output_padding=(0,),
out_dtype=""):
"""One dimensional transposed convolution operator.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : Tuple[int], optional
The strides of convolution.
padding : Tuple[int], optional
The padding of convolution on both sides of inputs.
dilation : Tuple[int], optional
Specifies the dilation rate to be used for dilated convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
The spatial dimensions of the convolution kernel.
groups : int, optional
Number of groups for grouped convolution.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : str, optional
Layout of the output; by default, out_layout is the same as data_layout.
output_padding : Tuple[int], optional
Additional zero-padding to be added to one side of the output.
out_dtype : str, optional
Specifies the output data type for mixed precision conv1d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.conv1d_transpose(data, weight, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
kernel_layout, out_layout, output_padding, out_dtype)
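A minimal usage sketch for the new Python API; shapes are illustrative, and note that for the transposed op the weight is laid out (in_channels, out_channels, width):

from tvm import relay

data = relay.var("data", shape=(1, 16, 32))
weight = relay.var("weight", shape=(16, 8, 3))
out = relay.nn.conv1d_transpose(data, weight, strides=(2,), padding=(1,),
                                channels=8, kernel_size=(3,))
func = relay.Function([data, weight], out)
# Inferred output type: Tensor[(1, 8, 63), float32], since
# (32 - 1) * 2 - 2 * 1 + 3 = 63.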


def softmax(data, axis=-1):
r"""Computes softmax.
(Diffs for the remaining changed files are not shown.)