Fix documentation for bilinear upsampling and add unit test (apache#14035)

* Bilinear upsampling documentation and test

* test trial

* Edit test

* Addressed review comments

* Fix lint error

* Test target shape
vandanavk authored and haohuw committed Jun 23, 2019
1 parent 284aa41 commit f60d93d
Showing 3 changed files with 47 additions and 11 deletions.
4 changes: 3 additions & 1 deletion src/operator/nn/upsampling-inl.h
@@ -59,7 +59,9 @@ struct UpSamplingParam : public dmlc::Parameter<UpSamplingParam> {
   .set_range(1, 1000)
   .describe("Up sampling scale");
   DMLC_DECLARE_FIELD(num_filter)
-  .describe("Input filter. Only used by bilinear sample_type.")
+  .describe("Input filter. Only used by bilinear sample_type. "
+            "Since bilinear upsampling uses deconvolution, num_filter "
+            "is set to the number of channels.")
   .set_default(0);
   DMLC_DECLARE_FIELD(sample_type)
   .add_enum("nearest", up_enum::kNearest)
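A minimal usage sketch (not part of this commit) of the constraint the new text documents: with sample_type='bilinear' the operator takes data plus a weight tensor, and num_filter must match the input channel count. Shapes and values here are illustrative only.

    import mxnet as mx

    # 3-channel NCHW input; for bilinear, num_filter must equal the channel count (3).
    data = mx.nd.random.uniform(shape=(1, 3, 4, 4))
    # Deconvolution kernel of width 2*scale - scale%2 = 4 for scale=2; ones are a
    # placeholder here -- a real bilinear kernel is built in the test further below.
    weight = mx.nd.ones((1, 3, 4, 4))
    out = mx.nd.UpSampling(data, weight, scale=2, sample_type='bilinear',
                           num_filter=3, num_args=2)
    print(out.shape)  # (1, 3, 8, 8)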
7 changes: 5 additions & 2 deletions src/operator/nn/upsampling.cc
@@ -121,7 +121,9 @@ struct UpSamplingGrad {
 DMLC_REGISTER_PARAMETER(UpSamplingParam);

 NNVM_REGISTER_OP(UpSampling)
-.describe("Performs nearest neighbor/bilinear up sampling to inputs.")
+.describe("Performs nearest neighbor/bilinear up sampling to inputs. "
+          "Bilinear upsampling makes use of deconvolution. Therefore, "
+          "provide 2 inputs - data and weight. ")
 .set_num_inputs([](const NodeAttrs& attrs) {
   const UpSamplingParam& params = nnvm::get<UpSamplingParam>(attrs.parsed);
   return params.sample_type == up_enum::kNearest ? params.num_args : 2;
@@ -149,7 +151,8 @@ NNVM_REGISTER_OP(UpSampling)
 .set_attr<FCompute>("FCompute<cpu>", UpSamplingCompute<cpu>)
 .set_attr<nnvm::FGradient>("FGradient", UpSamplingGrad{"_backward_UpSampling"})
 .set_attr<std::string>("key_var_num_args", "num_args")
-.add_argument("data", "NDArray-or-Symbol[]", "Array of tensors to upsample")
+.add_argument("data", "NDArray-or-Symbol[]", "Array of tensors to upsample. "
+              "For bilinear upsampling, there should be 2 inputs - 1 data and 1 weight.")
 .add_arguments(UpSamplingParam::__FIELDS__())
 .set_attr<nnvm::FSetInputVarAttrOnCompose>("FSetInputVarAttrOnCompose",
   [](const nnvm::NodeAttrs& attrs, nnvm::NodePtr var, const int index) {
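As a companion sketch (also not from the commit), the symbolic form below shows the two inputs the revised description calls for and uses infer_shape to recover the weight shape the operator expects; the shapes are hypothetical.

    import mxnet as mx

    up = mx.sym.UpSampling(mx.sym.Variable('data'), mx.sym.Variable('weight'),
                           sample_type='bilinear', scale=2, num_filter=3, num_args=2)
    arg_shapes, out_shapes, _ = up.infer_shape(data=(1, 3, 4, 4))
    # Expected: weight (1, 3, 4, 4), output (1, 3, 8, 8)
    print(dict(zip(up.list_arguments(), arg_shapes)))
    print(out_shapes)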
47 changes: 39 additions & 8 deletions tests/python/unittest/test_operator.py
@@ -1526,17 +1526,32 @@ def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
         assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)


-def check_bilinear_upsampling_with_shape(shapes, scale, root_scale):
-    arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
-    arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
-
-    up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='bilinear', scale=root_scale)
+def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
+    def _init_bilinear(arr, f):
+        weight = np.zeros(np.prod(arr.shape), dtype='float32')
+        shape = arr.shape
+        c = (2 * f - 1 - f % 2) / (2. * f)
+        for i in range(np.prod(shape)):
+            x = i % shape[3]
+            y = (i // shape[3]) % shape[2]
+            weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
+        arr[:] = weight.reshape(shape)
+        return arr
+
+    up = mx.sym.UpSampling(mx.sym.Variable("data"),
+                           mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
+                           num_filter=num_filter, num_args=2)
+    arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
+    arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
+           'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
+
+    arr_grad = [mx.nd.empty(s) for s in arg_shapes]
     exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
     exe.forward(is_train=True)
+    out = exe.outputs[0].asnumpy()
     exe.backward(exe.outputs)
-    for k in range(len(shapes)):
-        name = 'arg_%d'%k
-        assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
+    target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
+    assert out.shape == data_shape[:2] + target_shape


 @with_seed()
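The _init_bilinear helper above fills the deconvolution weight with a separable tent (triangle) filter. A standalone sketch of the same arithmetic for root_scale f = 2:

    import numpy as np

    f = 2                                 # upsampling factor (root_scale)
    size = 2 * f - f % 2                  # kernel width, as in the test: 4
    c = (2 * f - 1 - f % 2) / (2. * f)    # tent centre: 0.75
    profile = np.array([1 - abs(x / f - c) for x in range(size)])
    print(profile)                        # [0.25 0.75 0.75 0.25]
    # weight[i] = (1 - |x/f - c|) * (1 - |y/f - c|), i.e. the outer product:
    print(np.outer(profile, profile))     # the 4x4 kernel _init_bilinear produces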
@@ -1549,6 +1564,22 @@ def test_nearest_upsampling():
         check_nearest_upsampling_with_shape(shapes, scale, root_scale)


+@with_seed()
+def test_bilinear_upsampling():
+    rootscale = [2,3]
+    scales = [1,2,3]
+    filters = [1,2,3]
+    bases = [1,2,3]
+    for params in itertools.product(rootscale, scales, filters, bases):
+        root_scale, scale, num_filter, base = params
+        # bilinear upsampling takes only 1 data and 1 weight
+        # multi input mode is not applicable
+        dimension = base*root_scale*scale
+        kernel = 2 * root_scale - root_scale % 2
+        data_shape = (1, num_filter, dimension, dimension)
+        weight_shape = (1, num_filter, kernel, kernel)
+        check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
+
 @with_seed()
 def test_batchnorm_training():
     def check_batchnorm_training(stype):
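One iteration of the parameter sweep in test_bilinear_upsampling, worked by hand for the tuple (root_scale, scale, num_filter, base) = (2, 3, 2, 1), as a sketch of the shape arithmetic only:

    root_scale, scale, num_filter, base = 2, 3, 2, 1
    dimension = base * root_scale * scale                # input H = W = 6
    kernel = 2 * root_scale - root_scale % 2             # deconvolution kernel width: 4
    data_shape = (1, num_filter, dimension, dimension)   # (1, 2, 6, 6)
    weight_shape = (1, num_filter, kernel, kernel)       # (1, 2, 4, 4)
    # The checker asserts out.shape == data_shape[:2] + (H*root_scale, W*root_scale):
    print(data_shape[:2] + (dimension * root_scale,) * 2)  # (1, 2, 12, 12)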
