diff --git a/paddle/phi/infermeta/multiary.cc b/paddle/phi/infermeta/multiary.cc
index 6b238209d4ac2..ef94266b4ebe1 100644
--- a/paddle/phi/infermeta/multiary.cc
+++ b/paddle/phi/infermeta/multiary.cc
@@ -1424,16 +1424,18 @@ static void Interpolate1DInferShapeCheck(
   if (scale_tensor) {
     auto scale_tensor_dim = scale_tensor.dims();
     PADDLE_ENFORCE_EQ(
-        scale_tensor_dim.size(),
-        1,
+        scale_tensor_dim.size() == 1 || scale_tensor_dim.size() == 0,
+        true,
         phi::errors::InvalidArgument(
-            "Scale's dimension size must be 1, but got dimension = %d .",
+            "Scale's dimension size must be 1 or 0, but got dimension = %d .",
             scale_tensor_dim.size()));
-    PADDLE_ENFORCE_EQ(scale_tensor_dim[0],
-                      1,
-                      phi::errors::InvalidArgument(
-                          "Scale's shape must be 1, but got shape = %d .",
-                          scale_tensor_dim[0]));
+    if (scale_tensor_dim.size() == 1) {
+      PADDLE_ENFORCE_EQ(scale_tensor_dim[0],
+                        1,
+                        phi::errors::InvalidArgument(
+                            "Scale's shape must be 1, but got shape = %d .",
+                            scale_tensor_dim[0]));
+    }
     out_w_tmp = -1;
   } else {
     if (scale.size() > 0) {
@@ -1550,19 +1552,25 @@ static void Interpolate2DInferShapeCheck(
   }
 
   int out_h_tmp, out_w_tmp;
+
   if (scale_tensor) {
     auto scale_tensor_dim = scale_tensor.dims();
     PADDLE_ENFORCE_EQ(
-        scale_tensor_dim.size(),
-        1,
+        scale_tensor_dim.size() == 1 || scale_tensor_dim.size() == 0,
+        true,
         phi::errors::InvalidArgument(
-            "Scale's dimension size must be 1, but got dimension = %d .",
+            "Scale's dimension size must be 1 or 0, but got dimension = %d .",
             scale_tensor_dim.size()));
-    PADDLE_ENFORCE_EQ(scale_tensor_dim[0] == 2 || scale_tensor_dim[0] == 1,
-                      true,
-                      phi::errors::InvalidArgument(
-                          "Scale's shape must be 2 or 1, but got shape = %d .",
-                          scale_tensor_dim[0]));
+
+    if (scale_tensor_dim.size() == 1) {
+      PADDLE_ENFORCE_EQ(
+          scale_tensor_dim[0] == 2 || scale_tensor_dim[0] == 1,
+          true,
+          phi::errors::InvalidArgument(
+              "Scale's shape must be 2 or 1, but got shape = %d .",
+              scale_tensor_dim[0]));
+    }
+
     out_h_tmp = -1;
     out_w_tmp = -1;
   } else {
@@ -1695,10 +1703,10 @@ static void Interpolate3DInferShapeCheck(
   if (scale_tensor) {
     auto scale_tensor_dim = scale_tensor.dims();
     PADDLE_ENFORCE_EQ(
-        scale_tensor_dim.size(),
-        1,
+        scale_tensor_dim.size() == 1 || scale_tensor_dim.size() == 0,
+        true,
         phi::errors::InvalidArgument(
-            "Scale's dimension size must be 1, but got size = %d .",
+            "Scale's dimension size must be 1 or 0, but got size = %d .",
             scale_tensor_dim.size()));
     PADDLE_ENFORCE_EQ(scale_tensor_dim[0] == 3 || scale_tensor_dim[0] == 1,
                       true,
diff --git a/paddle/phi/kernels/funcs/interpolate_function.h b/paddle/phi/kernels/funcs/interpolate_function.h
index 89b02317f3e95..53b0577fc29d7 100644
--- a/paddle/phi/kernels/funcs/interpolate_function.h
+++ b/paddle/phi/kernels/funcs/interpolate_function.h
@@ -85,12 +85,14 @@ inline std::vector<int> get_new_shape(
   std::vector<int> vec_new_shape;
   for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
     auto tensor = list_new_shape_tensor[i];
-    PADDLE_ENFORCE_EQ(
-        tensor->dims(),
-        phi::make_ddim({1}),
-        errors::InvalidArgument("The shape of dimension tensor should be [1],"
-                                "but received d%.",
-                                tensor->dims()));
+    PADDLE_ENFORCE_EQ(tensor->dims() == phi::make_ddim({1}) ||
+                          tensor->dims() == phi::make_ddim({}),
+                      true,
+                      errors::InvalidArgument(
+                          "The shape of dimension tensor should be [1] or [], "
+                          "but received %s.",
+                          tensor->dims()));
+
 #ifdef PADDLE_WITH_XPU
     if (tensor->place().GetType() == phi::AllocationType::XPU) {
       DenseTensor temp;
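Note: the relaxed checks in the two files above accept either a one-element 1-D scale tensor or a 0-D (zero-dimensional) scalar tensor, and the element check on scale_tensor_dim[0] now only runs when a first dimension actually exists. A minimal Python sketch of the two shapes being distinguished (illustrative only, not part of the patch; the variable names are hypothetical):

    import paddle

    scale_1d = paddle.full([1], 2.0)  # dims().size() == 1, so dims()[0] is checked
    scale_0d = paddle.full([], 2.0)   # dims().size() == 0, so there is no dims()[0]

    print(scale_1d.shape)  # [1]
    print(scale_0d.shape)  # []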
diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py
index ed7b1375e54aa..f274752c1c875 100755
--- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py
@@ -816,5 +816,80 @@ def test_main(self):
         np.testing.assert_allclose(x_g_np_1, x_g_np_2, atol=1e-2, rtol=1e-2)
 
+
+class TestBilinearInterpOpAPI_0DTensorScale(unittest.TestCase):
+    def test_case(self):
+        import paddle
+
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+        else:
+            place = core.CPUPlace()
+        with fluid.dygraph.guard(place):
+            input_data = np.random.random((2, 3, 6, 6)).astype("float32")
+            input_x = paddle.to_tensor(input_data)
+            expect_res = bilinear_interp_np(
+                input_data, out_h=12, out_w=12, align_corners=False
+            )
+            scale_0d = paddle.full([], 2)
+            out = interpolate(
+                x=input_x,
+                scale_factor=scale_0d,
+                mode="bilinear",
+                align_corners=False,
+            )
+            np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05)
+
+
+class TestBilinearInterpOpAPI_0DTensorScale2(unittest.TestCase):
+    def test_case(self):
+        import paddle
+
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+        else:
+            place = core.CPUPlace()
+        with fluid.dygraph.guard(place):
+            input_data = np.random.random((2, 3, 6, 6)).astype("float32")
+            input_x = paddle.to_tensor(input_data)
+            expect_res = bilinear_interp_np(
+                input_data, out_h=12, out_w=12, align_corners=False
+            )
+            scale_0d = [paddle.full([], 2), paddle.full([], 2)]
+            out = interpolate(
+                x=input_x,
+                scale_factor=scale_0d,
+                mode="bilinear",
+                align_corners=False,
+            )
+            np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05)
+
+
+class TestBilinearInterpOpAPI_0DTensorOutSize(unittest.TestCase):
+    def test_case(self):
+        import paddle
+
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+        else:
+            place = core.CPUPlace()
+        with fluid.dygraph.guard(place):
+            input_data = np.random.random((2, 3, 6, 6)).astype("float32")
+            input_x = paddle.to_tensor(input_data)
+            expect_res = bilinear_interp_np(
+                input_data, out_h=12, out_w=12, align_corners=False
+            )
+            output_size = [
+                paddle.full([], 12, dtype="int32"),
+                paddle.full([], 12, dtype="int32"),
+            ]
+            out = interpolate(
+                x=input_x,
+                size=output_size,
+                mode="bilinear",
+                align_corners=False,
+            )
+            np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05)
+
 
 if __name__ == "__main__":
     unittest.main()
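Note: the three test classes above cover the three argument forms this patch enables: a single 0-D scale tensor, a list of per-axis 0-D scale tensors, and a size list whose entries are 0-D tensors. A condensed sketch of the same calls (illustrative only; the variable names are hypothetical, and the expected shape assumes the 6x6 input and factor 2 used in the tests):

    import paddle
    from paddle.nn.functional import interpolate

    x = paddle.rand([2, 3, 6, 6])
    y1 = interpolate(x, scale_factor=paddle.full([], 2), mode="bilinear")
    y2 = interpolate(
        x, scale_factor=[paddle.full([], 2), paddle.full([], 2)], mode="bilinear"
    )
    y3 = interpolate(
        x,
        size=[paddle.full([], 12, dtype="int32"), paddle.full([], 12, dtype="int32")],
        mode="bilinear",
    )
    assert y1.shape == y2.shape == y3.shape == [2, 3, 12, 12]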
diff --git a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
index 11d85b52446b2..2d07ab31334df 100644
--- a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
+++ b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
@@ -1388,6 +1388,72 @@ def test_atan2(self):
         self.assertEqual(x1.grad.numpy(), 0.5)
         self.assertEqual(x2.grad.numpy(), 0)
 
+    def test_interpolate(self):
+        from paddle.nn.functional import interpolate
+
+        input_x = paddle.rand([2, 3, 6, 6])
+        input_x.stop_gradient = False
+        origin_result = interpolate(
+            x=input_x, size=[12, 12], mode="bilinear", align_corners=False
+        )
+
+        output_size = [
+            paddle.full([], 12, dtype="int32"),
+            paddle.full([], 12, dtype="int32"),
+        ]
+        out1 = interpolate(
+            x=input_x, size=output_size, mode="bilinear", align_corners=False
+        )
+        out1.backward()
+
+        self.assertEqual(out1.shape, [2, 3, 12, 12])
+        self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])
+
+        scale_1 = [paddle.full([], 2), paddle.full([], 2)]
+        out2 = interpolate(
+            x=input_x,
+            scale_factor=scale_1,
+            mode="bilinear",
+            align_corners=False,
+        )
+        out2.backward()
+
+        self.assertEqual(out2.shape, [2, 3, 12, 12])
+        self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])
+
+        scale_2 = paddle.full([], 2)
+        out3 = interpolate(
+            x=input_x,
+            scale_factor=scale_2,
+            mode="bilinear",
+            align_corners=False,
+        )
+        out3.backward()
+
+        # for coverage
+        scale_3 = paddle.full([1], 2)
+        input_3d = paddle.rand([2, 3, 6])
+        out4 = interpolate(
+            x=input_3d,
+            scale_factor=scale_3,
+            mode="LINEAR",
+            align_corners=False,
+            data_format="NCW",
+        )
+
+        self.assertEqual(out3.shape, [2, 3, 12, 12])
+        self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])
+
+        np.testing.assert_allclose(
+            origin_result.numpy(), out1.numpy(), rtol=1e-05
+        )
+        np.testing.assert_allclose(
+            origin_result.numpy(), out2.numpy(), rtol=1e-05
+        )
+        np.testing.assert_allclose(
+            origin_result.numpy(), out3.numpy(), rtol=1e-05
+        )
+
     def test_maseked_select(self):
         x = paddle.rand([])
         x.stop_gradient = False
@@ -2223,6 +2289,41 @@
 
         self.assertEqual(res[0].shape, ())
 
+    @prog_scope()
+    def test_interpolate(self):
+        from paddle.nn.functional import interpolate
+
+        input_x = paddle.rand([2, 3, 6, 6])
+        input_x.stop_gradient = False
+
+        output_size = [
+            paddle.full([], 12, dtype="int32"),
+            paddle.full([], 12, dtype="int32"),
+        ]
+
+        out1 = interpolate(
+            x=input_x, size=output_size, mode="bilinear", align_corners=False
+        )
+        paddle.static.append_backward(out1.sum())
+        prog = paddle.static.default_main_program()
+        res1 = self.exe.run(prog, feed={}, fetch_list=[out1, input_x.grad_name])
+
+        scale_1 = paddle.full([], 2)
+        out2 = interpolate(
+            x=input_x,
+            scale_factor=scale_1,
+            mode="bilinear",
+            align_corners=False,
+        )
+        paddle.static.append_backward(out2.sum())
+        prog = paddle.static.default_main_program()
+        res2 = self.exe.run(prog, feed={}, fetch_list=[out2, input_x.grad_name])
+
+        self.assertEqual(res1[0].shape, (2, 3, 12, 12))
+        self.assertEqual(res1[1].shape, (2, 3, 6, 6))
+        self.assertEqual(res2[0].shape, (2, 3, 12, 12))
+        self.assertEqual(res2[1].shape, (2, 3, 6, 6))
+
     @prog_scope()
     def test_maseked_select(self):
         x = paddle.rand([])
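Note: the second hunk above exercises the same API under the static graph. A condensed sketch of that pattern outside the test harness (illustrative only; it assumes the patched static-graph path and creates a local executor in place of the test's self.exe):

    import paddle
    from paddle.nn.functional import interpolate

    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.rand([2, 3, 6, 6])
        x.stop_gradient = False
        out = interpolate(x, scale_factor=paddle.full([], 2), mode="bilinear")
        paddle.static.append_backward(out.sum())
        exe = paddle.static.Executor()
        res = exe.run(fetch_list=[out, x.grad_name])
        assert res[0].shape == (2, 3, 12, 12)
    paddle.disable_static()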
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py
index c0597d0ad53ea..f6f64aefe9db7 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py
@@ -883,6 +883,61 @@ def test_allclose(self):
         y = paddle.full([], 0.6)
         self.assertFalse(paddle.allclose(x, y))
 
+    def test_interpolate(self):
+        from paddle.nn.functional import interpolate
+
+        input_x = paddle.rand([2, 3, 6, 6])
+        input_x.stop_gradient = False
+        origin_result = interpolate(
+            x=input_x, size=[12, 12], mode="bilinear", align_corners=False
+        )
+
+        output_size = [
+            paddle.full([], 12, dtype="int32"),
+            paddle.full([], 12, dtype="int32"),
+        ]
+        out1 = interpolate(
+            x=input_x, size=output_size, mode="bilinear", align_corners=False
+        )
+        out1.backward()
+
+        self.assertEqual(out1.shape, [2, 3, 12, 12])
+        self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])
+
+        scale_1 = [paddle.full([], 2), paddle.full([], 2)]
+        out2 = interpolate(
+            x=input_x,
+            scale_factor=scale_1,
+            mode="bilinear",
+            align_corners=False,
+        )
+        out2.backward()
+
+        self.assertEqual(out2.shape, [2, 3, 12, 12])
+        self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])
+
+        scale_2 = paddle.full([], 2)
+        out3 = interpolate(
+            x=input_x,
+            scale_factor=scale_2,
+            mode="bilinear",
+            align_corners=False,
+        )
+        out3.backward()
+
+        self.assertEqual(out3.shape, [2, 3, 12, 12])
+        self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])
+
+        np.testing.assert_allclose(
+            origin_result.numpy(), out1.numpy(), rtol=1e-05
+        )
+        np.testing.assert_allclose(
+            origin_result.numpy(), out2.numpy(), rtol=1e-05
+        )
+        np.testing.assert_allclose(
+            origin_result.numpy(), out3.numpy(), rtol=1e-05
+        )
+
     def test_equalall(self):
         x = paddle.full([], 0.5)
         y = paddle.full([], 0.6)
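Note: the XPU variant above mirrors the dygraph test, minus the extra linear-mode coverage case. A small sketch of how device selection for such a run might be guarded (illustrative only; not how the test file itself picks its device):

    import paddle

    if paddle.is_compiled_with_xpu():
        paddle.set_device("xpu")
    else:
        paddle.set_device("cpu")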
diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py
index d9f5b0b160dc0..57a1e0023d4fc 100644
--- a/python/paddle/nn/functional/common.py
+++ b/python/paddle/nn/functional/common.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import numpy
+
 import paddle
 from paddle import _C_ops, _legacy_C_ops
 from paddle.fluid.layer_helper import LayerHelper
@@ -102,6 +104,10 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
             y = F.unfold(x, [3, 3], 1, 1, 1)
     """
 
+    helper = LayerHelper("unfold", **locals())
+
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')
+
     assert len(x.shape) == 4, "input should be the format of [N, C, H, W]"
 
     if isinstance(kernel_sizes, int):
@@ -145,9 +151,6 @@
     if in_dygraph_mode():
         return _C_ops.unfold(x, kernel_sizes, strides, paddings, dilations)
 
-    helper = LayerHelper("unfold", **locals())
-
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="unfold",
@@ -432,9 +435,11 @@ def interpolate(
         ):
             if len(size) == 0:
                 raise ValueError("output size can not be empty")
+        if size is None:
+            raise ValueError("output size can not be None in AREA mode")
         if len(x.shape) == 3:
             return paddle.nn.functional.adaptive_avg_pool1d(x, size)
         elif len(x.shape) == 4:
             return paddle.nn.functional.adaptive_avg_pool2d(x, size)
         elif len(x.shape) == 5:
             return paddle.nn.functional.adaptive_avg_pool3d(x, size)
@@ -494,9 +500,10 @@ def _is_list_or_turple_(data):
                 out_shape = list(out_shape.numpy())
             else:
                 out_shape = list(out_shape)
+
             for i, dim in enumerate(out_shape):
                 if isinstance(dim, Variable):
-                    out_shape[i] = dim.numpy()[0]
+                    out_shape[i] = dim.numpy().item()
             if not (_is_list_or_turple_(out_shape)):
                 raise TypeError("size should be a list or tuple or Variable.")
             # Validate the shape
@@ -568,11 +575,18 @@
 
     else:
         if in_dynamic_mode() and isinstance(scale, Variable):
-            scale = list(scale.numpy())
+            if scale.shape == []:
+                scale = float(scale)
+            else:
+                scale = list(scale.numpy())
         if isinstance(scale, Variable):
             scale.stop_gradient = True
             inputs["Scale"] = scale
-        elif isinstance(scale, float) or isinstance(scale, int):
+        elif (
+            isinstance(scale, float)
+            or isinstance(scale, int)
+            or isinstance(scale, numpy.ndarray)
+        ):
             if scale <= 0:
                 raise ValueError("Attr(scale) should be greater than zero.")
             scale_list = []
@@ -2253,6 +2267,11 @@ def fold(
             # y.shape = [2,3,4,5]
     """
 
+
+    helper = LayerHelper("fold", **locals())
+
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fold')
+
    assert len(x.shape) == 3, "input should be the format of [N, C, L]"
 
     def _is_list_or_turple_(data):
@@ -2322,9 +2341,6 @@
             dilations,
         )
     else:
-        helper = LayerHelper("fold", **locals())
-
-        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fold')
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
             type="fold",
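Note on the out_shape change above: numpy's .item() extracts a Python scalar from both a 0-D array and a single-element 1-D array, whereas indexing with [0] raises on 0-D input; the same motivation applies to converting a 0-D scale Variable with float(scale). A minimal numpy sketch (illustrative only):

    import numpy as np

    d0 = np.array(12)    # what a 0-D size tensor converts to
    d1 = np.array([12])  # what a [1]-shaped size tensor converts to

    assert d0.item() == d1.item() == 12  # .item() handles both
    # d0[0] would raise IndexError: a 0-d array cannot be indexed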