Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

support 0d tensor for interpolate #49929

Merged
merged 9 commits into from
Jan 31, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 27 additions & 19 deletions paddle/phi/infermeta/multiary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1424,16 +1424,18 @@ static void Interpolate1DInferShapeCheck(
if (scale_tensor) {
auto scale_tensor_dim = scale_tensor.dims();
PADDLE_ENFORCE_EQ(
scale_tensor_dim.size(),
1,
scale_tensor_dim.size() == 1 || scale_tensor_dim.size() == 0,
true,
phi::errors::InvalidArgument(
"Scale's dimension size must be 1, but got dimension = %d .",
"Scale's dimension size must be 1 or 0, but got dimension = %d .",
scale_tensor_dim.size()));
PADDLE_ENFORCE_EQ(scale_tensor_dim[0],
1,
phi::errors::InvalidArgument(
"Scale's shape must be 1, but got shape = %d .",
scale_tensor_dim[0]));
if (scale_tensor_dim.size() == 1) {
PADDLE_ENFORCE_EQ(scale_tensor_dim[0],
1,
phi::errors::InvalidArgument(
"Scale's shape must be 1, but got shape = %d .",
scale_tensor_dim[0]));
}
out_w_tmp = -1;
} else {
if (scale.size() > 0) {
Expand Down Expand Up @@ -1550,19 +1552,25 @@ static void Interpolate2DInferShapeCheck(
}

int out_h_tmp, out_w_tmp;

if (scale_tensor) {
auto scale_tensor_dim = scale_tensor.dims();
PADDLE_ENFORCE_EQ(
scale_tensor_dim.size(),
1,
scale_tensor_dim.size() == 1 || scale_tensor_dim.size() == 0,
true,
phi::errors::InvalidArgument(
"Scale's dimension size must be 1, but got dimension = %d .",
"Scale's dimension size must be 1 or 0, but got dimension = %d .",
scale_tensor_dim.size()));
PADDLE_ENFORCE_EQ(scale_tensor_dim[0] == 2 || scale_tensor_dim[0] == 1,
true,
phi::errors::InvalidArgument(
"Scale's shape must be 2 or 1, but got shape = %d .",
scale_tensor_dim[0]));

if (scale_tensor_dim.size() == 1) {
PADDLE_ENFORCE_EQ(
scale_tensor_dim[0] == 2 || scale_tensor_dim[0] == 1,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

什么情况下scale的shape会是[2]呀 (translation: "Under what circumstances would scale's shape be [2]?")

true,
phi::errors::InvalidArgument(
"Scale's shape must be 2 or 1, but got shape = %d .",
scale_tensor_dim[0]));
}

out_h_tmp = -1;
out_w_tmp = -1;
} else {
Expand Down Expand Up @@ -1695,10 +1703,10 @@ static void Interpolate3DInferShapeCheck(
if (scale_tensor) {
auto scale_tensor_dim = scale_tensor.dims();
PADDLE_ENFORCE_EQ(
scale_tensor_dim.size(),
1,
scale_tensor_dim.size() == 1 || scale_tensor_dim.size() == 0,
true,
phi::errors::InvalidArgument(
"Scale's dimension size must be 1, but got size = %d .",
"Scale's dimension size must be 1 or 0, but got size = %d .",
scale_tensor_dim.size()));
PADDLE_ENFORCE_EQ(scale_tensor_dim[0] == 3 || scale_tensor_dim[0] == 1,
true,
Expand Down
14 changes: 8 additions & 6 deletions paddle/phi/kernels/funcs/interpolate_function.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,12 +85,14 @@ inline std::vector<int> get_new_shape(
std::vector<int> vec_new_shape;
for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
auto tensor = list_new_shape_tensor[i];
PADDLE_ENFORCE_EQ(
tensor->dims(),
phi::make_ddim({1}),
errors::InvalidArgument("The shape of dimension tensor should be [1],"
"but received d%.",
tensor->dims()));
PADDLE_ENFORCE_EQ(tensor->dims() == phi::make_ddim({1}) ||
                      tensor->dims() == phi::make_ddim({}),
                  true,
                  errors::InvalidArgument(
                      "The shape of dimension tensor should be [1] or [], "
                      "but received %s.",
                      tensor->dims()));

#ifdef PADDLE_WITH_XPU
if (tensor->place().GetType() == phi::AllocationType::XPU) {
DenseTensor temp;
Expand Down
75 changes: 75 additions & 0 deletions python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -816,5 +816,80 @@ def test_main(self):
np.testing.assert_allclose(x_g_np_1, x_g_np_2, atol=1e-2, rtol=1e-2)


class TestBilinearInterpOpAPI_0DTensorScale(unittest.TestCase):
    """Bilinear interpolate accepts a single 0-D tensor as scale_factor (dygraph)."""

    def test_case(self):
        import paddle

        # Prefer the CUDA device when this build supports it.
        place = (
            core.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else core.CPUPlace()
        )
        with fluid.dygraph.guard(place):
            x_np = np.random.random((2, 3, 6, 6)).astype("float32")
            x = paddle.to_tensor(x_np)
            # Reference computed by the numpy implementation: scale 2 => 6 -> 12.
            expected = bilinear_interp_np(
                x_np, out_h=12, out_w=12, align_corners=False
            )
            scalar_scale = paddle.full([], 2)  # 0-D (scalar) tensor
            result = interpolate(
                x=x,
                scale_factor=scalar_scale,
                mode="bilinear",
                align_corners=False,
            )
            np.testing.assert_allclose(result.numpy(), expected, rtol=1e-05)


class TestBilinearInterpOpAPI_0DTensorScale2(unittest.TestCase):
    """Bilinear interpolate accepts a list of 0-D tensors as scale_factor (dygraph)."""

    def test_case(self):
        import paddle

        # Prefer the CUDA device when this build supports it.
        place = (
            core.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else core.CPUPlace()
        )
        with fluid.dygraph.guard(place):
            x_np = np.random.random((2, 3, 6, 6)).astype("float32")
            x = paddle.to_tensor(x_np)
            # Reference computed by the numpy implementation: scale 2 => 6 -> 12.
            expected = bilinear_interp_np(
                x_np, out_h=12, out_w=12, align_corners=False
            )
            # One 0-D scale per spatial dimension (H, W).
            per_dim_scales = [paddle.full([], 2), paddle.full([], 2)]
            result = interpolate(
                x=x,
                scale_factor=per_dim_scales,
                mode="bilinear",
                align_corners=False,
            )
            np.testing.assert_allclose(result.numpy(), expected, rtol=1e-05)


class TestBilinearInterpOpAPI_0DTensorOutSize(unittest.TestCase):
    """Bilinear interpolate accepts 0-D int tensors as the output size (dygraph)."""

    def test_case(self):
        import paddle

        # Prefer the CUDA device when this build supports it.
        place = (
            core.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else core.CPUPlace()
        )
        with fluid.dygraph.guard(place):
            x_np = np.random.random((2, 3, 6, 6)).astype("float32")
            x = paddle.to_tensor(x_np)
            # Reference computed by the numpy implementation for a 12x12 output.
            expected = bilinear_interp_np(
                x_np, out_h=12, out_w=12, align_corners=False
            )
            # Target H and W supplied as scalar (0-D) int32 tensors.
            size_tensors = [
                paddle.full([], 12, dtype="int32"),
                paddle.full([], 12, dtype="int32"),
            ]
            result = interpolate(
                x=x,
                size=size_tensors,
                mode="bilinear",
                align_corners=False,
            )
            np.testing.assert_allclose(result.numpy(), expected, rtol=1e-05)


if __name__ == "__main__":
unittest.main()
101 changes: 101 additions & 0 deletions python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -1388,6 +1388,72 @@ def test_atan2(self):
self.assertEqual(x1.grad.numpy(), 0.5)
self.assertEqual(x2.grad.numpy(), 0)

def test_interpolate(self):
    """interpolate() must accept 0-D tensors for both `size` and `scale_factor`.

    Compares every 0-D-tensor variant against the result produced with
    plain python ints, and checks gradients flow back to the input.
    (Fix: removed review-thread text that had been pasted into the middle
    of the first assert_allclose call, breaking the syntax.)
    """
    from paddle.nn.functional import interpolate

    input_x = paddle.rand([2, 3, 6, 6])
    input_x.stop_gradient = False
    # Reference result: size given as ordinary ints.
    origin_result = interpolate(
        x=input_x, size=[12, 12], mode="bilinear", align_corners=False
    )

    # Case 1: size as a list of 0-D int32 tensors.
    output_size = [
        paddle.full([], 12, dtype="int32"),
        paddle.full([], 12, dtype="int32"),
    ]
    out1 = interpolate(
        x=input_x, size=output_size, mode="bilinear", align_corners=False
    )
    out1.backward()

    self.assertEqual(out1.shape, [2, 3, 12, 12])
    self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])

    # Case 2: scale_factor as a list of 0-D tensors (one per spatial dim).
    scale_1 = [paddle.full([], 2), paddle.full([], 2)]
    out2 = interpolate(
        x=input_x,
        scale_factor=scale_1,
        mode="bilinear",
        align_corners=False,
    )
    out2.backward()

    self.assertEqual(out2.shape, [2, 3, 12, 12])
    self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])

    # Case 3: scale_factor as a single 0-D tensor.
    scale_2 = paddle.full([], 2)
    out3 = interpolate(
        x=input_x,
        scale_factor=scale_2,
        mode="bilinear",
        align_corners=False,
    )
    out3.backward()

    # For coverage only: 1-D (shape [1]) scale tensor on a 3-D input;
    # result is intentionally not asserted.
    scale_3 = paddle.full([1], 2)
    input_3d = paddle.rand([2, 3, 6])
    out4 = interpolate(
        x=input_3d,
        scale_factor=scale_3,
        mode="LINEAR",
        align_corners=False,
        data_format="NCW",
    )

    self.assertEqual(out3.shape, [2, 3, 12, 12])
    self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])

    np.testing.assert_allclose(
        origin_result.numpy(), out1.numpy(), rtol=1e-05
    )
    np.testing.assert_allclose(
        origin_result.numpy(), out2.numpy(), rtol=1e-05
    )
    np.testing.assert_allclose(
        origin_result.numpy(), out3.numpy(), rtol=1e-05
    )

def test_maseked_select(self):
x = paddle.rand([])
x.stop_gradient = False
Expand Down Expand Up @@ -2223,6 +2289,41 @@ def test_atan2(self):

self.assertEqual(res[0].shape, ())

@prog_scope()
def test_interpolate(self):
    """Static-graph check: 0-D tensors work as `size` and `scale_factor`.

    Runs forward + appended backward and verifies output and input-grad
    shapes. (Fix: removed review-thread text that had been pasted between
    the final assertEqual lines, breaking the syntax.)
    """
    from paddle.nn.functional import interpolate

    input_x = paddle.rand([2, 3, 6, 6])
    input_x.stop_gradient = False

    # Case 1: size as a list of 0-D int32 tensors.
    output_size = [
        paddle.full([], 12, dtype="int32"),
        paddle.full([], 12, dtype="int32"),
    ]

    out1 = interpolate(
        x=input_x, size=output_size, mode="bilinear", align_corners=False
    )
    paddle.static.append_backward(out1.sum())
    prog = paddle.static.default_main_program()
    res1 = self.exe.run(prog, feed={}, fetch_list=[out1, input_x.grad_name])

    # Case 2: scale_factor as a single 0-D tensor.
    scale_1 = paddle.full([], 2)
    out2 = interpolate(
        x=input_x,
        scale_factor=scale_1,
        mode="bilinear",
        align_corners=False,
    )
    paddle.static.append_backward(out2.sum())
    prog = paddle.static.default_main_program()
    res2 = self.exe.run(prog, feed={}, fetch_list=[out2, input_x.grad_name])

    self.assertEqual(res1[0].shape, (2, 3, 12, 12))
    self.assertEqual(res1[1].shape, (2, 3, 6, 6))
    self.assertEqual(res2[0].shape, (2, 3, 12, 12))
    self.assertEqual(res2[1].shape, (2, 3, 6, 6))

@prog_scope()
def test_maseked_select(self):
x = paddle.rand([])
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -835,6 +835,61 @@ def test_allclose(self):
y = paddle.full([], 0.6)
self.assertFalse(paddle.allclose(x, y))

def test_interpolate(self):
    """0-D tensor `size` / `scale_factor` variants must match the int-based result."""
    from paddle.nn.functional import interpolate

    input_x = paddle.rand([2, 3, 6, 6])
    input_x.stop_gradient = False
    # Reference: size given as ordinary python ints.
    reference = interpolate(
        x=input_x, size=[12, 12], mode="bilinear", align_corners=False
    )

    # size supplied as two 0-D int32 tensors
    size_tensors = [
        paddle.full([], 12, dtype="int32"),
        paddle.full([], 12, dtype="int32"),
    ]
    by_size = interpolate(
        x=input_x, size=size_tensors, mode="bilinear", align_corners=False
    )
    by_size.backward()
    self.assertEqual(by_size.shape, [2, 3, 12, 12])
    self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])

    # scale_factor supplied as a pair of 0-D tensors
    scale_pair = [paddle.full([], 2), paddle.full([], 2)]
    by_scale_list = interpolate(
        x=input_x,
        scale_factor=scale_pair,
        mode="bilinear",
        align_corners=False,
    )
    by_scale_list.backward()
    self.assertEqual(by_scale_list.shape, [2, 3, 12, 12])
    self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])

    # scale_factor supplied as a single 0-D tensor
    scalar_scale = paddle.full([], 2)
    by_scale_scalar = interpolate(
        x=input_x,
        scale_factor=scalar_scale,
        mode="bilinear",
        align_corners=False,
    )
    by_scale_scalar.backward()
    self.assertEqual(by_scale_scalar.shape, [2, 3, 12, 12])
    self.assertEqual(input_x.grad.shape, [2, 3, 6, 6])

    # Every variant must reproduce the reference values.
    for candidate in (by_size, by_scale_list, by_scale_scalar):
        np.testing.assert_allclose(
            reference.numpy(), candidate.numpy(), rtol=1e-05
        )

def test_equalall(self):
x = paddle.full([], 0.5)
y = paddle.full([], 0.6)
Expand Down
Loading