[Zero-dim] Zero-dim Tensor for XPU prelu, softmax, log_softmax #50433

Merged: 7 commits, Feb 17, 2023
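
For context, a minimal sketch of the zero-dim (0-D) behavior this PR targets, adapted from the tests in this diff. It assumes a Paddle build that includes these kernels; the XPU device is only needed to exercise the XPU code paths, the API itself is device-agnostic:

import paddle
import paddle.nn.functional as F

# A 0-D tensor is created with an empty shape list.
x = paddle.full([], 1.0, dtype='float32')
w = paddle.full([], 0.25, dtype='float32')
x.stop_gradient = False

out = F.prelu(x, w)              # 0-D in, 0-D out
out.backward()
print(out.shape, x.grad.shape)   # [] []

print(F.softmax(x).shape)        # []
print(F.log_softmax(x).shape)    # []
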
1 change: 1 addition & 0 deletions paddle/phi/kernels/xpu/log_softmax_grad_kernel.cc
@@ -33,6 +33,7 @@ void LogSoftmaxGradKernel(const Context& dev_ctx,

   // For 0D Tensor
   if (rank == 0) {
+    dev_ctx.template Alloc<T>(x_grad);
     phi::funcs::set_constant(dev_ctx, x_grad, 0.0);
     return;
   }
1 change: 1 addition & 0 deletions paddle/phi/kernels/xpu/log_softmax_kernel.cc
@@ -32,6 +32,7 @@ void LogSoftmaxKernel(const Context& dev_ctx,

   // For 0D Tensor
   if (rank == 0) {
+    dev_ctx.template Alloc<T>(out);
     phi::funcs::set_constant(dev_ctx, out, 0.0);
     return;
   }
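
The hard-coded 0-D result follows from the math: log_softmax over a single element is log(exp(x) / exp(x)) = log(1) = 0 for any x, so the forward output is the constant 0 and the gradient with respect to x is 0, which is exactly what the rank == 0 branches above write. A quick sanity check, assuming a build that contains this change:

import paddle
import paddle.nn.functional as F

x = paddle.full([], 3.0, dtype='float32')
x.stop_gradient = False

out = F.log_softmax(x)    # log(exp(x) / exp(x)) == 0 for a single element
out.backward()

print(out.numpy())        # 0.0
print(x.grad.numpy())     # 0.0
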
16 changes: 12 additions & 4 deletions paddle/phi/kernels/xpu/prelu_grad_kernel.cc
@@ -40,16 +40,24 @@ void PReluGradKernel(const Context& dev_ctx,
   auto x_rank = x_dim.size();
 
   std::vector<int> x_shape(x_rank);
-  for (int i = 0; i < x_rank; i++) {
-    x_shape[i] = x_dim[i];
+  if (x_rank == 0) {
+    x_shape = std::vector<int>({1});
+  } else {
+    for (int i = 0; i < x_rank; i++) {
+      x_shape[i] = x_dim[i];
+    }
   }
 
   auto alpha_dim = alpha.dims();
   auto alpha_rank = alpha_dim.size();
 
   std::vector<int> alpha_shape(alpha_rank);
-  for (int i = 0; i < x_rank; i++) {
-    alpha_shape[i] = alpha_dim[i];
+  if (alpha_rank == 0) {
+    alpha_shape = std::vector<int>({1});
+  } else {
+    for (int i = 0; i < x_rank; i++) {
+      alpha_shape[i] = alpha_dim[i];
+    }
   }

// mode = 0: channel_nchw, slope_shape = {c}, default. meanwhile, xhsape = {n,
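
Both prelu kernels (this backward one and the forward one below) apply the same rule: a 0-D tensor has rank 0 and an empty dims list, so its shape vector is replaced with the one-element shape {1} before calling into xpu::prelu, presumably because the XPU primitive expects a non-empty shape. A minimal Python sketch of that mapping; the helper name is made up for illustration:

def to_xpu_shape(dims):
    # Mirror of the C++ above: a 0-D tensor (empty dims) is handed to the
    # XPU primitive as the 1-element shape [1]; other ranks pass through.
    return [1] if len(dims) == 0 else [int(d) for d in dims]

assert to_xpu_shape([]) == [1]         # 0-D tensor
assert to_xpu_shape([2, 3]) == [2, 3]  # regular tensor is unchanged
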
18 changes: 12 additions & 6 deletions paddle/phi/kernels/xpu/prelu_kernel.cc
@@ -34,20 +34,26 @@ void PReluKernel(const Context& dev_ctx,

   auto x_dim = x.dims();
   auto x_rank = x_dim.size();
 
   std::vector<int> x_shape(x_rank);
-
-  for (int i = 0; i < x_rank; i++) {
-    x_shape[i] = x_dim[i];
+  if (x_rank == 0) {
+    x_shape = std::vector<int>({1});
+  } else {
+    for (int i = 0; i < x_rank; i++) {
+      x_shape[i] = x_dim[i];
+    }
   }
 
   auto alpha_dim = alpha.dims();
   auto alpha_rank = alpha_dim.size();
 
   std::vector<int> alpha_shape(x_rank, 1);  // same size with x_shape
-
-  for (int i = 0; i < alpha_rank; i++) {
-    alpha_shape[i] = alpha_dim[i];
+  if (x_rank == 0) {
+    alpha_shape = std::vector<int>({1});
+  } else {
+    for (int i = 0; i < alpha_rank; i++) {
+      alpha_shape[i] = alpha_dim[i];
+    }
   }
 
   int r = xpu::prelu(dev_ctx.x_context(),
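
With the 0-D path in place, prelu stays a plain elementwise function: out = x for x > 0 and out = w * x otherwise, so d out/dx is 1 or w and d out/dw is 0 or x depending on the sign of x. The test assertions added below (x1.grad == 1.0, x2.grad == 0.25) are exactly these values. A plain-Python restatement of that derivation:

def prelu(x, w):
    return x if x > 0 else w * x

def dprelu_dx(x, w):
    return 1.0 if x > 0 else w

def dprelu_dw(x, w):
    return 0.0 if x > 0 else x

# Matches the 0-D test cases below (w = 0.25).
assert prelu(1.0, 0.25) == 1.0 and dprelu_dx(1.0, 0.25) == 1.0
assert prelu(-1.0, 0.25) == -0.25 and dprelu_dx(-1.0, 0.25) == 0.25
assert dprelu_dw(-1.0, 0.25) == -1.0
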
7 changes: 7 additions & 0 deletions paddle/phi/kernels/xpu/softmax_grad_kernel.cc
@@ -16,6 +16,7 @@ limitations under the License. */
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace phi {

@@ -35,6 +36,12 @@ void SoftmaxGradKernel(const Context& dev_ctx,
     return;
   }
 
+  // For 0D Tensor
+  if (rank == 0) {
+    phi::funcs::set_constant(dev_ctx, x_grad, 0.0);
+    return;
+  }
+
   std::vector<int> x_dims;
   for (int i = 0; i < rank; i++) {
     x_dims.push_back(x_grad->dims()[i]);
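
Same reasoning as for log_softmax: softmax over a single element is exp(x) / exp(x) = 1 regardless of x, so for a 0-D input the output does not depend on x and the gradient is zero, which is what the new rank == 0 branch writes. A small check, again assuming a build that contains this change:

import paddle
import paddle.nn.functional as F

x = paddle.full([], 3.0, dtype='float32')
x.stop_gradient = False

out = F.softmax(x)       # mathematically exp(x) / exp(x) == 1 for one element
out.backward()

print(out.shape)         # []
print(x.grad.numpy())    # 0.0, matching the rank == 0 branch above
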
21 changes: 13 additions & 8 deletions python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
@@ -1628,24 +1628,29 @@ def test_t(self):
         self.assertEqual(x.grad.shape, [])
 
     def test_prelu(self):
-        x = paddle.full([], 1.0, 'float32')
-        x.stop_gradient = False
-
-        w1 = paddle.to_tensor([0.25], dtype='float32')
-        out1 = paddle.nn.functional.prelu(x, w1)
+        x1 = paddle.full([], 1.0, 'float32')
+        x1.stop_gradient = False
+        w1 = paddle.full([], 0.25, dtype='float32')
+        out1 = paddle.nn.functional.prelu(x1, w1)
         out1.retain_grads()
         out1.backward()
         self.assertEqual(out1.shape, [])
+        self.assertEqual(out1.numpy(), 1.0)
         self.assertEqual(out1.grad.shape, [])
-        self.assertEqual(x.grad.shape, [])
+        self.assertEqual(x1.grad.shape, [])
+        self.assertEqual(x1.grad.numpy(), 1.0)
 
+        x2 = paddle.full([], -1.0, 'float32')
+        x2.stop_gradient = False
         w2 = paddle.full([], 0.25, dtype='float32')
-        out2 = paddle.nn.functional.prelu(x, w2)
+        out2 = paddle.nn.functional.prelu(x2, w2)
         out2.retain_grads()
         out2.backward()
         self.assertEqual(out2.shape, [])
+        self.assertEqual(out2.numpy(), -0.25)
         self.assertEqual(out2.grad.shape, [])
-        self.assertEqual(x.grad.shape, [])
+        self.assertEqual(x2.grad.shape, [])
+        self.assertEqual(x2.grad.numpy(), 0.25)
 
     def test_while_loop(self):
         def cond(i, x):
@@ -84,6 +84,8 @@
     paddle.lgamma,
     paddle.poisson,
     paddle.bernoulli,
+    paddle.nn.functional.softmax,
+    paddle.nn.functional.log_softmax,
 ]
 
 inplace_api_list = [
@@ -1033,6 +1035,33 @@ def test_unsqueeze(self):
         out2.backward()
         self.assertEqual(out2.shape, [1])
 
+    def test_prelu(self):
+        x1 = paddle.full([], 1.0, 'float32')
+        x1.stop_gradient = False
+        w1 = paddle.full([], 0.25, dtype='float32')
+        w1.stop_gradient = False
+        out1 = paddle.nn.functional.prelu(x1, w1)
+        out1.retain_grads()
+        out1.backward()
+        self.assertEqual(out1.shape, [])
+        self.assertEqual(out1.numpy(), 1.0)
+        self.assertEqual(out1.grad.shape, [])
+        self.assertEqual(x1.grad.shape, [])
+        self.assertEqual(x1.grad.numpy(), 1.0)
+
+        x2 = paddle.full([], -1.0, 'float32')
+        x2.stop_gradient = False
+        w2 = paddle.full([], 0.25, dtype='float32')
+        w2.stop_gradient = False
+        out2 = paddle.nn.functional.prelu(x2, w2)
+        out2.retain_grads()
+        out2.backward()
+        self.assertEqual(out2.shape, [])
+        self.assertEqual(out2.numpy(), -0.25)
+        self.assertEqual(out2.grad.shape, [])
+        self.assertEqual(x2.grad.shape, [])
+        self.assertEqual(x2.grad.numpy(), 0.25)
+
 
 # Use to test API whose zero-dim input tensors don't have grad and not need to test backward in OpTest.
 class TestNoBackwardAPI(unittest.TestCase):