
Commit

cancel the modify of xpu
yunyaoXYY committed Feb 24, 2023
1 parent 7d8a630 commit 3d376f8
Showing 4 changed files with 3 additions and 176 deletions.
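Each of the kernel diffs below removes a zero-dimensional (0-D) special case that allocated the output and fell back to a plain xpu::copy instead of the regular broadcast or top-k call, and the deleted Python tests exercised that path through the public API. A minimal sketch of the kind of call those tests covered (illustrative only, not part of this commit; it assumes a Paddle build that still supports 0-D expand):

    import paddle

    # 0-D (zero-dimensional) input tensor
    x = paddle.full([], 1.0, 'float32')
    x.stop_gradient = False

    # Broadcast the 0-D value up to a [3, 3] tensor, as in the removed test_expand case4
    out = paddle.expand(x, shape=[3, 3])
    out.backward()

    print(out.shape)     # [3, 3]
    print(x.grad.shape)  # [] -- the gradient reduces back to a 0-D tensor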
18 changes: 1 addition & 17 deletions paddle/phi/kernels/xpu/expand_as_kernel.cc
@@ -49,26 +49,10 @@ void ExpandAs(const Context& context,
target_shape[i]));
}
}
if (target_shape.size() == 0) {
phi::DDim out_dims = phi::make_ddim(target_shape);
out->Resize(out_dims);
context.template Alloc<T>(out);

int r = xpu::copy<XPUType>(context.x_context(),
reinterpret_cast<const XPUType*>(x.data<T>()),
reinterpret_cast<XPUType*>(out->data<T>()),
x.numel());
PADDLE_ENFORCE_XDNN_SUCCESS(r, "copy");
return;
}

phi::DDim out_dims = phi::make_ddim(target_shape);
out->Resize(out_dims);
context.template Alloc<T>(out);
auto& x_shape = vec_in_dims;
if (x.dims().size() == 0) {
x_shape = std::vector<int>({1});
}
auto out_shape = phi::vectorize<int>(out_dims);

int r = XPU_SUCCESS;
@@ -111,7 +95,7 @@ void ExpandAsKernel(const Context& ctx,
rank));
PADDLE_ENFORCE_GE(
rank,
0,
1,
phi::errors::InvalidArgument("The rank (%d) of the input 'x' for "
"expand_as_v2 op must be positive.",
rank));
17 changes: 1 addition & 16 deletions paddle/phi/kernels/xpu/expand_kernel.cc
@@ -78,7 +78,7 @@ void ExpandKernel(const Context& ctx,
auto rank = x.dims().size();
PADDLE_ENFORCE_GE(
rank,
0,
1,
phi::errors::InvalidArgument(
"The rank of the input 'X' for expand_v2_npu op must be positive, "
"but the value received is %d.",
@@ -94,25 +94,10 @@
shape_size,
rank));

if (shape_size == 0) {
phi::DDim out_dims = phi::make_ddim(final_expand_shape);
out->Resize(out_dims);
ctx.template Alloc<T>(out);

int r = xpu::copy<XPUType>(ctx.x_context(),
reinterpret_cast<const XPUType*>(x.data<T>()),
reinterpret_cast<XPUType*>(out->data<T>()),
x.numel());
PADDLE_ENFORCE_XDNN_SUCCESS(r, "copy");
return;
}
DDim out_dims = phi::make_ddim(final_expand_shape);
out->Resize(out_dims);
ctx.template Alloc<T>(out);
auto& x_shape = vec_in_dims;
if (rank == 0) {
x_shape = std::vector<int>({1});
}
auto out_shape = phi::vectorize<int>(out_dims);

int r = XPU_SUCCESS;
12 changes: 1 addition & 11 deletions paddle/phi/kernels/xpu/top_k_kernel.cc
@@ -16,7 +16,7 @@

#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace phi {

template <typename T, typename Context>
@@ -49,17 +49,7 @@ void TopkKernel(const Context& dev_ctx,
errors::External(
"XPU API does not support smallest topk operation currently."
" Operator will be supported in future update."));
if (in_dims.size() == 0) {
int r = xpu::copy<XPUType>(dev_ctx.x_context(),
reinterpret_cast<const XPUType*>(x.data<T>()),
reinterpret_cast<XPUType*>(out->data<T>()),
x.numel());
PADDLE_ENFORCE_XDNN_SUCCESS(r, "copy");

phi::funcs::set_constant(dev_ctx, indices, 0);

return;
}
if (axis < 0) axis += in_dims.size();

size_t k = k_scalar.to<int>();
132 changes: 0 additions & 132 deletions python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py
@@ -330,138 +330,6 @@ def setUp(self):
paddle.disable_static()
self.x = paddle.rand([])

def test_expand(self):
# case1
x = paddle.full([], 1, 'float32')
x.stop_gradient = False
out = paddle.expand(x, shape=[1])
out.retain_grads()
out.backward()

self.assertEqual(out.shape, [1])
np.testing.assert_allclose(out, 1.0)
self.assertEqual(x.grad.shape, [])
np.testing.assert_allclose(x.grad, 1.0)
self.assertEqual(out.grad.shape, [1])
np.testing.assert_allclose(out.grad, 1.0)

# case2
x1 = paddle.full([], 1, 'float32')
x1.stop_gradient = False
out1 = paddle.expand(x1, shape=[])
out1.retain_grads()
out1.backward()

self.assertEqual(out1.shape, [])
np.testing.assert_allclose(out1, 1.0)
self.assertEqual(x1.grad.shape, [])
np.testing.assert_allclose(x1.grad, 1.0)
self.assertEqual(out1.grad.shape, [])
np.testing.assert_allclose(out1.grad, 1.0)

# case3
x2 = paddle.full([], 1, 'float32')
x2.stop_gradient = False
out2 = paddle.expand(x2, shape=[1, 1])
out2.retain_grads()
out2.backward()

self.assertEqual(out2.shape, [1, 1])
np.testing.assert_allclose(out2, 1.0)
self.assertEqual(x2.grad.shape, [])
np.testing.assert_allclose(x2.grad, 1.0)
self.assertEqual(out2.grad.shape, [1, 1])
np.testing.assert_allclose(out2.grad, 1.0)

# case4
x3 = paddle.full([], 1, 'float32')
x3.stop_gradient = False
out3 = paddle.expand(x3, shape=[3, 3])
out3.retain_grads()
out3.backward()

self.assertEqual(out3.shape, [3, 3])
np.testing.assert_allclose(out3, 1.0)
self.assertEqual(x3.grad.shape, [])
np.testing.assert_allclose(x3.grad, 9.0)
self.assertEqual(out3.grad.shape, [3, 3])
np.testing.assert_allclose(out3.grad, 1.0)

def test_expand_as(self):
x = paddle.full([], 1, 'float32')
x.stop_gradient = False
y = paddle.full([], 1, 'float32')
y.stop_gradient = False
out = paddle.expand_as(x, y)
out.backward()
self.assertEqual(x.shape, [])
self.assertEqual(x.item(), 1.0)
self.assertEqual(x.grad.shape, [])
self.assertEqual(x.grad.item(), 1.0)
self.assertEqual(out.shape, [])
self.assertEqual(out.item(), 1.0)
self.assertEqual(out.grad, None)

x1 = paddle.full([], 1, 'float32')
x1.stop_gradient = False
y1 = paddle.full([1], 1, 'float32')
out1 = paddle.expand_as(x1, y1)
out1.backward()
self.assertEqual(x1.shape, [])
self.assertEqual(x1.item(), 1.0)
self.assertEqual(x1.grad.shape, [])
self.assertEqual(x1.grad.item(0), 1.0)
self.assertEqual(out1.shape, [1])
self.assertEqual(out1.item(0), 1.0)
self.assertEqual(out1.grad, None)

x2 = paddle.full([], 1, 'float32')
x2.stop_gradient = False
y2 = paddle.full([3, 3], 1, 'float32')
out2 = paddle.expand_as(x2, y2)
out2.backward()
self.assertEqual(x2.shape, [])
self.assertEqual(x2.item(), 1.0)
self.assertEqual(x2.grad.shape, [])
self.assertEqual(x2.grad.item(0), 9.0)
self.assertEqual(out2.shape, [3, 3])
self.assertEqual(out2.item(0), 1.0)
self.assertEqual(out2.grad, None)

def test_top_k(self):
x = paddle.full([], 1, 'float32')
x.stop_gradient = False
out, indices = paddle.topk(x, k=1, axis=0)
out.retain_grads()
out.backward()
self.assertEqual(indices.shape, [])
self.assertEqual(indices.item(), 0)
self.assertEqual(x.shape, [])
self.assertEqual(x.item(), 1.0)
self.assertEqual(x.grad.shape, [])
self.assertEqual(x.grad.item(0), 1.0)
self.assertEqual(out.shape, [])
self.assertEqual(out.item(), 1.0)
self.assertEqual(out.grad, 1.0)

x1 = paddle.full([], 1, 'float32')
x1.stop_gradient = False
out1, indices1 = paddle.topk(x1, k=1, axis=-1)
out1.retain_grads()
out1.backward()
self.assertEqual(indices1.shape, [])
self.assertEqual(indices1.item(), 0)
self.assertEqual(x1.shape, [])
self.assertEqual(x1.item(), 1.0)
self.assertEqual(x.grad.shape, [])
self.assertEqual(x.grad.item(0), 1.0)
self.assertEqual(out1.shape, [])
self.assertEqual(out1.item(), 1.0)
self.assertEqual(out1.grad, 1.0)

with self.assertRaises(ValueError):
tmp = paddle.topk(x1, k=1, axis=2)

def test_argmin(self):
x = paddle.rand([])
out1 = paddle.argmin(x, 0)
