
Commit 134c9c0

[Zero-Dim] add FLAGS_set_to_1d, control whether to hack process to 1D, add ut for xpu (#51899)
zhwesky2010 authored Mar 27, 2023
1 parent 10145cb commit 134c9c0
Showing 7 changed files with 93 additions and 11 deletions.
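For orientation: the "hack" in question is `Tensor.numpy()` promoting a 0D (zero-dimensional) Tensor to a 1D array, and the new flag makes that behavior switchable. A minimal sketch of the default behavior, assuming a Paddle 2.5-era build that includes this commit:

```python
import paddle

x = paddle.full([], 0.5)  # a 0D Tensor: shape []
print(x.shape)            # []

# With FLAGS_set_to_1d at its default (true), numpy() still applies the
# legacy hack and returns a 1D array of shape (1,).
print(x.numpy().shape)    # (1,)
```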
1 change: 1 addition & 0 deletions paddle/fluid/pybind/CMakeLists.txt
@@ -504,6 +504,7 @@ if(WITH_PYTHON)
   list(APPEND PYBIND_DEPS tensor_api)
   list(APPEND PYBIND_DEPS eager_tensor_operants)
   list(APPEND PYBIND_DEPS pybind_util)
+  list(APPEND PYBIND_DEPS flags)
 endif()
 
 # On Linux, cc_library(paddle SHARED ..) will generate the libpaddle.so,
5 changes: 4 additions & 1 deletion paddle/fluid/pybind/eager_method.cc
@@ -62,6 +62,8 @@ typedef SSIZE_T ssize_t;
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"

DECLARE_bool(set_to_1d);

namespace paddle {
namespace pybind {

@@ -124,7 +126,8 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   size_t numel = 1;
   if (py_rank == 0) {
     Py_ssize_t args_num = PyTuple_Size(args);
-    bool set_to_1d = true;
+    // true by default
+    bool set_to_1d = FLAGS_set_to_1d;
     if (args_num == (Py_ssize_t)1) {
       PyObject* obj = PyTuple_GET_ITEM(args, 0);
       if (obj == Py_False) {
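As the hunk above shows, `set_to_1d` is now seeded from `FLAGS_set_to_1d` rather than hard-coded to `true`, and an explicit boolean argument to `numpy()` still overrides it. A usage sketch (dygraph mode assumed), matching the unit test further down:

```python
import paddle

x = paddle.full([], 0.5)

# Passing False explicitly skips the 0D -> 1D conversion regardless of the
# global flag and returns a genuine 0D numpy array.
x_np = x.numpy(False)
print(x_np.shape)  # ()
print(x_np)        # 0.5
```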
10 changes: 10 additions & 0 deletions paddle/phi/core/flags.cc
@@ -744,6 +744,16 @@ PADDLE_DEFINE_EXPORTED_int32(
"less FLAGS_max_inplace_grad_add, than it will be use several grad_add"
"instead of sum. Default is 0.");

/**
* Tensor.numpy() has a hack, and this flag can close this hack
* [true]: set 0D Tensor to 1D Numpy
* [false]: not set 0D Tensor to 1D Numpy, close the hack
*
* Now, just set true by default in 2.5 transition time
* which will be removed in future (2.6 or 2.7) .
*/
PADDLE_DEFINE_EXPORTED_bool(set_to_1d, true, "set 0D Tensor to 1D numpy");

/**
* Debug related FLAG
* Name: tracer_mkldnn_ops_on
Expand Down
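Because the flag is defined with `PADDLE_DEFINE_EXPORTED_bool`, it is visible to Python like other exported `FLAGS_*`. A sketch of toggling it at runtime, assuming `paddle.set_flags` accepts this flag as it does other exported ones:

```python
import paddle

x = paddle.full([], 0.5)
print(x.numpy().shape)  # (1,) with the hack on by default

# Disable the hack for subsequent numpy() calls.
paddle.set_flags({'FLAGS_set_to_1d': False})
print(x.numpy().shape)  # ()
```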
1 change: 1 addition & 0 deletions paddle/scripts/paddle_build.bat
@@ -685,6 +685,7 @@ for /F %%# in ('wmic os get localdatetime^|findstr 20') do set start=%%#
 set start=%start:~4,10%
 
 set FLAGS_call_stack_level=2
+set FLAGS_set_to_1d=False
 dir %THIRD_PARTY_PATH:/=\%\install\openblas\lib
 dir %THIRD_PARTY_PATH:/=\%\install\openblas\bin
 dir %THIRD_PARTY_PATH:/=\%\install\zlib\bin
1 change: 1 addition & 0 deletions paddle/scripts/paddle_build.sh
@@ -61,6 +61,7 @@ function init() {

     # NOTE(chenweihang): For easy debugging, CI displays the C++ error stacktrace by default
     export FLAGS_call_stack_level=2
+    export FLAGS_set_to_1d=False
 }
 
 function cmake_base() {
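Both CI scripts disable the hack process-wide so the test suites run against the new 0D semantics. Exported flags can also be picked up from the environment when the interpreter starts, so the same setting works outside CI; a sketch, assuming the usual `FLAGS_*` environment-variable mechanism applies to this flag:

```python
import os

# Mirror the `export FLAGS_set_to_1d=False` added to the CI scripts above.
# This must be set before paddle is imported.
os.environ['FLAGS_set_to_1d'] = 'False'

import paddle

print(paddle.full([], 0.5).numpy().shape)  # () once the hack is disabled
```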
6 changes: 2 additions & 4 deletions python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
@@ -1100,12 +1100,10 @@ def test_tolist(self):

     def test_numpy(self):
         x = paddle.full([], 0.5)
-        # 0D Tensor hack to 1D Numpy defaut, will remove in future
         x_np = x.numpy()
-        np.testing.assert_array_equal(x_np.shape, (1,))
-        np.testing.assert_array_equal(x_np, np.array([0.5]))
+        np.testing.assert_array_equal(x_np.shape, ())
+        np.testing.assert_array_equal(x_np, np.array(0.5))
 
-        # return origin correct numpy
         x_np = x.numpy(False)
         np.testing.assert_array_equal(x_np.shape, ())
         np.testing.assert_array_equal(x_np, np.array(0.5))
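Note that the updated assertions expect shape `()` from a plain `numpy()` call only because the CI scripts above export `FLAGS_set_to_1d=False`; with the flag at its default of true, `numpy()` on a 0D Tensor would still return shape `(1,)`.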
80 changes: 74 additions & 6 deletions python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py
@@ -161,6 +161,20 @@ def test_dygraph_reduce(self):
             np.testing.assert_allclose(x.grad.numpy(), np.array(1.0))
             np.testing.assert_allclose(out.grad.numpy(), np.array(1.0))
 
+            out1 = api(x, 0)
+            self.assertEqual(out1.shape, [])
+            self.assertEqual(out1, out)
+            out1.backward()
+
+            out2 = api(x, -1)
+            self.assertEqual(out2.shape, [])
+            self.assertEqual(out2, out)
+            out2.backward()
+
+            if x.grad is not None:
+                self.assertEqual(x.grad.shape, [])
+                np.testing.assert_allclose(x.grad.numpy(), np.array(3.0))
+
         paddle.enable_static()


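The added assertions exercise reductions over a 0D input: axis `0` and axis `-1` are both accepted, the result stays 0D and equals the unreduced value, and after the three `backward()` calls the gradient accumulates to 3.0. A standalone sketch using `paddle.sum` as a stand-in for the reduce APIs the test loops over (the actual API list is outside this diff):

```python
import paddle

x = paddle.rand([])
x.stop_gradient = False

# A 0D Tensor accepts axis 0 and axis -1; the reduction is a no-op
# that keeps the 0D shape and the original value.
out = paddle.sum(x, 0)
print(out.shape)     # []

out.backward()
print(x.grad.shape)  # []
```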
@@ -463,32 +477,72 @@ def test_top_k(self):
             tmp = paddle.topk(x1, k=1, axis=2)
 
     def test_argmin(self):
+        # 1) x is 0D
         x = paddle.rand([])
         out1 = paddle.argmin(x, 0)
         out2 = paddle.argmin(x, -1)
         out3 = paddle.argmin(x, None)
 
         self.assertEqual(out1.shape, [])
-        np.testing.assert_allclose(out1, 0.0)
+        np.testing.assert_allclose(out1, 0)
 
         self.assertEqual(out2.shape, [])
-        np.testing.assert_allclose(out2, 0.0)
+        np.testing.assert_allclose(out2, 0)
 
         self.assertEqual(out3.shape, [])
-        np.testing.assert_allclose(out3, 0.0)
+        np.testing.assert_allclose(out3, 0)
 
+        # 2) x is 1D
+        x = paddle.rand([5])
+        x.stop_gradient = False
+        out = paddle.argmin(x, 0)
+        out.backward()
+        self.assertEqual(out.shape, [])
+
+        # 3) x is ND
+        x = paddle.rand([3, 5])
+        x.stop_gradient = False
+        out = paddle.argmin(x)
+        out.backward()
+        self.assertEqual(out.shape, [])
+
+        # 4) x is ND, keepdim=True
+        x = paddle.rand([3, 5])
+        x.stop_gradient = False
+        out = paddle.argmin(x, keepdim=True)
+        out.backward()
+        self.assertEqual(out.shape, [1, 1])
 
     def test_argmax(self):
+        # 1) x is 0D
         x = paddle.rand([])
         out1 = paddle.argmax(x, 0)
         out2 = paddle.argmax(x, -1)
         out3 = paddle.argmax(x, None)
 
         self.assertEqual(out1.shape, [])
-        np.testing.assert_allclose(out1, 0.0)
+        np.testing.assert_allclose(out1, 0)
 
         self.assertEqual(out2.shape, [])
-        np.testing.assert_allclose(out2, 0.0)
+        np.testing.assert_allclose(out2, 0)
 
         self.assertEqual(out3.shape, [])
-        np.testing.assert_allclose(out3, 0.0)
+        np.testing.assert_allclose(out3, 0)
 
+        # 2) x is 1D
+        x = paddle.rand([5])
+        out = paddle.argmax(x, 0)
+        self.assertEqual(out.shape, [])
+
+        # 3) x is ND
+        x = paddle.rand([3, 5])
+        out = paddle.argmax(x)
+        self.assertEqual(out.shape, [])
+
+        # 4) x is ND, keepdim=True
+        x = paddle.rand([3, 5])
+        out = paddle.argmax(x, keepdim=True)
+        self.assertEqual(out.shape, [1, 1])
 
     def test_median(self):
         x = paddle.rand([])
@@ -575,15 +629,29 @@ def test_numpy(self):
         np.testing.assert_array_equal(x.numpy(), np.array(0.5))
 
     def test_numel(self):
+        # 1) x is 0D
         out = paddle.numel(self.x)
         self.assertEqual(out.shape, [])
         np.testing.assert_array_equal(out.numpy(), np.array(1))
 
+        # 2) x is ND
+        x = paddle.full([3, 5], 0.5)
+        out = paddle.numel(x)
+        self.assertEqual(out.shape, [])
+        np.testing.assert_array_equal(out.numpy(), np.array(15))
+
     def test_rank(self):
+        # 1) x is 0D
         out = paddle.rank(self.x)
         self.assertEqual(out.shape, [])
         np.testing.assert_array_equal(out.numpy(), np.array(0))
 
+        # 2) x is ND
+        x = paddle.full([3, 5], 0.5)
+        out = paddle.rank(x)
+        self.assertEqual(out.shape, [])
+        np.testing.assert_array_equal(out.numpy(), np.array(2))
+
     def test_shape(self):
         out = paddle.shape(self.x)
         self.assertEqual(out.shape, [0])
