【complex op】 add complex support for unstack and add_n #59079

Merged 1 commit on Nov 17, 2023
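For orientation, a minimal sketch of what this PR enables at the Python API level (illustrative values only; assumes a Paddle build that includes these kernels):

```python
import paddle

# Complex dtypes were previously not registered for these kernels.
x = paddle.to_tensor([[1 + 2j, 3 + 4j], [5 + 6j, 7 + 8j]], dtype='complex64')

# unstack splits x along axis 0 into a list of complex64 tensors.
parts = paddle.unstack(x, axis=0)

# add_n sums a list of tensors elementwise, now including complex inputs.
total = paddle.add_n(parts)
print(total.numpy())  # [ 6.+8.j 10.+12.j]
```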
8 changes: 6 additions & 2 deletions paddle/phi/kernels/cpu/add_n_kernel.cc
@@ -90,7 +90,9 @@ PD_REGISTER_KERNEL(add_n,
                    int,
                    phi::dtype::bfloat16,
                    phi::dtype::float16,
-                   int64_t) {}
+                   int64_t,
+                   phi::dtype::complex<float>,
+                   phi::dtype::complex<double>) {}
 
 PD_REGISTER_KERNEL(add_n_array,
                    CPU,
@@ -101,4 +103,6 @@ PD_REGISTER_KERNEL(add_n_array,
                    int,
                    phi::dtype::bfloat16,
                    phi::dtype::float16,
-                   int64_t) {}
+                   int64_t,
+                   phi::dtype::complex<float>,
+                   phi::dtype::complex<double>) {}
4 changes: 3 additions & 1 deletion paddle/phi/kernels/cpu/unstack_grad_kernel.cc
@@ -25,4 +25,6 @@ PD_REGISTER_KERNEL(unstack_grad,
                    float,
                    double,
                    int,
-                   int64_t) {}
+                   int64_t,
+                   phi::dtype::complex<float>,
+                   phi::dtype::complex<double>) {}
13 changes: 10 additions & 3 deletions paddle/phi/kernels/cpu/unstack_kernel.cc
@@ -18,6 +18,13 @@ limitations under the License. */
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/unstack_kernel_impl.h"
 
-PD_REGISTER_KERNEL(
-    unstack, CPU, ALL_LAYOUT, phi::UnStackKernel, float, double, int, int64_t) {
-}
+PD_REGISTER_KERNEL(unstack,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::UnStackKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t,
+                   phi::dtype::complex<float>,
+                   phi::dtype::complex<double>) {}
4 changes: 4 additions & 0 deletions paddle/phi/kernels/funcs/selected_rows_functor.cc
@@ -399,6 +399,10 @@ template struct SelectedRowsAddToTensor<phi::CPUContext, int>;
 template struct SelectedRowsAddToTensor<phi::CPUContext, int64_t>;
 template struct SelectedRowsAddToTensor<phi::CPUContext, phi::dtype::float16>;
 template struct SelectedRowsAddToTensor<phi::CPUContext, phi::dtype::bfloat16>;
+template struct SelectedRowsAddToTensor<phi::CPUContext,
+                                        phi::dtype::complex<float>>;
+template struct SelectedRowsAddToTensor<phi::CPUContext,
+                                        phi::dtype::complex<double>>;
 
 #ifdef PADDLE_WITH_XPU
 template struct SelectedRowsAddToTensor<phi::XPUContext, float>;
4 changes: 4 additions & 0 deletions paddle/phi/kernels/funcs/selected_rows_functor.cu
@@ -335,6 +335,10 @@ template struct SelectedRowsAddToTensor<phi::GPUContext, double>;
 template struct SelectedRowsAddToTensor<phi::GPUContext, int>;
 template struct SelectedRowsAddToTensor<phi::GPUContext, int64_t>;
 template struct SelectedRowsAddToTensor<phi::GPUContext, phi::dtype::float16>;
+template struct SelectedRowsAddToTensor<phi::GPUContext,
+                                        phi::dtype::complex<float>>;
+template struct SelectedRowsAddToTensor<phi::GPUContext,
+                                        phi::dtype::complex<double>>;
 
 namespace scatter {
 
8 changes: 6 additions & 2 deletions paddle/phi/kernels/gpu/add_n_kernel.cu
@@ -240,7 +240,9 @@ PD_REGISTER_KERNEL(add_n,
                    int,
                    phi::dtype::bfloat16,
                    phi::dtype::float16,
-                   int64_t) {}
+                   int64_t,
+                   phi::dtype::complex<float>,
+                   phi::dtype::complex<double>) {}
 
 PD_REGISTER_KERNEL(add_n_array,
                    GPU,
@@ -251,4 +253,6 @@ PD_REGISTER_KERNEL(add_n_array,
                    int,
                    phi::dtype::bfloat16,
                    phi::dtype::float16,
-                   int64_t) {}
+                   int64_t,
+                   phi::dtype::complex<float>,
+                   phi::dtype::complex<double>) {}
4 changes: 3 additions & 1 deletion paddle/phi/kernels/gpu/unstack_grad_kernel.cu
@@ -39,4 +39,6 @@ PD_REGISTER_KERNEL(unstack_grad,
                    int64_t,
                    int,
                    phi::dtype::float16,
-                   phi::dtype::bfloat16) {}
+                   phi::dtype::bfloat16,
+                   phi::dtype::complex<float>,
+                   phi::dtype::complex<double>) {}
4 changes: 3 additions & 1 deletion paddle/phi/kernels/gpu/unstack_kernel.cu
@@ -53,4 +53,6 @@ PD_REGISTER_KERNEL(unstack,
                    int64_t,
                    int,
                    phi::dtype::float16,
-                   phi::dtype::bfloat16) {}
+                   phi::dtype::bfloat16,
+                   phi::dtype::complex<float>,
+                   phi::dtype::complex<double>) {}
4 changes: 2 additions & 2 deletions python/paddle/tensor/manipulation.py
@@ -567,12 +567,12 @@ def unstack(x, axis=0, num=None):
         raised.
 
     Args:
-        x (Tensor): Input Tensor. It is a N-D Tensors of data types float32, float64, int32, int64.
+        x (Tensor): Input Tensor. It is a N-D Tensors of data types float32, float64, int32, int64, complex64, complex128.
         axis (int): The axis along which the input is unstacked.
         num (int|None): The number of output variables.
 
     Returns:
-        list(Tensor), The unstacked Tensors list. The list elements are N-D Tensors of data types float32, float64, int32, int64.
+        list(Tensor), The unstacked Tensors list. The list elements are N-D Tensors of data types float32, float64, int32, int64, complex64, complex128.
 
     Examples:
         .. code-block:: python
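The docstring's own example block is truncated in this view; as a hedged illustration of the newly documented complex dtypes (not the docstring's example):

```python
import numpy as np
import paddle

data = (np.random.random([2, 3]) + 1j * np.random.random([2, 3])).astype('complex64')
x = paddle.to_tensor(data)
y0, y1 = paddle.unstack(x, axis=0)  # two complex64 tensors of shape [3]
np.testing.assert_allclose(y0.numpy(), data[0], rtol=1e-6)
```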
15 changes: 13 additions & 2 deletions python/paddle/tensor/math.py
@@ -1946,7 +1946,7 @@ def add_n(inputs, name=None):
 
     Args:
         inputs (Tensor|list[Tensor]|tuple[Tensor]): A Tensor or a list/tuple of Tensors. The shape and data type of the list/tuple elements should be consistent.
-            Input can be multi-dimensional Tensor, and data types can be: float32, float64, int32, int64.
+            Input can be multi-dimensional Tensor, and data types can be: float32, float64, int32, int64, complex64, complex128.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
 
     Returns:
@@ -1985,14 +1985,25 @@ def add_n(inputs, name=None):
                 'int32',
                 'int64',
                 'uint16',
+                'complex64',
+                'complex128',
             ],
             'add_n',
         )
     else:
         check_variable_and_dtype(
             inputs,
             "inputs",
-            ['float16', 'float32', 'float64', 'int32', 'int64', 'uint16'],
+            [
+                'float16',
+                'float32',
+                'float64',
+                'int32',
+                'int64',
+                'uint16',
+                'complex64',
+                'complex128',
+            ],
             'add_n',
         )
 
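A small sketch exercising the widened dtype check (it mirrors the test changes below and is not part of the PR itself):

```python
import numpy as np
import paddle

rows = (np.random.random([3, 4]) + 1j * np.random.random([3, 4])).astype('complex128')
xs = [paddle.to_tensor(r) for r in rows]  # three complex128 tensors of shape [4]
y = paddle.add_n(xs)                      # elementwise sum of the list
np.testing.assert_allclose(y.numpy(), rows.sum(axis=0), rtol=1e-6)
```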
22 changes: 14 additions & 8 deletions test/legacy_test/test_add_n_op.py
@@ -21,8 +21,8 @@
 class TestAddnOp(unittest.TestCase):
     def setUp(self):
         np.random.seed(20)
-        l = 32
-        self.x_np = np.random.random([l, 16, 256])
+        self.l = 32
+        self.x_np = np.random.random([self.l, 16, 256])
 
     def check_main(self, x_np, dtype, axis=None):
         paddle.disable_static()
@@ -33,10 +33,10 @@ def check_main(self, x_np, dtype, axis=None):
             x.append(val)
         y = paddle.add_n(x)
         x_g = paddle.grad(y, x)
-        y_np = y.numpy().astype('float32')
+        y_np = y.numpy().astype(dtype)
         x_g_np = []
         for val in x_g:
-            x_g_np.append(val.numpy().astype('float32'))
+            x_g_np.append(val.numpy().astype(dtype))
         paddle.enable_static()
         return y_np, x_g_np
 
@@ -53,11 +53,17 @@ def test_add_n_fp16(self):
     def test_add_n_api(self):
         if not paddle.is_compiled_with_cuda():
             return
+        dtypes = ['float32', 'complex64', 'complex128']
+        for dtype in dtypes:
+            if dtype == 'complex64' or dtype == 'complex128':
+                self.x_np = (
+                    np.random.random([self.l, 16, 256])
+                    + 1j * np.random.random([self.l, 16, 256])
+                ).astype(dtype)
 
-        y_np_32, x_g_np_32 = self.check_main(self.x_np, 'float32')
-        y_np_gt = np.sum(self.x_np, axis=0).astype('float32')
-
-        np.testing.assert_allclose(y_np_32, y_np_gt, rtol=1e-06)
+            y_np, x_g_np = self.check_main(self.x_np, dtype)
+            y_np_gt = np.sum(self.x_np, axis=0).astype(dtype)
+            np.testing.assert_allclose(y_np, y_np_gt, rtol=1e-06)
 
 
 if __name__ == "__main__":
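The test above only checks the forward result. Since add_n is linear, the gradient with respect to each input should be a tensor of ones, including for complex dtypes; a quick dygraph check (an assumption-level sketch, not part of the test file):

```python
import numpy as np
import paddle

xs = [
    paddle.to_tensor(np.ones([2, 2], dtype='complex64'), stop_gradient=False)
    for _ in range(3)
]
y = paddle.add_n(xs)
grads = paddle.grad(y, xs)  # default grad_outputs is all ones
for g in grads:
    np.testing.assert_allclose(g.numpy(), np.ones([2, 2]), rtol=1e-6)
```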
77 changes: 71 additions & 6 deletions test/legacy_test/test_unstack_op.py
@@ -116,6 +116,54 @@ def initParameters(self):
         self.axis = 2
 
 
+class TestStackOp3_Complex64(TestStackOp3):
+    def initParameters(self):
+        self.dtype = np.complex64
+        self.axis = -1
+
+
+class TestStackOp4_complex64(TestStackOp4):
+    def initParameters(self):
+        self.dtype = np.complex64
+        self.axis = -3
+
+
+class TestStackOp5_complex64(TestStackOp5):
+    def initParameters(self):
+        self.dtype = np.complex64
+        self.axis = 1
+
+
+class TestStackOp6_complex64(TestStackOp6):
+    def initParameters(self):
+        self.dtype = np.complex64
+        self.axis = 2
+
+
+class TestStackOp3_Complex128(TestStackOp3):
+    def initParameters(self):
+        self.dtype = np.complex128
+        self.axis = -1
+
+
+class TestStackOp4_complex128(TestStackOp4):
+    def initParameters(self):
+        self.dtype = np.complex128
+        self.axis = -3
+
+
+class TestStackOp5_complex128(TestStackOp5):
+    def initParameters(self):
+        self.dtype = np.complex128
+        self.axis = 1
+
+
+class TestStackOp6_complex128(TestStackOp6):
+    def initParameters(self):
+        self.dtype = np.complex128
+        self.axis = 2
+
+
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
@@ -186,14 +234,31 @@ class TestUnstackZeroInputOp(unittest.TestCase):
     def unstack_zero_input_static(self):
         paddle.enable_static()
 
-        array = np.array([], dtype=np.float32)
-        x = paddle.to_tensor(np.reshape(array, [0]), dtype='float32')
-        paddle.unstack(x, axis=1)
+        dtypes = ['float32', 'complex64', 'complex128']
+        for dtype in dtypes:
+            prog = paddle.static.Program()
+            startup_prog = paddle.static.Program()
+            with paddle.static.program_guard(prog, startup_prog):
+                data = np.random.random([0]).astype(dtype)
+                if dtype == 'complex64' or dtype == 'complex128':
+                    data = (
+                        np.random.random([0]) + 1j * np.random.random([0])
+                    ).astype(dtype)
+                x = paddle.static.data(shape=[0], dtype=dtype, name='x')
+                paddle.unstack(x, axis=1)
 
     def unstack_zero_input_dynamic(self):
-        array = np.array([], dtype=np.float32)
-        x = paddle.to_tensor(np.reshape(array, [0]), dtype='float32')
-        paddle.unstack(x, axis=1)
+        paddle.disable_static()
+        dtypes = ['float32', 'complex64', 'complex128']
+        for dtype in dtypes:
+            with base.dygraph.guard():
+                data = np.random.random([0]).astype(dtype)
+                if dtype == 'complex64' or dtype == 'complex128':
+                    data = (
+                        np.random.random([0]) + 1j * np.random.random([0])
+                    ).astype(dtype)
+                x = base.dygraph.to_variable(data)
+                paddle.unstack(x, axis=1)
 
     def test_type_error(self):
         paddle.disable_static()
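Finally, the complex unstack_grad kernels registered above can be exercised by differentiating through unstack; a round-trip sketch under the same assumptions as the examples earlier:

```python
import numpy as np
import paddle

data = (np.random.random([2, 3]) + 1j * np.random.random([2, 3])).astype('complex64')
x = paddle.to_tensor(data, stop_gradient=False)
ys = paddle.unstack(x, axis=0)  # two complex64 tensors of shape [3]
out = paddle.add_n(ys)          # sum of the unstacked pieces
(g,) = paddle.grad(out, [x])    # backward pass runs unstack_grad for complex64
np.testing.assert_allclose(g.numpy(), np.ones([2, 3]), rtol=1e-6)
```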