Skip to content

Commit 226f862

Browse files
authored
Merge branch 'develop' into prod_reshape
2 parents 8ad4045 + 8321bbb commit 226f862

20 files changed

+480
-30
lines changed

paddle/fluid/framework/ir/onednn/cpu_bfloat16_pass_tester.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ void SetOp(ProgramDesc* prog,
2828
const std::string& onednn_data_type = "float32") {
2929
auto* op = prog->MutableBlock(0)->AppendOp();
3030
op->SetType(type);
31-
op->SetAttr("use_mkldnn", use_onednn);
31+
op->SetAttr("use_onednn", use_onednn);
3232
op->SetAttr("name", name);
3333

3434
if (type == "conv2d") {

paddle/fluid/framework/ir/onednn/interpolate_onednn_pass.cc

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,8 @@ void InterpolateOneDNNPass::ApplyImpl(ir::Graph* graph) const {
3131
PADDLE_ENFORCE_NOT_NULL(graph,
3232
common::errors::InvalidArgument(
3333
"Pointer to graph argument should not be NULL."));
34-
if (!(graph->Has("use_mkldnn") && graph->Get<bool>("use_mkldnn"))) {
34+
if (!(graph->Has("use_mkldnn") && graph->Get<bool>("use_mkldnn")) &&
35+
!(graph->Has("use_onednn") && graph->Get<bool>("use_onednn"))) {
3536
VLOG(3) << "Do not handle interpolate_onednn_pass";
3637
return;
3738
}
@@ -53,7 +54,7 @@ void InterpolateOneDNNPass::ApplyImpl(ir::Graph* graph) const {
5354
interpolate_op_types.end(),
5455
node->Name()) != interpolate_op_types.end()) {
5556
auto* op_desc = node->Op();
56-
op_desc->SetAttr("use_mkldnn", true);
57+
op_desc->SetAttr("use_onednn", true);
5758
++found_count;
5859
}
5960
}

paddle/fluid/framework/ir/onednn/multi_gru_fuse_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -186,7 +186,7 @@ MultiGRUFusePass::MultiGRUFusePass() {
186186
.AddAttr("origin_mode")
187187
.IsType<bool>()
188188
.End()
189-
.AddAttr("use_mkldnn")
189+
.AddAttr("use_onednn")
190190
.IsType<bool>()
191191
.End()
192192
.AddAttr("mkldnn_data_type")

python/paddle/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -369,6 +369,8 @@
369369
unstack,
370370
view,
371371
view_as,
372+
view_as_complex,
373+
view_as_real,
372374
vsplit,
373375
vstack,
374376
)
@@ -1167,7 +1169,9 @@
11671169
'acosh',
11681170
'atanh',
11691171
'as_complex',
1172+
'view_as_complex',
11701173
'as_real',
1174+
'view_as_real',
11711175
'diff',
11721176
'angle',
11731177
'fmax',

python/paddle/nn/functional/input.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717

1818
import paddle
1919
from paddle import _C_ops
20-
from paddle.utils.decorator_utils import ParamAliasDecorator
20+
from paddle.utils.decorator_utils import param_one_alias
2121

2222
from ...base.data_feeder import check_variable_and_dtype
2323
from ...base.layer_helper import LayerHelper
@@ -162,7 +162,7 @@ def embedding_renorm_(
162162
return weight
163163

164164

165-
@ParamAliasDecorator({"x": ["input"]})
165+
@param_one_alias(["x", "input"])
166166
def embedding(
167167
x: Tensor,
168168
weight: Tensor,

python/paddle/sparse/unary.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -882,7 +882,7 @@ def expm1(x: Tensor, name: str | None = None) -> Tensor:
882882
return _C_ops.sparse_expm1(x)
883883

884884

885-
@param_one_alias({"x": "input"})
885+
@param_one_alias(["x", "input"])
886886
def reshape(x: Tensor, shape: ShapeLike, name: str | None = None) -> Tensor:
887887
"""
888888
Changes the shape of ``x`` without changing its value, requiring x to be a SparseCooTensor or SparseCsrTensor.

python/paddle/tensor/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -227,6 +227,8 @@
227227
unstack,
228228
view,
229229
view_as,
230+
view_as_complex,
231+
view_as_real,
230232
vsplit,
231233
vstack,
232234
)
@@ -783,7 +785,9 @@
783785
'lu_unpack',
784786
'cdist',
785787
'as_complex',
788+
'view_as_complex',
786789
'as_real',
790+
'view_as_real',
787791
'rad2deg',
788792
'deg2rad',
789793
'gcd',

python/paddle/tensor/logic.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
from paddle import _C_ops
2323
from paddle.tensor.creation import full
2424
from paddle.tensor.math import broadcast_shape
25-
from paddle.utils.decorator_utils import ParamAliasDecorator
25+
from paddle.utils.decorator_utils import ParamAliasDecorator, param_two_alias
2626
from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only
2727

2828
from ..base.data_feeder import check_type, check_variable_and_dtype
@@ -1330,7 +1330,7 @@ def bitwise_and_(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
13301330
return _C_ops.bitwise_and_(x, y)
13311331

13321332

1333-
@ParamAliasDecorator({"x": ["input"], "y": ["other"]})
1333+
@param_two_alias(["x", "input"], ["y", "other"])
13341334
def bitwise_or(
13351335
x: Tensor, y: Tensor, out: Tensor | None = None, name: str | None = None
13361336
) -> Tensor:

python/paddle/tensor/manipulation.py

Lines changed: 80 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@
2727
from paddle.utils.decorator_utils import (
2828
ParamAliasDecorator,
2929
reshape_decorator,
30+
param_one_alias,
31+
param_two_alias,
3032
view_decorator,
3133
)
3234
from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only
@@ -3471,7 +3473,7 @@ def squeeze_(
34713473
return _C_ops.squeeze_(input, axes)
34723474

34733475

3474-
@ParamAliasDecorator({"x": ["input"], "axis": ["dim"]})
3476+
@param_two_alias(["x", "input"], ["axis", "dim"])
34753477
def unique_consecutive(
34763478
x: Tensor,
34773479
return_inverse: bool = False,
@@ -6297,7 +6299,83 @@ def as_real(x: Tensor, name: str | None = None) -> Tensor:
62976299
return out
62986300

62996301

6300-
@ParamAliasDecorator({"x": ["input"], "axis": ["dim"]})
6302+
def view_as_complex(input: Tensor) -> Tensor:
    """Return a complex tensor that is a view of the input real tensor.

    The data type of the input tensor is 'float32' or 'float64', and the data
    type of the returned tensor is 'complex64' or 'complex128', respectively.

    The shape of the input tensor is ``(*, 2)``, (``*`` means arbitrary shape), i.e.
    the size of the last axis should be 2, which represent the real and imag part
    of a complex number. The shape of the returned tensor is ``(*,)``.

    The complex tensor is a view of the input real tensor, meaning that it shares the same memory with real tensor.

    The image below demonstrates the case that a real 3D-tensor with shape [2, 3, 2] is transformed into a complex 2D-tensor with shape [2, 3].

    .. image:: https://githubraw.cdn.bcebos.com/PaddlePaddle/docs/develop/docs/images/api_legend/as_complex.png
        :width: 500
        :alt: Illustration of view_as_complex
        :align: center

    Args:
        input (Tensor): The input tensor. Data type is 'float32' or 'float64'.

    Returns:
        Tensor, The output. Data type is 'complex64' or 'complex128', sharing the same memory with input.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
            >>> y = paddle.view_as_complex(x)
            >>> print(y)
            Tensor(shape=[2, 3], dtype=complex64, place=Place(cpu), stop_gradient=True,
            [[1j     , (2+3j) , (4+5j) ],
             [(6+7j) , (8+9j) , (10+11j)]])
    """
    # Alias of ``as_complex`` that uses the torch-style parameter name
    # ``input``; delegates so both entry points share one implementation.
    return as_complex(x=input)
6340+
6341+
6342+
def view_as_real(input: Tensor) -> Tensor:
    """Return a real tensor that is a view of the input complex tensor.

    The data type of the input tensor is 'complex64' or 'complex128', and the data
    type of the returned tensor is 'float32' or 'float64', respectively.

    When the shape of the input tensor is ``(*, )``, (``*`` means arbitrary shape),
    the shape of the output tensor is ``(*, 2)``, i.e. the shape of the output is
    the shape of the input appended by an extra ``2``.

    The real tensor is a view of the input complex tensor, meaning that it shares the same memory with complex tensor.

    Args:
        input (Tensor): The input tensor. Data type is 'complex64' or 'complex128'.

    Returns:
        Tensor, The output. Data type is 'float32' or 'float64', sharing the same memory with input.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
            >>> y = paddle.view_as_complex(x)
            >>> z = paddle.view_as_real(y)
            >>> print(z)
            Tensor(shape=[2, 3, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[[0. , 1. ],
              [2. , 3. ],
              [4. , 5. ]],
             [[6. , 7. ],
              [8. , 9. ],
              [10., 11.]]])
    """
    # Alias of ``as_real`` that uses the torch-style parameter name
    # ``input``; delegates so both entry points share one implementation.
    return as_real(x=input)
6377+
6378+
63016379
def repeat_interleave(
63026380
x: Tensor,
63036381
repeats: int | Tensor,

python/paddle/tensor/math.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
from paddle.base.libpaddle import DataType
2626
from paddle.common_ops_import import VarDesc, dygraph_utils
2727
from paddle.pir import Value
28-
from paddle.utils.decorator_utils import ParamAliasDecorator
28+
from paddle.utils.decorator_utils import ParamAliasDecorator, param_two_alias
2929
from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only
3030

3131
from ..base.data_feeder import (
@@ -4963,7 +4963,7 @@ def isnan(x: Tensor, name: str | None = None) -> Tensor:
49634963
return out
49644964

49654965

4966-
@ParamAliasDecorator({"x": ["input"], "axis": ["dim"]})
4966+
@param_two_alias(["x", "input"], ["axis", "dim"])
49674967
def prod(
49684968
x: Tensor,
49694969
axis: int | Sequence[int] | None = None,
@@ -6628,6 +6628,7 @@ def lcm_(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
66286628
return out
66296629

66306630

6631+
@ParamAliasDecorator({"x": ["input"], "axis": ["dim"]})
66316632
def diff(
66326633
x: Tensor,
66336634
n: int = 1,

0 commit comments

Comments
 (0)