2.0rc api add all any (#28199)
* reduce trt warning message (#28011)

add paddle.enable_static() to sample code

alias reduce_all-->all, reduce_any-->any

add imports of reduce_all and reduce_any in python/paddle/tensor/math.py

import all and any in python/paddle/tensor/__init__.py

remove all and any OP in python/paddle/tensor/logic.py, add all and any OP in python/paddle/tensor/math.py

fix import error

remove TestAllAPI temporarily

* fix doc of reduce_all and reduce_any, test=document_fix

* fix typo

* fix unittest for all and any API

Co-authored-by: Pei Yang <peiyang@baidu.com>
cnn and cryoco authored Oct 22, 2020
1 parent 4ccc171 commit 97227e6
Showing 6 changed files with 381 additions and 22 deletions.
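At a glance, the commit exposes the existing reduce_all/reduce_any kernels under the shorter 2.0 names paddle.all and paddle.any. Below is a minimal sketch of the resulting user-facing API, not taken from the diff; it assumes 2.0rc's default dynamic mode and the 2.0-style axis/keepdim parameter names:

    import numpy as np
    import paddle

    # same boolean input as the reduce_all docstring example further down;
    # axis/keepdim parameter names are assumed from the 2.0-style API
    x = paddle.cast(paddle.to_tensor(np.array([[1, 0], [1, 1]])), 'bool')

    print(paddle.all(x))          # False: logical-and over every element
    print(paddle.any(x))          # True:  logical-or over every element
    print(paddle.all(x, axis=0))  # [True, False]: reduce along dimension 0
    print(paddle.any(x, axis=1, keepdim=True))  # [[True], [True]], shape (2, 1)

The old fluid.layers.reduce_all/reduce_any entry points stay in place; the diff below only rewires the module imports and refreshes the two docstrings.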
4 changes: 2 additions & 2 deletions python/paddle/__init__.py
@@ -103,8 +103,6 @@
 from .tensor.logic import logical_or #DEFINE_ALIAS
 from .tensor.logic import logical_xor #DEFINE_ALIAS
 from .tensor.logic import not_equal #DEFINE_ALIAS
-# from .tensor.logic import reduce_all #DEFINE_ALIAS
-# from .tensor.logic import reduce_any #DEFINE_ALIAS
 from .tensor.logic import allclose #DEFINE_ALIAS
 from .tensor.logic import equal_all #DEFINE_ALIAS
 # from .tensor.logic import isnan #DEFINE_ALIAS
@@ -161,6 +159,8 @@
 # from .tensor.math import reduce_min #DEFINE_ALIAS
 # from .tensor.math import reduce_prod #DEFINE_ALIAS
 # from .tensor.math import reduce_sum #DEFINE_ALIAS
+from .tensor.math import all #DEFINE_ALIAS
+from .tensor.math import any #DEFINE_ALIAS
 from .tensor.math import round #DEFINE_ALIAS
 from .tensor.math import rsqrt #DEFINE_ALIAS
 from .tensor.math import scale #DEFINE_ALIAS
79 changes: 62 additions & 17 deletions python/paddle/fluid/layers/nn.py
@@ -315,6 +315,8 @@ def fc(input,
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 # when input is single tensor
 data = fluid.data(name="data", shape=[-1, 32], dtype="float32")
 fc = fluid.layers.fc(input=data, size=1000, act="tanh")
@@ -468,6 +470,9 @@ def embedding(input,

 import paddle.fluid as fluid
 import numpy as np
+import paddle
+paddle.enable_static()
+
 data = fluid.data(name='x', shape=[None, 1], dtype='int64')

 # example 1
@@ -731,6 +736,8 @@ def linear_chain_crf(input, label, param_attr=None, length=None):

 import paddle.fluid as fluid
 import numpy as np
+import paddle
+paddle.enable_static()

 #define net structure, using LodTensor
 train_program = fluid.Program()
@@ -855,6 +862,8 @@ def crf_decoding(input, param_attr, label=None, length=None):
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 # LoDTensor-based example
 num_labels = 10
@@ -1458,6 +1467,9 @@ def conv2d(input,
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
+
 data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
 conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
 """
@@ -1728,6 +1740,8 @@ def conv3d(input,
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
 conv3d = fluid.layers.conv3d(input=data, num_filters=2, filter_size=3, act="relu")
 """
@@ -2377,6 +2391,7 @@ def adaptive_pool2d(input,
 # output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
 #
 import paddle
+paddle.enable_static()
 data = paddle.rand(shape=[1,3,32,32])
 pool_out = paddle.fluid.layers.adaptive_pool2d(
 input=data,
@@ -2531,6 +2546,7 @@ def adaptive_pool3d(input,
 #

 import paddle
+paddle.enable_static()
 data = paddle.rand(shape=[1,3,32,32,32])
 pool_out = paddle.fluid.layers.adaptive_pool3d(
 input=data,
@@ -2726,6 +2742,8 @@ def batch_norm(input,
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
 hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
 hidden2 = fluid.layers.batch_norm(input=hidden1)
@@ -2735,6 +2753,8 @@
 # batch_norm with momentum as Variable
 import paddle.fluid as fluid
 import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler
+import paddle
+paddle.enable_static()

 def get_decay_momentum(momentum_init, decay_steps, decay_rate):
 global_step = lr_scheduler._decay_step_counter()
@@ -3134,6 +3154,8 @@ def instance_norm(input,
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
 hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
 hidden2 = fluid.layers.instance_norm(input=hidden1)
@@ -3269,6 +3291,7 @@ def data_norm(input,
 .. code-block:: python

 import paddle
+paddle.enable_static()

 x = paddle.randn(shape=[32,100])
 hidden2 = paddle.static.nn.data_norm(input=x)
@@ -3451,6 +3474,8 @@ def layer_norm(input,

 import paddle.fluid as fluid
 import numpy as np
+import paddle
+paddle.enable_static()
 x = fluid.data(name='x', shape=[-1, 32, 32], dtype='float32')
 hidden1 = fluid.layers.layer_norm(input=x, begin_norm_axis=1)
 place = fluid.CPUPlace()
@@ -3566,6 +3591,9 @@ def group_norm(input,
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
+
 data = fluid.data(name='data', shape=[None, 8, 32, 32], dtype='float32')
 x = fluid.layers.group_norm(input=data, groups=4)
 """
@@ -3887,6 +3915,8 @@ def conv2d_transpose(input,
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
 conv2d_transpose = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3)
 """
@@ -4177,6 +4207,8 @@ def conv3d_transpose(input,
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
 conv3d_transpose = fluid.layers.conv3d_transpose(input=data, num_filters=2, filter_size=3)
 """
@@ -4659,7 +4691,7 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
 This OP computes the ``logical and`` of tensor elements over the given dimension, and output the result.

 Args:
-input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
+input (Tensor): the input tensor, it's data type should be `bool`.
 dim (list|int|optional): The dimension along which the logical and is computed.
 If :attr:`None`, compute the logical and over all elements of
 :attr:`input` and return a Tensor variable with a single element,
@@ -4672,27 +4704,28 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
 will be named automatically. The default value is None.

 Returns:
-Variable, the output data type is bool. : The reduced tensor variable with ``logical and`` in given dims.
+Tensor, the output data type is bool. : The reduced tensor variable with ``logical and`` in given dims.

 Examples:
 .. code-block:: python

+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 import numpy as np

 # x is a bool Tensor variable with following elements:
 # [[True, False]
 # [True, True]]
-x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
-x = layers.cast(x, 'bool')
+x = paddle.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
+x = paddle.cast(x, 'bool')

-out = layers.reduce_all(x) # False
-out = layers.reduce_all(x, dim=0) # [True, False]
-out = layers.reduce_all(x, dim=-1) # [False, True]
+out = paddle.reduce_all(x) # False
+out = paddle.reduce_all(x, dim=0) # [True, False]
+out = paddle.reduce_all(x, dim=-1) # [False, True]
 # keep_dim=False, x.shape=(2,2), out.shape=(2,)

-out = layers.reduce_all(x, dim=1, keep_dim=True) # [[False], [True]]
+out = paddle.reduce_all(x, dim=1, keep_dim=True) # [[False], [True]]
 # keep_dim=True, x.shape=(2,2), out.shape=(2,1)

 """
@@ -4719,7 +4752,7 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
 This OP computes the ``logical or`` of tensor elements over the given dimension, and output the result.

 Args:
-input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
+input (Tensor): the input tensor, it's data type should be `bool`.
 dim (list|int|optional): The dimension along which the logical and is computed.
 If :attr:`None`, compute the logical and over all elements of
 :attr:`input` and return a Tensor variable with a single element,
@@ -4728,30 +4761,31 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
 keep_dim (bool): Whether to reserve the reduced dimension in the
 output Tensor. The result tensor will have one fewer dimension
 than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
-name(str|None): A name for this layer(optional). If set None, the layer
+name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

 Returns:
-Variable, the output data type is bool. : The reduced tensor variable with ``logical or`` in given dims.
+Tensor, the output data type is bool. : The reduced tensor variable with ``logical or`` in given dims.

 Examples:
 .. code-block:: python

+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 import numpy as np

 # x is a bool Tensor variable with following elements:
 # [[True, False]
 # [False, False]]
-x = layers.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
-x = layers.cast(x, 'bool')
+x = paddle.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
+x = paddle.cast(x, 'bool')

-out = layers.reduce_any(x) # True
-out = layers.reduce_any(x, dim=0) # [True, False]
-out = layers.reduce_any(x, dim=-1) # [True, False]
+out = paddle.reduce_any(x) # True
+out = paddle.reduce_any(x, dim=0) # [True, False]
+out = paddle.reduce_any(x, dim=-1) # [True, False]
 # keep_dim=False, x.shape=(2,2), out.shape=(2,)

-out = layers.reduce_any(x, dim=1,
+out = paddle.reduce_any(x, dim=1,
 keep_dim=True) # [[True], [False]]
 # keep_dim=True, x.shape=(2,2), out.shape=(2,1)
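The rewritten snippets above only build a static graph; here is a hedged sketch of running the reduce_any example end to end. The Program/Executor plumbing is an assumption added for illustration, not part of the diff:

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()
    main = fluid.Program()
    with fluid.program_guard(main):
        # same graph as the reduce_any docstring example above
        x = paddle.cast(paddle.assign(np.array([[1, 0], [0, 0]], dtype='int32')), 'bool')
        out_any = paddle.reduce_any(x)          # scalar: True
        out_dim0 = paddle.reduce_any(x, dim=0)  # [True, False]

    exe = fluid.Executor(fluid.CPUPlace())
    print(exe.run(main, fetch_list=[out_any, out_dim0]))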

@@ -5613,6 +5647,8 @@ def im2sequence(input,
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 data = fluid.data(name='data', shape=[None, 3, 32, 32],
 dtype='float32')
 output = fluid.layers.im2sequence(
@@ -5669,6 +5705,8 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
 Examples:
 >>> # for LodTensor inputs
 >>> import paddle.fluid as fluid
+>>> import paddle
+>>> paddle.enable_static()
 >>> x = fluid.data(name='x', shape=[9, 16],
 >>> dtype='float32', lod_level=1)
 >>> out = fluid.layers.row_conv(input=x, future_context_size=2)
@@ -5982,6 +6020,8 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 global_step = fluid.layers.autoincreased_step_counter(
 counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
 """
@@ -9730,6 +9770,8 @@ def prelu(x, mode, param_attr=None, name=None):
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
 from paddle.fluid.param_attr import ParamAttr
 x = fluid.data(name="x", shape=[None,5,10,10], dtype="float32")
 mode = 'channel'
@@ -14307,6 +14349,9 @@ def deformable_conv(input,
 #deformable conv v2:

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()
+
 C_in, H_in, W_in = 3, 32, 32
 filter_size, deformable_groups = 3, 1
 data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
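For completeness, the reason nearly every docstring sample in this file gained a paddle.enable_static() call: the samples build fluid static-graph programs, and (assuming dynamic mode is the default in 2.0rc) a script must switch modes before calling fluid.data or any fluid layer. A minimal sketch reusing two lines from the conv2d hunk above:

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()  # switch to graph mode before any fluid graph building
    data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
    conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")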

2 comments on commit 97227e6

@paddle-bot-old

Congratulation! Your pull request passed all required CI. You could ask reviewer(s) to approve and merge. 🎉

@paddle-bot-old

Congratulation! Your pull request passed all required CI. You could ask reviewer(s) to approve and merge. 🎉
