diff --git a/python/paddle/base/dygraph/math_op_patch.py b/python/paddle/base/dygraph/math_op_patch.py
index 5972b545f93e23..172f73bf7f531f 100644
--- a/python/paddle/base/dygraph/math_op_patch.py
+++ b/python/paddle/base/dygraph/math_op_patch.py
@@ -150,7 +150,7 @@ def _index_(var):
         return int(np.array(var))
 
     @property
-    def _ndim_(var):
+    def _ndim(var):
         return len(var.shape)
 
     def ndimension(var):
@@ -183,7 +183,7 @@ def _T_(var):
         ('astype', astype),
         ('dim', dim),
         ('ndimension', ndimension),
-        ('ndim', _ndim_),
+        ('ndim', _ndim),
         ('size', _size_),
         ('T', _T_),
         # for logical compare
diff --git a/python/paddle/base/layers/math_op_patch.py b/python/paddle/base/layers/math_op_patch.py
index f2b1ac7c6d04d1..1f070882758b92 100644
--- a/python/paddle/base/layers/math_op_patch.py
+++ b/python/paddle/base/layers/math_op_patch.py
@@ -355,7 +355,7 @@ def pop(self, *args):
         if self.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
             raise TypeError(
-                "Only Variable with VarType.LOD_TENSOR_ARRAY support `append` method, but received type: {}".format(
+                "Only Variable with VarType.LOD_TENSOR_ARRAY support `pop` method, but received type: {}".format(
                     self.type
                 )
             )
 
@@ -376,7 +376,7 @@ def _neg_(var):
         return _scalar_op_(var, -1.0, 0.0)
 
     @property
-    def _ndim_(self):
+    def _ndim(self):
         """
         Returns the dimension of current Variable
 
@@ -393,7 +393,7 @@ def _ndim_(self):
             >>> # create a static Variable
             >>> x = paddle.static.data(name='x', shape=[3, 2, 1])
             >>> # print the dimension of the Variable
-            >>> print(x.ndim())
+            >>> print(x.ndim)
             3
         """
         return len(self.shape)
@@ -627,7 +627,7 @@ def to_dense(var):
         ('pop', pop),
         ('dim', dim),
         ('ndimension', ndimension),
-        ('ndim', _ndim_),
+        ('ndim', _ndim),
         (
             '__add__',
             _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_),
diff --git a/python/paddle/pir/math_op_patch.py b/python/paddle/pir/math_op_patch.py
index 2f52a5f8502c7a..1f9e0058b77523 100644
--- a/python/paddle/pir/math_op_patch.py
+++ b/python/paddle/pir/math_op_patch.py
@@ -13,12 +13,23 @@
 # limitations under the License.
 
+import warnings
+
 from paddle.base.libpaddle import DataType
 
 from . import OpResult
 
 _already_patch_opresult = False
 
+_supported_int_dtype_ = [
+    DataType.BOOL,
+    DataType.UINT8,
+    DataType.INT8,
+    DataType.INT16,
+    DataType.INT32,
+    DataType.INT64,
+]
+
 
 def create_tensor_with_batchsize(ref_var, value, dtype):
     assert isinstance(ref_var, OpResult)
@@ -54,14 +65,93 @@ def safe_get_dtype(var):
             raise ValueError("Cannot get data type from var")
         return dtype
 
-    _supported_int_dtype_ = [
-        DataType.BOOL,
-        DataType.UINT8,
-        DataType.INT8,
-        DataType.INT16,
-        DataType.INT32,
-        DataType.INT64,
-    ]
+    def place(self):
+        """
+        OpResult doesn't have a 'place' interface in static graph mode,
+        but this interface can greatly facilitate dy2static,
+        so we give a warning here and return None.
+        """
+        warnings.warn(
+            "OpResult does not have a 'place' interface in PIR graph mode, try not to use it. None will be returned."
+        )
+
+    @property
+    def _ndim(self):
+        """
+        Returns the dimension of the current OpResult
+
+        Returns:
+            the dimension
+
+        Examples:
+            .. code-block:: python
+
+                >>> import paddle
+
+                >>> paddle.enable_static()
+
+                >>> # create a static OpResult
+                >>> x = paddle.static.data(name='x', shape=[3, 2, 1])
+                >>> # print the dimension of the OpResult
+                >>> print(x.ndim)
+                3
+        """
+        return len(self.shape)
+
+    def ndimension(self):
+        """
+        Returns the dimension of the current OpResult
+
+        Returns:
+            the dimension
+
+        Examples:
+            .. code-block:: python
+
+                >>> import paddle
+
+                >>> paddle.enable_static()
+
+                >>> # create a static OpResult
+                >>> x = paddle.static.data(name='x', shape=[3, 2, 1])
+                >>> # print the dimension of the OpResult
+                >>> print(x.ndimension())
+                3
+        """
+        return len(self.shape)
+
+    def dim(self):
+        """
+        Returns the dimension of the current OpResult
+
+        Returns:
+            the dimension
+
+        Examples:
+            .. code-block:: python
+
+                >>> import paddle
+
+                >>> paddle.enable_static()
+
+                >>> # create a static OpResult
+                >>> x = paddle.static.data(name='x', shape=[3, 2, 1])
+                >>> # print the dimension of the OpResult
+                >>> print(x.dim())
+                3
+        """
+        return len(self.shape)
+
+    def _item(self):
+        """
+        For compatibility with the `item` interface introduced by the dynamic
+        graph, this does nothing but return self, after checking that the
+        OpResult is 1-D.
+        """
+        if len(self.shape) > 1:
+            raise TypeError(
+                f"Required input var should be 1-D OpResult, but received {self.shape}"
+            )
+        return self
 
     def _scalar_div_(var, value):
         return paddle.scale(var, 1.0 / value, 0.0)
@@ -204,6 +294,11 @@ def astype(self, dtype):
     import paddle
 
     opresult_methods = [
+        ('place', place),
+        ('item', _item),
+        ('dim', dim),
+        ('ndimension', ndimension),
+        ('ndim', _ndim),
         ('astype', astype),
         (
             '__div__',
diff --git a/test/legacy_test/test_math_op_patch_pir.py b/test/legacy_test/test_math_op_patch_pir.py
index e9d2ee096d7dd2..1a0254b66df52b 100644
--- a/test/legacy_test/test_math_op_patch_pir.py
+++ b/test/legacy_test/test_math_op_patch_pir.py
@@ -14,6 +14,7 @@
 
 import inspect
 import unittest
+import warnings
 
 import paddle
 
@@ -21,6 +22,35 @@
 
 
 class TestMathOpPatchesPir(unittest.TestCase):
+    def test_item(self):
+        with paddle.pir_utils.IrGuard():
+            x = paddle.static.data(name='x', shape=[3, 2, 1])
+            y = paddle.static.data(
+                name='y',
+                shape=[
+                    3,
+                ],
+            )
+            self.assertTrue(y.item() == y)
+            with self.assertRaises(TypeError):
+                x.item()
+
+    def test_place(self):
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            with paddle.pir_utils.IrGuard():
+                x = paddle.static.data(name='x', shape=[3, 2, 1])
+                x.place()
+            self.assertTrue(len(w) == 1)
+            self.assertTrue("place" in str(w[-1].message))
+
+    def test_some_dim(self):
+        with paddle.pir_utils.IrGuard():
+            x = paddle.static.data(name='x', shape=[3, 2, 1])
+            self.assertEqual(x.dim(), 3)
+            self.assertEqual(x.ndimension(), 3)
+            self.assertEqual(x.ndim, 3)
+
     def test_math_exists(self):
         with paddle.pir_utils.IrGuard():
             a = paddle.static.data(name='a', shape=[1], dtype='float32')
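
A minimal usage sketch of how the newly patched `OpResult` helpers behave once the patch above is applied; it is illustrative only (the variable names and the printed values assume the `paddle.static.data` shapes shown), not part of the patch itself:

.. code-block:: python

    >>> import paddle

    >>> # Enter PIR static graph mode, mirroring the tests above.
    >>> with paddle.pir_utils.IrGuard():
    ...     x = paddle.static.data(name='x', shape=[3, 2, 1])
    ...     y = paddle.static.data(name='y', shape=[3])
    ...     print(x.ndim)             # property, registered from `_ndim`
    ...     print(x.dim())            # method alias for the same value
    ...     print(x.ndimension())     # method alias for the same value
    ...     assert y.item() is y      # 1-D OpResult: `item()` returns itself
    ...     assert x.place() is None  # warns and returns None in PIR mode
    3
    3
    3

Calling `x.item()` here would raise a TypeError because `x` is not 1-D, which is exactly what `test_item` asserts.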