Skip to content

Commit

Permalink
[Hackathon 5th No.49][pir] add some method property - Part 2 (PaddleP…
Browse files Browse the repository at this point in the history
  • Loading branch information
gouzil authored and hitywt committed Oct 24, 2023
1 parent ba5b95f commit 7ec93f4
Show file tree
Hide file tree
Showing 4 changed files with 139 additions and 14 deletions.
4 changes: 2 additions & 2 deletions python/paddle/base/dygraph/math_op_patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ def _index_(var):
return int(np.array(var))

@property
def _ndim_(var):
def _ndim(var):
return len(var.shape)

def ndimension(var):
Expand Down Expand Up @@ -183,7 +183,7 @@ def _T_(var):
('astype', astype),
('dim', dim),
('ndimension', ndimension),
('ndim', _ndim_),
('ndim', _ndim),
('size', _size_),
('T', _T_),
# for logical compare
Expand Down
8 changes: 4 additions & 4 deletions python/paddle/base/layers/math_op_patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -355,7 +355,7 @@ def pop(self, *args):

if self.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
raise TypeError(
"Only Variable with VarType.LOD_TENSOR_ARRAY support `append` method, but received type: {}".format(
"Only Variable with VarType.LOD_TENSOR_ARRAY support `pop` method, but received type: {}".format(
self.type
)
)
Expand All @@ -376,7 +376,7 @@ def _neg_(var):
return _scalar_op_(var, -1.0, 0.0)

@property
def _ndim_(self):
def _ndim(self):
"""
Returns the dimension of current Variable
Expand All @@ -393,7 +393,7 @@ def _ndim_(self):
>>> # create a static Variable
>>> x = paddle.static.data(name='x', shape=[3, 2, 1])
>>> # print the dimension of the Variable
>>> print(x.ndim())
>>> print(x.ndim)
3
"""
return len(self.shape)
Expand Down Expand Up @@ -627,7 +627,7 @@ def to_dense(var):
('pop', pop),
('dim', dim),
('ndimension', ndimension),
('ndim', _ndim_),
('ndim', _ndim),
(
'__add__',
_binary_creator_('__add__', 'elementwise_add', False, _scalar_add_),
Expand Down
111 changes: 103 additions & 8 deletions python/paddle/pir/math_op_patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,23 @@
# limitations under the License.


import warnings

from paddle.base.libpaddle import DataType

from . import OpResult

_already_patch_opresult = False

_supported_int_dtype_ = [
DataType.BOOL,
DataType.UINT8,
DataType.INT8,
DataType.INT16,
DataType.INT32,
DataType.INT64,
]


def create_tensor_with_batchsize(ref_var, value, dtype):
assert isinstance(ref_var, OpResult)
Expand Down Expand Up @@ -54,14 +65,93 @@ def safe_get_dtype(var):
raise ValueError("Cannot get data type from var")
return dtype

_supported_int_dtype_ = [
DataType.BOOL,
DataType.UINT8,
DataType.INT8,
DataType.INT16,
DataType.INT32,
DataType.INT64,
]
def place(self):
    """
    Warn that ``place`` is unavailable on ``OpResult``.

    OpResult does not have a 'place' interface in static graph (pir) mode,
    but keeping this attribute greatly simplifies dy2static code paths.
    So we emit a warning here and implicitly return None.
    """
    warnings.warn(
        "OpResult do not have 'place' interface for pir graph mode, try not to use it. None will be returned."
    )

@property
def _ndim(self):
    """
    Number of dimensions (rank) of the current OpResult.

    Returns:
        int: the length of ``self.shape``.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.enable_static()
            >>> # create a static OpResult
            >>> x = paddle.static.data(name='x', shape=[3, 2, 1])
            >>> # print the dimension of the OpResult
            >>> print(x.ndim)
            3
    """
    dims = self.shape
    return len(dims)

def ndimension(self):
    """
    Return the number of dimensions of the current OpResult.

    Returns:
        int: the length of ``self.shape``.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.enable_static()
            >>> # create a static OpResult
            >>> x = paddle.static.data(name='x', shape=[3, 2, 1])
            >>> # print the dimension of the OpResult
            >>> print(x.ndimension())
            3
    """
    dims = self.shape
    return len(dims)

def dim(self):
    """
    Return the number of dimensions of the current OpResult.

    Alias of :meth:`ndimension`, kept for dygraph API parity.

    Returns:
        int: the length of ``self.shape``.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.enable_static()
            >>> # create a static OpResult
            >>> x = paddle.static.data(name='x', shape=[3, 2, 1])
            >>> # print the dimension of the OpResult
            >>> print(x.dim())
            3
    """
    dims = self.shape
    return len(dims)

def _item(self):
    """
    Compatibility shim for the dynamic-graph ``item`` interface: performs
    no computation and simply hands ``self`` back.

    Raises:
        TypeError: if ``self`` has more than one dimension (only 1-D
            OpResults are accepted).
    """
    rank = len(self.shape)
    if rank > 1:
        raise TypeError(
            f"Required input var should be 1-D OpResult, but received {self.shape}"
        )
    return self

def _scalar_div_(var, value):
return paddle.scale(var, 1.0 / value, 0.0)
Expand Down Expand Up @@ -204,6 +294,11 @@ def astype(self, dtype):
import paddle

opresult_methods = [
('place', place),
('item', _item),
('dim', dim),
('ndimension', ndimension),
('ndim', _ndim),
('astype', astype),
(
'__div__',
Expand Down
30 changes: 30 additions & 0 deletions test/legacy_test/test_math_op_patch_pir.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,43 @@

import inspect
import unittest
import warnings

import paddle

paddle.enable_static()


class TestMathOpPatchesPir(unittest.TestCase):
def test_item(self):
    # item() on a 1-D static OpResult is a no-op that returns the OpResult
    # itself; on anything with more than one dimension it must raise
    # TypeError (see _item in paddle/pir/math_op_patch.py).
    with paddle.pir_utils.IrGuard():
        x = paddle.static.data(name='x', shape=[3, 2, 1])
        y = paddle.static.data(
            name='y',
            shape=[
                3,
            ],
        )
        # 1-D input: item() hands back the same OpResult.
        self.assertTrue(y.item() == y)
        # 3-D input: item() rejects multi-dimensional OpResults.
        with self.assertRaises(TypeError):
            x.item()

def test_place(self):
    # `place` is not meaningful in pir static-graph mode; calling it must
    # emit exactly one warning whose message mentions "place".
    with warnings.catch_warnings(record=True) as w:
        # "always" ensures the warning is recorded even if an identical one
        # was already raised earlier in the process.
        warnings.simplefilter("always")
        with paddle.pir_utils.IrGuard():
            x = paddle.static.data(name='x', shape=[3, 2, 1])
            x.place()
        self.assertTrue(len(w) == 1)
        self.assertTrue("place" in str(w[-1].message))

def test_some_dim(self):
    # dim(), ndimension() and the ndim property must all report the rank
    # of the OpResult (3 for shape [3, 2, 1]).
    with paddle.pir_utils.IrGuard():
        x = paddle.static.data(name='x', shape=[3, 2, 1])
        self.assertEqual(x.dim(), 3)
        self.assertEqual(x.ndimension(), 3)
        self.assertEqual(x.ndim, 3)

def test_math_exists(self):
with paddle.pir_utils.IrGuard():
a = paddle.static.data(name='a', shape=[1], dtype='float32')
Expand Down

0 comments on commit 7ec93f4

Please sign in to comment.