[xdoctest] reformat example code with google style in No.21-30 (#55849)
* [Doctest]fix No.21, test=docs_preview

* Revert "[Doctest]fix No.21, test=docs_preview"

This reverts commit 76bcdb2.

* [Doctest]fix No.21, test=docs_preview

* fix bugs,test=docs_preview

* [Doctest]fix No.22-24,26,27, test=docs_preview

* update fix

* with pre-commit, test=docs_preview

* fix seed, test=docs_preview

* fix error, test=docs_preview

* fix seed, test=docs_preview

* fix seed, test=docs_preview
ooooo-create authored Aug 2, 2023
1 parent 5e96126 commit 5d26d79
Showing 10 changed files with 554 additions and 523 deletions.
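Across all ten files the change follows the same pattern: bare example code under a `.. code-block:: python` directive becomes doctest-style examples with `>>>` / `...` prompts and literal expected output, the Google-style format that xdoctest can collect and execute. Below is a minimal sketch of that target format, using a hypothetical `scale` helper rather than any function touched in this commit (the printed output is what I would expect, not taken from the diff):

import paddle

def scale(x, factor=2.0):
    """Multiply a tensor by a scalar factor.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([1.0, 2.0, 3.0])
            >>> y = scale(x, factor=2.0)
            >>> print(y.numpy())
            [2. 4. 6.]
    """
    # Plain Python multiplication broadcasts the scalar over the tensor.
    return x * factor

Because the output lines are matched literally, this is presumably also why several of the follow-up commits above pin random seeds ("fix seed, test=docs_preview"): examples that print tensors must be deterministic.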
383 changes: 194 additions & 189 deletions python/paddle/autograd/py_layer.py

Large diffs are not rendered by default.

104 changes: 52 additions & 52 deletions python/paddle/autograd/saved_tensors_hooks.py
@@ -45,58 +45,58 @@ class saved_tensors_hooks:
Examples:
.. code-block:: python
# Example1
import paddle
def pack_hook(x):
    print("Packing", x)
    return x.numpy()
def unpack_hook(x):
    print("UnPacking", x)
    return paddle.to_tensor(x)
a = paddle.ones([3,3])
b = paddle.ones([3,3]) * 2
a.stop_gradient = False
b.stop_gradient = False
with paddle.autograd.saved_tensors_hooks(pack_hook, unpack_hook):
    y = paddle.multiply(a, b)
y.sum().backward()
# Example2
import paddle
from paddle.autograd import PyLayer
class cus_multiply(PyLayer):
    @staticmethod
    def forward(ctx, a, b):
        y = paddle.multiply(a, b)
        ctx.save_for_backward(a, b)
        return y
    @staticmethod
    def backward(ctx, dy):
        a,b = ctx.saved_tensor()
        grad_a = dy * a
        grad_b = dy * b
        return grad_a, grad_b
def pack_hook(x):
    print("Packing", x)
    return x.numpy()
def unpack_hook(x):
    print("UnPacking", x)
    return paddle.to_tensor(x)
a = paddle.ones([3,3])
b = paddle.ones([3,3]) * 2
a.stop_gradient = False
b.stop_gradient = False
with paddle.autograd.saved_tensors_hooks(pack_hook, unpack_hook):
    y = cus_multiply.apply(a, b)
y.sum().backward()
>>> # Example1
>>> import paddle
>>> def pack_hook(x):
...     print("Packing", x)
...     return x.numpy()
>>> def unpack_hook(x):
...     print("UnPacking", x)
...     return paddle.to_tensor(x)
>>> a = paddle.ones([3,3])
>>> b = paddle.ones([3,3]) * 2
>>> a.stop_gradient = False
>>> b.stop_gradient = False
>>> with paddle.autograd.saved_tensors_hooks(pack_hook, unpack_hook):
...     y = paddle.multiply(a, b)
>>> y.sum().backward()
>>> # Example2
>>> import paddle
>>> from paddle.autograd import PyLayer
>>> class cus_multiply(PyLayer):
...     @staticmethod
...     def forward(ctx, a, b):
...         y = paddle.multiply(a, b)
...         ctx.save_for_backward(a, b)
...         return y
...
...     @staticmethod
...     def backward(ctx, dy):
...         a,b = ctx.saved_tensor()
...         grad_a = dy * a
...         grad_b = dy * b
...         return grad_a, grad_b
>>> def pack_hook(x):
...     print("Packing", x)
...     return x.numpy()
>>> def unpack_hook(x):
...     print("UnPacking", x)
...     return paddle.to_tensor(x)
>>> a = paddle.ones([3,3])
>>> b = paddle.ones([3,3]) * 2
>>> a.stop_gradient = False
>>> b.stop_gradient = False
>>> with paddle.autograd.saved_tensors_hooks(pack_hook, unpack_hook):
...     y = cus_multiply.apply(a, b)
>>> y.sum().backward()
"""

def __init__(self, pack_hook, unpack_hook):
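To check locally that a reformatted example such as the one above still executes, xdoctest can be pointed at a single file. A sketch, assuming xdoctest is installed; `doctest_module` is its documented entry point, though argument handling may differ slightly between versions:

# Collect and run every ">>>"-style example found in the modified module.
import xdoctest

xdoctest.doctest_module(
    "python/paddle/autograd/saved_tensors_hooks.py", command="all"
)

The command-line equivalent would be something like `python -m xdoctest <path> all`.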
52 changes: 32 additions & 20 deletions python/paddle/framework/dtype.py
@@ -58,15 +58,19 @@ def iinfo(dtype):
Examples:
.. code-block:: python
import paddle
iinfo_uint8 = paddle.iinfo(paddle.uint8)
print(iinfo_uint8)
# paddle.iinfo(min=0, max=255, bits=8, dtype=uint8)
print(iinfo_uint8.min) # 0
print(iinfo_uint8.max) # 255
print(iinfo_uint8.bits) # 8
print(iinfo_uint8.dtype) # uint8
>>> import paddle
>>> iinfo_uint8 = paddle.iinfo(paddle.uint8)
>>> print(iinfo_uint8)
paddle.iinfo(min=0, max=255, bits=8, dtype=uint8)
>>> print(iinfo_uint8.min)
0
>>> print(iinfo_uint8.max)
255
>>> print(iinfo_uint8.bits)
8
>>> print(iinfo_uint8.dtype)
uint8
"""
return core_iinfo(dtype)
@@ -98,17 +102,25 @@ def finfo(dtype):
Examples:
.. code-block:: python
import paddle
finfo_float32 = paddle.finfo(paddle.float32)
print(finfo_float32.min) # -3.40282e+38
print(finfo_float32.max) # 3.40282e+38
print(finfo_float32.eps) # 1.19209e-07
print(finfo_float32.resolution) # 1e-06
print(finfo_float32.smallest_normal) # 1.17549e-38
print(finfo_float32.tiny) # 1.17549e-38
print(finfo_float32.bits) # 32
print(finfo_float32.dtype) # float32
>>> import paddle
>>> finfo_float32 = paddle.finfo(paddle.float32)
>>> print(finfo_float32.min)
-3.4028234663852886e+38
>>> print(finfo_float32.max)
3.4028234663852886e+38
>>> print(finfo_float32.eps)
1.1920928955078125e-07
>>> print(finfo_float32.resolution)
1e-06
>>> print(finfo_float32.smallest_normal)
1.1754943508222875e-38
>>> print(finfo_float32.tiny)
1.1754943508222875e-38
>>> print(finfo_float32.bits)
32
>>> print(finfo_float32.dtype)
float32
"""
return core_finfo(dtype)
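The longer literals in the new `finfo` example (for instance `-3.4028234663852886e+38` where the old comment said `-3.40282e+38`) are the same float32 limits printed at full double precision, as literal output matching requires. The sketch below reproduces them from numpy (assumes numpy is installed; the values in the comments are the standard IEEE float32 limits):

import numpy as np

# float32 limits, printed as Python floats at full precision.
info = np.finfo(np.float32)
print(float(info.min))   # -3.4028234663852886e+38
print(float(info.max))   # 3.4028234663852886e+38
print(float(info.eps))   # 1.1920928955078125e-07
print(float(info.tiny))  # 1.1754943508222875e-38  (smallest normal)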
8 changes: 4 additions & 4 deletions python/paddle/framework/framework.py
Expand Up @@ -35,8 +35,8 @@ def set_default_dtype(d):
Examples:
.. code-block:: python
import paddle
paddle.set_default_dtype("float32")
>>> import paddle
>>> paddle.set_default_dtype("float32")
"""
if isinstance(d, type):
@@ -76,7 +76,7 @@ def get_default_dtype():
Examples:
.. code-block:: python
import paddle
paddle.get_default_dtype()
>>> import paddle
>>> paddle.get_default_dtype()
"""
return LayerHelperBase.get_default_dtype()
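As a quick illustration of the two functions in this last file, the default dtype is set and read back as a string and determines the dtype of tensors created without an explicit dtype. A small sketch; the values in the comments are what I would expect from the documented behaviour, not verified against this commit:

import paddle

paddle.set_default_dtype("float64")
print(paddle.get_default_dtype())   # float64

# Tensors created without an explicit dtype pick up the default.
x = paddle.ones([2, 2])
print(x.dtype)                       # paddle.float64

# Restore the framework default so later code is unaffected.
paddle.set_default_dtype("float32")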
