Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[xdoctest][task cleanup 1-8] reformat example code with google style #57670

Merged
merged 4 commits into from
Sep 25, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
183 changes: 111 additions & 72 deletions paddle/fluid/pybind/eager_properties.cc
Original file line number Diff line number Diff line change
Expand Up @@ -51,12 +51,14 @@ Tensor's name.
Examples:
.. code-block:: python

import paddle

x = paddle.to_tensor(1.)
print(x.name) # generated_tensor_0
x.name = 'test_tensor_name'
print(x.name) # test_tensor_name
>>> import paddle

>>> x = paddle.to_tensor(1.)
>>> print(x.name)
generated_tensor_0
>>> x.name = 'test_tensor_name'
>>> print(x.name)
test_tensor_name
)DOC");

PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
Expand Down Expand Up @@ -84,10 +86,11 @@ Tensor's type.
Examples:
.. code-block:: python

import paddle
>>> import paddle

x = paddle.to_tensor(1.)
print(x.type) # VarType.LOD_TENSOR
>>> x = paddle.to_tensor(1.)
>>> print(x.type)
VarType.LOD_TENSOR
)DOC");

PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
Expand Down Expand Up @@ -123,20 +126,27 @@ For the Tensor whose stop_gradient is ``False`` , it will be leaf Tensor too if
Examples:
.. code-block:: python

import paddle
>>> import paddle

>>> x = paddle.to_tensor(1.)
>>> print(x.is_leaf)
True

>>> x = paddle.to_tensor(1., stop_gradient=True)
>>> y = x + 1
>>> print(x.is_leaf)
True

x = paddle.to_tensor(1.)
print(x.is_leaf) # True
>>> print(y.is_leaf)
True

x = paddle.to_tensor(1., stop_gradient=True)
y = x + 1
print(x.is_leaf) # True
print(y.is_leaf) # True
>>> x = paddle.to_tensor(1., stop_gradient=False)
>>> y = x + 1
>>> print(x.is_leaf)
True

x = paddle.to_tensor(1., stop_gradient=False)
y = x + 1
print(x.is_leaf) # True
print(y.is_leaf) # False
>>> print(y.is_leaf)
False
)DOC");

PyObject* tensor_properties_is_leaf(TensorObject* self, void* closure) {
Expand Down Expand Up @@ -165,12 +175,15 @@ Tensor's stop_gradient.
Examples:
.. code-block:: python

import paddle
>>> import paddle

x = paddle.to_tensor(1.)
print(x.stop_gradient) # True
x.stop_gradient = False
print(x.stop_gradient) # False
>>> x = paddle.to_tensor(1.)
>>> print(x.stop_gradient)
True

>>> x.stop_gradient = False
>>> print(x.stop_gradient)
False
)DOC");

PyObject* tensor_properties_get_stop_gradient(TensorObject* self,
Expand All @@ -192,14 +205,25 @@ Tensor's self.
Examples:
.. code-block:: python

import paddle
>>> import paddle

>>> x = paddle.to_tensor(1.)
>>> print(x)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
1.)

x = paddle.to_tensor(1.)
print(x)
print(x.data)
x.data = paddle.to_tensor(2.)
print(x)
print(x.data)
>>> print(x.data)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
1.)

>>> x.data = paddle.to_tensor(2.)
>>> print(x)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
2.)

>>> print(x.data)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
2.)
)DOC");
PyObject* tensor_properties_get_data(TensorObject* self, void* closure) {
EAGER_TRY
Expand Down Expand Up @@ -234,14 +258,19 @@ Tensor's grad Tensor.
Examples:
.. code-block:: python

import paddle
>>> import paddle

x = paddle.to_tensor(1.0, stop_gradient=False)
y = x**2
y.backward()
print(x.grad)
x.grad = paddle.to_tensor(3.0)
print(x.grad)
>>> x = paddle.to_tensor(1.0, stop_gradient=False)
>>> y = x**2
>>> y.backward()
>>> print(x.grad)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
2.)

>>> x.grad = paddle.to_tensor(3.0)
>>> print(x.grad)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
3.)
)DOC");
PyObject* tensor_properties_get_grad(TensorObject* self, void* closure) {
EAGER_TRY
Expand Down Expand Up @@ -320,12 +349,15 @@ Tensor's persistable.
Examples:
.. code-block:: python

import paddle
>>> import paddle

>>> x = paddle.to_tensor(1.0, stop_gradient=False)
>>> print(x.persistable)
False

x = paddle.to_tensor(1.0, stop_gradient=False)
print(x.persistable) # False
x. persistable = True
print(x.persistable) # True
>>> x.persistable = True
>>> print(x.persistable)
True
)DOC");

PyObject* tensor_properties_get_persistable(TensorObject* self, void* closure) {
Expand Down Expand Up @@ -356,17 +388,18 @@ Get dist_attr property from shard tensor.
Examples:
.. code-block:: python

import paddle
import paddle.distributed as dist
>>> # doctest: +REQUIRES(env:DISTRIBUTED)
>>> import paddle
>>> import paddle.distributed as dist

mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"])
dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y'])
>>> mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"])
>>> dist_attr = dist.DistAttr(mesh=mesh, sharding_specs=['x', 'y'])

a = paddle.to_tensor([[1,2,3],
[5,6,7]])
d_tensor = dist.shard_tensor(a, dist_attr=dist_attr)
>>> a = paddle.to_tensor([[1,2,3],
... [5,6,7]])
>>> d_tensor = dist.shard_tensor(a, dist_attr=dist_attr)

print(d_tensor.dist_attr)
>>> print(d_tensor.dist_attr)

)DOC");

Expand Down Expand Up @@ -421,10 +454,11 @@ Tensor's shape.
Examples:
.. code-block:: python

import paddle
>>> import paddle

x = paddle.to_tensor(1.0, stop_gradient=False)
print(x.shape)
>>> x = paddle.to_tensor(1.0, stop_gradient=False)
>>> print(x.shape)
[]
)DOC");

PyObject* tensor_properties_get_shape(TensorObject* self, void* closure) {
Expand Down Expand Up @@ -507,11 +541,12 @@ Tensor's strides.
Examples:
.. code-block:: python

import paddle
>>> import paddle

x = paddle.to_tensor([1, 2, 3])
y = x[1]
print(y.strides)
>>> x = paddle.to_tensor([1, 2, 3])
>>> y = x[1]
>>> print(y.strides)
[]
)DOC");

PyObject* tensor_properties_get_strides(TensorObject* self, void* closure) {
Expand Down Expand Up @@ -544,11 +579,12 @@ The address of the first element relative to the offset of the video memory.
Examples:
.. code-block:: python

import paddle
>>> import paddle

x = paddle.to_tensor([1, 2, 3])
y = x[1]
print(y.offset)
>>> x = paddle.to_tensor([1, 2, 3])
>>> y = x[1]
>>> print(y.offset)
8
)DOC");
PyObject* tensor_properties_get_offset(TensorObject* self, void* closure) {
EAGER_TRY
Expand Down Expand Up @@ -579,10 +615,11 @@ Tensor's memory layout.
Examples:
.. code-block:: python

import paddle
>>> import paddle

x = paddle.to_tensor([1, 2, 3])
print(x.layout)
>>> x = paddle.to_tensor([1, 2, 3])
>>> print(x.layout)
NCHW
)DOC");
PyObject* tensor_properties_get_layout(TensorObject* self, void* closure) {
EAGER_TRY
Expand Down Expand Up @@ -613,10 +650,11 @@ The device Tensor's memory locate.
Examples:
.. code-block:: python

import paddle
>>> import paddle

x = paddle.to_tensor([1, 2, 3])
print(x.place)
>>> x = paddle.to_tensor([1, 2, 3])
>>> print(x.place)
Place(cpu)
)DOC");
PyObject* tensor_properties_get_place(TensorObject* self, void* closure) {
EAGER_TRY
Expand All @@ -643,10 +681,11 @@ Tensor's data type.
Examples:
.. code-block:: python

import paddle
>>> import paddle

x = paddle.to_tensor([1, 2, 3])
print(x.dtype)
>>> x = paddle.to_tensor([1, 2, 3])
>>> print(x.dtype)
paddle.int64
)DOC");
PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) {
EAGER_TRY
Expand Down
9 changes: 4 additions & 5 deletions python/paddle/base/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,9 +145,9 @@ def set_fea_eval(self, record_candidate_size, fea_eval=True):
Examples:
.. code-block:: python

import paddle.base as base
dataset = base.DatasetFactory().create_dataset("InMemoryDataset")
dataset.set_fea_eval(1000000, True)
>>> import paddle.base as base
>>> dataset = base.DatasetFactory().create_dataset("InMemoryDataset")
>>> dataset.set_fea_eval(1000000, True)

"""
if fea_eval:
Expand Down Expand Up @@ -1089,7 +1089,6 @@ def set_graph_config(self, config):
Examples:
.. code-block:: python

>>> # doctest: +SKIP
>>> import paddle.base as base
>>> from paddle.incubate.distributed.fleet.parameter_server.pslib import fleet
>>> dataset = base.DatasetFactory().create_dataset("InMemoryDataset")
Expand Down Expand Up @@ -1441,7 +1440,7 @@ def slots_shuffle(self, slots):
.. code-block:: python

>>> import paddle.base as base
>>> dataset = base.DatasetFactory().create_dataset("InMemoryDataset")
>>> dataset = base.DatasetFactory().create_dataset("BoxPSDataset")
>>> dataset.set_merge_by_lineid()
>>> #suppose there is a slot 0
>>> dataset.slots_shuffle(['0'])
Expand Down
Loading