[xdoctest][task 376~381] reformat example code with google style (#58173)

* xdoctest 365-370

* fix
ooooo-create authored Oct 18, 2023
1 parent 7a73d45 commit 9690055
Showing 5 changed files with 11 additions and 26 deletions.
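The reformatting applies Paddle's Google-style docstring convention for examples: each snippet under a `.. code-block:: python` directive is written as interactive `>>>` statements followed by their expected output, and output that depends on the device or random state is fenced with xdoctest SKIP directives instead of being checked. A minimal sketch of the target format, with placeholder values rather than output taken from this commit:

.. code-block:: python

    >>> import paddle
    >>> paddle.seed(1)  # seed the generator so any checked output is reproducible
    >>> # doctest: +SKIP('output will be different')
    >>> print(paddle.rand([2]))
    Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
           [0.10000000, 0.20000000])
    >>> # doctest: -SKIP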
23 changes: 11 additions & 12 deletions python/paddle/distribution/bernoulli.py
@@ -212,6 +212,7 @@ def rsample(self, shape, temperature=1.0):
.. code-block:: python
>>> import paddle
>>> paddle.seed(1)
>>> from paddle.distribution import Bernoulli
>>> rv = Bernoulli(paddle.full((1), 0.3))
@@ -231,28 +232,26 @@ def rsample(self, shape, temperature=1.0):
[100, 2, 2]
>>> # `rsample` has to be followed by a `sigmoid`
>>> # doctest: +SKIP
>>> rv = Bernoulli(0.3)
>>> rsample = rv.rsample([3, ])
>>> rsample_sigmoid = paddle.nn.functional.sigmoid(rsample)
>>> print(rsample, rsample_sigmoid)
Tensor(shape=[3, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
[[-0.88315082],
[-0.62347704],
[-0.31513220]])
Tensor(shape=[3, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
[[0.29252526],
[0.34899110],
[0.42186251]])
>>> print(rsample)
Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
[-1.46112013, -0.01239836, -1.32765460])
>>> print(rsample_sigmoid)
Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.18829606, 0.49690047, 0.20954758])
>>> # The smaller the `temperature`, the distribution of `rsample` closer to `sample`, with `probs` of 0.3.
>>> print(paddle.nn.functional.sigmoid(rv.rsample([1000, ], temperature=1.0)).sum())
>>> # doctest: +SKIP('output will be different')
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
361.06829834)
365.63122559)
>>> # doctest: -SKIP
>>> print(paddle.nn.functional.sigmoid(rv.rsample([1000, ], temperature=0.1)).sum())
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
288.66418457)
320.15057373)
"""
name = self.name + '_rsample'
if not in_dynamic_mode():
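A quick way to confirm the rewritten examples still execute is to run xdoctest over the touched module. The snippet below is a local-verification sketch, not part of this commit; it assumes xdoctest is installed, and Paddle's CI invocation may differ:

>>> import xdoctest
>>> # Collect and run every `>>>` example found in the module's docstrings.
>>> xdoctest.doctest_module('paddle.distribution.bernoulli', command='all')  # doctest: +SKIP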
2 changes: 0 additions & 2 deletions python/paddle/distribution/categorical.py
@@ -64,14 +64,12 @@ class Categorical(distribution.Distribution):
>>> cat = Categorical(x)
>>> cat2 = Categorical(y)
>>> # doctest: +SKIP
>>> paddle.seed(1000) # on CPU device
>>> print(cat.sample([2,3]))
Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
[[0, 1, 5],
[3, 4, 5]])
>>> # doctest: -SKIP
>>> print(cat.entropy())
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
1.77528250)
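This hunk begins partway through the Categorical example, so the tensors `x` and `y` passed to the constructors are defined above the shown context. A sketch of the setup those lines assume (the shapes and the initial seed are inferred, not taken from this diff):

>>> import paddle
>>> from paddle.distribution import Categorical
>>> paddle.seed(100)  # assumed initial seed; the hunk only shows the later reseed to 1000
>>> x = paddle.rand([6])  # unnormalized category weights for `cat`
>>> y = paddle.rand([6])  # unnormalized category weights for `cat2`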
2 changes: 0 additions & 2 deletions python/paddle/hapi/dynamic_flops.py
@@ -85,7 +85,6 @@ def flops(net, input_size, custom_ops=None, print_detail=False):
... [1, 1, 28, 28],
... custom_ops= {nn.LeakyReLU: count_leaky_relu},
... print_detail=True)
>>> # doctest: +SKIP
>>> print(FLOPs)
<class 'paddle.nn.layer.conv.Conv2D'>'s flops has been counted
<class 'paddle.nn.layer.activation.ReLU'>'s flops has been counted
@@ -106,7 +105,6 @@ def flops(net, input_size, custom_ops=None, print_detail=False):
+--------------+-----------------+-----------------+--------+--------+
Total Flops: 347560 Total Params: 61610
347560
>>> # doctest: -SKIP
"""
if isinstance(net, nn.Layer):
# If net is a dy2stat model, net.forward is StaticFunction instance,
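The `count_leaky_relu` hook passed via `custom_ops` is defined earlier in the same docstring and sits outside both hunks. A sketch of such a counter, with the `(layer, inputs, output)` signature inferred from how `paddle.flops` calls custom hooks (treat the exact op count as an assumption):

>>> import paddle
>>> import paddle.nn as nn
>>> def count_leaky_relu(m, x, y):
...     # m: the layer being measured, x: tuple of input tensors, y: output tensor
...     x = x[0]
...     nelements = x.numel()
...     # count one op per activated element
...     m.total_ops += int(nelements)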
2 changes: 0 additions & 2 deletions python/paddle/hapi/model.py
@@ -2399,7 +2399,6 @@ def summary(self, input_size=None, dtype=None):
>>> optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
>>> model.prepare(optim, paddle.nn.CrossEntropyLoss())
>>> params_info = model.summary()
>>> # doctest: +SKIP
>>> print(params_info)
---------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
@@ -2424,7 +2423,6 @@ def summary(self, input_size=None, dtype=None):
Estimated Total Size (MB): 0.35
---------------------------------------------------------------------------
{'total_params': 61610, 'trainable_params': 61610}
>>> # doctest: -SKIP
"""
assert (
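This hunk also starts mid-example: `model` is a `paddle.Model` wrapping a LeNet, built above the shown lines. A sketch of the setup the snippet assumes (the InputSpec shapes are inferred from the 28x28 LeNet summary that follows, not shown in this diff):

>>> import paddle
>>> from paddle.static import InputSpec
>>> input = InputSpec([None, 1, 28, 28], 'float32', 'image')
>>> label = InputSpec([None, 1], 'int64', 'label')
>>> model = paddle.Model(paddle.vision.models.LeNet(), input, label)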
8 changes: 0 additions & 8 deletions python/paddle/hapi/model_summary.py
@@ -78,7 +78,6 @@ def summary(net, input_size=None, dtypes=None, input=None):
>>> lenet = LeNet()
>>> params_info = paddle.summary(lenet, (1, 1, 28, 28))
>>> # doctest: +SKIP
>>> print(params_info)
---------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
@@ -103,7 +102,6 @@ def summary(net, input_size=None, dtypes=None, input=None):
Estimated Total Size (MB): 0.35
---------------------------------------------------------------------------
{'total_params': 61610, 'trainable_params': 61610}
>>> # doctest: -SKIP
>>> # multi input demo
>>> class LeNetMultiInput(LeNet):
... def forward(self, inputs, y):
@@ -119,7 +117,6 @@ def summary(net, input_size=None, dtypes=None, input=None):
>>> params_info = paddle.summary(lenet_multi_input,
... [(1, 1, 28, 28), (1, 400)],
... dtypes=['float32', 'float32'])
>>> # doctest: +SKIP
>>> print(params_info)
---------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
@@ -144,7 +141,6 @@ def summary(net, input_size=None, dtypes=None, input=None):
Estimated Total Size (MB): 0.35
---------------------------------------------------------------------------
{'total_params': 61610, 'trainable_params': 61610}
>>> # doctest: -SKIP
>>> # list input demo
>>> class LeNetListInput(LeNet):
... def forward(self, inputs):
@@ -158,7 +154,6 @@ def summary(net, input_size=None, dtypes=None, input=None):
>>> lenet_list_input = LeNetListInput()
>>> input_data = [paddle.rand([1, 1, 28, 28]), paddle.rand([1, 400])]
>>> params_info = paddle.summary(lenet_list_input, input=input_data)
>>> # doctest: +SKIP
>>> print(params_info)
---------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
@@ -183,7 +178,6 @@ def summary(net, input_size=None, dtypes=None, input=None):
Estimated Total Size (MB): 0.35
---------------------------------------------------------------------------
{'total_params': 61610, 'trainable_params': 61610}
>>> # doctest: -SKIP
>>> # dict input demo
>>> class LeNetDictInput(LeNet):
... def forward(self, inputs):
@@ -198,7 +192,6 @@ def summary(net, input_size=None, dtypes=None, input=None):
>>> input_data = {'x1': paddle.rand([1, 1, 28, 28]),
... 'x2': paddle.rand([1, 400])}
>>> params_info = paddle.summary(lenet_dict_input, input=input_data)
>>> # doctest: +SKIP
>>> print(params_info)
---------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
@@ -223,7 +216,6 @@ def summary(net, input_size=None, dtypes=None, input=None):
Estimated Total Size (MB): 0.35
---------------------------------------------------------------------------
{'total_params': 61610, 'trainable_params': 61610}
>>> # doctest: -SKIP
"""
if input_size is None and input is None:
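All of the `paddle.summary` variants above subclass a `LeNet` layer that is defined earlier in the docstring, outside every hunk in this diff. For a quick local reproduction of the single-input case one could substitute the built-in model; this is an illustrative shortcut, not what the docstring itself does, and the parameter count echoes the figure reported in the output above:

>>> import paddle
>>> from paddle.vision.models import LeNet
>>> lenet = LeNet()
>>> # Same call as the first example above; the summary table formatting is
>>> # device dependent, so the output checks are skipped.
>>> params_info = paddle.summary(lenet, (1, 1, 28, 28))  # doctest: +SKIP
>>> print(params_info['total_params'])  # doctest: +SKIP
61610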
