
Commit

fix: updated code examples.
PommesPeter committed Aug 9, 2023
1 parent 0399b39 commit 7f43f82
Showing 3 changed files with 124 additions and 129 deletions.
65 changes: 32 additions & 33 deletions python/paddle/optimizer/adadelta.py
@@ -70,39 +70,38 @@ class Adadelta(Optimizer):
Examples:
.. code-block:: python
import paddle
inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
linear = paddle.nn.Linear(10, 10)
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
adadelta = paddle.optimizer.Adadelta(learning_rate=0.1, parameters=linear.parameters(), weight_decay=0.01)
back = out.backward()
adadelta.step()
adadelta.clear_grad()
#Note that the learning_rate of linear_2 is 0.01.
linear_1 = paddle.nn.Linear(10, 10)
linear_2 = paddle.nn.Linear(10, 10)
inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
out = linear_1(inp)
out = linear_2(out)
loss = paddle.mean(out)
adadelta = paddle.optimizer.Adadelta(
learning_rate=0.1,
parameters=[{
'params': linear_1.parameters()
}, {
'params': linear_2.parameters(),
'weight_decay': 0.001,
'learning_rate': 0.1,
}],
weight_decay=0.01)
out.backward()
adadelta.step()
adadelta.clear_grad()
>>> import paddle
>>> paddle.seed(2023)
>>> inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
>>> linear = paddle.nn.Linear(10, 10)
>>> out = linear(inp)
>>> loss = paddle.mean(out)
>>> beta1 = paddle.to_tensor([0.9], dtype="float32")
>>> beta2 = paddle.to_tensor([0.99], dtype="float32")
>>> adadelta = paddle.optimizer.Adadelta(learning_rate=0.1, parameters=linear.parameters(), weight_decay=0.01)
>>> back = out.backward()
>>> adadelta.step()
>>> adadelta.clear_grad()
>>> # Note that the learning_rate of linear_2 is 0.01.
>>> linear_1 = paddle.nn.Linear(10, 10)
>>> linear_2 = paddle.nn.Linear(10, 10)
>>> inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
>>> out = linear_1(inp)
>>> out = linear_2(out)
>>> loss = paddle.mean(out)
>>> adadelta = paddle.optimizer.Adadelta(
... learning_rate=0.1,
... parameters=[{
... 'params': linear_1.parameters()
... }, {
... 'params': linear_2.parameters(),
... 'weight_decay': 0.001,
... 'learning_rate': 0.1,
... }],
... weight_decay=0.01)
>>> out.backward()
>>> adadelta.step()
>>> adadelta.clear_grad()
"""

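The rewritten examples use the interactive >>> / ... prompt style, with paddle.seed(2023) presumably added so that any printed values are reproducible. Below is a minimal sketch of checking one such snippet with Python's standard doctest module — assuming Paddle is installed, and noting that the project's own documentation checker may treat unmatched output more leniently than stock doctest does:

import doctest

def adadelta_example():
    """
    >>> import paddle
    >>> linear = paddle.nn.Linear(10, 10)
    >>> inp = paddle.uniform([10, 10], dtype="float32", min=-0.1, max=0.1)
    >>> out = linear(inp)
    >>> loss = paddle.mean(out)
    >>> adadelta = paddle.optimizer.Adadelta(learning_rate=0.1,
    ...                                      parameters=linear.parameters(),
    ...                                      weight_decay=0.01)
    >>> loss.backward()
    >>> adadelta.step()
    >>> adadelta.clear_grad()
    """

# Silent when every statement behaves as the docstring expects; any mismatch
# (or an exception, e.g. Paddle not being installed) is reported per example.
doctest.run_docstring_examples(adadelta_example, globals(), verbose=False)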
63 changes: 31 additions & 32 deletions python/paddle/optimizer/adagrad.py
@@ -70,38 +70,37 @@ class Adagrad(Optimizer):
Examples:
.. code-block:: python
import paddle
inp = paddle.rand(shape=[10, 10])
linear = paddle.nn.Linear(10, 10)
out = linear(inp)
loss = paddle.mean(out)
adagrad = paddle.optimizer.Adagrad(learning_rate=0.1,
parameters=linear.parameters())
out.backward()
adagrad.step()
adagrad.clear_grad()
#Note that the learning_rate of linear_2 is 0.01.
linear_1 = paddle.nn.Linear(10, 10)
linear_2 = paddle.nn.Linear(10, 10)
inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
out = linear_1(inp)
out = linear_2(out)
loss = paddle.mean(out)
adagrad = paddle.optimizer.Adagrad(
learning_rate=0.1,
parameters=[{
'params': linear_1.parameters()
}, {
'params': linear_2.parameters(),
'weight_decay': 0.001,
'learning_rate': 0.1,
}],
weight_decay=0.01)
out.backward()
adagrad.step()
adagrad.clear_grad()
>>> import paddle
>>> paddle.seed(2023)
>>> inp = paddle.rand(shape=[10, 10])
>>> linear = paddle.nn.Linear(10, 10)
>>> out = linear(inp)
>>> loss = paddle.mean(out)
>>> adagrad = paddle.optimizer.Adagrad(learning_rate=0.1,
... parameters=linear.parameters())
>>> out.backward()
>>> adagrad.step()
>>> adagrad.clear_grad()
>>> # Note that the learning_rate of linear_2 is 0.01.
>>> linear_1 = paddle.nn.Linear(10, 10)
>>> linear_2 = paddle.nn.Linear(10, 10)
>>> inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
>>> out = linear_1(inp)
>>> out = linear_2(out)
>>> loss = paddle.mean(out)
>>> adagrad = paddle.optimizer.Adagrad(
... learning_rate=0.1,
... parameters=[{
... 'params': linear_1.parameters()
... }, {
... 'params': linear_2.parameters(),
... 'weight_decay': 0.001,
... 'learning_rate': 0.1,
... }],
... weight_decay=0.01)
>>> out.backward()
>>> adagrad.step()
>>> adagrad.clear_grad()
"""
_moment_acc_str = "moment"
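Both updated examples also show per-parameter groups. The "# Note that the learning_rate of linear_2 is 0.01." comment only adds up if the group-level 'learning_rate' entry scales the optimizer-level rate (0.1 x 0.1 = 0.01); that reading is an assumption drawn from the example rather than something this diff states. A minimal sketch of the same grouping pattern:

import paddle

paddle.seed(2023)
linear_1 = paddle.nn.Linear(10, 10)
linear_2 = paddle.nn.Linear(10, 10)

global_lr = 0.1
param_groups = [
    {'params': linear_1.parameters()},      # inherits the optimizer-level settings
    {'params': linear_2.parameters(),
     'weight_decay': 0.001,
     'learning_rate': 0.1},                 # assumed to act as a scale on global_lr
]
adagrad = paddle.optimizer.Adagrad(learning_rate=global_lr,
                                   parameters=param_groups,
                                   weight_decay=0.01)

# Effective rate for the second group under the assumption above:
print(global_lr * param_groups[1]['learning_rate'])   # ~0.01, matching the docstring note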
125 changes: 61 additions & 64 deletions python/paddle/optimizer/adam.py
@@ -98,63 +98,60 @@ class Adam(Optimizer):
.. code-block:: python
:name: code-example1
import paddle
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand([10,10], dtype="float32")
out = linear(inp)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=0.1,
parameters=linear.parameters())
loss.backward()
adam.step()
adam.clear_grad()
>>> import paddle
>>> paddle.seed(2023)
>>> linear = paddle.nn.Linear(10, 10)
>>> inp = paddle.rand([10,10], dtype="float32")
>>> out = linear(inp)
>>> loss = paddle.mean(out)
>>> adam = paddle.optimizer.Adam(learning_rate=0.1,
... parameters=linear.parameters())
>>> loss.backward()
>>> adam.step()
>>> adam.clear_grad()
.. code-block:: python
:name: code-example2
# Adam with beta1/beta2 as Tensor and weight_decay as float
import paddle
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand([10,10], dtype="float32")
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
adam = paddle.optimizer.Adam(learning_rate=0.1,
parameters=linear.parameters(),
beta1=beta1,
beta2=beta2,
weight_decay=0.01)
loss.backward()
adam.step()
adam.clear_grad()
#Note that the learning_rate of linear_2 is 0.01.
linear_1 = paddle.nn.Linear(10, 10)
linear_2 = paddle.nn.Linear(10, 10)
inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
out = linear_1(inp)
out = linear_2(out)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(
learning_rate=0.1,
parameters=[{
'params': linear_1.parameters()
}, {
'params': linear_2.parameters(),
'weight_decay': 0.001,
'learning_rate': 0.1,
'beta1': 0.8
}],
weight_decay=0.01,
beta1=0.9)
loss.backward()
adam.step()
adam.clear_grad()
>>> # Adam with beta1/beta2 as Tensor and weight_decay as float
>>> import paddle
>>> paddle.seed(2023)
>>> linear = paddle.nn.Linear(10, 10)
>>> inp = paddle.rand([10,10], dtype="float32")
>>> out = linear(inp)
>>> loss = paddle.mean(out)
>>> beta1 = paddle.to_tensor([0.9], dtype="float32")
>>> beta2 = paddle.to_tensor([0.99], dtype="float32")
>>> adam = paddle.optimizer.Adam(learning_rate=0.1,
... parameters=linear.parameters(),
... beta1=beta1,
... beta2=beta2,
... weight_decay=0.01)
>>> loss.backward()
>>> adam.step()
>>> adam.clear_grad()
>>> # Note that the learning_rate of linear_2 is 0.01.
>>> linear_1 = paddle.nn.Linear(10, 10)
>>> linear_2 = paddle.nn.Linear(10, 10)
>>> inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
>>> out = linear_1(inp)
>>> out = linear_2(out)
>>> loss = paddle.mean(out)
>>> adam = paddle.optimizer.Adam(
... learning_rate=0.1,
... parameters=[{
... 'params': linear_1.parameters()
... }, {
... 'params': linear_2.parameters(),
... 'weight_decay': 0.001,
... 'learning_rate': 0.1,
... 'beta1': 0.8
... }],
... weight_decay=0.01,
... beta1=0.9)
>>> loss.backward()
>>> adam.step()
>>> adam.clear_grad()
"""
_moment1_acc_str = "moment1"
@@ -409,17 +406,17 @@ def step(self):
Examples:
.. code-block:: python
import paddle
a = paddle.rand([2,13], dtype="float32")
linear = paddle.nn.Linear(13, 5)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Adam(learning_rate = 0.01,
parameters = linear.parameters())
out = linear(a)
out.backward()
adam.step()
adam.clear_grad()
>>> import paddle
>>> a = paddle.rand([2,13], dtype="float32")
>>> linear = paddle.nn.Linear(13, 5)
>>> # This can be any optimizer supported by dygraph.
>>> adam = paddle.optimizer.Adam(learning_rate = 0.01,
... parameters = linear.parameters())
>>> out = linear(a)
>>> out.backward()
>>> adam.step()
>>> adam.clear_grad()
"""
if paddle.fluid.dygraph.base.in_declarative_mode():
self._declarative_step()
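The step() example above performs a single update. A sketch of the usual loop pairing step() with clear_grad() each iteration — the data, objective and iteration count here are made up for illustration:

import paddle

paddle.seed(2023)
x = paddle.rand([32, 13], dtype="float32")
y = paddle.rand([32, 5], dtype="float32")
linear = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=linear.parameters())

for i in range(5):
    pred = linear(x)
    loss = paddle.mean((pred - y) ** 2)   # stand-in MSE objective
    loss.backward()                       # accumulate gradients
    adam.step()                           # apply one Adam update
    adam.clear_grad()                     # reset gradients before the next pass
    print(i, float(loss))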
