From 838e6dc2692c0c2fbd414d4192df658390166f5e Mon Sep 17 00:00:00 2001
From: Android zhang <53324261+zade23@users.noreply.github.com>
Date: Mon, 25 Sep 2023 10:44:55 +0800
Subject: [PATCH] [Docathon] Fix NO.8-NO.11 API label (#57614)

---
 python/paddle/base/layers/math_op_patch.py        | 2 +-
 python/paddle/incubate/optimizer/lars_momentum.py | 4 ++--
 python/paddle/incubate/optimizer/lbfgs.py         | 4 ++--
 python/paddle/nn/clip.py                          | 8 ++++----
 python/paddle/optimizer/adadelta.py               | 4 ++--
 python/paddle/optimizer/adam.py                   | 4 ++--
 python/paddle/optimizer/adamax.py                 | 4 ++--
 python/paddle/optimizer/adamw.py                  | 4 ++--
 python/paddle/optimizer/lbfgs.py                  | 4 ++--
 python/paddle/optimizer/momentum.py               | 4 ++--
 python/paddle/optimizer/optimizer.py              | 4 ++--
 python/paddle/optimizer/rmsprop.py                | 4 ++--
 python/paddle/optimizer/sgd.py                    | 4 ++--
 13 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/python/paddle/base/layers/math_op_patch.py b/python/paddle/base/layers/math_op_patch.py
index cba6b9a3b55de..ba327411264ea 100644
--- a/python/paddle/base/layers/math_op_patch.py
+++ b/python/paddle/base/layers/math_op_patch.py
@@ -241,7 +241,7 @@ def place(self):
     def astype(self, dtype):
         """
         **Notes**:
-            **The variable must be a** :ref:`api_base_Tensor`
+            **The variable must be a** :ref:`api_paddle_Tensor`

         Cast a variable to a specified data type.

diff --git a/python/paddle/incubate/optimizer/lars_momentum.py b/python/paddle/incubate/optimizer/lars_momentum.py
index 1c6ecc263e6f6..088721b929ee7 100644
--- a/python/paddle/incubate/optimizer/lars_momentum.py
+++ b/python/paddle/incubate/optimizer/lars_momentum.py
@@ -50,8 +50,8 @@ class LarsMomentumOptimizer(Optimizer):
             Default None, meaning there is no regularization.
         grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of
             some derived class of ``GradientClipBase`` . There are three cliping strategies
-            ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` ,
-            :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
+            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` ,
+            :ref:`api_paddle_nn_ClipGradByValue` ). Default None, meaning there is no gradient clipping.
         name (str, optional): This parameter is used by developers to print debugging information. \
             For details, please refer to :ref:`api_guide_Name`. Default is None.
         exclude_from_weight_decay (list[str], optional): Name string of layers which will be exclude from lars weight decay. Default is None.
diff --git a/python/paddle/incubate/optimizer/lbfgs.py b/python/paddle/incubate/optimizer/lbfgs.py
index 137b8eb7ccbdc..1e0d959f5ecc5 100644
--- a/python/paddle/incubate/optimizer/lbfgs.py
+++ b/python/paddle/incubate/optimizer/lbfgs.py
@@ -64,8 +64,8 @@ class LBFGS(Optimizer):
             Default None, meaning there is no regularization.
         grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of \
             some derived class of ``GradientClipBase`` . There are three cliping strategies \
-            ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , \
-            :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
+            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` , \
+            :ref:`api_paddle_nn_ClipGradByValue` ). Default None, meaning there is no gradient clipping.
         name (str, optional): Normally there is no need for user to set this property.
             For more information, please refer to :ref:`api_guide_Name`. The default value is None.

diff --git a/python/paddle/nn/clip.py b/python/paddle/nn/clip.py
index e262401fd15e5..5fda0adff5efa 100644
--- a/python/paddle/nn/clip.py
+++ b/python/paddle/nn/clip.py
@@ -950,16 +950,16 @@ def set_gradient_clip(clip, param_list=None, program=None):
         and it may be removed in future releases, so it is not recommended.
         It is recommended to set ``grad_clip`` when initializing the ``optimizer`` ,
         this is a better method to clip gradient. There are three clipping strategies:
-         :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` ,
-         :ref:`api_base_clip_GradientClipByValue` .
+         :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` ,
+         :ref:`api_paddle_nn_ClipGradByValue` .

     To specify parameters that require gradient clip.

     Args:
         grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of
             some derived class of ``GradientClipBase`` . There are three cliping strategies
-            ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` ,
-            :ref:`api_base_clip_GradientClipByValue` ). Default value: None, and there is no
+            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` ,
+            :ref:`api_paddle_nn_ClipGradByValue` ). Default value: None, and there is no
             gradient clipping.
         param_list (list(Variable), optional): Parameters that require gradient clip.
                 It can be a list of parameter or a list of parameter's name.
diff --git a/python/paddle/optimizer/adadelta.py b/python/paddle/optimizer/adadelta.py
index ae8e5d2dc6b26..d2a572fefb91d 100644
--- a/python/paddle/optimizer/adadelta.py
+++ b/python/paddle/optimizer/adadelta.py
@@ -61,8 +61,8 @@ class Adadelta(Optimizer):
             Default None, meaning there is no regularization.
         grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of
             some derived class of ``GradientClipBase`` . There are three cliping strategies
-            ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` ,
-            :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
+            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` ,
+            :ref:`api_paddle_nn_ClipGradByValue` ). Default None, meaning there is no gradient clipping.
         name (str, optional): The default value is None. Normally there is no need for user
                 to set this property. For more information, please refer to
                 :ref:`api_guide_Name` .
diff --git a/python/paddle/optimizer/adam.py b/python/paddle/optimizer/adam.py
index 12e932c6fb218..a876b23cafac6 100644
--- a/python/paddle/optimizer/adam.py
+++ b/python/paddle/optimizer/adam.py
@@ -79,8 +79,8 @@ class Adam(Optimizer):
             Default None, meaning there is no regularization.
         grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of
             some derived class of ``GradientClipBase`` . There are three cliping strategies
-            ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` ,
-            :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
+            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` ,
+            :ref:`api_paddle_nn_ClipGradByValue` ). Default None, meaning there is no gradient clipping.
         lazy_mode (bool, optional): The official Adam algorithm has two moving-average accumulators.
             The accumulators are updated at every step. Every element of the two moving-average
             is updated in both dense mode and sparse mode. If the size of parameter is very large,
diff --git a/python/paddle/optimizer/adamax.py b/python/paddle/optimizer/adamax.py
index 354c5a9bb531a..68b92819f3680 100644
--- a/python/paddle/optimizer/adamax.py
+++ b/python/paddle/optimizer/adamax.py
@@ -74,8 +74,8 @@ class Adamax(Optimizer):
             Default None, meaning there is no regularization.
         grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
             some derived class of ``GradientClipBase`` . There are three clipping strategies
-            ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` ,
-            :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
+            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` ,
+            :ref:`api_paddle_nn_ClipGradByValue` ). Default None, meaning there is no gradient clipping.
         name (str, optional): Normally there is no need for user to set this property.
             For more information, please refer to :ref:`api_guide_Name`.
             The default value is None.
diff --git a/python/paddle/optimizer/adamw.py b/python/paddle/optimizer/adamw.py
index 6336f1914280f..ae746581cca79 100644
--- a/python/paddle/optimizer/adamw.py
+++ b/python/paddle/optimizer/adamw.py
@@ -77,8 +77,8 @@ class AdamW(Optimizer):
             Default: None.
         grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
             some derived class of ``GradientClipBase`` . There are three clipping strategies
-            ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` ,
-            :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
+            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` ,
+            :ref:`api_paddle_nn_ClipGradByValue` ). Default None, meaning there is no gradient clipping.
         lazy_mode (bool, optional): The official Adam algorithm has two moving-average accumulators.
             The accumulators are updated at every step. Every element of the two moving-average
             is updated in both dense mode and sparse mode. If the size of parameter is very large,
diff --git a/python/paddle/optimizer/lbfgs.py b/python/paddle/optimizer/lbfgs.py
index c2f9cb6b52263..4f36dab76e160 100644
--- a/python/paddle/optimizer/lbfgs.py
+++ b/python/paddle/optimizer/lbfgs.py
@@ -346,8 +346,8 @@ class LBFGS(Optimizer):
             Default None, meaning there is no regularization.
         grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of \
             some derived class of ``GradientClipBase`` . There are three cliping strategies \
-            ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , \
-            :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
+            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` , \
+            :ref:`api_paddle_nn_ClipGradByValue` ). Default None, meaning there is no gradient clipping.
         name (str, optional): Normally there is no need for user to set this property.
             For more information, please refer to :ref:`api_guide_Name`. The default value is None.

diff --git a/python/paddle/optimizer/momentum.py b/python/paddle/optimizer/momentum.py
index 5dd0a424778bb..2a8c095fd6adb 100644
--- a/python/paddle/optimizer/momentum.py
+++ b/python/paddle/optimizer/momentum.py
@@ -66,8 +66,8 @@ class Momentum(Optimizer):
             Default None, meaning there is no regularization.
         grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
             some derived class of ``GradientClipBase`` . There are three clipping strategies
-            ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` ,
-            :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
+            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` ,
+            :ref:`api_paddle_nn_ClipGradByValue` ). Default None, meaning there is no gradient clipping.
         multi_precision (bool, optional): Whether to use multi-precision during weight updating. Default is false.
         rescale_grad (float, optional): Multiply the gradient with `rescale_grad` before updating. \
             Often choose to be ``1.0/batch_size``.
diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py
index d529b605d8447..d10adbce4d70a 100644
--- a/python/paddle/optimizer/optimizer.py
+++ b/python/paddle/optimizer/optimizer.py
@@ -115,8 +115,8 @@ class Optimizer:
             Default None, meaning there is no regularization.
         grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of \
             some derived class of ``GradientClipBase`` . There are three cliping strategies \
-            ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , \
-            :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
+            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` , \
+            :ref:`api_paddle_nn_ClipGradByValue` ). Default None, meaning there is no gradient clipping.
         name (str, optional): Normally there is no need for user to set this property.
             For more information, please refer to :ref:`api_guide_Name`. The default value is None.

diff --git a/python/paddle/optimizer/rmsprop.py b/python/paddle/optimizer/rmsprop.py
index 07bb27b46e6b3..2ce94f3471173 100644
--- a/python/paddle/optimizer/rmsprop.py
+++ b/python/paddle/optimizer/rmsprop.py
@@ -98,8 +98,8 @@ class RMSProp(Optimizer):
             Default None, meaning there is no regularization.
         grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
             some derived class of ``GradientClipBase`` . There are three clipping strategies
-            ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` ,
-            :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
+            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` ,
+            :ref:`api_paddle_nn_ClipGradByValue` ). Default None, meaning there is no gradient clipping.
         name (str, optional): This parameter is used by developers to print debugging information.
             For details, please refer to :ref:`api_guide_Name`. Default is None.

diff --git a/python/paddle/optimizer/sgd.py b/python/paddle/optimizer/sgd.py
index e0edcbfc0e395..56c2c3ae19eb8 100644
--- a/python/paddle/optimizer/sgd.py
+++ b/python/paddle/optimizer/sgd.py
@@ -47,8 +47,8 @@ class SGD(Optimizer):
             Default None, meaning there is no regularization.
         grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
             some derived class of ``GradientClipBase`` . There are three clipping strategies
-            ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` ,
-            :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
+            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` ,
+            :ref:`api_paddle_nn_ClipGradByValue` ). Default None, meaning there is no gradient clipping.
         name (str, optional): The default value is None. Normally there is no need for user
                 to set this property. For more information, please refer to
                 :ref:`api_guide_Name` .
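The labels updated throughout this patch all point at the paddle.nn gradient-clipping classes that the ``grad_clip`` argument of these optimizers accepts. For orientation only (this sketch is not part of the patch; the layer, optimizer, and shapes are illustrative assumptions on a current Paddle 2.x install), the renamed references correspond to usage like:

    import paddle

    # Any of the three strategies named by the new labels can be passed as grad_clip:
    # paddle.nn.ClipGradByGlobalNorm, paddle.nn.ClipGradByNorm, paddle.nn.ClipGradByValue.
    clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)

    linear = paddle.nn.Linear(10, 10)            # illustrative model
    opt = paddle.optimizer.Adam(
        learning_rate=0.001,
        parameters=linear.parameters(),
        grad_clip=clip,                          # gradients are clipped inside opt.step()
    )

    x = paddle.rand([4, 10], dtype='float64').astype('float32')  # Tensor cast, as in the astype docstring above
    loss = linear(x).mean()
    loss.backward()
    opt.step()
    opt.clear_grad()

ClipGradByNorm and ClipGradByValue are drop-in replacements for the global-norm strategy here; the choice depends on whether clipping should consider all gradients together, each gradient tensor's own norm, or individual element values.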