diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index adce805195960..c7e69753b5335 100755
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -2021,7 +2021,6 @@ def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None):
 @framework.static_only
 def gradients(targets, inputs, target_gradients=None, no_grad_set=None):
     """
-    :api_attr: Static Graph
 
     Backpropagate the gradients of targets to inputs.
 
@@ -2042,8 +2041,9 @@ def gradients(targets, inputs, target_gradients=None, no_grad_set=None):
         will be None.
 
     Examples:
+
         .. code-block:: python
-
+            :name: code-example
 
             import paddle
             import paddle.nn.functional as F
diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 86b0d6560c927..56b743f4463ae 100644
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -75,7 +75,6 @@ def _switch_scope(scope):
 @signature_safe_contextmanager
 def scope_guard(scope):
     """
-    :api_attr: Static Graph
 
     This function switches scope through python `with` statement.
     Scope records the mapping between variable names and variables ( :ref:`api_guide_Variable` ),
@@ -94,6 +93,7 @@ def scope_guard(scope):
         None
 
     Examples:
+
         .. code-block:: python
 
             import paddle
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 314a502a3cbef..817e742fd1d8a 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -729,7 +729,7 @@ def is_compiled_with_rocm():
 
 def cuda_places(device_ids=None):
     """
-    **Note**:
+    Note:
         For multi-card tasks, please use `FLAGS_selected_gpus` environment variable to set the visible GPU device.
         The next version will fix the problem with `CUDA_VISIBLE_DEVICES` environment variable.
 
@@ -754,6 +754,7 @@ def cuda_places(device_ids=None):
         list of paddle.CUDAPlace: Created GPU place list.
 
     Examples:
+
         .. code-block:: python
 
             import paddle
@@ -874,6 +875,7 @@ def cpu_places(device_count=None):
         list of paddle.CPUPlace: Created list of CPU places.
 
     Examples:
+
         .. code-block:: python
 
             import paddle
@@ -993,7 +995,6 @@ def name(self):
 @signature_safe_contextmanager
 def name_scope(prefix=None):
     """
-    :api_attr: Static Graph
 
     Generate hierarchical name prefix for the operators in Static Graph.
 
@@ -1006,6 +1007,7 @@ def name_scope(prefix=None):
         prefix(str, optional): prefix. Default is none.
 
     Examples:
+
         .. code-block:: python
 
             import paddle
@@ -6916,8 +6918,9 @@ def switch_device(device):
 @signature_safe_contextmanager
 def device_guard(device=None):
     """
-    **Notes**:
-        **The API only supports static mode.**
+
+    Note:
+        The API only supports static mode.
 
     A context manager that specifies the device on which the OP will be placed.
 
@@ -6931,8 +6934,10 @@ def device_guard(device=None):
         assigned devices.
 
     Examples:
+
         .. code-block:: python
-
+
+            # required: gpu
             import paddle
 
             paddle.enable_static()
diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py
index a48cfd9150c65..7c7f101286e24 100644
--- a/python/paddle/fluid/io.py
+++ b/python/paddle/fluid/io.py
@@ -2154,7 +2154,6 @@ def set_var(var, ndarray):
 
 def load_program_state(model_path, var_list=None):
     """
-    :api_attr: Static Graph
 
     Load program state from local file
 
@@ -2169,6 +2168,7 @@ def load_program_state(model_path, var_list=None):
         state_dict(dict): the dict store Parameter and optimizer information
 
     Examples:
+
        .. code-block:: python
 
            import paddle
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 1fdf59948345b..8b10a5f454e69 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -11850,8 +11850,7 @@ def _elementwise_op(helper):
 
 def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     """
-    Scale operator.
-
+    Apply scale and bias to the input Tensor as follows:
 
     ``bias_after_scale`` is True:
 
@@ -11876,6 +11875,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
         Tensor: Output tensor of scale operator, with shape and data type same as input.
 
     Examples:
+
         .. code-block:: python
 
             # scale as a float32 number
diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/fluid/param_attr.py
index c3ee11ff5d906..a10ce1ce808f6 100644
--- a/python/paddle/fluid/param_attr.py
+++ b/python/paddle/fluid/param_attr.py
@@ -30,16 +30,17 @@
 
 class ParamAttr(object):
     """
-    Create a object to represent the attribute of parameter. The attributes are:
-    name, initializer, learning rate, regularizer, trainable, gradient clip,
-    and model average.
-
+
     Note:
         ``gradient_clip`` of ``ParamAttr`` HAS BEEN DEPRECATED since 2.0.
         Please use ``need_clip`` in ``ParamAttr`` to speficiy the clip scope.
         There are three clipping strategies: :ref:`api_paddle_nn_ClipGradByGlobalNorm` ,
         :ref:`api_paddle_nn_ClipGradByNorm` , :ref:`api_paddle_nn_ClipGradByValue` .
 
+    Create an object to represent the attribute of parameter. The attributes are:
+    name, initializer, learning rate, regularizer, trainable, gradient clip,
+    and model average.
+
     Parameters:
         name (str, optional): The parameter's name. Default None, meaning that the name
                 would be created automatically.
@@ -63,6 +64,7 @@ class ParamAttr(object):
         ParamAttr Object.
 
     Examples:
+
         .. code-block:: python
 
             import paddle
@@ -213,24 +215,22 @@ def _to_kwargs(self, with_initializer=False):
 
 class WeightNormParamAttr(ParamAttr):
     r"""
-    :api_attr: Static Graph
 
     Note:
         Please use 'paddle.nn.utils.weight_norm' in dygraph mode.
-
+
+    Note:
+        ``gradient_clip`` of ``ParamAttr`` HAS BEEN DEPRECATED since 2.0.
+        Please use ``need_clip`` in ``ParamAttr`` to specify the clip scope.
+        There are three clipping strategies: :ref:`api_paddle_nn_ClipGradByGlobalNorm` ,
+        :ref:`api_paddle_nn_ClipGradByNorm` , :ref:`api_paddle_nn_ClipGradByValue` .
+
     Parameter of weight Norm. Weight Norm is a reparameterization of the weight vectors
     in a neural network that decouples the magnitude of those weight vectors from
     their direction. Weight Norm has been implemented as discussed in this paper:
     `Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks
     <https://arxiv.org/pdf/1602.07868.pdf>`_.
-
-    Note:
-        ``gradient_clip`` of ``ParamAttr`` HAS BEEN DEPRECATED since 2.0.
-        Please use ``need_clip`` in ``ParamAttr`` to speficiy the clip scope.
-        There are three clipping strategies: :ref:`api_paddle_nn_ClipGradByGlobalNorm` ,
-        :ref:`api_paddle_nn_ClipGradByNorm` , :ref:`api_paddle_nn_ClipGradByValue` .
-
     Args:
         dim(int, optional): Dimension over which to compute the norm. Dim is a non-negative
@@ -258,6 +258,7 @@ class WeightNormParamAttr(ParamAttr):
         need_clip (bool, optional): Whether the parameter gradient need to be cliped in optimizer. Default is True.
 
     Examples:
+
         .. code-block:: python
 
             import paddle
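
The ``backward.py`` hunk truncates the docstring example at its first two lines. For reviewers who want to confirm the re-indented block still runs, here is a minimal self-contained sketch of static-graph ``gradients``, written against the public ``paddle.static`` aliases; the conv/relu network and variable names are illustrative, not part of the patch.

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    paddle.enable_static()

    x = paddle.static.data(name='x', shape=[None, 2, 8, 8], dtype='float32')
    x.stop_gradient = False

    conv = paddle.static.nn.conv2d(x, 4, 1, bias_attr=False)
    y = F.relu(conv)

    # Append backward ops to the default main program and return
    # the gradient variables of y with respect to x.
    z = paddle.static.gradients([y], x)
    print(z)  # a list holding x@GRAD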
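
Similarly, the ``executor.py`` hunk cuts the ``scope_guard`` example off at ``import paddle``. A minimal sketch of the scope switch the docstring describes, assuming the ``paddle.static.Scope`` / ``scope_guard`` / ``global_scope`` aliases:

.. code-block:: python

    import numpy
    import paddle

    paddle.enable_static()

    new_scope = paddle.static.Scope()
    with paddle.static.scope_guard(new_scope):
        # Inside the guard, the "global" scope is new_scope, so the
        # variable "data" is created there, not in the default scope.
        paddle.static.global_scope().var("data").get_tensor().set(
            numpy.ones((2, 2)), paddle.CPUPlace())
    print(numpy.array(new_scope.find_var("data").get_tensor()))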
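
The ``# required: gpu`` tag added to the ``device_guard`` example means that sample is only exercised on GPU CI. A hedged, CPU-safe variant of the placement pattern the docstring describes (the shapes and the slice/reshape ops are illustrative):

.. code-block:: python

    import paddle

    paddle.enable_static()

    support_gpu = paddle.is_compiled_with_cuda()
    place = paddle.CUDAPlace(0) if support_gpu else paddle.CPUPlace()

    data1 = paddle.full(shape=[1, 3, 8, 8], fill_value=0.5)
    data2 = paddle.full(shape=[1, 3, 64], fill_value=0.5)
    shape = paddle.shape(data2)

    with paddle.static.device_guard("cpu"):
        # Ops created here are pinned to CPUPlace.
        shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
    with paddle.static.device_guard("gpu"):
        # Placed on CUDAPlace(0) when available, otherwise falls back to CPU.
        out = paddle.reshape(data1, shape=shape)

    exe = paddle.static.Executor(place)
    exe.run(paddle.static.default_startup_program())
    result = exe.run(fetch_list=[out])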
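
``load_program_state`` in ``io.py`` also has its example body elided by the diff context. A small end-to-end sketch of save-then-load (the ``./temp`` path and the two-layer fc network are placeholders):

.. code-block:: python

    import paddle

    paddle.enable_static()

    x = paddle.static.data(name="x", shape=[10, 10], dtype='float32')
    y = paddle.static.nn.fc(x, 10)
    z = paddle.static.nn.fc(y, 10)

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(paddle.static.default_startup_program())
    prog = paddle.static.default_main_program()

    # Persist the parameters, then read them back as a
    # {parameter name: numpy.ndarray} dict.
    paddle.static.save(prog, "./temp")
    program_state = paddle.static.load_program_state("./temp")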
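
The reworded ``scale`` summary can be sanity-checked in eager mode through the ``paddle.scale`` alias; with ``bias_after_scale=True`` (the default) the op computes ``out = scale * x + bias``:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    # out = 2.0 * x + 1.0
    out = paddle.scale(x, scale=2.0, bias=1.0)
    print(out)  # [[3., 5.], [7., 9.]]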
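
Both relocated Note blocks steer users from the deprecated ``gradient_clip`` argument to ``need_clip``. For context, this is what that looks like at the call site (the attribute values here are arbitrary):

.. code-block:: python

    import paddle

    weight_attr = paddle.ParamAttr(
        name="fc_weight",
        learning_rate=0.5,
        regularizer=paddle.regularizer.L2Decay(1.0),
        trainable=True,
        # need_clip replaces the removed gradient_clip: it marks whether this
        # parameter's gradient takes part in the optimizer's clipping strategy.
        need_clip=True)

    linear = paddle.nn.Linear(10, 10, weight_attr=weight_attr)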