From 32d6f607a790c29c20da00a42fdaaa698791320c Mon Sep 17 00:00:00 2001
From: AyaseNana <13659110308@163.com>
Date: Sat, 21 Dec 2024 10:45:18 +0800
Subject: [PATCH] update docs

---
 python/paddle/nn/functional/loss.py | 10 ++-
 python/paddle/nn/layer/loss.py      | 98 ++++++++++++++---------------
 2 files changed, 52 insertions(+), 56 deletions(-)

diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index e99f584712fef0..575c461f54d914 100644
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -3449,13 +3449,11 @@ def multi_label_soft_margin_loss(
     For each sample in the mini-batch:
 
     .. math::
-        \text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)}
+        \text{loss}(x, y) = - \frac{1}{C} * \sum_i y[i] * \log((1 + \exp(-x[i]))^{-1})
+        + (1-y[i]) * \log\left(\frac{\exp(-x[i])}{(1 + \exp(-x[i]))}\right)
 
-    where :math:`x \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`, \
-        :math:`y \in \left\{0, \; \cdots , \; \text{y.size}(0) - 1\right\}`, \
-        :math:`0 \leq y[j] \leq \text{x.size}(0)-1`, \
-        and :math:`i \neq y[j]` for all :math:`i` and :math:`j`.
-        :math:`y` and :math:`x` must have the same size.
+    where :math:`i \in \left\{0, \; \cdots , \; \text{x.nElement}() - 1\right\}`,
+        :math:`y[i] \in \left\{0, \; 1\right\}`.
 
     Parameters:
         input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1.
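The replacement formula is the standard multi-label soft margin loss: the mean over the C classes of a per-class sigmoid cross entropy, where log((1 + exp(-x))^{-1}) is log_sigmoid(x) and log(exp(-x) / (1 + exp(-x))) is log_sigmoid(-x). A minimal sketch, separate from the patch itself, that reproduces paddle.nn.functional.multi_label_soft_margin_loss from those two terms, reusing the tensors from the doctest later in this patch; it relies only on the public log_sigmoid and allclose APIs:

    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
    y = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)

    # Documented expression:
    #   loss(x, y) = -1/C * sum_i [ y[i] * log(sigmoid(x[i]))
    #                               + (1 - y[i]) * log(sigmoid(-x[i])) ]
    manual = -paddle.mean(y * F.log_sigmoid(x) + (1 - y) * F.log_sigmoid(-x), axis=-1)

    # Built-in per-sample loss; matches the doctest values
    # [3.49625897, 0.71111226, 0.43989015].
    builtin = F.multi_label_soft_margin_loss(x, y, reduction='none')
    assert paddle.allclose(manual, builtin)
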
diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py
index 9290f7f6c9ede0..abb27159232af9 100644
--- a/python/paddle/nn/layer/loss.py
+++ b/python/paddle/nn/layer/loss.py
@@ -1536,65 +1536,63 @@ def forward(self, input: Tensor, label: Tensor) -> Tensor:
 
 class MultiLabelSoftMarginLoss(Layer):
     r"""Creates a criterion that optimizes a multi-class multi-classification
-        hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`)
-        and output :math:`y` (which is a 2D `Tensor` of target class indices).
-        For each sample in the mini-batch:
+    hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`)
+    and output :math:`y` (which is a 2D `Tensor` of target class indices).
+    For each sample in the mini-batch:
 
-        .. math::
-            \text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)}
-
-        where :math:`x \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`, \
-            :math:`y \in \left\{0, \; \cdots , \; \text{y.size}(0) - 1\right\}`, \
-            :math:`0 \leq y[j] \leq \text{x.size}(0)-1`, \
-            and :math:`i \neq y[j]` for all :math:`i` and :math:`j`.
-            :math:`y` and :math:`x` must have the same size.
-
-        Parameters:
-            weight (Tensor,optional): a manual rescaling weight given to each class.
-                If given, has to be a Tensor of size C and the data type is float32, float64.
-                Default is ``'None'`` .
-            reduction (str, optional): Indicate how to average the loss by batch_size,
-                the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
-                If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
-                If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
-                If :attr:`reduction` is ``'sum'``, the summed loss is returned.
-                Default: ``'mean'``
-            name (str|None, optional): Name for the operation (optional, default is None).
-                For more information, please refer to :ref:`api_guide_Name`.
+    .. math::
+        \text{loss}(x, y) = - \frac{1}{C} * \sum_i y[i] * \log((1 + \exp(-x[i]))^{-1})
+        + (1-y[i]) * \log\left(\frac{\exp(-x[i])}{(1 + \exp(-x[i]))}\right)
 
-        Call parameters:
-            input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1.
-            label (Tensor): Label tensor containing 1 or -1, the data type is float32 or float64. The shape of label is the same as the shape of input.
+    where :math:`i \in \left\{0, \; \cdots , \; \text{x.nElement}() - 1\right\}`,
+        :math:`y[i] \in \left\{0, \; 1\right\}`.
+
+    Parameters:
+        weight (Tensor, optional): a manual rescaling weight given to each class.
+            If given, has to be a Tensor of size C and the data type is float32, float64.
+            Default is ``'None'``.
+        reduction (str, optional): Indicate how to average the loss by batch_size,
+            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
+            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
+            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
+            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
+            Default: ``'mean'``
+        name (str|None, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Call parameters:
+        input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1.
+        label (Tensor): Label tensor containing 1 or -1, the data type is float32 or float64. The shape of label is the same as the shape of input.
 
-        Shape:
-            input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operationoperates over all the elements.
-            label: N-D Tensor, same shape as the input.
-            output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input.
+    Shape:
+        input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operation operates over all the elements.
+        label: N-D Tensor, same shape as the input.
+        output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input.
 
-        Returns:
-            A callable object of MultiLabelSoftMarginLoss.
+    Returns:
+        A callable object of MultiLabelSoftMarginLoss.
 
-        Examples:
-            .. code-block:: python
-
-                >>> import paddle
-                >>> import paddle.nn as nn
-
-                >>> input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
-                >>> label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)
-
-                >>> multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='none')
-                >>> loss = multi_label_soft_margin_loss(input, label)
-                >>> print(loss)
-                Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
-                       [3.49625897, 0.71111226, 0.43989015])
-
-                >>> multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='mean')
-                >>> loss = multi_label_soft_margin_loss(input, label)
-                >>> print(loss)
-                Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
-                       1.54908717)
-        """
+    Examples:
+        .. code-block:: python
+
+            >>> import paddle
+            >>> import paddle.nn as nn
+
+            >>> input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
+            >>> label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)
+
+            >>> multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='none')
+            >>> loss = multi_label_soft_margin_loss(input, label)
+            >>> print(loss)
+            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
+                   [3.49625897, 0.71111226, 0.43989015])
+
+            >>> multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='mean')
+            >>> loss = multi_label_soft_margin_loss(input, label)
+            >>> print(loss)
+            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
+                   1.54908717)
+    """
 
     weight: Tensor | None
     reduction: _ReduceMode
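
Neither doctest in the patch exercises the weight argument that both docstrings describe. A minimal sketch, separate from the patch, of per-class rescaling with the Layer API; the weight values are illustrative, and per the documented semantics the size-C weight rescales each class's contribution to the loss:

    import paddle
    import paddle.nn as nn

    input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
    label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)

    # weight must be a Tensor of size C (here C = 3); the values are illustrative.
    class_weight = paddle.to_tensor([1.0, 2.0, 0.5])

    weighted_loss = nn.MultiLabelSoftMarginLoss(weight=class_weight, reduction='mean')
    loss = weighted_loss(input, label)  # scalar Tensor, as in the 'mean' example above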