[Hackathon No.21] Add SoftMarginLoss to Paddle #42364
@@ -0,0 +1,177 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import numpy as np
import unittest


def test_static_layer(
    place,
    input_np,
    label_np,
    reduction='mean',
):
    paddle.enable_static()
    prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(prog, startup_prog):
        input = paddle.static.data(name='input',
                                   shape=input_np.shape,
                                   dtype=input_np.dtype)
        label = paddle.static.data(name='label',
                                   shape=label_np.shape,
                                   dtype=label_np.dtype)
        sm_loss = paddle.nn.loss.SoftMarginLoss(reduction=reduction)
        res = sm_loss(input, label)
        exe = paddle.static.Executor(place)
        static_result = exe.run(prog,
                                feed={
                                    "input": input_np,
                                    "label": label_np
                                },
                                fetch_list=[res])
    return static_result


def test_static_functional(
    place,
    input_np,
    label_np,
    reduction='mean',
):
    paddle.enable_static()
    prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(prog, startup_prog):
        input = paddle.static.data(name='input',
                                   shape=input_np.shape,
                                   dtype=input_np.dtype)
        label = paddle.static.data(name='label',
                                   shape=label_np.shape,
                                   dtype=label_np.dtype)

        res = paddle.nn.functional.soft_margin_loss(input,
                                                    label,
                                                    reduction=reduction)
        exe = paddle.static.Executor(place)
        static_result = exe.run(prog,
                                feed={
                                    "input": input_np,
                                    "label": label_np
                                },
                                fetch_list=[res])
    return static_result


def test_dygraph_layer(
    place,
    input_np,
    label_np,
    reduction='mean',
):
    paddle.disable_static()
    sm_loss = paddle.nn.loss.SoftMarginLoss(reduction=reduction)
    dy_res = sm_loss(paddle.to_tensor(input_np), paddle.to_tensor(label_np))
    dy_result = dy_res.numpy()
    paddle.enable_static()
    return dy_result


def test_dygraph_functional(
    place,
    input_np,
    label_np,
    reduction='mean',
):
    paddle.disable_static()
    input = paddle.to_tensor(input_np)
    label = paddle.to_tensor(label_np)

    dy_res = paddle.nn.functional.soft_margin_loss(input,
                                                   label,
                                                   reduction=reduction)
    dy_result = dy_res.numpy()
    paddle.enable_static()
    return dy_result


def calc_softmarginloss(
    input_np,
    label_np,
    reduction='mean',
):
    expected = np.log(1 + np.exp(-label_np * input_np))

    if reduction == 'mean':
        expected = np.mean(expected)
    elif reduction == 'sum':
        expected = np.sum(expected)

    return expected


class TestSoftMarginLoss(unittest.TestCase):

    def test_SoftMarginLoss(self):
        input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
        types = [np.int32, np.int64, np.float32, np.float64]
        places = ['cpu']
        if paddle.device.is_compiled_with_cuda():
            places.append('gpu')
        reductions = ['sum', 'mean', 'none']
        for place in places:
            for reduction in reductions:
                for _type in types:
                    label_np = np.random.randint(0, 2,
                                                 size=(5, 5)).astype(_type)
                    label_np[label_np == 0] = -1
                    static_result = test_static_layer(place, input_np,
                                                      label_np, reduction)
                    dy_result = test_dygraph_layer(place, input_np, label_np,
                                                   reduction)
                    expected = calc_softmarginloss(input_np, label_np,
                                                   reduction)
                    self.assertTrue(np.allclose(static_result, expected))
                    self.assertTrue(np.allclose(static_result, dy_result))
                    self.assertTrue(np.allclose(dy_result, expected))
                    static_functional = test_static_functional(
                        place, input_np, label_np, reduction)
                    dy_functional = test_dygraph_functional(
                        place, input_np, label_np, reduction)
                    self.assertTrue(np.allclose(static_functional, expected))
                    self.assertTrue(
                        np.allclose(static_functional, dy_functional))
                    self.assertTrue(np.allclose(dy_functional, expected))

    def test_SoftMarginLoss_error(self):
        paddle.disable_static()
        self.assertRaises(ValueError,
                          paddle.nn.loss.SoftMarginLoss,
                          reduction="unsupport reduction")
        input = paddle.to_tensor([[0.1, 0.3]], dtype='float32')
        label = paddle.to_tensor([[-1.0, 1.0]], dtype='float32')
        self.assertRaises(ValueError,
                          paddle.nn.functional.soft_margin_loss,
                          input=input,
                          label=label,
                          reduction="unsupport reduction")
        paddle.enable_static()


if __name__ == "__main__":
    unittest.main()
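A side note on the reference computation above: np.log(1 + np.exp(-label_np * input_np)) can overflow for large values of -label_np * input_np, although the bounded inputs used in this test never reach that regime. A numerically stable equivalent, sketched here with numpy's logaddexp (an observation only, not part of this diff):

    import numpy as np

    # log(1 + exp(z)) == logaddexp(0, z), evaluated without overflow even
    # for large z; here z = -label_np * input_np.
    def calc_softmarginloss_stable(input_np, label_np, reduction='mean'):
        expected = np.logaddexp(0, -label_np * input_np)
        if reduction == 'mean':
            return np.mean(expected)
        elif reduction == 'sum':
            return np.sum(expected)
        return expected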
@@ -3200,3 +3200,82 @@ def triplet_margin_loss(input,
        return paddle.sum(loss, name=name)
    elif reduction == 'none':
        return loss


def soft_margin_loss(input, label, reduction='mean', name=None):
    """
    The API measures the soft margin loss between input predictions ``input``
    and target labels ``label``. It can be described as:

    .. math::
        Out = log(1 + exp(-label * input))

    Parameters:

        input (Tensor): The input predictions tensor with shape [N, *],
            where N is the batch size and `*` means any number of additional
            dimensions. The ``input`` ranges from -inf to inf.
            Available dtypes are float32, float64.

        label (Tensor): The target labels tensor with the same shape as
            ``input``. The values of the target labels should be -1 or 1.
            Available dtypes are int32, int64, float32, float64.

        reduction (str, optional): Indicates how to reduce the loss over the batch;
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
            If :attr:`reduction` is ``'mean'``, the mean of the loss is returned;
            If :attr:`reduction` is ``'sum'``, the sum of the loss is returned.
            Default is ``'mean'``.

        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:

        Output (Tensor): If ``reduction`` is ``'none'``, the shape of the output
            is the same as ``input``; otherwise the shape of the output is [1].

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
            label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
            output = paddle.nn.functional.soft_margin_loss(input, label)

            input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)

Reviewer comment: Create the Tensor directly with paddle here instead of importing numpy; same below.

Author reply: This is mainly because the label values can only be -1 and 1, which numpy can create fairly quickly. For a consistent style, input is also created with numpy and then converted to a tensor. I am not sure how to create a tensor containing only -1 and 1 with paddle.
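A paddle-only construction of a {-1, 1} label tensor is possible, for example via paddle.randint, which draws 0/1 integers that a scale-and-shift maps to -1/1. A minimal sketch (not part of this PR):

    import paddle

    # paddle.randint(0, 2, ...) yields 0s and 1s; 2 * x - 1 maps {0, 1} to {-1, 1}.
    label = paddle.randint(0, 2, shape=[5, 5]).astype('float64') * 2 - 1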
            label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
            label_np[label_np == 0] = -1
            input = paddle.to_tensor(input_np)
            label = paddle.to_tensor(label_np)
            output = paddle.nn.functional.soft_margin_loss(input, label, reduction='none')
    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "The value of 'reduction' in soft_margin_loss should be 'sum', "
            "'mean' or 'none', but received %s, which is not allowed." %
            reduction)

    if not _non_static_mode():
        fluid.data_feeder.check_variable_and_dtype(input, 'input',
                                                   ['float32', 'float64'],
                                                   'soft_margin_loss')
        fluid.data_feeder.check_variable_and_dtype(
            label, 'label', ['int32', 'int64', 'float32', 'float64'],
            'soft_margin_loss')

    if not (input.shape == label.shape):
        raise ValueError("input's shape must equal to label's shape")

    label = fluid.layers.cast(label, input.dtype)
    out = paddle.log(1 + paddle.exp(-label * input))

    if reduction == 'sum':
        return paddle.sum(out, name=name)
    elif reduction == 'mean':
        return paddle.mean(out, name=name)
    else:
        return out
Reviewer comment: The parameter name must be the same as in the RFC; I suggest renaming the parameter `target` in the RFC to `label`.

Author reply: done
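A further observation on the core computation: paddle.log(1 + paddle.exp(-label * input)) can overflow when -label * input is large. paddle.nn.functional.softplus computes log(1 + exp(x)) with a built-in numerical-stability threshold (it returns x directly above the threshold), so an equivalent but stabler formulation of the loss body would be possible; a sketch, under the assumption that label has already been cast to input's dtype as in the function above:

    import paddle
    import paddle.nn.functional as F

    # softplus(z) = log(1 + exp(z)), evaluated stably; here z = -label * input.
    out = F.softplus(-label * input)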