Add GaussianNLLLoss API. #50843

Merged

merged 32 commits into PaddlePaddle:develop from GsNLLLoss_branch on Apr 13, 2023

Commits (32)
bec2c4a
Add GaussianNLLLoss API.
Atlantisming Feb 23, 2023
13d3880
Change `rotl` `atol`.Check `var` in dynamic graph
Atlantisming Feb 26, 2023
5079f74
Merge branch 'PaddlePaddle:develop' into GsNLLLoss_branch
Atlantisming Feb 27, 2023
75d858c
remove assertTrue
Atlantisming Feb 27, 2023
0110b07
update unittest
Atlantisming Feb 28, 2023
380faeb
update unittest for ci-covarage.add broadcast with same dim.
Atlantisming Mar 2, 2023
8c66074
Supply static err print.
Atlantisming Mar 3, 2023
335c7a8
Repair note and example.
Atlantisming Mar 3, 2023
7ac1556
Split unitest.
Atlantisming Mar 6, 2023
dd50740
empty commit.
Atlantisming Mar 9, 2023
5da754f
for standard commit.
Atlantisming Mar 10, 2023
0784a34
for standard commit.
Atlantisming Mar 10, 2023
86ba005
Add int dynamic graph test.
Atlantisming Mar 11, 2023
90b5616
Repair parameters name.
Atlantisming Mar 14, 2023
f11f97a
Repair unitest parameters name.
Atlantisming Mar 16, 2023
5fa70b8
Repair unitest parameters name
Atlantisming Mar 16, 2023
3601625
Repair unitest parameters name
Atlantisming Mar 17, 2023
8fd7e30
Repair unitest parameters name
Atlantisming Mar 17, 2023
16f21aa
Merge remote-tracking branch 'origin/GsNLLLoss_branch' into GsNLLLoss…
Atlantisming Mar 20, 2023
2cb2432
add square in code-block
Atlantisming Mar 24, 2023
e2d74a5
fit few notes.
Atlantisming Mar 24, 2023
2854f3d
fit few notes.
Atlantisming Mar 30, 2023
a547070
fit few notes.
Atlantisming Mar 31, 2023
2dc4a7b
fit few notes.
Atlantisming Apr 4, 2023
d8b7316
add few interpretations.
Atlantisming Apr 7, 2023
1d99e85
add few interpretations.
Atlantisming Apr 7, 2023
9c0e135
add few interpretations.
Atlantisming Apr 10, 2023
bb2b36e
fix import.
Atlantisming Apr 11, 2023
c36fd88
fix space.
Atlantisming Apr 11, 2023
70960a1
empty commit for ci.
Atlantisming Apr 11, 2023
1b8a851
Merge remote-tracking branch 'origin/GsNLLLoss_branch' into GsNLLLoss…
Atlantisming Apr 12, 2023
4c299ac
Merge branch 'PaddlePaddle:develop' into GsNLLLoss_branch
Atlantisming Apr 12, 2023
216 changes: 216 additions & 0 deletions python/paddle/fluid/tests/unittests/test_gaussian_nll_loss.py
@@ -0,0 +1,216 @@
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.nn.functional as F
from paddle.fluid import core

np.random.seed(10)


def ref_gaussian_nll_loss(
    input, label, variance, full=False, eps=1e-6, reduction='none'
):
    if variance.shape != input.shape:
        if input.shape[:-1] == variance.shape:
            variance = np.expand_dims(variance, -1)
        elif (
            input.shape[:-1] == variance.shape[:-1] and variance.shape[-1] == 1
        ):
            pass
        else:
            raise ValueError("variance is of incorrect size")
    if reduction != 'none' and reduction != 'mean' and reduction != 'sum':
        raise ValueError(reduction + " is not valid")

    if np.any(variance < 0):
        raise ValueError("var has negative entry/entries")

    variance = variance.copy()
    variance = np.clip(variance, a_min=eps, a_max=None)

    loss = 0.5 * (np.log(variance) + (input - label) ** 2 / variance)
    if full:
        loss += 0.5 * np.log(2 * np.pi)

    if reduction == 'none':
        return loss
    elif reduction == 'sum':
        return [np.sum(loss)]
    elif reduction == 'mean':
        return [np.mean(loss)]

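# Note: ref_gaussian_nll_loss above mirrors the loss the API under test
# computes. With the variance clamped below at `eps`,
#     loss = 0.5 * (log(max(var, eps)) + (input - label)^2 / var)
# and, when full=True, the constant term 0.5 * log(2 * pi) is added before
# the optional 'mean'/'sum' reduction.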

class TestGaussianNLLLossAPI(unittest.TestCase):
GGBond8488 (Contributor) commented on Mar 6, 2023:

Everything else looks good now. Please split the different scenarios of this unit test into separate test cases (put those cases into a dedicated class) so that a failing case can be located directly later on.

Atlantisming (Author) replied:

Done.
    # test paddle.nn.functional.gaussian_nll_loss, paddle.nn.GaussianNLLLoss

    def setUp(self, type=None):
        self.shape = [10, 2]
        if type in ['float16', 'float64', 'int32', 'int64']:
            dtype = np.dtype(type)
            self.input_np = np.random.random(self.shape).astype(dtype)
            self.label_np = np.random.random(self.shape).astype(dtype)
            self.variance_np = np.ones(self.shape).astype(dtype)
        elif type == 'broadcast1':
            self.shape = [10, 2, 3]
            self.broadcast_shape = [10, 2]
            self.input_np = np.random.random(self.shape).astype(np.float32)
            self.label_np = np.random.random(self.shape).astype(np.float32)
            self.variance_np = np.ones(self.broadcast_shape).astype(np.float32)
        elif type == 'broadcast2':
            self.shape = [10, 2, 3]
            self.broadcast_shape = [10, 2, 1]
            self.input_np = np.random.random(self.shape).astype(np.float32)
            self.label_np = np.random.random(self.shape).astype(np.float32)
            self.variance_np = np.ones(self.broadcast_shape).astype(np.float32)
        else:
            dtype = np.dtype('float32')
            self.input_np = np.random.random(self.shape).astype(dtype)
            self.label_np = np.random.random(self.shape).astype(dtype)
            self.variance_np = np.ones(self.shape).astype(dtype)
            if type == 'test_err':
                self.variance_np = -np.ones(self.shape).astype(np.float32)

        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

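    # Shape conventions exercised by setUp above: `variance` either matches
    # `input` exactly, drops the trailing dimension ('broadcast1': variance
    # [10, 2] against input [10, 2, 3]), or keeps it with size one
    # ('broadcast2': [10, 2, 1]); any other shape is rejected with a
    # ValueError, as is a negative variance ('test_err').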
    def test_dynamic_case(self, type=None, full=False, reduction='none'):
        self.setUp(type)
        paddle.disable_static(self.place)

        input_x = paddle.to_tensor(self.input_np)
        label = paddle.to_tensor(self.label_np)
        variance = paddle.to_tensor(self.variance_np)
        if type in ['test_err', 'int32', 'int64']:
            self.assertRaises(
                ValueError,
                paddle.nn.functional.gaussian_nll_loss,
                input=input_x,
                label=label,
                variance=variance,
            )
        else:
            out_ref = ref_gaussian_nll_loss(
                self.input_np,
                self.label_np,
                self.variance_np,
                full=full,
                reduction=reduction,
            )
            out1 = F.gaussian_nll_loss(
                input_x, label, variance, full=full, reduction=reduction
            )
            gaussian_nll_loss = paddle.nn.GaussianNLLLoss(
                full, reduction=reduction
            )
            out2 = gaussian_nll_loss(input_x, label, variance)

            for r in [out1, out2]:
                np.testing.assert_allclose(
                    out_ref, r.numpy(), rtol=1e-5, atol=1e-5
                )
        paddle.enable_static()

    def test_static_case(self, type=None, full=False, reduction='none'):
        self.setUp(type)
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            if type in ['int32', 'int64', 'float64']:
                input_x = paddle.static.data('Input_x', self.shape, type)
                label = paddle.static.data('Label', self.shape, type)
                variance = paddle.static.data('Variance', self.shape, type)
            elif type in ['broadcast1', 'broadcast2']:
                input_x = paddle.static.data('Input_x', self.shape)
                label = paddle.static.data('Label', self.shape)
                variance = paddle.static.data('Variance', self.broadcast_shape)
            else:
                input_x = paddle.static.data('Input_x', self.shape, 'float32')
                label = paddle.static.data('Label', self.shape, 'float32')
                variance = paddle.static.data('Variance', self.shape, 'float32')
            out1 = F.gaussian_nll_loss(
                input_x, label, variance, full=full, reduction=reduction
            )
            gaussian_nll_loss = paddle.nn.GaussianNLLLoss(
                full, reduction=reduction
            )
            out2 = gaussian_nll_loss(input_x, label, variance)
            exe = paddle.static.Executor(self.place)
            if type not in ['test_err', 'int32', 'int64']:
                out_ref = ref_gaussian_nll_loss(
                    self.input_np,
                    self.label_np,
                    self.variance_np,
                    full=full,
                    reduction=reduction,
                )
                res = exe.run(
                    feed={
                        'Input_x': self.input_np,
                        'Label': self.label_np,
                        'Variance': self.variance_np,
                    },
                    fetch_list=[out1, out2],
                )
                for r in res:
                    np.testing.assert_allclose(
                        out_ref, r, rtol=1e-5, atol=1e-5
                    )
            else:
                try:
                    res = exe.run(
                        feed={
                            'Input_x': self.input_np,
                            'Label': self.label_np,
                            'Variance': self.variance_np,
                        },
                        fetch_list=[out1, out2],
                    )
                except ValueError:
                    pass

    def test_api(self):
        self.test_dynamic_case()
        self.test_static_case()

    def test_float64(self):
        self.test_dynamic_case('float64')
        self.test_static_case('float64')

    def test_broadcast(self):
        self.test_dynamic_case('broadcast1')
        self.test_static_case('broadcast1')

    def test_broadcast_with_same_dim(self):
        self.test_dynamic_case('broadcast2')
        self.test_static_case('broadcast2')

    def test_reduction(self):
        self.test_dynamic_case(full=True, reduction='mean')
        self.test_dynamic_case(full=True, reduction='sum')
        self.test_static_case(full=True, reduction='mean')

    def test_error(self):
        self.test_dynamic_case('test_err')
        self.test_static_case('test_err')

    def test_int(self):
        self.test_dynamic_case('int64')
        self.test_dynamic_case('int32')


if __name__ == "__main__":
    unittest.main()
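
For orientation, here is a minimal dynamic-graph sketch of the API this PR adds, invoked the same way the unit test above invokes it; the shapes and values are illustrative only, not taken from the PR:

import paddle
import paddle.nn.functional as F

paddle.disable_static()
input = paddle.rand([10, 2], dtype='float32')     # predicted means
label = paddle.rand([10, 2], dtype='float32')     # observed targets
variance = paddle.ones([10, 2], dtype='float32')  # predicted variances, must be non-negative

# Functional form.
loss1 = F.gaussian_nll_loss(input, label, variance, full=False, reduction='mean')

# Layer form, equivalent to the functional call above.
layer = paddle.nn.GaussianNLLLoss(full=False, reduction='mean')
loss2 = layer(input, label, variance)
print(float(loss1), float(loss2))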
3 changes: 3 additions & 0 deletions python/paddle/nn/__init__.py
@@ -114,6 +114,8 @@
from .layer.loss import TripletMarginWithDistanceLoss
from .layer.loss import TripletMarginLoss
from .layer.loss import SoftMarginLoss
from .layer.loss import GaussianNLLLoss

from .layer.norm import BatchNorm # noqa: F401
from .layer.norm import SyncBatchNorm # noqa: F401
from .layer.norm import GroupNorm # noqa: F401
@@ -332,4 +334,5 @@ def weight_norm(*args):
    'TripletMarginWithDistanceLoss',
    'TripletMarginLoss',
    'SoftMarginLoss',
    'GaussianNLLLoss',
]
3 changes: 3 additions & 0 deletions python/paddle/nn/functional/__init__.py
@@ -98,6 +98,8 @@
from .loss import triplet_margin_with_distance_loss
from .loss import triplet_margin_loss
from .loss import soft_margin_loss
from .loss import gaussian_nll_loss

from .norm import batch_norm # noqa: F401
from .norm import instance_norm # noqa: F401
from .norm import layer_norm # noqa: F401
@@ -246,4 +248,5 @@
    'triplet_margin_loss',
    'multi_margin_loss',
    'soft_margin_loss',
    'gaussian_nll_loss',
]
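
With both export lists updated, the new symbols resolve from the public namespaces. A quick smoke check, assuming a build that includes this PR:

import paddle

print(paddle.nn.GaussianNLLLoss)
print(paddle.nn.functional.gaussian_nll_loss)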