【Hackathon 5th No.25】add gammaln api -part #60553

Merged · 15 commits · Jan 5, 2024
10 changes: 10 additions & 0 deletions paddle/phi/api/yaml/backward.yaml
@@ -921,6 +921,16 @@
  kernel :
    func : frame_grad

- backward_op : gammaln_grad
  forward : gammaln(Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : gammaln_grad

- backward_op : gather_grad
  forward : gather(Tensor x, Tensor index, Scalar axis=0) -> Tensor(out)
  args : (Tensor x, Tensor index, Tensor out_grad, Scalar axis=0)
10 changes: 10 additions & 0 deletions paddle/phi/api/yaml/ops.yaml
@@ -1040,6 +1040,16 @@
    data_type : dtype
    backend : place

- op : gammaln
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : gammaln
  inplace : (x -> out)
  backward : gammaln_grad

- op : gather
  args : (Tensor x, Tensor index, Scalar axis=0)
  output : Tensor(out)
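Once the op is registered in ops.yaml and the Python binding (shown further below) is exported, the new API can be exercised like any other elementwise math op. A minimal usage sketch, assuming a build that includes this PR:

```python
import paddle

x = paddle.to_tensor([0.5, 1.0, 2.5, 4.0], dtype="float32")
out = paddle.gammaln(x)  # elementwise log|Gamma(x)|
print(out)
```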
22 changes: 22 additions & 0 deletions paddle/phi/kernels/cpu/gammaln_grad_kernel.cc
@@ -0,0 +1,22 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/gammaln_grad_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/gammaln_grad_kernel_impl.h"

PD_REGISTER_KERNEL(
    gammaln_grad, CPU, ALL_LAYOUT, phi::GammalnGradKernel, float, double) {}
22 changes: 22 additions & 0 deletions paddle/phi/kernels/cpu/gammaln_kernel.cc
@@ -0,0 +1,22 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/gammaln_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/gammaln_kernel_impl.h"

PD_REGISTER_KERNEL(
    gammaln, CPU, ALL_LAYOUT, phi::GammalnKernel, float, double) {}
27 changes: 27 additions & 0 deletions paddle/phi/kernels/gammaln_grad_kernel.h
@@ -0,0 +1,27 @@

// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void GammalnGradKernel(const Context& dev_ctx,
                       const DenseTensor& x,
                       const DenseTensor& d_out,
                       DenseTensor* d_x);
} // namespace phi
26 changes: 26 additions & 0 deletions paddle/phi/kernels/gammaln_kernel.h
@@ -0,0 +1,26 @@

// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void GammalnKernel(const Context& dev_ctx,
                   const DenseTensor& x,
                   DenseTensor* out);
} // namespace phi
30 changes: 30 additions & 0 deletions paddle/phi/kernels/gpu/gammaln_grad_kernel.cu
@@ -0,0 +1,30 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/gammaln_grad_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/kernel_registry.h"

#include "paddle/phi/kernels/impl/gammaln_grad_kernel_impl.h"

PD_REGISTER_KERNEL(gammaln_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::GammalnGradKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {}
29 changes: 29 additions & 0 deletions paddle/phi/kernels/gpu/gammaln_kernel.cu
@@ -0,0 +1,29 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/gammaln_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"

#include "paddle/phi/kernels/impl/gammaln_kernel_impl.h"

PD_REGISTER_KERNEL(gammaln,
                   GPU,
                   ALL_LAYOUT,
                   phi::GammalnKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {}
92 changes: 92 additions & 0 deletions paddle/phi/kernels/impl/gammaln_grad_kernel_impl.h
@@ -0,0 +1,92 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/kernels/funcs/for_range.h"

namespace phi {
template <typename T>
HOSTDEVICE T digamma(T x) {
  static T c = T{8.5};
  static T euler_mascheroni = T{0.57721566490153286060};
  T r;
  T value;
  T x2;

  if (x <= T{0.0}) {
    value = T{0.0};
    return value;
  }

  // Near zero, use psi(x) ~ -gamma - 1/x + (pi^2 / 6) * x.
  if (x <= T{0.000001}) {
    value = -euler_mascheroni - T{1.0} / x + T{1.6449340668482264365} * x;
    return value;
  }

  // Shift the argument above c via the recurrence psi(x) = psi(x + 1) - 1/x
  // so that the asymptotic expansion below converges quickly.
  value = T{0.0};
  x2 = x;
  while (x2 < c) {
    value = value - T{1.0} / x2;
    x2 = x2 + T{1.0};
  }

  // Asymptotic expansion of psi for large arguments.
  r = T{1.0} / x2;
  value = value + std::log(x2) - T{0.5} * r;

  r = r * r;

  value = value -
          r * (T{1.0} / T{12.0} -
               r * (T{1.0} / T{120.0} -
                    r * (T{1.0} / T{252.0} -
                         r * (T{1.0} / T{240.0} - r * (T{1.0} / T{132.0})))));

  return value;
}

template <typename T>
struct GammalnGradFunctor {
  GammalnGradFunctor(const T* dout, const T* x, T* output, int64_t numel)
      : dout_(dout), x_(x), output_(output), numel_(numel) {}

  HOSTDEVICE void operator()(int64_t idx) const {
    using MT = typename phi::dtype::MPTypeTrait<T>::Type;
    // Upcast to the higher-precision MPType (float for float16/bfloat16)
    // before evaluating digamma, then cast the product back to T.
    const MT mp_dout = static_cast<MT>(dout_[idx]);
    const MT mp_x = static_cast<MT>(x_[idx]);
    output_[idx] = static_cast<T>(mp_dout * digamma<MT>(mp_x));
  }

 private:
  const T* dout_;
  const T* x_;
  T* output_;
  int64_t numel_;
};

template <typename T, typename Context>
void GammalnGradKernel(const Context& dev_ctx,
                       const DenseTensor& x,
                       const DenseTensor& d_out,
                       DenseTensor* d_x) {
  auto numel = d_out.numel();
  auto* dout_data = d_out.data<T>();
  auto* x_data = x.data<T>();
  auto* dx_data =
      dev_ctx.template Alloc<T>(d_x, static_cast<size_t>(numel * sizeof(T)));
  phi::funcs::ForRange<Context> for_range(dev_ctx, numel);
  GammalnGradFunctor<T> functor(dout_data, x_data, dx_data, numel);
  for_range(functor);
}
}  // namespace phi
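For reference, the gradient kernel above follows from the identity that the derivative of the log-gamma function is the digamma function, and the `digamma` helper evaluates the standard asymptotic expansion after shifting the argument above c = 8.5 with the recurrence, matching the polynomial in the code:

```latex
\frac{d}{dx}\,\ln\Gamma(x) = \psi(x), \qquad
\psi(x) = \psi(x+1) - \frac{1}{x}, \qquad
\psi(x) \sim \ln x - \frac{1}{2x} - \frac{1}{12x^{2}} + \frac{1}{120x^{4}}
  - \frac{1}{252x^{6}} + \frac{1}{240x^{8}} - \frac{1}{132x^{10}}
```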
49 changes: 49 additions & 0 deletions paddle/phi/kernels/impl/gammaln_kernel_impl.h
@@ -0,0 +1,49 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/kernels/funcs/for_range.h"

namespace phi {
template <typename T>
struct GammalnFunctor {
  GammalnFunctor(const T* x, T* output, int64_t numel)
      : x_(x), output_(output), numel_(numel) {}

  HOSTDEVICE void operator()(int64_t idx) const {
    using MT = typename phi::dtype::MPTypeTrait<T>::Type;
    // Upcast to MPType so float16/bfloat16 inputs evaluate lgamma in float.
    const MT mp_x = static_cast<MT>(x_[idx]);
    output_[idx] = static_cast<T>(std::lgamma(mp_x));
  }

 private:
  const T* x_;
  T* output_;
  int64_t numel_;
};

template <typename T, typename Context>
void GammalnKernel(const Context& dev_ctx,
                   const DenseTensor& x,
                   DenseTensor* out) {
  auto numel = x.numel();
  auto* x_data = x.data<T>();
  auto* out_data = dev_ctx.template Alloc<T>(out);
  phi::funcs::ForRange<Context> for_range(dev_ctx, numel);
  GammalnFunctor<T> functor(x_data, out_data, numel);
  for_range(functor);
}
}  // namespace phi
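Both impl headers defer to `std::lgamma` and the `digamma` helper, so the kernels can be sanity-checked numerically against SciPy's reference implementations. An illustrative sketch, assuming `scipy` is installed alongside a build containing this PR (not part of the change itself):

```python
import numpy as np
import paddle
from scipy import special

x_np = np.array([0.5, 1.5, 3.0, 7.2], dtype="float64")

# Forward: paddle.gammaln should match scipy.special.gammaln elementwise.
x = paddle.to_tensor(x_np, stop_gradient=False)
y = paddle.gammaln(x)
np.testing.assert_allclose(y.numpy(), special.gammaln(x_np), rtol=1e-6)

# Backward: d/dx lgamma(x) = digamma(x), so after summing y (ones as the
# upstream gradient) x.grad should match scipy.special.digamma.
y.sum().backward()
np.testing.assert_allclose(x.grad.numpy(), special.digamma(x_np), rtol=1e-6)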
4 changes: 4 additions & 0 deletions python/paddle/__init__.py
@@ -397,6 +397,8 @@
    frac,
    frac_,
    frexp,
    gammaln,
    gammaln_,
    gcd,
    gcd_,
    heaviside,
@@ -770,6 +772,8 @@
    'square_',
    'divide',
    'divide_',
    'gammaln',
    'gammaln_',
    'ceil',
    'atan',
    'atan_',
4 changes: 4 additions & 0 deletions python/paddle/tensor/__init__.py
@@ -277,6 +277,8 @@
    frac,
    frac_,
    frexp,
    gammaln,
    gammaln_,
    gcd,
    gcd_,
    heaviside,
@@ -665,6 +667,8 @@
    'real',
    'imag',
    'is_floating_point',
    'gammaln',
    'gammaln_',
    'digamma',
    'digamma_',
    'diagonal',
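With the exports above in place, both the functional and the inplace form become part of the public tensor API. A minimal sketch (the method form assumes Paddle's usual attachment of `paddle.tensor` functions as `Tensor` methods):

```python
import paddle

x = paddle.to_tensor([0.5, 2.0, 4.0])
print(paddle.gammaln(x))  # out-of-place: returns a new tensor

x.gammaln_()              # inplace variant registered by this PR
print(x)
```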