【Hackathon 5th No.18】Add Binomial kernel for Hackathon No. 18 - part #59690

Merged: 13 commits, Dec 13, 2023
8 changes: 8 additions & 0 deletions paddle/phi/api/yaml/ops.yaml
@@ -323,6 +323,14 @@
func: bincount
optional: weights

- op : binomial
args : (Tensor count, Tensor prob)
output : Tensor(out)
infer_meta :
func : BinomialInferMeta
kernel :
func : binomial

- op : bitwise_and
args : (Tensor x, Tensor y)
output : Tensor(out)
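For reference, a minimal usage sketch of the op registered above. The Python-side entry point is assumed to be `paddle.binomial`; the Python binding itself is not part of this diff.

```python
import paddle

# Hypothetical usage of the newly registered op; `paddle.binomial` is an
# assumption, since the Python API layer is not shown in this PR.
count = paddle.full([2, 3], 10.0, dtype="float32")  # trials per element
prob = paddle.full([2, 3], 0.25, dtype="float32")   # success probability per element

out = paddle.binomial(count, prob)
print(out.shape)  # [2, 3], same shape as count/prob
print(out.dtype)  # paddle.int64, as set by BinomialInferMeta below
```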
28 changes: 28 additions & 0 deletions paddle/phi/infermeta/binary.cc
@@ -277,6 +277,34 @@ void BincountInferMeta(const MetaTensor& x,
out->share_lod(x);
}

void BinomialInferMeta(const MetaTensor& count,
const MetaTensor& prob,
MetaTensor* out,
MetaConfig config) {
auto count_dims = count.dims();
auto prob_dims = prob.dims();

bool check = true;
if ((!config.is_runtime) &&
(phi::product(count_dims) <= 0 || phi::product(prob_dims) <= 0)) {
check = false;
}

if (check) {
PADDLE_ENFORCE_EQ(count_dims,
prob_dims,
phi::errors::InvalidArgument(
"Input(count) and Input(prob) shall have the same "
"shape. But received: the shape of Input(count) is "
"[%s], the shape of Input(prob) is [%s].",
count_dims,
prob_dims));
}

out->set_dims(count_dims);
out->set_dtype(DataType::INT64);
}

void BmmInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
std::vector<int64_t> x_dims = phi::vectorize(x.dims());
std::vector<int64_t> y_dims = phi::vectorize(y.dims());
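A hedged sketch of the shape check enforced by BinomialInferMeta above: mismatched count/prob shapes should surface as an error. The `paddle.binomial` entry point and the ValueError mapping of InvalidArgument are assumptions; the exact Python-side exception type may differ.

```python
import paddle

count = paddle.full([2, 3], 10.0, dtype="float32")
prob = paddle.full([4], 0.5, dtype="float32")  # shape deliberately mismatched

try:
    paddle.binomial(count, prob)  # assumed Python entry point
except ValueError as err:
    # BinomialInferMeta raises InvalidArgument when the shapes differ;
    # Paddle typically surfaces this as ValueError on the Python side.
    print(err)
```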
5 changes: 5 additions & 0 deletions paddle/phi/infermeta/binary.h
@@ -66,6 +66,11 @@ void BincountInferMeta(const MetaTensor& x,
const Scalar& minlength,
MetaTensor* out);

void BinomialInferMeta(const MetaTensor& count,
const MetaTensor& prob,
MetaTensor* out,
MetaConfig config = MetaConfig());

@cxxly Contributor · Dec 8, 2023
Same as above, please make the signature consistent.

Contributor Author
Fixed.
void BmmInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);

void CholeskySolveInferMeta(const MetaTensor& x,
37 changes: 37 additions & 0 deletions paddle/phi/kernels/binomial_kernel.h
@@ -0,0 +1,37 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/device_context.h"

namespace phi {

/**
 * @brief This kernel generates random values that follow the binomial distribution.
 * @param ctx device context
 * @param count A Tensor with each element indicating the number of
 * Bernoulli trials
 * @param prob A Tensor with each element indicating the probability of
 * success for a single Bernoulli trial
 * @param out A Tensor filled with the sampled values
*/
template <typename T, typename Context>
void BinomialKernel(const Context& ctx,
const DenseTensor& count,
const DenseTensor& prob,
DenseTensor* out);

} // namespace phi
43 changes: 43 additions & 0 deletions paddle/phi/kernels/cpu/binomial_kernel.cc
@@ -0,0 +1,43 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/binomial_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/binomial_functor.h"

namespace phi {

template <typename T, typename Context>
void BinomialKernel(const Context& ctx,
const DenseTensor& count,
const DenseTensor& prob,
DenseTensor* out) {
auto numel = count.numel();
auto* count_data = count.data<T>();
auto* prob_data = prob.data<T>();
int64_t* out_data = ctx.template Alloc<int64_t>(out);

for (int64_t i = 0; i < numel; ++i) {
out_data[i] = funcs::BinomialFunctor<T>(ctx, count_data[i], prob_data[i]);
}
}

} // namespace phi

PD_REGISTER_KERNEL(
binomial, CPU, ALL_LAYOUT, phi::BinomialKernel, float, double) {
kernel->OutputAt(0).SetDataType(phi::DataType::INT64);
}
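A rough statistical sanity check for the CPU kernel registered above (a sketch, not the PR's unit test, and it again assumes the `paddle.binomial` entry point): for X ~ Binomial(n, p), E[X] = n·p and Var[X] = n·p·(1−p).

```python
import paddle

n, p = 100.0, 0.3
count = paddle.full([100000], n, dtype="float64")
prob = paddle.full([100000], p, dtype="float64")

samples = paddle.binomial(count, prob).astype("float64")  # assumed entry point
print(float(samples.mean()))  # should be close to n * p = 30.0
print(float(samples.var()))   # should be close to n * p * (1 - p) = 21.0
```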
134 changes: 134 additions & 0 deletions paddle/phi/kernels/funcs/binomial_functor.h
@@ -0,0 +1,134 @@
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cmath>
#include <cstdint>
#include <random>

#include "paddle/phi/core/device_context.h"
#include "paddle/phi/core/enforce.h"

namespace phi {
namespace funcs {

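// Tail (correction) term of Stirling's approximation to log(k!): exact
// precomputed values for k <= 9, truncated asymptotic series otherwise.
// Used by btrs() below to evaluate the log acceptance bound cheaply.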
template <typename T>
inline T stirling_approx_tail(int64_t k) {
const T kTailValues[] = {0.0810614667953272,
0.0413406959554092,
0.0276779256849983,
0.02079067210376509,
0.0166446911898211,
0.0138761288230707,
0.0118967099458917,
0.0104112652619720,
0.00925546218271273,
0.00833056343336287};
if (k <= 9) {
return static_cast<T>(kTailValues[static_cast<size_t>(k)]);
}
T kp1sq = (k + 1) * (k + 1);
return (1.0 / 12 - (1.0 / 360 - 1.0 / 1260 / kp1sq) / kp1sq) / (k + 1);
}

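// BTRS sampler ("transformed rejection with squeeze", Hormann 1993).
// Efficient when n * p >= 10: proposals are drawn from a transformed
// uniform around the mode and accepted either by a cheap squeeze step or
// by an exact log-ratio test based on stirling_approx_tail().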
template <typename T, typename Context>
inline int64_t btrs(const Context& ctx, const T n, const T p) {
int64_t k;
T U, V, us;
std::uniform_real_distribution<T> dist(0.0, 1.0);
auto gen_ptr = ctx.GetGenerator();
auto engine = gen_ptr->GetCPUEngine();

const T stddev = std::sqrt(n * p * (1 - p));

const T b = 1.15 + 2.53 * stddev;
const T a = -0.0873 + 0.0248 * b + 0.01 * p;
const T c = n * p + 0.5;
const T v_r = 0.92 - 4.2 / b;
const T r = p / (1 - p);

const T alpha = (2.83 + 5.1 / b) * stddev;
const T m = std::floor((n + 1) * p);

while (1) {
U = dist(*engine) - 0.5;
V = dist(*engine);

us = 0.5 - std::abs(U);
k = static_cast<int64_t>(std::floor((2 * a / us + b) * U + c));

if (k < 0 || k > n) {
continue;
}
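    // Cheap squeeze acceptance: most proposals are accepted here without
    // evaluating the exact log acceptance ratio below.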
if (us >= 0.07 && V <= v_r) {
return k;
}

V = std::log(V * alpha / (a / (us * us) + b));
T upperbound =
((m + 0.5) * std::log((m + 1) / (r * (n - m + 1))) +
(n + 1) * std::log((n - m + 1) / (n - k + 1)) +
(k + 0.5) * std::log(r * (n - k + 1) / (k + 1)) +
stirling_approx_tail<T>(m) + stirling_approx_tail<T>(n - m) -
stirling_approx_tail<T>(k) - stirling_approx_tail<T>(n - k));

if (V <= upperbound) {
return k;
}
}
}

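// Inversion-style sampler: draws geometric(p) gaps and counts how many fit
// within n trials; that count is Binomial(n, p) distributed. The expected
// number of loop iterations is about n * p + 1, so this path is used only
// for small n * p (see BinomialFunctor below).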
template <typename T, typename Context>
inline int64_t binomial_inversion(const Context& ctx, const T n, const T p) {
T unif;
T geom_sum = 0.0;
int64_t num_geom = 0;
T logprob = std::log1p(-p);
std::uniform_real_distribution<T> dist(0.0, 1.0);
auto gen_ptr = ctx.GetGenerator();
auto engine = gen_ptr->GetCPUEngine();

while (1) {
unif = dist(*engine);
T geom = std::ceil(std::log(unif) / logprob);
geom_sum += geom;
if (geom_sum > n) {
break;
}
num_geom = num_geom + 1;
}
return num_geom;
}

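// Dispatcher. Degenerate inputs return 0 or n directly; otherwise the
// symmetry n - Binomial(n, 1 - p) ~ Binomial(n, p) keeps the working
// success probability <= 0.5, and btrs() vs. binomial_inversion() is
// chosen by whether the expected count (n * p or n * q) reaches 10.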
template <typename T, typename Context>
inline int64_t BinomialFunctor(const Context& ctx, const T n, const T p) {
if (n <= 0.0 || p <= 0.0) {
return 0;
} else if (p >= 1.0) {
return static_cast<int64_t>(n);
} else if (p <= 0.5) {
if (n * p >= 10.0) {
return btrs<T>(ctx, n, p);
} else {
return binomial_inversion<T>(ctx, n, p);
}
} else {
T qprob = 1.0 - p;
if (n * qprob >= 10.0) {
return static_cast<int64_t>(n) - btrs<T>(ctx, n, qprob);
} else {
return static_cast<int64_t>(n) - binomial_inversion<T>(ctx, n, qprob);
}
}
}

} // namespace funcs
} // namespace phi