[PaddlePaddle Hackathon 2nd No.16] add API RReLU #42466

Closed
Changes from all commits (34 commits):
25c6bf1  Add files via upload  (OccupyMars2025, May 4, 2022)
2408fd4  Add files via upload  (OccupyMars2025, May 4, 2022)
49d3083  Add files via upload  (OccupyMars2025, May 4, 2022)
9518be8  Add files via upload  (OccupyMars2025, May 4, 2022)
eb4ee10  Add files via upload  (OccupyMars2025, May 4, 2022)
27270f2  Add files via upload  (OccupyMars2025, May 4, 2022)
4c14062  Add files via upload  (OccupyMars2025, May 4, 2022)
9629741  Add files via upload  (OccupyMars2025, May 4, 2022)
f946d7d  Add files via upload  (OccupyMars2025, May 4, 2022)
563f11e  Add files via upload  (OccupyMars2025, May 4, 2022)
659d3d4  Add files via upload  (OccupyMars2025, May 4, 2022)
74edcf1  Add files via upload  (OccupyMars2025, May 4, 2022)
6dc0dc0  Add files via upload  (OccupyMars2025, May 4, 2022)
2ff4a0b  Add files via upload  (OccupyMars2025, May 4, 2022)
3a5e1ac  Add files via upload  (OccupyMars2025, May 4, 2022)
a2879d5  Add files via upload  (OccupyMars2025, May 5, 2022)
bea0bc2  Add files via upload  (OccupyMars2025, May 6, 2022)
8d95518  Add files via upload  (OccupyMars2025, May 6, 2022)
ad15c35  Add files via upload  (OccupyMars2025, May 6, 2022)
0ae7718  Add files via upload  (OccupyMars2025, May 6, 2022)
0e5ce2a  Delete rrelu_impl.cu.h  (OccupyMars2025, May 6, 2022)
b560736  Add files via upload  (OccupyMars2025, May 6, 2022)
09f8cce  Add files via upload  (OccupyMars2025, May 6, 2022)
21f55c2  Add files via upload  (OccupyMars2025, May 6, 2022)
db873f3  Add files via upload  (OccupyMars2025, May 6, 2022)
6acdff1  Add files via upload  (OccupyMars2025, May 6, 2022)
345592a  Add files via upload  (OccupyMars2025, May 6, 2022)
2ab658a  Add files via upload  (OccupyMars2025, May 6, 2022)
0a58df9  Add files via upload  (OccupyMars2025, May 6, 2022)
87fed26  Add files via upload  (OccupyMars2025, May 6, 2022)
f7c9966  Add files via upload  (OccupyMars2025, May 6, 2022)
a652b1d  Add files via upload  (OccupyMars2025, May 6, 2022)
9452071  Add files via upload  (OccupyMars2025, May 6, 2022)
c396ed1  Add files via upload  (OccupyMars2025, May 6, 2022)
142 changes: 142 additions & 0 deletions paddle/fluid/operators/rrelu_op.cc
@@ -0,0 +1,142 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>
#include <string>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/infermeta/unary.h"

namespace paddle {
namespace operators {

using framework::Tensor;

class RReluOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(
OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
}
};

class RReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "The input of RReLU op.");
AddOutput("Out", "The output of RReLU op.");
AddOutput("Mask",
"The random sampled RReLU mask which is based on X."
"Mask has the same shape as X. Mask[i] is 1 if X[i]>=0."
"Mask[i] is a random sampled value taken from a uniform "
"distribution if X[i]<0 when training. Mask[i] is "
"(lower + upper)/2.0 if X[i]<0 when inference .")
.AsIntermediate()
.AsExtra();
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
.SetDefault(false);
// AddAttr<bool>("fix_seed",
[Review comment from a Contributor, on the commented-out attributes below]

"All of these can be removed now."

// "(bool, default false) A flag indicating whether to use a fixed "
// "seed to generate random mask. NOTE: DO NOT set this flag to true in "
// "training. Setting this flag to true is only useful in "
// "unittest or for debug that always the same random sampled "
// "values will be generated.")
// .SetDefault(false)
// .AsExtra();

// AddAttr<int>("seed", "RReLU random seed.")
// .SetDefault(0)
// .AsExtra();

AddAttr<float>("lower", "Lower bound of the uniform distribution.")
.SetDefault(0.125f)
.AddCustomChecker([](const float& lower) {
PADDLE_ENFORCE_EQ(lower >= 0.0f && lower <= 1.0f, true,
platform::errors::InvalidArgument(
"'rrelu lower' must be in [0, 1]."));
});

AddAttr<float>("upper", "Upper bound of the uniform distribution.")
.SetDefault(0.3333f)
.AddCustomChecker([](const float& upper) {
PADDLE_ENFORCE_EQ(upper >= 0.0f && upper <= 1.0f, true,
platform::errors::InvalidArgument(
"'rrelu upper' must be in [0, 1]."));
});
AddComment(R"DOC(
RReLU Operator.

Applies the randomized leaky rectified linear unit function, element-wise,
as described in the paper:

`Empirical Evaluation of Rectified Activations in Convolutional Network`_.

The function is defined as:

.. math::
\text{RReLU}(x) =
\begin{cases}
x & \text{if } x \geq 0 \\
ax & \text{ otherwise }
\end{cases}

where :math:`a` is randomly sampled from uniform distribution
:math:`\mathcal{U}(\text{lower}, \text{upper})`.

See: https://arxiv.org/pdf/1505.00853.pdf

)DOC");
}
};

class RReluGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};

template <typename T>
class RReluGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("rrelu_grad");
op->SetInput("Mask", this->Output("Mask"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
}
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(rrelu, RReluInferShapeFunctor,
PD_INFER_META(phi::RReluInferMeta));

REGISTER_OPERATOR(rrelu, ops::RReluOp, ops::RReluOpMaker,
ops::RReluGradOpMaker<paddle::framework::OpDesc>,
ops::RReluGradOpMaker<paddle::imperative::OpBase>,
RReluInferShapeFunctor);

DECLARE_INFER_SHAPE_FUNCTOR(rrelu_grad, RReluGradInferShapeFunctor,
PD_INFER_META(phi::RReluGradInferMeta));
REGISTER_OPERATOR(rrelu_grad, ops::RReluGradOp, RReluGradInferShapeFunctor);
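
For reference, here is a minimal standalone sketch of the forward contract the OpMaker documents: in training mode each negative element is scaled by a value drawn from U(lower, upper), and that value is recorded in Mask; in inference mode the deterministic midpoint (lower + upper)/2 is used. This is illustrative only (plain std::mt19937 stands in for the framework's generator); it is not code from the PR.

// Illustrative sketch of the RReLU forward contract; not PR code.
#include <iostream>
#include <random>
#include <vector>

void rrelu_reference(const std::vector<float>& x, float lower, float upper,
                     bool is_test, std::mt19937& engine,
                     std::vector<float>* out, std::vector<float>* mask) {
  std::uniform_real_distribution<float> dist(lower, upper);
  const float mid = (lower + upper) / 2.0f;
  out->resize(x.size());
  mask->resize(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    if (x[i] >= 0.0f) {
      (*mask)[i] = 1.0f;  // non-negative inputs pass through unchanged
      (*out)[i] = x[i];
    } else {
      // training: random slope per element; inference: fixed midpoint
      const float a = is_test ? mid : dist(engine);
      (*mask)[i] = a;
      (*out)[i] = x[i] * a;
    }
  }
}

int main() {
  std::mt19937 engine(42);
  std::vector<float> x = {-2.0f, -0.5f, 0.0f, 1.5f}, out, mask;
  rrelu_reference(x, 0.125f, 0.3333f, /*is_test=*/false, engine, &out, &mask);
  for (size_t i = 0; i < x.size(); ++i) {
    std::cout << x[i] << " -> " << out[i] << " (mask " << mask[i] << ")\n";
  }
  return 0;
}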
50 changes: 50 additions & 0 deletions paddle/phi/infermeta/unary.cc
@@ -1915,6 +1915,56 @@ void RollInferMeta(const MetaTensor& x,
out->set_dtype(x.dtype());
}

void RReluInferMeta(const MetaTensor& x,
float lower,
float upper,
bool is_test,
MetaTensor* out,
MetaTensor* mask) {
auto x_dims = x.dims();
PADDLE_ENFORCE_GE(lower,
0,
phi::errors::InvalidArgument(
"The lower value should be greater than or equal to 0. "
"But received lower value = %f.",
lower));
PADDLE_ENFORCE_LE(upper,
1,
phi::errors::InvalidArgument(
"The upper value should be less than or equal to 1. "
"But received upper value = %f.",
upper));
PADDLE_ENFORCE_GE(
upper,
lower,
phi::errors::InvalidArgument(
"The upper value should be greater than or equal to lower value "
"But received upper value = %f, lower value = %f.",
upper,
lower));

out->set_dims(x_dims);
out->set_dtype(x.dtype());
out->set_layout(x.layout());
out->share_lod(x);

if (mask != nullptr) {
mask->set_dims(x_dims);
mask->set_dtype(x.dtype());
mask->set_layout(x.layout());
}
}

void RReluGradInferMeta(const MetaTensor& out_grad,
const MetaTensor& mask,
MetaTensor* x_grad) {
x_grad->set_dims(out_grad.dims());
x_grad->set_dtype(out_grad.dtype());
x_grad->share_lod(out_grad);
}

void SetValueInferMeta(const MetaTensor& x, MetaTensor* out) {
auto in_dims = x.dims();
PADDLE_ENFORCE_LT(
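
Note that the three checks in RReluInferMeta together enforce 0 <= lower <= upper <= 1: lower <= 1 follows from lower <= upper <= 1, and upper >= 0 from upper >= lower >= 0. A minimal sketch of the same validation outside the framework, with illustrative names:

// Mirrors the RReluInferMeta bound checks; illustrative, not PR code.
#include <stdexcept>
#include <string>

void validate_rrelu_bounds(float lower, float upper) {
  if (lower < 0.0f) {
    throw std::invalid_argument("lower must be >= 0, got " +
                                std::to_string(lower));
  }
  if (upper > 1.0f) {
    throw std::invalid_argument("upper must be <= 1, got " +
                                std::to_string(upper));
  }
  if (upper < lower) {
    throw std::invalid_argument("upper must be >= lower");
  }
}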
11 changes: 11 additions & 0 deletions paddle/phi/infermeta/unary.h
@@ -273,6 +273,17 @@ void RollInferMeta(const MetaTensor& x,
const std::vector<int64_t>& axis,
MetaTensor* out);

void RReluInferMeta(const MetaTensor& x,
float lower,
float upper,
bool is_test,
MetaTensor* out,
MetaTensor* mask);

void RReluGradInferMeta(const MetaTensor& out_grad,
const MetaTensor& mask,
MetaTensor* x_grad);

void SetValueInferMeta(const MetaTensor& x, MetaTensor* out);

void ShapeInferMeta(const MetaTensor& input, MetaTensor* out);
50 changes: 50 additions & 0 deletions paddle/phi/kernels/cpu/rrelu_grad_kernel.cc
@@ -0,0 +1,50 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/rrelu_grad_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"

namespace phi {

template <typename T, typename Context>
void RReluGradKernel(const Context& ctx,
const DenseTensor& mask,
const DenseTensor& out_grad,
DenseTensor* x_grad) {
x_grad->mutable_data<T>(ctx.GetPlace());

auto dX = EigenVector<T>::Flatten(*x_grad);
auto dY = EigenVector<T>::Flatten(out_grad);
auto M = EigenVector<T>::Flatten(mask);

auto& place = *ctx.eigen_device();

// The mask stores the slope applied in the forward pass (1 for x >= 0,
// the sampled value otherwise), which is exactly dOut/dX, so:
dX.device(place) = dY * M;
}

} // namespace phi

PD_REGISTER_KERNEL(
rrelu_grad,
CPU,
ALL_LAYOUT,
phi::RReluGradKernel,
float,
double,
phi::dtype::float16) {}
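
The mask-based backward works because Mask stores exactly the local slope applied in the forward pass: 1 where x >= 0, and the sampled (or midpoint) coefficient a where x < 0, so dOut/dX = Mask and dX = dOut * Mask elementwise. A small self-contained check against a central finite difference, with the per-element slope frozen to the saved mask value (illustrative, not PR code):

// Checks that the derivative of RReLU with a frozen slope equals the mask.
#include <iostream>
#include <vector>

float rrelu_fixed(float x, float a) { return x >= 0.0f ? x : x * a; }

int main() {
  std::vector<float> x = {-1.0f, 2.0f, -0.25f};
  std::vector<float> mask = {0.2f, 1.0f, 0.3f};  // 1 where x>=0, sampled a otherwise
  const float eps = 1e-3f;
  for (size_t i = 0; i < x.size(); ++i) {
    // Central finite difference of the forward with the slope held fixed.
    float fd = (rrelu_fixed(x[i] + eps, mask[i]) -
                rrelu_fixed(x[i] - eps, mask[i])) / (2.0f * eps);
    std::cout << "x=" << x[i] << "  mask=" << mask[i]
              << "  finite-diff=" << fd << "\n";  // fd matches mask[i]
  }
  return 0;
}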
86 changes: 86 additions & 0 deletions paddle/phi/kernels/cpu/rrelu_kernel.cc
@@ -0,0 +1,86 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <random>

#include "paddle/phi/kernels/rrelu_kernel.h"
#include "paddle/fluid/framework/generator.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"

namespace phi {

template <typename T, typename Context>
void RReluKernel(const Context& ctx,
const DenseTensor& x,
const float lower,
const float upper,
bool is_test,
DenseTensor* out,
DenseTensor* mask) {
const T* x_data = x.data<T>();
T* out_data = ctx.template Alloc<T>(out);
T* mask_data = ctx.template Alloc<T>(mask);
uint64_t size = x.numel();
auto zero = static_cast<T>(0);
auto one = static_cast<T>(1);

if (!is_test) {
auto gen = ctx.GetGenerator();
auto engine = gen->GetCPUEngine();
std::uniform_real_distribution<float> dist(lower, upper);

for (uint64_t i = 0; i < size; ++i) {
if (x_data[i] >= zero) {
mask_data[i] = one;
out_data[i] = x_data[i];
} else {
auto random_sampled_value = static_cast<T>(dist(*engine));
mask_data[i] = random_sampled_value;
out_data[i] = x_data[i] * random_sampled_value;
}
}
} else {
auto middle_value = static_cast<T>((lower + upper) / 2.0f);
for (uint64_t i = 0; i < size; ++i) {
if (x_data[i] >= zero) {
out_data[i] = x_data[i];
mask_data[i] = one;
} else {
out_data[i] = x_data[i] * middle_value;
mask_data[i] = middle_value;
}
}
}
}

} // namespace phi

PD_REGISTER_KERNEL(rrelu,
CPU,
ALL_LAYOUT,
phi::RReluKernel,
float,
double,
phi::dtype::float16) {}
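
With the commented-out fix_seed/seed attributes dropped, reproducibility comes from the framework generator returned by ctx.GetGenerator(): seeding the global generator makes the sampled Mask (and therefore Out) deterministic. A sketch of that property, with std::mt19937_64 standing in for the engine from GetCPUEngine() (illustrative, not PR code):

// Same seed => same engine state => identical sampled slopes.
#include <iostream>
#include <random>
#include <vector>

std::vector<float> sample_slopes(uint64_t seed, size_t n, float lo, float hi) {
  std::mt19937_64 engine(seed);  // stand-in for the framework's CPU engine
  std::uniform_real_distribution<float> dist(lo, hi);
  std::vector<float> slopes(n);
  for (auto& s : slopes) s = dist(engine);
  return slopes;
}

int main() {
  auto a = sample_slopes(2022, 4, 0.125f, 0.3333f);
  auto b = sample_slopes(2022, 4, 0.125f, 0.3333f);
  std::cout << (a == b ? "identical masks for the same seed" : "masks differ")
            << "\n";
  return 0;
}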