Add New OP and API: bitwise_and/or/xor/not #33524

Merged: 1 commit, Jun 16, 2021
cmake/operators.cmake (1 addition, 1 deletion)

@@ -208,7 +208,7 @@ function(op_library TARGET)
endif()

# Define operators that don't need pybind here.
-foreach(manual_pybind_op "compare_all_op" "compare_op" "logical_op" "nccl_op"
+foreach(manual_pybind_op "compare_all_op" "compare_op" "logical_op" "bitwise_op" "nccl_op"
"tensor_array_read_write_op" "tensorrt_engine_op" "conv_fusion_op"
"fusion_transpose_flatten_concat_op" "fusion_conv_inception_op"
"sync_batch_norm_op" "dgc_op" "fused_fc_elementwise_layernorm_op"
paddle/fluid/operators/CMakeLists.txt (2 additions, 2 deletions)

@@ -7,8 +7,6 @@ set(pybind_file ${PADDLE_BINARY_DIR}/paddle/fluid/pybind/pybind.h.tmp CACHE INTE
set(pybind_file_final ${PADDLE_BINARY_DIR}/paddle/fluid/pybind/pybind.h)
file(WRITE ${pybind_file} "// Generated by the paddle/fluid/operators/CMakeLists.txt. DO NOT EDIT!\n\n")

-copy_if_different(${pybind_file} ${pybind_file_final})
-
add_subdirectory(math)
add_subdirectory(eigen)
add_subdirectory(controlflow)
@@ -203,3 +201,5 @@ endif()
if (WITH_GPU OR WITH_ASCEND_CL)
cc_test(copy_cross_scope_test SRCS copy_cross_scope_test.cc DEPS op_registry copy_cross_scope_op scope device_context enforce executor)
endif()
+
+copy_if_different(${pybind_file} ${pybind_file_final})
paddle/fluid/operators/controlflow/CMakeLists.txt (3 additions, 1 deletion)

@@ -19,4 +19,6 @@ else()
target_link_libraries(conditional_block_infer_op conditional_block_op)
endif()

-file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(equal_all);\nUSE_OP(logical_and);\nUSE_NO_KERNEL_OP(read_from_array);\n")
+file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(equal_all);\nUSE_NO_KERNEL_OP(read_from_array);\n")
+file(APPEND ${pybind_file} "USE_OP(logical_and);\nUSE_OP(logical_or);\nUSE_OP(logical_xor);\nUSE_OP(logical_not);\n")
+file(APPEND ${pybind_file} "USE_OP(bitwise_and);\nUSE_OP(bitwise_or);\nUSE_OP(bitwise_xor);\nUSE_OP(bitwise_not);\n")
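
(How these ops reach Python: "bitwise_op" was added to the manual_pybind_op list in cmake/operators.cmake above, the list that the "# Define operators that don't need pybind here" comment marks as excluded from op_library()'s automatic pybind registration, so the file(APPEND ...) calls here write the stanzas by hand. After configuration, the generated pybind header then carries USE_OP(bitwise_and); USE_OP(bitwise_or); USE_OP(bitwise_xor); USE_OP(bitwise_not); alongside the logical_* entries. This is presumably also why copy_if_different(${pybind_file} ${pybind_file_final}) moved to the bottom of paddle/fluid/operators/CMakeLists.txt: subdirectories append to pybind.h.tmp as CMake configures them, so the copy to the final pybind.h must run after every add_subdirectory(), or it could capture a half-written file.)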
paddle/fluid/operators/controlflow/bitwise_op.cc (new file, 174 additions)

@@ -0,0 +1,174 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/controlflow/bitwise_op.h"
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

template <typename OpComment>
class BinaryBitwiseOpProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
OpComment comment;
AddInput("X", string::Sprintf(
"Input Tensor of ``%s`` . It is "
"a N-D Tensor of bool, uint8, int8, int16, int32, int64.",
comment.type));
AddInput("Y", string::Sprintf(
"Input Tensor of ``%s`` . It is "
"a N-D Tensor of bool, uint8, int8, int16, int32, int64.",
comment.type));
AddOutput("Out",
string::Sprintf("Result of ``%s`` . It is a N-D Tensor with "
"the same data type of input Tensor.",
comment.type));
AddComment(string::Sprintf(R"DOC(
It performs the ``%s`` operation on Tensors ``X`` and ``Y`` .

.. math::
%s

.. note::
``paddle.%s`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
)DOC",
comment.type, comment.equation, comment.type));
}
};

template <typename OpComment>
class UnaryBitwiseOpProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
OpComment comment;
AddInput("X", string::Sprintf(
"Input Tensor of ``%s`` . It is "
"a N-D Tensor of bool, uint8, int8, int16, int32, int64.",
comment.type));
AddOutput("Out",
string::Sprintf("Result of ``%s`` . It is a N-D Tensor with "
"the same data type of input Tensor.",
comment.type));
AddComment(string::Sprintf(R"DOC(
It performs the ``%s`` operation on Tensor ``X`` .

.. math::
%s

)DOC",
comment.type, comment.equation));
}
};

class BitwiseOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
framework::OpKernelType kt = OperatorWithKernel::GetExpectedKernelType(ctx);
// BitwiseOp kernel's device type is decided by input tensor place
kt.place_ = ctx.Input<framework::LoDTensor>("X")->place();
return kt;
}
};

template <typename OpComment>
class UnaryBitwiseOp : public BitwiseOp {
public:
using BitwiseOp::BitwiseOp;

protected:
void InferShape(framework::InferShapeContext *context) const override {
OpComment comment;
OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", comment.type);
context->SetOutputDim("Out", context->GetInputDim("X"));
context->ShareLoD("X", "Out");
}
};

template <typename OpComment>
class BinaryBitwiseOp : public BitwiseOp {
public:
using BitwiseOp::BitwiseOp;

protected:
void InferShape(framework::InferShapeContext *context) const override {
OpComment comment;
OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", comment.type);
OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", comment.type);
auto dim_x = context->GetInputDim("X");
auto dim_y = context->GetInputDim("Y");
if (dim_x == dim_y) {
context->SetOutputDim("Out", dim_x);
} else {
int max_dim = std::max(dim_x.size(), dim_y.size());
int axis = std::abs(dim_x.size() - dim_y.size());
std::vector<int> x_dims_array(max_dim);
std::vector<int> y_dims_array(max_dim);
std::vector<int> out_dims_array(max_dim);
GetBroadcastDimsArrays(dim_x, dim_y, x_dims_array.data(),
y_dims_array.data(), out_dims_array.data(),
max_dim, axis);
context->SetOutputDim("Out", framework::make_ddim(out_dims_array));
}
context->ShareLoD("X", "Out");
}
};

} // namespace operators
} // namespace paddle

namespace ops = ::paddle::operators;

#define REGISTER_BINARY_BITWISE_OP(op_type, _equation) \
struct _##op_type##Comment { \
static char type[]; \
static char equation[]; \
}; \
char _##op_type##Comment::type[]{#op_type}; \
char _##op_type##Comment::equation[]{_equation}; \
REGISTER_OPERATOR( \
op_type, ops::BinaryBitwiseOp<_##op_type##Comment>, \
ops::BinaryBitwiseOpProtoMaker<_##op_type##Comment>, \
::paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>, \
::paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);

#define REGISTER_UNARY_BITWISE_OP(op_type, _equation) \
struct _##op_type##Comment { \
static char type[]; \
static char equation[]; \
}; \
char _##op_type##Comment::type[]{#op_type}; \
char _##op_type##Comment::equation[]{_equation}; \
REGISTER_OPERATOR( \
op_type, ops::UnaryBitwiseOp<_##op_type##Comment>, \
ops::UnaryBitwiseOpProtoMaker<_##op_type##Comment>, \
::paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>, \
::paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);

REGISTER_BINARY_BITWISE_OP(bitwise_and, "Out = X \\& Y");
REGISTER_BINARY_BITWISE_OP(bitwise_or, "Out = X | Y");
REGISTER_BINARY_BITWISE_OP(bitwise_xor, "Out = X ^\\wedge Y");
REGISTER_UNARY_BITWISE_OP(bitwise_not, "Out = \\sim X");

REGISTER_BINARY_BITWISE_KERNEL(bitwise_and, CPU, ops::BitwiseAndFunctor);
REGISTER_BINARY_BITWISE_KERNEL(bitwise_or, CPU, ops::BitwiseOrFunctor);
REGISTER_BINARY_BITWISE_KERNEL(bitwise_xor, CPU, ops::BitwiseXorFunctor);
REGISTER_UNARY_BITWISE_KERNEL(bitwise_not, CPU, ops::BitwiseNotFunctor);
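
For reference, a minimal usage sketch of the Python API these registrations back (not part of the diff; it assumes a Paddle build containing this PR, with the ops exposed as paddle.bitwise_and and friends):

import paddle

x = paddle.to_tensor([-5, -1, 1], dtype='int32')
y = paddle.to_tensor([4, 2, -3], dtype='int32')

paddle.bitwise_and(x, y)  # [0, 2, 1]     elementwise, two's-complement
paddle.bitwise_or(x, y)   # [-1, -1, -3]
paddle.bitwise_xor(x, y)  # [-1, -3, -4]
paddle.bitwise_not(x)     # [4, 0, -2]

# BinaryBitwiseOp::InferShape broadcasts unequal shapes through
# GetBroadcastDimsArrays, so a [2, 3] tensor combined with a [3]
# tensor yields a [2, 3] result:
m = paddle.ones([2, 3], dtype='int32')
v = paddle.ones([3], dtype='int32')
assert paddle.bitwise_and(m, v).shape == [2, 3]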
paddle/fluid/operators/controlflow/bitwise_op.cu (new file, 87 additions)

@@ -0,0 +1,87 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/controlflow/bitwise_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h"

namespace paddle {
namespace operators {

#define BITWISE_BINARY_FUNCTOR(func, expr, bool_expr) \
template <typename T> \
struct Bitwise##func##CUDAFunctor { \
using ELEM_TYPE = T; \
HOSTDEVICE T operator()(const T* args) const { \
return args[0] expr args[1]; \
} \
}; \
\
template <> \
struct Bitwise##func##CUDAFunctor<bool> { \
using ELEM_TYPE = bool; \
HOSTDEVICE bool operator()(const bool* args) const { \
return args[0] bool_expr args[1]; \
} \
};

BITWISE_BINARY_FUNCTOR(And, &, &&)
BITWISE_BINARY_FUNCTOR(Or, |, ||)
BITWISE_BINARY_FUNCTOR(Xor, ^, !=)
#undef BITWISE_BINARY_FUNCTOR

template <typename T>
struct BitwiseNotCUDAFunctor {
using ELEM_TYPE = T;
HOSTDEVICE T operator()(const T* args) const { return ~args[0]; }
};

template <>
struct BitwiseNotCUDAFunctor<bool> {
using ELEM_TYPE = bool;
HOSTDEVICE bool operator()(const bool* args) const { return !args[0]; }
};

template <typename Functor>
class BinaryBitwiseOpKernel<platform::CUDADeviceContext, Functor>
: public framework::OpKernel<typename Functor::ELEM_TYPE> {
public:
using T = typename Functor::ELEM_TYPE;
void Compute(const framework::ExecutionContext& ctx) const override {
auto functor = Functor();
std::vector<const framework::Tensor*> ins;
std::vector<framework::Tensor*> outs;
const auto& cuda_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
int axis = PackTensorsIntoVector<T>(ctx, &ins, &outs);

if (ins.size() == 1) {
LaunchElementwiseCudaKernel<ElementwiseType::kUnary, T, T>(
cuda_ctx, ins, &outs, axis, functor);
} else {
LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
cuda_ctx, ins, &outs, axis, functor);
}
}
};

} // namespace operators
} // namespace paddle

namespace ops = ::paddle::operators;
namespace plat = ::paddle::platform;

REGISTER_BINARY_BITWISE_KERNEL(bitwise_and, CUDA, ops::BitwiseAndCUDAFunctor);
REGISTER_BINARY_BITWISE_KERNEL(bitwise_or, CUDA, ops::BitwiseOrCUDAFunctor);
REGISTER_BINARY_BITWISE_KERNEL(bitwise_xor, CUDA, ops::BitwiseXorCUDAFunctor);
REGISTER_BINARY_BITWISE_KERNEL(bitwise_not, CUDA, ops::BitwiseNotCUDAFunctor);
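
Note how the bool specializations above swap each integer operator for its logical counterpart (& for &&, | for ||, ^ for !=, ~ for !): applied to bool, the integer forms would promote to int and can yield values outside {0, 1} (in C++, ~true is -2), while != is exactly xor on booleans. A plain-Python sanity check of those identities:

# On booleans, xor coincides with inequality, and &/| with and/or.
for a in (False, True):
    for b in (False, True):
        assert (a ^ b) == (a != b)   # xor is inequality
        assert (a & b) == (a and b)  # bitwise and is logical and
        assert (a | b) == (a or b)   # bitwise or is logical or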