Rename mkldnn to onednn in paddle/cinn #63199

Merged (1 commit) on Apr 3, 2024
paddle/cinn/hlir/op/nn.cc (22 changes: 11 additions & 11 deletions)
@@ -167,7 +167,7 @@ std::shared_ptr<OpStrategy> StrategyForConv2d(
int groups = 1;
std::string key = "";
std::string conv_type = "";
- bool use_mkldnn = false;
+ bool use_onednn = false;
if (attrs.attr_store.find("padding") != attrs.attr_store.end()) {
padding = absl::get<std::vector<int>>(attrs.attr_store.at("padding"));
}
@@ -183,8 +183,8 @@ std::shared_ptr<OpStrategy> StrategyForConv2d(
if (attrs.attr_store.find("groups") != attrs.attr_store.end()) {
groups = absl::get<int>(attrs.attr_store.at("groups"));
}
if (attrs.attr_store.find("use_mkldnn") != attrs.attr_store.end()) {
use_mkldnn = absl::get<bool>(attrs.attr_store.at("use_mkldnn"));
if (attrs.attr_store.find("use_onednn") != attrs.attr_store.end()) {
use_onednn = absl::get<bool>(attrs.attr_store.at("use_onednn"));
}
if (attrs.attr_store.find("key") != attrs.attr_store.end()) {
key = absl::get<std::string>(attrs.attr_store.at("key"));
@@ -231,7 +231,7 @@ std::shared_ptr<OpStrategy> StrategyForConv2d(
// A is input: [N, C, H, W], B is filter: [C_out, C_in/group,
// filter_h, filter_w]
if (target.arch == Target::Arch::X86) {
- if (groups == 1 && !use_mkldnn) {
+ if (groups == 1 && !use_onednn) {
out = pe::Conv2d_NCHW_5D(A.as_tensor_ref(),
B.as_tensor_ref(),
padding[0],
@@ -245,7 +245,7 @@ std::shared_ptr<OpStrategy> StrategyForConv2d(
target);
} else {
#ifdef CINN_WITH_DNNL
- out = pe::Conv2d_NCHW_MKLDNN(A.as_tensor_ref(),
+ out = pe::Conv2d_NCHW_ONEDNN(A.as_tensor_ref(),
B.as_tensor_ref(),
padding[0],
padding[1],
@@ -1912,12 +1912,12 @@ std::shared_ptr<OpStrategy> StrategyForSoftmax(
const std::vector<std::vector<int>> &output_shapes,
const Target &target) {
int axis = -1;
- bool use_mkldnn = false;
+ bool use_onednn = false;
if (attrs.attr_store.count("axis")) {
axis = absl::get<int>(attrs.attr_store.at("axis"));
}
if (attrs.attr_store.count("use_mkldnn")) {
use_mkldnn = absl::get<bool>(attrs.attr_store.at("use_mkldnn"));
if (attrs.attr_store.count("use_onednn")) {
use_onednn = absl::get<bool>(attrs.attr_store.at("use_onednn"));
}
framework::CINNCompute softmax_compute(
[=](lang::Args args, lang::RetValue *ret) {
@@ -1942,8 +1942,8 @@ std::shared_ptr<OpStrategy> StrategyForSoftmax(
pack_args[pack_args.size() - 1].operator std::string();

#ifdef CINN_WITH_DNNL
- if (use_mkldnn) {
-   out = pe::SoftmaxMKLDNN(A, new_axis, tensor_name);
+ if (use_onednn) {
+   out = pe::SoftmaxONEDNN(A, new_axis, tensor_name);
} else {
out = pe::Softmax(A, new_axis, tensor_name);
}
@@ -2043,7 +2043,7 @@ std::vector<std::vector<std::string>> InferLayoutForSoftmax(
CHECK_EQ(input_layouts.size(), 1U)
<< "The input's layout size is not 1! Please check again.";
if (input_shapes[0].size() > 4) {
- // input tensor needs to be transformed back to NCHW for mkldnn
+ // input tensor needs to be transformed back to NCHW for onednn
return {{"NCHW", "NCHW"}, {"NCHW"}};
}
return {{input_layouts[0], input_layouts[0]}, input_layouts};
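(Aside: the two strategy functions above read these flags from CINN's string-keyed attribute store, so the rename is purely a key and identifier change. Below is a minimal, self-contained sketch of that guarded-lookup pattern; it uses std::map and std::variant as stand-ins for the absl-based attr_t map actually used in CINN, so the types are illustrative only.)

#include <iostream>
#include <map>
#include <string>
#include <variant>

// Stand-ins for CINN's attribute store; the real attr_t is an absl::variant
// with many more alternatives (ints, floats, vectors, strings, ...).
using AttrValue = std::variant<bool, int, std::string>;
using AttrStore = std::map<std::string, AttrValue>;

int main() {
  AttrStore attr_store;
  attr_store["use_onednn"] = true;  // the key was "use_mkldnn" before this PR

  // Same guarded lookup as StrategyForConv2d / StrategyForSoftmax above.
  bool use_onednn = false;
  if (attr_store.find("use_onednn") != attr_store.end()) {
    use_onednn = std::get<bool>(attr_store.at("use_onednn"));
  }
  std::cout << "use_onednn = " << std::boolalpha << use_onednn << "\n";
  return 0;
}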
paddle/cinn/hlir/pe/nn.cc (12 changes: 6 additions & 6 deletions)
@@ -652,7 +652,7 @@ std::vector<ir::Tensor> Conv2d_NCHWc(const ir::Tensor &input,
}

#ifdef CINN_WITH_DNNL
- std::vector<ir::Tensor> Conv2d_NCHW_MKLDNN(const ir::Tensor &input,
+ std::vector<ir::Tensor> Conv2d_NCHW_ONEDNN(const ir::Tensor &input,
const ir::Tensor &weights,
int pad_h,
int pad_w,
@@ -674,7 +674,7 @@ std::vector<ir::Tensor> Conv2d_NCHW_MKLDNN(const ir::Tensor &input,
auto call = Compute(
{Expr(1)},
[=]() -> Expr {
return lang::CallExtern("cinn_cpu_mkldnn_conv2d_nchw_fp32",
return lang::CallExtern("cinn_cpu_onednn_conv2d_nchw_fp32",
{
Expr(input->shape[0]), // batch_size
Expr(input->shape[1]), // c_in
@@ -694,7 +694,7 @@ std::vector<ir::Tensor> Conv2d_NCHW_MKLDNN(const ir::Tensor &input,
weights // weights
});
},
UniqName("conv2d_nchw_mkldnn_out"));
UniqName("conv2d_nchw_onednn_out"));
auto out = call->TupleGet(0);
out->WithBuffer(input->type());
return {out, call};
@@ -1020,11 +1020,11 @@ std::vector<ir::Tensor> Softmax(const ir::Tensor &A,
}

#ifdef CINN_WITH_DNNL
- std::vector<ir::Tensor> SoftmaxMKLDNN(const ir::Tensor &A,
+ std::vector<ir::Tensor> SoftmaxONEDNN(const ir::Tensor &A,
int axis,
const std::string &output_name) {
CHECK_LE(A->shape.size(), 4U)
<< "Input's dimension of mkldnn softmax op is less than 4! Please check.";
<< "Input's dimension of onednn softmax op is less than 4! Please check.";
if (axis == -1) {
axis = A->shape.size() - 1;
}
@@ -1036,7 +1036,7 @@ std::vector<ir::Tensor> SoftmaxMKLDNN(const ir::Tensor &A,
auto call = Compute(
{Expr(1)},
[=]() -> Expr {
return lang::CallExtern("cinn_cpu_mkldnn_softmax_fp32",
return lang::CallExtern("cinn_cpu_onednn_softmax_fp32",
{
shape[0], // batch_size
shape[1], // c_in
paddle/cinn/hlir/pe/nn.h (4 changes: 2 additions & 2 deletions)
@@ -179,7 +179,7 @@ std::vector<ir::Tensor> Conv2d_NCHWc(
const cinn::common::Target &target = cinn::common::DefaultHostTarget());

#ifdef CINN_WITH_DNNL
- std::vector<ir::Tensor> Conv2d_NCHW_MKLDNN(
+ std::vector<ir::Tensor> Conv2d_NCHW_ONEDNN(
const ir::Tensor &input,
const ir::Tensor &weights,
int pad_h,
@@ -333,7 +333,7 @@ std::vector<ir::Tensor> Softmax(
const std::string &output_name = UniqName("T_softmax_out"));

#ifdef CINN_WITH_DNNL
- std::vector<ir::Tensor> SoftmaxMKLDNN(
+ std::vector<ir::Tensor> SoftmaxONEDNN(
const ir::Tensor &A,
int axis = -1,
const std::string &output_name = UniqName("T_softmax_out"));
paddle/cinn/runtime/cpu/CMakeLists.txt (4 changes: 2 additions & 2 deletions)
@@ -5,7 +5,7 @@ gather_srcs(cinnapi_src SRCS host_intrinsics.cc thread_backend.cc)
if(WITH_MKL_CBLAS)
gather_srcs(cinnapi_src SRCS mkl_math.cc cblas.cc)
if(WITH_MKLDNN)
-   gather_srcs(cinnapi_src SRCS mkldnn_math.cc)
+   gather_srcs(cinnapi_src SRCS onednn_math.cc)
endif()
endif()

@@ -16,7 +16,7 @@ if(WITH_MKL_CBLAS)
endif()

if(WITH_MKLDNN)
-   cinn_cc_test(test_mkldnn_math SRCS mkldnn_math_test.cc mkldnn_math.cc DEPS
+   cinn_cc_test(test_onednn_math SRCS onednn_math_test.cc onednn_math.cc DEPS
cinncore)
endif()
endif()
paddle/cinn/runtime/cpu/mkldnn_math.cc → paddle/cinn/runtime/cpu/onednn_math.cc (renamed)
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/cinn/runtime/cpu/mkldnn_math.h"
#include "paddle/cinn/runtime/cpu/onednn_math.h"

#include <vector>

@@ -25,7 +25,7 @@
using tag = memory::format_tag;
using dt = memory::data_type;

- void cinn_cpu_mkldnn_softmax_fp32(int batch,
+ void cinn_cpu_onednn_softmax_fp32(int batch,
int channel,
int h,
int w,
@@ -75,7 +75,7 @@ void cinn_cpu_mkldnn_softmax_fp32(int batch,
engine_stream.wait();
}

- void cinn_cpu_mkldnn_conv2d_nchw_fp32(int batch_size,
+ void cinn_cpu_onednn_conv2d_nchw_fp32(int batch_size,
int c_in,
int input_h,
int input_w,
@@ -157,7 +157,7 @@ void cinn_cpu_mkldnn_conv2d_nchw_fp32(int batch_size,
cpu_stream.wait();
}

- CINN_REGISTER_HELPER(cinn_cpu_mkldnn) {
+ CINN_REGISTER_HELPER(cinn_cpu_onednn) {
using namespace cinn; // NOLINT
using backends::FunctionProto;
auto host_target = cinn::common::DefaultHostTarget();
@@ -195,7 +195,7 @@ CINN_REGISTER_HELPER(cinn_cpu_mkldnn) {
return shape;
};

- REGISTER_EXTERN_FUNC_HELPER(cinn_cpu_mkldnn_conv2d_nchw_fp32, host_target)
+ REGISTER_EXTERN_FUNC_HELPER(cinn_cpu_onednn_conv2d_nchw_fp32, host_target)
.SetRetType<void>()
.AddInputType<int>() // batch_size
.AddInputType<int>() // c_in
@@ -217,7 +217,7 @@ CINN_REGISTER_HELPER(cinn_cpu_mkldnn) {
.SetShapeInference(inference_shape_conv2d_nchw)
.End();

- REGISTER_EXTERN_FUNC_HELPER(cinn_cpu_mkldnn_softmax_fp32, host_target)
+ REGISTER_EXTERN_FUNC_HELPER(cinn_cpu_onednn_softmax_fp32, host_target)
.SetRetType<void>()
.AddInputType<int>() // batch_size
.AddInputType<int>() // c_in
paddle/cinn/runtime/cpu/mkldnn_math.h → paddle/cinn/runtime/cpu/onednn_math.h (renamed)
@@ -21,15 +21,15 @@

// define some C APIs
extern "C" {
- void cinn_cpu_mkldnn_softmax_fp32(int batch,
+ void cinn_cpu_onednn_softmax_fp32(int batch,
int channel,
int h,
int w,
int axis,
cinn_buffer_t* inputs,
cinn_buffer_t* out);

- void cinn_cpu_mkldnn_conv2d_nchw_fp32(int batch_size,
+ void cinn_cpu_onednn_conv2d_nchw_fp32(int batch_size,
int c_in,
int input_h,
int input_w,
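(For reference, a minimal host-side sketch of calling the renamed softmax entry point directly. It assumes the BufferBuilder helper from paddle/cinn/common/test_helper.h, as used by CreateBuffer in the test file below; the header path and the axis value are assumptions, and the program must be built with CINN_WITH_DNNL and linked against cinncore.)

#include "paddle/cinn/common/test_helper.h"        // BufferBuilder (assumed path)
#include "paddle/cinn/runtime/cpu/onednn_math.h"

int main() {
  const int batch = 1, channel = 3, h = 8, w = 8;
  const int axis = 3;  // softmax over the innermost (W) axis of NCHW

  // Zero-initialized NCHW buffers, mirroring CreateBuffer in the test below.
  cinn_buffer_t* input = cinn::common::BufferBuilder(cinn::common::Float(32),
                                                     {batch, channel, h, w})
                             .set_zero()
                             .Build();
  cinn_buffer_t* output = cinn::common::BufferBuilder(cinn::common::Float(32),
                                                      {batch, channel, h, w})
                              .set_zero()
                              .Build();

  // Renamed from cinn_cpu_mkldnn_softmax_fp32 in this PR.
  cinn_cpu_onednn_softmax_fp32(batch, channel, h, w, axis, input, output);
  return 0;
}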
paddle/cinn/runtime/cpu/mkldnn_math_test.cc → paddle/cinn/runtime/cpu/onednn_math_test.cc (renamed)
@@ -42,7 +42,7 @@ cinn_buffer_t *CreateBuffer(const std::vector<int> shape,
return cinn::common::BufferBuilder(Float(32), shape).set_zero().Build();
}

- TEST(cinn_cpu_mkldnn_conv2d_nchw_fp32, test) {
+ TEST(cinn_cpu_onednn_conv2d_nchw_fp32, test) {
int n(1);
int c_in(3);
int i_h(224);
@@ -65,7 +65,7 @@ TEST(cinn_cpu_mkldnn_conv2d_nchw_fp32, test) {
auto call = Compute(
{Expr(1)},
[=]() -> Expr {
return lang::CallExtern("cinn_cpu_mkldnn_conv2d_nchw_fp32",
return lang::CallExtern("cinn_cpu_onednn_conv2d_nchw_fp32",
{
Expr(n), // batch_size
Expr(c_in), // c_in
@@ -85,7 +85,7 @@ TEST(cinn_cpu_mkldnn_conv2d_nchw_fp32, test) {
weights.tensor() // weights
});
},
"cinn_cpu_mkldnn_conv2d_nchw_fp32");
"cinn_cpu_onednn_conv2d_nchw_fp32");

auto out = call->TupleGet(0);
out->WithBuffer(Float(32));
paddle/cinn/runtime/cpu/use_extern_funcs.h (2 changes: 1 addition & 1 deletion)
@@ -21,7 +21,7 @@ CINN_USE_REGISTER(host_intrinsics)
CINN_USE_REGISTER(mkl_math)
CINN_USE_REGISTER(cinn_cpu_mkl)
#ifdef CINN_WITH_DNNL
- CINN_USE_REGISTER(cinn_cpu_mkldnn)
+ CINN_USE_REGISTER(cinn_cpu_onednn)
#endif
#endif
CINN_USE_REGISTER(cinn_backend_parallel)
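(The helper renamed in onednn_math.cc above and this use-site must be renamed together: CINN_USE_REGISTER references a symbol that CINN_REGISTER_HELPER defines, so renaming only one side would fail at link time. A simplified, self-contained mock of that pairing is sketched below; CINN's real macros differ in detail.)

#include <iostream>

// Simplified mock of CINN's registration macros (real definitions differ).
#define CINN_REGISTER_HELPER(name) bool __cinn_registrar_##name()
#define CINN_USE_REGISTER(name)          \
  extern bool __cinn_registrar_##name(); \
  static bool __cinn_used_##name = __cinn_registrar_##name();

// As in onednn_math.cc: defines the registrar symbol.
CINN_REGISTER_HELPER(cinn_cpu_onednn) {
  std::cout << "registering oneDNN extern functions\n";
  return true;
}

// As in use_extern_funcs.h: forces the registrar to run at startup. Renaming
// only one of the two sites would leave this referencing a missing symbol.
CINN_USE_REGISTER(cinn_cpu_onednn)

int main() { return 0; }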