diff --git a/paddle/fluid/framework/new_executor/instruction/onednn/onednn_instruction.cc b/paddle/fluid/framework/new_executor/instruction/onednn/onednn_instruction.cc
index d51b857493090b..b1e7c10b70633f 100644
--- a/paddle/fluid/framework/new_executor/instruction/onednn/onednn_instruction.cc
+++ b/paddle/fluid/framework/new_executor/instruction/onednn/onednn_instruction.cc
@@ -375,9 +375,9 @@ OneDNNPhiKernelInstruction::OneDNNPhiKernelInstruction(
   }
   TensorNameMap(op, *value_exec_info_, yaml_info_parser, inputs_, outputs_);
 
-  // Step4: Mark is_run_mkldnn_kernel=true
+  // Step4: Mark is_run_onednn_kernel=true
   phi::MetaConfig new_config = infer_meta_context_.GetMetaConfig();
-  new_config.is_run_mkldnn_kernel = true;
+  new_config.is_run_onednn_kernel = true;
   infer_meta_context_.SetMetaConfig(new_config);
 
   // Step5: Handle skip_transform_inputs
diff --git a/paddle/fluid/framework/new_executor/instruction/onednn/onednn_legacy_instruction.cc b/paddle/fluid/framework/new_executor/instruction/onednn/onednn_legacy_instruction.cc
index 00bdd2e1c1a19e..65c58cfa52e588 100644
--- a/paddle/fluid/framework/new_executor/instruction/onednn/onednn_legacy_instruction.cc
+++ b/paddle/fluid/framework/new_executor/instruction/onednn/onednn_legacy_instruction.cc
@@ -229,9 +229,9 @@ OneDNNLegacyKernelInstruction::OneDNNLegacyKernelInstruction(
     }
   }
 
-  // Step3: Mark is_run_mkldnn_kernel=true
+  // Step3: Mark is_run_onednn_kernel=true
   phi::MetaConfig new_config = infer_meta_context_.GetMetaConfig();
-  new_config.is_run_mkldnn_kernel = true;
+  new_config.is_run_onednn_kernel = true;
   infer_meta_context_.SetMetaConfig(new_config);
 
   // Step4: Handle skip_transform_inputs
diff --git a/paddle/phi/core/meta_tensor.h b/paddle/phi/core/meta_tensor.h
index 570f519870a841..5a97c487720c6e 100644
--- a/paddle/phi/core/meta_tensor.h
+++ b/paddle/phi/core/meta_tensor.h
@@ -25,13 +25,13 @@ namespace phi {
 
 struct TEST_API MetaConfig {
   bool is_runtime{true};
-  bool is_run_mkldnn_kernel{false};
+  bool is_run_onednn_kernel{false};
 
   MetaConfig() = default;
   // supporting implicit construction is easier to use
-  MetaConfig(bool is_runtime, bool is_run_mkldnn_kernel)
+  MetaConfig(bool is_runtime, bool is_run_onednn_kernel)
       : is_runtime(is_runtime),
-        is_run_mkldnn_kernel(is_run_mkldnn_kernel) {}  // NOLINT
+        is_run_onednn_kernel(is_run_onednn_kernel) {}  // NOLINT
 };
 
 class TEST_API MetaTensor {
diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc
index 462d4b6da9e69a..c6a1ab135f2a7d 100644
--- a/paddle/phi/infermeta/binary.cc
+++ b/paddle/phi/infermeta/binary.cc
@@ -587,7 +587,7 @@ void ConvInferMeta(const MetaTensor& input,
             "dilation is %d.",
             dilations[i]));
   }
-  const bool channel_last = (config.is_run_mkldnn_kernel == false) &&
+  const bool channel_last = (config.is_run_onednn_kernel == false) &&
                             (data_format == "NHWC" || data_format == "NDHWC");
 
   PADDLE_ENFORCE_EQ(
@@ -773,7 +773,7 @@ void ConvTransposeInferMeta(const MetaTensor& x,
   std::vector<int> paddings_ = paddings;
   std::vector<int> dilations_ = dilations;
 
-  const DataLayout data_layout = config.is_run_mkldnn_kernel
+  const DataLayout data_layout = config.is_run_onednn_kernel
                                      ? DataLayout::kNCHW
                                      : common::StringToDataLayout(data_format);
 
@@ -1700,7 +1700,7 @@ void ElementwiseRawInferMeta(const MetaTensor& x,
 
 #ifdef PADDLE_WITH_DNNL
     bool should_rotate =
-        config.is_run_mkldnn_kernel &&
+        config.is_run_onednn_kernel &&
         (phi::OneDNNContext::tls().get_cur_paddle_data_layout() ==
          phi::DataLayout::kNHWC) &&
         (x_dims.size() >= 3 || y_dims.size() >= 3);
@@ -3374,7 +3374,7 @@ void PReluInferMeta(const MetaTensor& x,
                           "For mode 'channel', data_format must be one of "
                           "NCHW and NHWC. But received data_format: %s",
                           data_format));
-    if (data_format == "NCHW" || config.is_run_mkldnn_kernel) {
+    if (data_format == "NCHW" || config.is_run_onednn_kernel) {
       PADDLE_ENFORCE_EQ(product(alpha.dims()) == x_dim[1],
                         true,
                         common::errors::InvalidArgument(
diff --git a/paddle/phi/infermeta/fusion.cc b/paddle/phi/infermeta/fusion.cc
index bc6b1c5452874a..33c0ef03cf52d7 100644
--- a/paddle/phi/infermeta/fusion.cc
+++ b/paddle/phi/infermeta/fusion.cc
@@ -2914,7 +2914,7 @@ void BNActXPUInferMeta(const MetaTensor& x,
           x_dims,
           x_dims.size()));
 
-  const int64_t C = ((config.is_run_mkldnn_kernel == true) ||
+  const int64_t C = ((config.is_run_onednn_kernel == true) ||
                              (data_layout_str == DataLayout::kNCHW)
                          ? x_dims[1]
                          : x_dims[x_dims.size() - 1]);
diff --git a/paddle/phi/infermeta/multiary.cc b/paddle/phi/infermeta/multiary.cc
index f4fb316c499ed3..eaddd75c097c7d 100644
--- a/paddle/phi/infermeta/multiary.cc
+++ b/paddle/phi/infermeta/multiary.cc
@@ -965,7 +965,7 @@ void BatchNormInferMeta(const MetaTensor& x,
           x_dims,
           x_dims.size()));
 
-  const int64_t C = ((config.is_run_mkldnn_kernel == true) ||
+  const int64_t C = ((config.is_run_onednn_kernel == true) ||
                              (data_layout == DataLayout::kNCHW)
                          ? x_dims[1]
                          : x_dims[x_dims.size() - 1]);
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index 4a058e0efdb6b5..89135880485c18 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -3625,7 +3625,7 @@ void Pool2DInferMeta(const MetaTensor& x,
                      const std::string& padding_algorithm,
                      MetaTensor* out,
                      MetaConfig config) {
-  const bool channel_last = (config.is_run_mkldnn_kernel == false) &&
+  const bool channel_last = (config.is_run_onednn_kernel == false) &&
                             (data_format == "NHWC" || data_format == "NDHWC");
   if (!config.is_runtime && kernel_size.FromTensor()) {
     auto x_dims = x.dims();
@@ -3755,7 +3755,7 @@ void PoolInferMeta(const MetaTensor& x,
 
   // MKL-DNN Kernels are using NCHW order of dims description
   // so we ignore data_format consideration for MKL-DNN kernel
-  const bool channel_last = (config.is_run_mkldnn_kernel == false) &&
+  const bool channel_last = (config.is_run_onednn_kernel == false) &&
                             (data_format == "NHWC" || data_format == "NDHWC");
 
   // update paddings if "SAME" or global_pooling
@@ -4430,7 +4430,7 @@ void Shape64InferMeta(const MetaTensor& input,
                       MetaConfig config) {
   auto in_dim = input.dims();
   out->set_dims(common::make_ddim({in_dim.size()}));
-  if (config.is_run_mkldnn_kernel) {
+  if (config.is_run_onednn_kernel) {
     out->set_dtype(DataType::INT32);
   } else {
     out->set_dtype(DataType::INT64);
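
Note for reviewers: this is a mechanical rename of is_run_mkldnn_kernel to is_run_onednn_kernel, following Intel's rebranding of MKL-DNN as oneDNN; no behavior changes. As a minimal standalone sketch (not Paddle code; MiniMetaConfig and InferChannelLast are hypothetical names invented for illustration), the pattern every InferMeta call site above relies on looks like this: when the flag is set, the kernel describes dims in NCHW order, so the data_format string is ignored.

#include <iostream>
#include <string>

// Hypothetical stand-in for phi::MetaConfig (illustration only).
struct MiniMetaConfig {
  bool is_runtime{true};
  bool is_run_onednn_kernel{false};  // renamed from is_run_mkldnn_kernel
};

// Mirrors the channel_last decision in Pool2DInferMeta/ConvInferMeta above:
// with the oneDNN flag set, data_format is ignored and NCHW is assumed.
bool InferChannelLast(const std::string& data_format,
                      const MiniMetaConfig& config) {
  return !config.is_run_onednn_kernel &&
         (data_format == "NHWC" || data_format == "NDHWC");
}

int main() {
  MiniMetaConfig plain;   // flag defaults to false
  MiniMetaConfig onednn;
  onednn.is_run_onednn_kernel = true;

  std::cout << InferChannelLast("NHWC", plain) << '\n';   // 1: channel-last
  std::cout << InferChannelLast("NHWC", onednn) << '\n';  // 0: treated as NCHW
}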