@@ -375,9 +375,9 @@ OneDNNPhiKernelInstruction::OneDNNPhiKernelInstruction(
   }
   TensorNameMap(op, *value_exec_info_, yaml_info_parser, inputs_, outputs_);
 
-  // Step4: Mark is_run_mkldnn_kernel=true
+  // Step4: Mark is_run_onednn_kernel=true
   phi::MetaConfig new_config = infer_meta_context_.GetMetaConfig();
-  new_config.is_run_mkldnn_kernel = true;
+  new_config.is_run_onednn_kernel = true;
   infer_meta_context_.SetMetaConfig(new_config);
 
   // Step5: Handle skip_transform_inputs
@@ -229,9 +229,9 @@ OneDNNLegacyKernelInstruction::OneDNNLegacyKernelInstruction(
     }
   }
 
-  // Step3: Mark is_run_mkldnn_kernel=true
+  // Step3: Mark is_run_onednn_kernel=true
   phi::MetaConfig new_config = infer_meta_context_.GetMetaConfig();
-  new_config.is_run_mkldnn_kernel = true;
+  new_config.is_run_onednn_kernel = true;
   infer_meta_context_.SetMetaConfig(new_config);
 
   // Step4: Handle skip_transform_inputs
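Both instruction constructors raise the flag with the same copy/modify/store idiom before any InferMeta call runs. A minimal self-contained sketch of that pattern; `DemoInferMetaContext` is a hypothetical stand-in for Paddle's infer-meta context, not its real API, and `MetaConfig` is trimmed from the header below:

```cpp
// Sketch of the idiom shared by the two hunks above: read the current
// config, flip the renamed oneDNN flag, and write the config back so
// every subsequent InferMeta call observes is_run_onednn_kernel == true.
struct MetaConfig {
  bool is_runtime{true};
  bool is_run_onednn_kernel{false};
};

class DemoInferMetaContext {  // hypothetical stand-in, not Paddle's class
 public:
  MetaConfig GetMetaConfig() const { return config_; }
  void SetMetaConfig(const MetaConfig& config) { config_ = config; }

 private:
  MetaConfig config_;
};

int main() {
  DemoInferMetaContext ctx;
  MetaConfig new_config = ctx.GetMetaConfig();
  new_config.is_run_onednn_kernel = true;
  ctx.SetMetaConfig(new_config);
  return 0;
}
```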
6 changes: 3 additions & 3 deletions paddle/phi/core/meta_tensor.h
@@ -25,13 +25,13 @@ namespace phi {
 
 struct TEST_API MetaConfig {
   bool is_runtime{true};
-  bool is_run_mkldnn_kernel{false};
+  bool is_run_onednn_kernel{false};
   MetaConfig() = default;
 
   // supporting implicit construction is easier to use
-  MetaConfig(bool is_runtime, bool is_run_mkldnn_kernel)
+  MetaConfig(bool is_runtime, bool is_run_onednn_kernel)
       : is_runtime(is_runtime),
-        is_run_mkldnn_kernel(is_run_mkldnn_kernel) {}  // NOLINT
+        is_run_onednn_kernel(is_run_onednn_kernel) {}  // NOLINT
 };
 
 class TEST_API MetaTensor {
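The two-argument constructor is deliberately non-`explicit` (hence the `// NOLINT`), so implicit brace construction at call sites keeps working after the rename. A runnable sketch under that assumption; `Describe` is a hypothetical consumer, not part of phi:

```cpp
#include <cstdio>

// Trimmed copy of the struct from the hunk above (TEST_API dropped).
struct MetaConfig {
  bool is_runtime{true};
  bool is_run_onednn_kernel{false};
  MetaConfig() = default;
  MetaConfig(bool is_runtime, bool is_run_onednn_kernel)
      : is_runtime(is_runtime),
        is_run_onednn_kernel(is_run_onednn_kernel) {}  // NOLINT
};

// Hypothetical InferMeta-style consumer taking the config by value.
void Describe(MetaConfig config) {
  std::printf("is_runtime=%d is_run_onednn_kernel=%d\n",
              config.is_runtime, config.is_run_onednn_kernel);
}

int main() {
  Describe({});            // defaults: 1 / 0
  Describe({true, true});  // implicit construction, oneDNN path
  return 0;
}
```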
8 changes: 4 additions & 4 deletions paddle/phi/infermeta/binary.cc
@@ -587,7 +587,7 @@ void ConvInferMeta(const MetaTensor& input,
                             "dilation is %d.",
                             dilations[i]));
   }
-  const bool channel_last = (config.is_run_mkldnn_kernel == false) &&
+  const bool channel_last = (config.is_run_onednn_kernel == false) &&
                             (data_format == "NHWC" || data_format == "NDHWC");
 
   PADDLE_ENFORCE_EQ(
@@ -773,7 +773,7 @@ void ConvTransposeInferMeta(const MetaTensor& x,
   std::vector<int> paddings_ = paddings;
   std::vector<int> dilations_ = dilations;
 
-  const DataLayout data_layout = config.is_run_mkldnn_kernel
+  const DataLayout data_layout = config.is_run_onednn_kernel
                                      ? DataLayout::kNCHW
                                      : common::StringToDataLayout(data_format);
 
@@ -1700,7 +1700,7 @@ void ElementwiseRawInferMeta(const MetaTensor& x,
 
 #ifdef PADDLE_WITH_DNNL
   bool should_rotate =
-      config.is_run_mkldnn_kernel &&
+      config.is_run_onednn_kernel &&
      (phi::OneDNNContext::tls().get_cur_paddle_data_layout() ==
        phi::DataLayout::kNHWC) &&
      (x_dims.size() >= 3 || y_dims.size() >= 3);
@@ -3374,7 +3374,7 @@ void PReluInferMeta(const MetaTensor& x,
                         "For mode 'channel', data_format must be one of "
                         "NCHW and NHWC. But received data_format: %s",
                         data_format));
-    if (data_format == "NCHW" || config.is_run_mkldnn_kernel) {
+    if (data_format == "NCHW" || config.is_run_onednn_kernel) {
      PADDLE_ENFORCE_EQ(product(alpha.dims()) == x_dim[1],
                        true,
                        common::errors::InvalidArgument(
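The recurring check in these hunks is that a raised oneDNN flag suppresses channel-last handling, because oneDNN kernels always describe dims in NCHW order. A standalone distillation of that predicate; the function name is an assumption for illustration, not something the PR introduces:

```cpp
#include <string>

// Hypothetical distillation of the channel_last checks in ConvInferMeta
// and the pooling InferMeta functions: NHWC/NDHWC is honored only when
// the kernel is not running through oneDNN.
bool IsChannelLast(bool is_run_onednn_kernel, const std::string& data_format) {
  return !is_run_onednn_kernel &&
         (data_format == "NHWC" || data_format == "NDHWC");
}
```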
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/fusion.cc
@@ -2914,7 +2914,7 @@ void BNActXPUInferMeta(const MetaTensor& x,
                        x_dims,
                        x_dims.size()));
 
-  const int64_t C = ((config.is_run_mkldnn_kernel == true) ||
+  const int64_t C = ((config.is_run_onednn_kernel == true) ||
                              (data_layout_str == DataLayout::kNCHW)
                          ? x_dims[1]
                          : x_dims[x_dims.size() - 1]);
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/multiary.cc
@@ -965,7 +965,7 @@ void BatchNormInferMeta(const MetaTensor& x,
                        x_dims,
                        x_dims.size()));
 
-  const int64_t C = ((config.is_run_mkldnn_kernel == true) ||
+  const int64_t C = ((config.is_run_onednn_kernel == true) ||
                              (data_layout == DataLayout::kNCHW)
                          ? x_dims[1]
                          : x_dims[x_dims.size() - 1]);
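`BNActXPUInferMeta` and `BatchNormInferMeta` select the channel count with the same ternary. A hedged standalone sketch; the name and signature are assumptions, only the selection logic comes from the hunks:

```cpp
#include <cstdint>
#include <vector>

// Hypothetical distillation of the channel-axis selection in the two
// batch-norm hunks: with the oneDNN flag raised (or an NCHW layout),
// channels live at dims[1]; otherwise at the last dimension.
int64_t ChannelCount(const std::vector<int64_t>& dims,
                     bool is_run_onednn_kernel,
                     bool is_nchw) {
  return (is_run_onednn_kernel || is_nchw) ? dims[1] : dims.back();
}
```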
6 changes: 3 additions & 3 deletions paddle/phi/infermeta/unary.cc
@@ -3625,7 +3625,7 @@ void Pool2DInferMeta(const MetaTensor& x,
                      const std::string& padding_algorithm,
                      MetaTensor* out,
                      MetaConfig config) {
-  const bool channel_last = (config.is_run_mkldnn_kernel == false) &&
+  const bool channel_last = (config.is_run_onednn_kernel == false) &&
                             (data_format == "NHWC" || data_format == "NDHWC");
   if (!config.is_runtime && kernel_size.FromTensor()) {
     auto x_dims = x.dims();
@@ -3755,7 +3755,7 @@ void PoolInferMeta(const MetaTensor& x,
 
   // MKL-DNN Kernels are using NCHW order of dims description
   // so we ignore data_format consideration for MKL-DNN kernel
-  const bool channel_last = (config.is_run_mkldnn_kernel == false) &&
+  const bool channel_last = (config.is_run_onednn_kernel == false) &&
                             (data_format == "NHWC" || data_format == "NDHWC");
 
   // update paddings if "SAME" or global_pooling
@@ -4430,7 +4430,7 @@ void Shape64InferMeta(const MetaTensor& input,
                       MetaConfig config) {
   auto in_dim = input.dims();
   out->set_dims(common::make_ddim({in_dim.size()}));
-  if (config.is_run_mkldnn_kernel) {
+  if (config.is_run_onednn_kernel) {
     out->set_dtype(DataType::INT32);
   } else {
     out->set_dtype(DataType::INT64);
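One behavioral branch survives the rename unchanged: under the oneDNN flag, `Shape64InferMeta` still emits 32-bit shape outputs. A standalone sketch of that branch; the enum and function here are stand-ins, not phi types:

```cpp
// Stand-in for phi::DataType, reduced to the two values the branch uses.
enum class DataType { INT32, INT64 };

// Hypothetical distillation of the dtype branch in Shape64InferMeta:
// oneDNN runs keep the 32-bit shape dtype, all other runs get 64-bit.
DataType ShapeOutputDtype(bool is_run_onednn_kernel) {
  return is_run_onednn_kernel ? DataType::INT32 : DataType::INT64;
}
```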