Adapting device-specific Extra Attributes for the PHI kernel (PaddlePaddle#46342)

* add extra attr property set

* add type_info for all context

* add onednn context to all context

* fix context compile error

* simplify conv kernel args

* pass runtime attr into dev_ctx

* fix macro error

* clear conv_grad_kernel extra args

* merge conv_grad_grad into conv_grad

* clear conv2d_grad_grad extra attrs

* clear yaml and eager extra attr

* fix conv1d error

* change to thread local

* fix npu compile failed

* try to fix windows compile failed

* add conv2d onednn phi kernel

* fix ci bugs (#36)

* fix compile bugs (#38)

* fix extra input transform bug (#39)

* support dynamic created attr (#40)

* reset extra info gen code

* rm conv_grad_grad kernel

* reimpl pass attr adapting

* add int attr support

* remove vector inputnames creating

* fix map at error

* Update paddle/phi/kernels/onednn/conv_grad_kernel.cc

Co-authored-by: Sławomir Siwek <slawomir.siwek@intel.com>

* remove useless extra attrs

* replace mkldnn_engine by onednn_engine

Co-authored-by: YuanRisheng <yuanrisheng@baidu.com>
Co-authored-by: Sławomir Siwek <slawomir.siwek@intel.com>
3 people authored Nov 1, 2022
1 parent f82d7e3 commit c923e6c
Showing 68 changed files with 3,673 additions and 1,955 deletions.
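
Only a few of those files are reproduced below. For orientation, the first hunks trim the eager-mode conv2d entry point: the misspelled paddding_algorithm attribute becomes padding_algorithm, and the backend-specific extras use_addto, workspace_size_MB, and exhaustive_search drop out of the argument list. The before/after sketch below paraphrases those hunks; the leading tensor parameters are inferred from the surrounding code and the indentation is not reproduced verbatim.

// Before this commit: backend-specific "extra" attributes travel through
// the public eager API.
paddle::experimental::Tensor conv2d_ad_func(
    const paddle::experimental::Tensor& input,
    const paddle::experimental::Tensor& filter,
    std::vector<int> strides,
    std::vector<int> paddings,
    std::string paddding_algorithm,  // original misspelling
    int groups,
    std::vector<int> dilations,
    std::string data_format,
    bool use_addto,
    int workspace_size_MB,
    bool exhaustive_search);

// After this commit: only the math-relevant attributes remain; the extras
// are adapted per device inside the PHI kernel layer.
paddle::experimental::Tensor conv2d_ad_func(
    const paddle::experimental::Tensor& input,
    const paddle::experimental::Tensor& filter,
    std::vector<int> strides,
    std::vector<int> paddings,
    std::string padding_algorithm,
    std::vector<int> dilations,
    int groups,
    std::string data_format);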
@@ -24,10 +24,7 @@ paddle::experimental::Tensor conv2d_ad_func(
  const paddle::experimental::Tensor& filter,
  std::vector<int> strides,
  std::vector<int> paddings,
- std::string paddding_algorithm,
- int groups,
+ std::string padding_algorithm,
  std::vector<int> dilations,
- std::string data_format,
- bool use_addto,
- int workspace_size_MB,
- bool exhaustive_search);
+ int groups,
+ std::string data_format);
@@ -29,13 +29,10 @@ paddle::experimental::Tensor conv2d_ad_func(
  const paddle::experimental::Tensor& filter,
  std::vector<int> strides,
  std::vector<int> paddings,
- std::string paddding_algorithm,
- int groups,
+ std::string padding_algorithm,
  std::vector<int> dilations,
- std::string data_format,
- bool use_addto,
- int workspace_size_MB,
- bool exhaustive_search) {
+ int groups,
+ std::string data_format) {
  // Dygraph Record Event
  paddle::platform::RecordEvent dygraph_entrance_record_event(
  "conv2d dygraph", paddle::platform::TracerEventType::Operator, 1);
@@ -64,13 +61,10 @@ paddle::experimental::Tensor conv2d_ad_func(
  new_filter,
  strides,
  paddings,
- paddding_algorithm,
- groups,
+ padding_algorithm,
  dilations,
- data_format,
- use_addto,
- workspace_size_MB,
- exhaustive_search);
+ groups,
+ data_format);
  }
  }

@@ -92,13 +86,10 @@ paddle::experimental::Tensor conv2d_ad_func(
  filter,
  strides,
  paddings,
- paddding_algorithm,
- groups,
+ padding_algorithm,
  dilations,
- data_format,
- use_addto,
- workspace_size_MB,
- exhaustive_search);
+ groups,
+ data_format);
  transformer->SetOutTensorLayout(&out);
  if (need_tune) {
  egr::Controller::Instance().EnableLayoutAutoTune();
@@ -119,13 +110,10 @@ paddle::experimental::Tensor conv2d_ad_func(
  filter,
  strides,
  paddings,
- paddding_algorithm,
- groups,
+ padding_algorithm,
  dilations,
- data_format,
- use_addto,
- workspace_size_MB,
- exhaustive_search);
+ groups,
+ data_format);
  // Check NaN and Inf if needed
  if (FLAGS_check_nan_inf) {
  egr::CheckTensorHasNanOrInf("conv2d", api_result);
@@ -157,13 +145,10 @@ paddle::experimental::Tensor conv2d_ad_func(
  // SetAttributes if needed
  grad_node->SetAttributestrides(strides);
  grad_node->SetAttributepaddings(paddings);
- grad_node->SetAttributepaddding_algorithm(paddding_algorithm);
+ grad_node->SetAttributepadding_algorithm(padding_algorithm);
  grad_node->SetAttributegroups(groups);
  grad_node->SetAttributedilations(dilations);
  grad_node->SetAttributedata_format(data_format);
- grad_node->SetAttributeuse_addto(use_addto);
- grad_node->SetAttributeworkspace_size_MB(workspace_size_MB);
- grad_node->SetAttributeexhaustive_search(exhaustive_search);
  // Set TensorWrappers for Forward Inputs if needed
  grad_node->SetTensorWrapperinput(input);
  grad_node->SetTensorWrapperfilter(filter);
29 changes: 7 additions & 22 deletions paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc
@@ -46,13 +46,10 @@ Conv2dGradNodeFinal::operator()(
  auto& grad_out = hooked_grads[0][0];
  auto& strides = this->strides_;
  auto& paddings = this->paddings_;
- auto& paddding_algorithm = this->paddding_algorithm_;
+ auto& padding_algorithm = this->padding_algorithm_;
  auto& groups = this->groups_;
  auto& dilations = this->dilations_;
  auto& data_format = this->data_format_;
- auto& use_addto = this->use_addto_;
- auto& workspace_size_MB = this->workspace_size_MB_;
- auto& exhaustive_search = this->exhaustive_search_;
  // Prepare Grad function call

  const auto& out_metas = OutputMeta();
@@ -87,13 +84,10 @@ Conv2dGradNodeFinal::operator()(
  grad_out,
  strides,
  paddings,
- paddding_algorithm,
- groups,
+ padding_algorithm,
  dilations,
+ groups,
  data_format,
- use_addto,
- workspace_size_MB,
- exhaustive_search,
  api_output_0,
  api_output_1);
  // Check NaN and Inf id needed
@@ -134,13 +128,10 @@ Conv2dGradNodeFinal::operator()(
  // SetAttributes if needed
  grad_node->SetAttributestrides(strides);
  grad_node->SetAttributepaddings(paddings);
- grad_node->SetAttributepaddding_algorithm(paddding_algorithm);
+ grad_node->SetAttributepadding_algorithm(padding_algorithm);
  grad_node->SetAttributegroups(groups);
  grad_node->SetAttributedilations(dilations);
  grad_node->SetAttributedata_format(data_format);
- grad_node->SetAttributeuse_addto(use_addto);
- grad_node->SetAttributeworkspace_size_MB(workspace_size_MB);
- grad_node->SetAttributeexhaustive_search(exhaustive_search);
  // Set TensorWrappers for Forward Inputs if needed
  grad_node->SetTensorWrapperinput(input);
  grad_node->SetTensorWrapperfilter(filter);
@@ -215,13 +206,10 @@ Conv2dDoubleGradNodeFinal::operator()(

  auto& strides = this->strides_;
  auto& paddings = this->paddings_;
- auto& paddding_algorithm = this->paddding_algorithm_;
+ auto& padding_algorithm = this->padding_algorithm_;
  auto& groups = this->groups_;
  auto& dilations = this->dilations_;
  auto& data_format = this->data_format_;
- auto& use_addto = this->use_addto_;
- auto& workspace_size_MB = this->workspace_size_MB_;
- auto& exhaustive_search = this->exhaustive_search_;
  // Prepare Grad function call

  const auto& out_metas = OutputMeta();
@@ -261,13 +249,10 @@ Conv2dDoubleGradNodeFinal::operator()(
  grad_filter_grad_optional,
  strides,
  paddings,
- paddding_algorithm,
- groups,
+ padding_algorithm,
  dilations,
+ groups,
  data_format,
- use_addto,
- workspace_size_MB,
- exhaustive_search,
  api_output_0,
  api_output_1,
  api_output_2);
32 changes: 6 additions & 26 deletions paddle/fluid/eager/api/manual/eager_manual/nodes/nodes.h
@@ -63,8 +63,8 @@ class Conv2dGradNodeFinal : public egr::GradNodeBase {
  void SetAttributepaddings(const std::vector<int>& paddings) {
  paddings_ = paddings;
  }
- void SetAttributepaddding_algorithm(const std::string& paddding_algorithm) {
- paddding_algorithm_ = paddding_algorithm;
+ void SetAttributepadding_algorithm(const std::string& padding_algorithm) {
+ padding_algorithm_ = padding_algorithm;
  }
  void SetAttributegroups(const int& groups) { groups_ = groups; }
  void SetAttributedilations(const std::vector<int>& dilations) {
@@ -73,13 +73,6 @@ class Conv2dGradNodeFinal : public egr::GradNodeBase {
  void SetAttributedata_format(const std::string& data_format) {
  data_format_ = data_format;
  }
- void SetAttributeuse_addto(const bool& use_addto) { use_addto_ = use_addto; }
- void SetAttributeworkspace_size_MB(const int& workspace_size_MB) {
- workspace_size_MB_ = workspace_size_MB;
- }
- void SetAttributeexhaustive_search(const bool& exhaustive_search) {
- exhaustive_search_ = exhaustive_search;
- }

  private:
  // TensorWrappers
@@ -89,13 +82,10 @@ class Conv2dGradNodeFinal : public egr::GradNodeBase {
  // Attributes
  std::vector<int> strides_;
  std::vector<int> paddings_;
- std::string paddding_algorithm_;
+ std::string padding_algorithm_;
  int groups_;
  std::vector<int> dilations_;
  std::string data_format_;
- bool use_addto_;
- int workspace_size_MB_;
- bool exhaustive_search_;
  };

  class Conv2dDoubleGradNodeFinal : public egr::GradNodeBase {
@@ -146,8 +136,8 @@ class Conv2dDoubleGradNodeFinal : public egr::GradNodeBase {
  void SetAttributepaddings(const std::vector<int>& paddings) {
  paddings_ = paddings;
  }
- void SetAttributepaddding_algorithm(const std::string& paddding_algorithm) {
- paddding_algorithm_ = paddding_algorithm;
+ void SetAttributepadding_algorithm(const std::string& padding_algorithm) {
+ padding_algorithm_ = padding_algorithm;
  }
  void SetAttributegroups(const int& groups) { groups_ = groups; }
  void SetAttributedilations(const std::vector<int>& dilations) {
@@ -156,13 +146,6 @@ class Conv2dDoubleGradNodeFinal : public egr::GradNodeBase {
  void SetAttributedata_format(const std::string& data_format) {
  data_format_ = data_format;
  }
- void SetAttributeuse_addto(const bool& use_addto) { use_addto_ = use_addto; }
- void SetAttributeworkspace_size_MB(const int& workspace_size_MB) {
- workspace_size_MB_ = workspace_size_MB;
- }
- void SetAttributeexhaustive_search(const bool& exhaustive_search) {
- exhaustive_search_ = exhaustive_search;
- }

  private:
  // TensorWrappers
@@ -173,13 +156,10 @@ class Conv2dDoubleGradNodeFinal : public egr::GradNodeBase {
  // Attributes
  std::vector<int> strides_;
  std::vector<int> paddings_;
- std::string paddding_algorithm_;
+ std::string padding_algorithm_;
  int groups_;
  std::vector<int> dilations_;
  std::string data_format_;
- bool use_addto_;
- int workspace_size_MB_;
- bool exhaustive_search_;
  };

  class AddNGradNodeFinal : public egr::GradNodeBase {
2 changes: 1 addition & 1 deletion paddle/fluid/framework/archive.h
@@ -32,8 +32,8 @@
  #include <valarray>
  #include <vector>

- #include "paddle/fluid/framework/expect.h"
  #include "paddle/fluid/platform/enforce.h"
+ #include "paddle/phi/core/expect.h"

  namespace paddle {
  namespace framework {
2 changes: 1 addition & 1 deletion paddle/fluid/framework/channel.h
@@ -30,7 +30,7 @@
  #include <utility>
  #include <vector>

- #include "paddle/fluid/framework/expect.h"
+ #include "paddle/phi/core/expect.h"

  namespace paddle {
  namespace framework {
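
Where do the removed extras go? The commit bullets "pass runtime attr into dev_ctx" and "add onednn context to all context" describe the mechanism: device-specific extra attributes are attached to the device context at runtime and read only by the backends that need them. The self-contained sketch below illustrates that pattern; the type and member names (RuntimeAttrContext, SetRuntimeAttr, GetRuntimeAttrOr) are hypothetical stand-ins, not Paddle's actual context API.

#include <map>
#include <string>
#include <utility>
#include <variant>

// Hypothetical illustration only: the names here are not the real PHI API.
// Extra attributes live in a per-context map instead of appearing in every
// kernel signature.
struct RuntimeAttrContext {
  using Attr = std::variant<bool, int, std::string>;

  void SetRuntimeAttr(const std::string& name, Attr value) {
    extra_attrs_[name] = std::move(value);
  }

  template <typename T>
  T GetRuntimeAttrOr(const std::string& name, T default_value) const {
    auto it = extra_attrs_.find(name);
    return it == extra_attrs_.end() ? default_value : std::get<T>(it->second);
  }

 private:
  std::map<std::string, Attr> extra_attrs_;
};

// A conv-like kernel no longer receives use_addto / workspace_size_MB /
// exhaustive_search as parameters; a backend that cares reads them from the
// context and falls back to defaults (chosen arbitrarily for this sketch).
void ConvKernelSketch(const RuntimeAttrContext& dev_ctx
                      /*, tensors and core attributes elided */) {
  const bool use_addto = dev_ctx.GetRuntimeAttrOr<bool>("use_addto", false);
  const int workspace_size_MB =
      dev_ctx.GetRuntimeAttrOr<int>("workspace_size_MB", 512);
  const bool exhaustive_search =
      dev_ctx.GetRuntimeAttrOr<bool>("exhaustive_search", false);
  (void)use_addto;
  (void)workspace_size_MB;
  (void)exhaustive_search;
  // ... backend-specific convolution dispatch would happen here ...
}

Kernels on backends that never use these extras simply never look them up, which is what allows the public conv2d signatures in the hunks above to shrink.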