Clear extra attributes of some Op in OpMaker #45613

Merged · 9 commits · Sep 2, 2022
2 changes: 1 addition & 1 deletion paddle/fluid/framework/new_executor/new_executor_defs.cc
@@ -117,7 +117,7 @@ bool InterpretercoreInferShapeContext::HasOutputs(const std::string& name,
 }
 
 AttrReader InterpretercoreInferShapeContext::Attrs() const {
-  return AttrReader(op_.Attrs());
+  return AttrReader(op_.Attrs(), op_.RuntimeAttrs());
 }
 
 std::vector<std::string> InterpretercoreInferShapeContext::Inputs(
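
Note on this change (here and in the matching operator.cc hunk below): AttrReader is now constructed from two maps, the op's declared attributes plus the runtime attributes held by the OperatorBase, so extra attributes that this PR strips out of the OpMakers can still be resolved during InferShape. A minimal sketch of the fall-through lookup this two-argument constructor implies; TwoMapAttrReader and the bool-only AttributeMap below are standalone simplifications, not Paddle's actual types:

#include <cassert>
#include <string>
#include <unordered_map>

// Simplified attribute map: real Paddle attributes hold a variant, not bool.
using AttributeMap = std::unordered_map<std::string, bool>;

class TwoMapAttrReader {
 public:
  TwoMapAttrReader(const AttributeMap& attrs, const AttributeMap& runtime_attrs)
      : attrs_(attrs), runtime_attrs_(runtime_attrs) {}

  bool Get(const std::string& name) const {
    auto it = attrs_.find(name);
    if (it != attrs_.end()) return it->second;  // declared attribute wins
    it = runtime_attrs_.find(name);             // fall through to extras
    assert(it != runtime_attrs_.end() && "attribute missing from both maps");
    return it->second;
  }

 private:
  const AttributeMap& attrs_;
  const AttributeMap& runtime_attrs_;
};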
10 changes: 9 additions & 1 deletion paddle/fluid/framework/op_registry.cc
@@ -65,7 +65,15 @@ std::unique_ptr<OperatorBase> OpRegistry::CreateOp(
     op_base = std::unique_ptr<OperatorBase>(
         info.Creator()(type, inputs, outputs, attrs));
   }
-  op_base->SetRuntimeAttributeMap(runtime_attrs);
+  const auto& extra_attr_checkers =
+      operators::ExtraInfoUtils::Instance().GetExtraAttrsChecker(type);
+  if (!extra_attr_checkers.empty()) {
+    auto op_runtime_attr_map = runtime_attrs;
+    for (const auto& checker : extra_attr_checkers) {
+      checker(&op_runtime_attr_map, false);
+    }
+    op_base->SetRuntimeAttributeMap(op_runtime_attr_map);
+  }
   return op_base;
 }
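
The checkers fetched from ExtraInfoUtils are invoked as checker(&map, false), i.e. each one takes a mutable attribute map plus a bool whose meaning is not visible in this diff. A hypothetical checker of that shape, default-filling an extra attribute that an OpMaker below no longer declares; illustrative only, since the real checkers come from Paddle's extra-info registry and the flag name is a guess:

#include <functional>
#include <string>
#include <unordered_map>

using AttributeMap = std::unordered_map<std::string, bool>;  // simplified
using ExtraAttrChecker = std::function<void(AttributeMap*, bool)>;

// Hypothetical: reproduce the removed .SetDefault(false) for use_mkldnn.
ExtraAttrChecker use_mkldnn_checker =
    [](AttributeMap* attrs, bool /*only_check*/) {
      attrs->emplace("use_mkldnn", false);  // no-op if the attr is already set
    };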
4 changes: 3 additions & 1 deletion paddle/fluid/framework/operator.cc
@@ -767,7 +767,9 @@ class RuntimeInferShapeContext : public InferShapeContext {
     }
   }
 
-  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }
+  AttrReader Attrs() const override {
+    return AttrReader(op_.Attrs(), op_.RuntimeAttrs());
+  }
 
   std::vector<std::string> Inputs(const std::string& name) const override {
     return op_.Inputs(name);
9 changes: 0 additions & 9 deletions paddle/fluid/operators/abs_op.cc
@@ -54,15 +54,6 @@ class AbsOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "(Tensor), The input tensor of abs op.");
     AddOutput("Out", "(Tensor), The output tensor of abs op.");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<bool>("use_cudnn",
-                  "(bool, default false) Only used in cudnn kernel, need "
-                  "install cudnn")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Abs Operator.
 
4 changes: 0 additions & 4 deletions paddle/fluid/operators/addmm_op.cc
@@ -72,10 +72,6 @@ class AddMMOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("X", "(Tensor), The first input tensor for mul.");
     AddInput("Y", "(Tensor), The second input tensor for mul.");
     AddOutput("Out", "(Tensor), The output tensor of addmm op.");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddAttr<float>("Alpha", "coefficient of x*y.").SetDefault(1.0f);
     AddAttr<float>("Beta", "coefficient of input.").SetDefault(1.0f);
     AddComment(R"DOC(
5 changes: 0 additions & 5 deletions paddle/fluid/operators/affine_grid_op.cc
@@ -158,11 +158,6 @@ class AffineGridOpMaker : public framework::OpProtoAndCheckerMaker {
              "(Tensor) The shape of target image with format [N, C, H, W].")
         .AsDispensable();
     AddOutput("Output", "(Tensor) Output Tensor with shape [N, H, W, 2].");
-    AddAttr<bool>(
-        "use_cudnn",
-        "(bool, default false) Only used in cudnn kernel, need install cudnn")
-        .SetDefault(true)
-        .AsExtra();
     AddAttr<bool>("align_corners",
                   "(bool, default false) Whether to align the corners of input"
                   "and output.")
9 changes: 0 additions & 9 deletions paddle/fluid/operators/angle_op.cc
@@ -39,15 +39,6 @@ class AngleOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "(Tensor), The input tensor of angle op.");
     AddOutput("Out", "(Tensor), The output tensor of angle op.");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<bool>("use_cudnn",
-                  "(bool, default false) Only used in cudnn kernel, need "
-                  "install cudnn")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Angle Operator.
 
8 changes: 0 additions & 8 deletions paddle/fluid/operators/batch_norm_op.cc
@@ -293,14 +293,6 @@ void BatchNormOpMaker::Make() {
                 "NHWC kernel")
       .AsDispensable()
       .AsExtra();
-  AddAttr<bool>("use_mkldnn",
-                "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<bool>("fuse_with_relu",
-                "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false)
-      .AsExtra();
   AddAttr<bool>("use_global_stats",
                 "(bool, default false) Whether to use global mean and "
                 "variance. In inference or test mode, set use_global_stats "
10 changes: 0 additions & 10 deletions paddle/fluid/operators/clip_op.cc
@@ -64,16 +64,6 @@ class ClipOpMaker : public framework::OpProtoAndCheckerMaker {
              "input(x)");
     AddAttr<AttrType>("min", "float number, the minimum value to clip by.");
     AddAttr<AttrType>("max", "float number, the maximum value to clip by.");
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "bfloat16"})
-        .AsExtra();
     AddComment(R"DOC(
 Clip Operator.
 
17 changes: 0 additions & 17 deletions paddle/fluid/operators/concat_op.cc
@@ -81,11 +81,6 @@ class ConcatOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "Input tensors of concat operator.").AsDuplicable();
     AddOutput("Out", "Output tensor of concat operator.");
-    AddAttr<bool>(
-        "use_mkldnn",
-        "(bool, default false) Indicates if MKL-DNN kernel will be used")
-        .SetDefault(false)
-        .AsExtra();
     AddAttr<int>("axis",
                  "The axis along which the input tensors will be concatenated."
                  "The axis could also be negative numbers. Negative axis is "
@@ -99,18 +94,6 @@ class ConcatOpMaker : public framework::OpProtoAndCheckerMaker {
                  "It has higher priority than Attr(axis). "
                  "The shape of AxisTensor must be [1].")
         .AsDispensable();
-    AddAttr<bool>(
-        "use_quantizer",
-        "(bool, default false) "
-        "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "int8", "bfloat16"})
-        .AsExtra();
     AddComment(R"DOC(
 Concat Operator.
 
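
Removing use_quantizer and mkldnn_data_type from ConcatOpMaker also removes their .SetDefault and .InEnum handling, so that behavior has to be reproduced on the runtime side. A hypothetical checker mirroring the removed mkldnn_data_type declaration; illustrative only, since the real defaults and validation now live in Paddle's extra-info registry rather than being hand-written like this:

#include <set>
#include <stdexcept>
#include <string>
#include <unordered_map>

using StrAttrMap = std::unordered_map<std::string, std::string>;  // simplified

// Hypothetical: reproduce .SetDefault("float32") and
// .InEnum({"float32", "int8", "bfloat16"}) for concat's mkldnn_data_type.
void CheckConcatMkldnnDataType(StrAttrMap* attrs) {
  auto [it, inserted] = attrs->emplace("mkldnn_data_type", "float32");
  if (inserted) return;  // default was applied; nothing to validate
  static const std::set<std::string> allowed = {"float32", "int8", "bfloat16"};
  if (allowed.count(it->second) == 0) {
    throw std::invalid_argument(
        "mkldnn_data_type must be one of float32, int8, bfloat16");
  }
}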
99 changes: 12 additions & 87 deletions paddle/fluid/operators/conv_op.cc
@@ -285,11 +285,6 @@ framework::OpKernelType ConvOp::GetKernelTypeForVar(
 }
 
 void Conv2DOpMaker::Make() {
-  AddAttr<bool>("is_test",
-                "(bool, default false) Set to true for inference only, false "
-                "for training. Some layers may run faster when this is true.")
-      .SetDefault(false)
-      .AsExtra();
   AddInput("Input",
            "(Tensor) The input tensor of convolution operator. "
            "The format of input tensor is NCHW or NHWC, where N is batch size, "
@@ -356,22 +351,6 @@ void Conv2DOpMaker::Make() {
       "the input will be transformed automatically. ")
       .SetDefault("NCHW");
   // TODO(dzhwinter): need to registered layout transform function
-  AddAttr<int>("workspace_size_MB",
-               "Only used in cudnn kernel. Need set use_cudnn to true."
-               "workspace size for cudnn, in MB, "
-               "workspace is a section of GPU memory which will be "
-               "allocated/freed each time the operator runs, larger "
-               "workspace size can increase performance but also requires "
-               "better hardware. This size should be chosen carefully.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
-      .AsExtra();
-  AddAttr<bool>("exhaustive_search",
-                "(bool, default false) cuDNN has many algorithm to calculation "
-                "convolution, whether enable exhaustive search "
-                "for cuDNN convolution or not, default is False.")
-      .SetDefault(false)
-      .AsExtra();
-
 
 AddComment(R"DOC(
 Convolution Operator.
 
@@ -403,12 +382,18 @@ The input(X) size and output(Out) size may be different.
   Apply();
 }
 
+class DepthwiseConv2DOpMaker : public Conv2DOpMaker {
+ protected:
+  void Apply() override {
+    AddAttr<bool>(
+        "use_cudnn",
+        "(bool, default false) Only used in cudnn kernel, need install cudnn")
+        .SetDefault(false)
+        .AsExtra();
+  }
+};
+
 void Conv3DOpMaker::Make() {
-  AddAttr<bool>("is_test",
-                "(bool, default false) Set to true for inference only, false "
-                "for training. Some layers may run faster when this is true.")
-      .SetDefault(false)
-      .AsExtra();
   AddInput(
       "Input",
       "(Tensor) The input tensor of convolution operator. "
@@ -465,73 +450,13 @@ void Conv3DOpMaker::Make() {
       "dilations(d_dilation, h_dilation, w_dilation) of "
       "convolution operator.")
       .SetDefault({1, 1, 1});
-  AddAttr<bool>(
-      "use_cudnn",
-      "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<bool>("use_mkldnn",
-                "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<std::string>(
-      "mkldnn_data_type",
-      "(string, default \"float32\"). Data type of mkldnn kernel")
-      .SetDefault("float32")
-      .InEnum({"float32", "int8", "bfloat16"})
-      .AsExtra();
-  AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<std::string>("fuse_activation",
-                       "(string, default \"\") Only used in mkldnn kernel")
-      .SetDefault("")
-      .AsExtra();
-  AddAttr<float>("fuse_alpha",
-                 "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f)
-      .AsExtra();
-  AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f)
-      .AsExtra();
-  AddAttr<bool>(
-      "use_addto",
-      "(bool, default false) If use addto strategy or not, only used in "
-      "cudnn kernel")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<bool>("fuse_residual_connection",
-                "(bool, default false) Only used in mkldnn kernel. Used "
-                "whenever convolution output is as an input to residual "
-                "connection.")
-      .SetDefault(false)
-      .AsExtra();
   AddAttr<std::string>(
       "data_format",
       "(string, default NCDHW) Only used in "
       "An optional string from: \"NDHWC\", \"NCDHW\". "
       "Defaults to \"NDHWC\". Specify the data format of the output data, "
       "the input will be transformed automatically. ")
       .SetDefault("NCDHW");
-  AddAttr<bool>("force_fp32_output",
-                "(bool, default false) Only used in mkldnn INT8 kernel")
-      .SetDefault(false)
-      .AsExtra();
   // TODO(dzhwinter): need to registered layout transform function
-  AddAttr<int>("workspace_size_MB",
-               "Only used in cudnn kernel. workspace size for cudnn, in MB, "
-               "workspace is a section of GPU memory which will be "
-               "allocated/freed each time the operator runs, larger "
-               "workspace size can increase performance but also requires "
-               "better hardware. This size should be chosen carefully.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
-      .AsExtra();
-  AddAttr<bool>("exhaustive_search",
-                "(bool, default false) cuDNN has many algorithm to calculation "
-                "convolution, whether enable exhaustive search "
-                "for cuDNN convolution or not, default is False.")
-      .SetDefault(false)
-      .AsExtra();
   AddComment(R"DOC(
 Convolution3D Operator.
 
@@ -811,7 +736,7 @@ REGISTER_OPERATOR(conv2d_grad_grad, ops::ConvOpDoubleGrad);
 // depthwise convolution op
 REGISTER_OPERATOR(depthwise_conv2d,
                   ops::ConvOp,
-                  ops::Conv2DOpMaker,
+                  ops::DepthwiseConv2DOpMaker,
                   ops::ConvOpInferVarType,
                   ops::Conv2DGradMaker<paddle::framework::OpDesc>,
                   ops::Conv2DGradMaker<paddle::imperative::OpBase>);
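
Not stated in the PR text, but implied by this diff: use_cudnn is dropped from the shared Conv2DOpMaker, yet depthwise_conv2d, which previously registered with that same maker, still needs it declared, so the new DepthwiseConv2DOpMaker re-adds it through the Apply() hook that Make() invokes last (see the Apply(); call above). A skeletal sketch of that hook pattern, with the maker bodies elided and simplified names that are not Paddle's actual classes:

class Conv2DOpMakerSkeleton {
 public:
  virtual ~Conv2DOpMakerSkeleton() = default;

  void Make() {
    // ... declare the inputs/outputs/attrs shared by conv2d and
    // depthwise_conv2d here ...
    Apply();  // extension point; empty in the base maker
  }

 protected:
  virtual void Apply() {}
};

class DepthwiseConv2DOpMakerSkeleton : public Conv2DOpMakerSkeleton {
 protected:
  void Apply() override {
    // declare use_cudnn (an extra attr) for depthwise_conv2d only
  }
};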
64 changes: 0 additions & 64 deletions paddle/fluid/operators/conv_transpose_op.cc
@@ -91,11 +91,6 @@ framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar(
 }
 
 void Conv2DTransposeOpMaker::Make() {
-  AddAttr<bool>("is_test",
-                "(bool, default false) Set to true for inference only, false "
-                "for training. Some layers may run faster when this is true.")
-      .SetDefault(false)
-      .AsExtra();
   AddInput("Input",
            "(Tensor) The input tensor of convolution transpose operator. "
            "The format of input tensor is NCHW or NHWC. Where N is batch size, "
@@ -146,40 +141,6 @@ void Conv2DTransposeOpMaker::Make() {
       "(vector<int> default:{0, 0}), the paddings(h_pad, w_pad) of convolution "
       "transpose operator.")
       .SetDefault({0, 0});
-  AddAttr<bool>(
-      "use_cudnn",
-      "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<bool>("use_mkldnn",
-                "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<bool>("force_fp32_output",
-                "(bool, default false) Force BF16 kernel output FP32, only "
-                "used in MKL-DNN BF16")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<std::string>(
-      "mkldnn_data_type",
-      "(string, default \"float32\"). Data type of mkldnn kernel")
-      .SetDefault("float32")
-      .InEnum({"float32", "bfloat16"})
-      .AsExtra();
-  AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<std::string>("fuse_activation",
-                       "(string, default \"\") Only used in mkldnn kernel")
-      .SetDefault("")
-      .AsExtra();
-  AddAttr<float>("fuse_alpha",
-                 "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f)
-      .AsExtra();
-  AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
-      .SetDefault(0.0f)
-      .AsExtra();
   AddAttr<std::string>(
       "data_format",
       "(string, default NCHW) Only used in "
@@ -193,14 +154,6 @@ void Conv2DTransposeOpMaker::Make() {
       "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
-  AddAttr<int>("workspace_size_MB",
-               "Used in cudnn kernel only. workspace size for cudnn, in MB, "
-               "workspace is a section of GPU memory which will be "
-               "allocated/freed each time the operator runs, larger "
-               "workspace size can increase performance but also requires "
-               "better hardward. This size should be carefully set.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
-      .AsExtra();
   AddComment(R"DOC(
 Convolution2D Transpose Operator.
 
@@ -280,15 +233,6 @@ void Conv3DTransposeOpMaker::Make() {
       "(int default:1), the groups number of the convolution3d "
       "transpose operator. ")
       .SetDefault(1);
-  AddAttr<bool>(
-      "use_cudnn",
-      "(bool, default false) Only used in cudnn kernel, need install cudnn")
-      .SetDefault(false)
-      .AsExtra();
-  AddAttr<bool>("use_mkldnn",
-                "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false)
-      .AsExtra();
   AddAttr<std::string>(
       "data_format",
       "(string, default NCHW) Only used in "
@@ -302,14 +246,6 @@ void Conv3DTransposeOpMaker::Make() {
       "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
-  AddAttr<int>("workspace_size_MB",
-               "Used in cudnn kernel only. workspace size for cudnn, in MB, "
-               "workspace is a section of GPU memory which will be "
-               "allocated/freed each time the operator runs, larger "
-               "workspace size can increase performance but also requires "
-               "better hardward. This size should be carefully set.")
-      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB())
-      .AsExtra();
   AddComment(R"DOC(
 Convolution3D Transpose Operator.
 