From 3b59eabc2b739224c24d1676135705f14f0b7237 Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Wed, 7 Sep 2022 09:51:39 +0000
Subject: [PATCH 1/4] clear extra attrs of elementwise op in opmaker

---
 .../operators/elementwise/elementwise_op.h | 35 ----------
 paddle/phi/api/yaml/api_compat.yaml        | 70 +++++++++++++++++++
 2 files changed, 70 insertions(+), 35 deletions(-)

diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h
index 610e5932b1c36..e722d5f7e6e99 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op.h
@@ -216,47 +216,12 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInputX();
     AddInputY();
     AddOpOutput();
-
     AddAttr<int>("axis",
                  "(int, default -1). If X.dimension != Y.dimension,"
                  "Y.dimension must be a subsequence of x.dimension. And axis "
                  "is the start dimension index "
                  "for broadcasting Y onto X. ")
        .SetDefault(-1);
-    AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>("x_data_format", "This parameter is no longer used.")
-        .SetDefault("")
-        .AsExtra();
-    AddAttr<std::string>("y_data_format", "This parameter is no longer used.")
-        .SetDefault("")
-        .AsExtra();
-    AddAttr<bool>(
-        "use_quantizer",
-        "(bool, default false) "
-        "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "mkldnn_data_type",
-        "(string, default \"float32\"). Data type of mkldnn kernel")
-        .SetDefault("float32")
-        .InEnum({"float32", "int8", "bfloat16"})
-        .AsExtra();
-    /* int8 parameters */
-    AddAttr<float>("Scale_x",
-                   "(float, default 1.0f), The quantize scale of X tensor")
-        .SetDefault(1.0f)
-        .AsExtra();
-    AddAttr<float>("Scale_y",
-                   "(float, default 1.0f), The quantize scale of Y tensor")
-        .SetDefault(1.0f)
-        .AsExtra();
-    AddAttr<float>("Scale_out",
-                   "(float, default 1.0f), The quantize scale of output data")
-        .SetDefault(1.0f)
-        .AsExtra();
     AddOpComment();
   }
diff --git a/paddle/phi/api/yaml/api_compat.yaml b/paddle/phi/api/yaml/api_compat.yaml
index 2f34993e7ff10..d147d38d3f0c4 100644
--- a/paddle/phi/api/yaml/api_compat.yaml
+++ b/paddle/phi/api/yaml/api_compat.yaml
@@ -3,6 +3,12 @@
   extra :
     attrs : [bool use_cudnn = false, bool use_mkldnn = false]
 
+- api : add (elementwise_add)
+  backward : add_grad (elementwise_add_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - api : addmm
   backward : addmm_grad
   extra :
@@ -163,6 +169,12 @@
   outputs :
     out : Out
 
+- api : divide (elementwise_div)
+  backward : divide_grad (elementwise_div_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - api : dot
   inputs :
     {x : X, y : Y}
@@ -179,6 +191,12 @@
   extra :
     attrs : [bool fix_seed = false, int seed = 0]
 
+- api : elementwise_pow
+  backward : elementwise_pow_grad
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - api : erf
   inputs :
     x : X
@@ -203,6 +221,23 @@
   inputs: {x: X}
   outputs: {out: Out}
 
+- api : floor_divide (elementwise_floordiv)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
+- api : fmax (elementwise_fmax)
+  backward : fmax_grad (elementwise_fmax_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
+- api : fmin (elementwise_fmin)
+  backward : fmin_grad (elementwise_fmin_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - api : frobenius_norm
   backward : frobenius_norm_grad
   extra :
@@ -223,6 +258,12 @@
   extra :
     attrs : [bool is_test = false]
 
+- api : heaviside (elementwise_heaviside)
+  backward : heaviside_grad (elementwise_heaviside_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - api : inplace_abn
   backward : inplace_abn_grad
   extra :
@@ -261,6 +302,24 @@
             str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
             'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}',]
 
+- api : maximum (elementwise_max)
+  backward : maximum_grad (elementwise_max_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
+- api : minimum (elementwise_min)
+  backward : minimum_grad (elementwise_min_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
+- api : multiply (elementwise_mul)
+  backward : multiply_grad (elementwise_mul_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - api : mv
   inputs :
     {x : X, vec : Vec}
@@ -336,6 +395,11 @@
   extra :
     attrs : [bool use_mkldnn = false]
 
+- api : remainder (elementwise_mod)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - api : renorm
   backward : renorm_grad
   extra :
@@ -389,6 +453,12 @@
   extra :
     attrs : [bool use_mkldnn = false]
 
+- api : subtract (elementwise_sub)
+  backward : subtract_grad (elementwise_sub_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - api : sync_batch_norm
   backward : sync_batch_norm_grad
   extra :

From 9078633094e674156f4eede12baf4ca0b144bc64 Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Wed, 7 Sep 2022 16:36:36 +0000
Subject: [PATCH 2/4] fix op_debug_string_test

---
 paddle/fluid/operators/op_debug_string_test.cc | 2 --
 1 file changed, 2 deletions(-)
diff --git a/paddle/fluid/operators/op_debug_string_test.cc b/paddle/fluid/operators/op_debug_string_test.cc
index 372a71706ab5e..fd8e027092410 100644
--- a/paddle/fluid/operators/op_debug_string_test.cc
+++ b/paddle/fluid/operators/op_debug_string_test.cc
@@ -41,8 +41,6 @@ TEST(op_debug_str, test_unknown_dtype) {
   desc.SetOutput(framework::GradVarName("Y"), {framework::GradVarName("Y")});
   desc.SetAttr("axis", -1);
   desc.SetAttr("use_mkldnn", false);
-  desc.SetAttr("x_data_format", "");
-  desc.SetAttr("y_data_format", "");
 
   auto x_tensor = scope.Var("X")->GetMutable<framework::LoDTensor>();
   x_tensor->Resize(dim);

From 9469f9545aa629dc4cfe4a1d4bb3a5a63f0b8fd6 Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Thu, 8 Sep 2022 03:25:41 +0000
Subject: [PATCH 3/4] fix bug of grad_add

---
 paddle/phi/api/yaml/api_compat.yaml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/paddle/phi/api/yaml/api_compat.yaml b/paddle/phi/api/yaml/api_compat.yaml
index d147d38d3f0c4..63f05ddf20dae 100644
--- a/paddle/phi/api/yaml/api_compat.yaml
+++ b/paddle/phi/api/yaml/api_compat.yaml
@@ -248,6 +248,11 @@
   extra :
     attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false]
 
+- api : grad_add
+  extra :
+    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
+             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+
 - api : grid_sampler
   backward : grid_sampler_grad
   extra :

From 6805e8a2891cd1a985dfbabb7c8212f678a0f760 Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Tue, 13 Sep 2022 16:49:12 +0000
Subject: [PATCH 4/4] fix sort of runtime attrs

---
 paddle/fluid/framework/op_desc.cc | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index 4d0d10c7836c1..a725521c42347 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -998,16 +998,25 @@ void OpDesc::Flush() {
     std::vector<std::pair<std::string, Attribute>> sorted_attrs{attrs_.begin(),
                                                                 attrs_.end()};
+
+    std::vector<std::pair<std::string, Attribute>> sorted_runtime_attrs{
+        runtime_attrs_.begin(), runtime_attrs_.end()};
+
     std::sort(
         sorted_attrs.begin(),
         sorted_attrs.end(),
         [](std::pair<std::string, Attribute> a,
           std::pair<std::string, Attribute> b) { return a.first < b.first; });
+    std::sort(
+        sorted_runtime_attrs.begin(),
+        sorted_runtime_attrs.end(),
+        [](std::pair<std::string, Attribute> a,
+           std::pair<std::string, Attribute> b) { return a.first < b.first; });
 
     for (auto &attr : sorted_attrs) {
       set_attr_desc(attr.first, attr.second);
     }
 
-    for (auto &attr : runtime_attrs_) {
+    for (auto &attr : sorted_runtime_attrs) {
       set_attr_desc(attr.first, attr.second);
     }
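
Note: the sketch below is not part of the patch series. It is a minimal, self-contained C++ illustration, with hypothetical attribute names and values rather than Paddle's actual API, of the idea applied in OpDesc::Flush() above: copy an unordered attribute map into a vector and sort it by attribute name, so the attributes are serialized in a deterministic order regardless of the map's iteration order, which is the property the fourth patch extends to runtime_attrs_.

// sort_attrs_sketch.cc (illustrative only, not Paddle code)
#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

int main() {
  // Stand-in for an op's runtime attribute map; iteration order is unspecified.
  std::unordered_map<std::string, float> runtime_attrs{
      {"Scale_out", 1.0f}, {"Scale_x", 1.0f}, {"use_mkldnn", 0.0f}};

  // Copy into a vector and sort by attribute name before "flushing".
  std::vector<std::pair<std::string, float>> sorted(runtime_attrs.begin(),
                                                    runtime_attrs.end());
  std::sort(sorted.begin(), sorted.end(),
            [](const auto &a, const auto &b) { return a.first < b.first; });

  // Always prints Scale_out, Scale_x, use_mkldnn in this order.
  for (const auto &kv : sorted) {
    std::cout << kv.first << " = " << kv.second << "\n";
  }
  return 0;
}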