From 7b369ecca08e33efc50b888273a508b2e448cd16 Mon Sep 17 00:00:00 2001
From: zhangbo9674
Date: Mon, 14 Aug 2023 09:31:22 +0000
Subject: [PATCH 1/3] fix insert get_parameter op bug

---
 paddle/fluid/framework/executor_cache.cc                  | 2 +-
 paddle/fluid/framework/new_executor/new_ir_interpreter.cc | 7 ++++---
 paddle/fluid/ir_adaptor/translator/program_translator.cc  | 2 +-
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/paddle/fluid/framework/executor_cache.cc b/paddle/fluid/framework/executor_cache.cc
index 67646dec6db67..455f11d128032 100644
--- a/paddle/fluid/framework/executor_cache.cc
+++ b/paddle/fluid/framework/executor_cache.cc
@@ -367,7 +367,7 @@ std::unique_ptr<::ir::Program> ConstructFowardIrProgram(
     }
   }
 
-  // add fetch with place op to program
+  // add data op to program
   auto *block = local_program.MutableBlock(0);
   for (auto &in_t : x) {
     auto name = in_t.name();
diff --git a/paddle/fluid/framework/new_executor/new_ir_interpreter.cc b/paddle/fluid/framework/new_executor/new_ir_interpreter.cc
index ead957f7e87ec..7dac96b8f6679 100644
--- a/paddle/fluid/framework/new_executor/new_ir_interpreter.cc
+++ b/paddle/fluid/framework/new_executor/new_ir_interpreter.cc
@@ -481,9 +481,10 @@ std::string NewIRInterpreter::DebugValueInfo() {
                    platform::errors::PreconditionNotMet(
                        "var(%s) should exist in var_name_2_id_", kv.second));
     auto* var = InnerScope()->FindVar(kv.second);
-    PADDLE_ENFORCE(var != nullptr,
-                   platform::errors::PreconditionNotMet(
-                       "var(%s) should exist in var_name_2_id_", kv.second));
+    PADDLE_ENFORCE(
+        var != nullptr,
+        platform::errors::PreconditionNotMet(
+            "var(%s) should exist in scope (%p)", kv.second, InnerScope()));
     os << kv.first.impl() << " -> " << kv.second << " -> "
        << var_name_2_id_.at(kv.second) << " -> " << var << "\n";
   }
diff --git a/paddle/fluid/ir_adaptor/translator/program_translator.cc b/paddle/fluid/ir_adaptor/translator/program_translator.cc
index 342d5f1aa0ac0..e7f2966c29401 100644
--- a/paddle/fluid/ir_adaptor/translator/program_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/program_translator.cc
@@ -143,7 +143,7 @@ void ProgramTranslator::GetParameterForSingleBlock(const BlockDesc& block) {
       var_desc = block.FindVarRecursive(var_name);
     }
 
-    bool need_get_parameter_op = is_parameter || is_unseen_variable;
+    bool need_get_parameter_op = is_parameter && is_unseen_variable;
     if (need_get_parameter_op) {
       ir::Operation* op = InsertGetParamaterOp(ctx_, var_desc);
       program_->block()->push_back(op);

From af2f66756a29a9034a052d8df82bf6174dfc8675 Mon Sep 17 00:00:00 2001
From: kangguangli
Date: Tue, 15 Aug 2023 12:43:52 +0000
Subject: [PATCH 2/3] fix bug: insert only for parameters

---
 .../ir_adaptor/translator/op_translator.cc   | 59 ++++---------------
 .../translator/program_translator.cc         |  4 +-
 paddle/fluid/ir_adaptor/translator/utils.cc  | 11 ++++
 paddle/fluid/ir_adaptor/translator/utils.h   |  3 +
 python/paddle/static/input.py                | 21 ++++++-
 5 files changed, 48 insertions(+), 50 deletions(-)

diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc
index ef53f1d496a3d..5465ad5e445c8 100644
--- a/paddle/fluid/ir_adaptor/translator/op_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc
@@ -325,18 +325,15 @@ void OpTranscriber::InsertSliceOperationForInput(
   // scan all inputs to see if any of them is generated as a vector
   // so need an additional `SliceOp` to take it out.
   for (const auto& n : op_desc.Inputs()) {
-    auto& name = n.first;
     auto& args = n.second;
     for (const auto& arg_name : args) {
       bool check =
-          param_map->count(arg_name) != 0 || !yaml_input_set.count(arg_name);
-      IR_ENFORCE(check,
-                 "arg %s.%s as input should be exists before prasing %s",
-                 name,
-                 arg_name,
-                 op_desc.Type());
-      auto defining_info = (*param_map)[arg_name];
+          param_map->count(arg_name) != 0 && !yaml_input_set.count(arg_name);
+      if (!check) {
+        continue;
+      }
+      auto defining_info = param_map->at(arg_name);
       if (defining_info.generated_by_vector) {
         InsertSliceOperationForTarget(
             ctx, param_map, program, defining_info, arg_name);
@@ -391,7 +388,8 @@ std::vector<ir::OpResult> OpTranscriber::GenerateOperationInput(
       }
     }
     VLOG(10) << "[op:" << op_desc.Type() << "][input]" << info.name << " "
-             << legacy_input_name << " " << legacy_input_vars.size();
+             << legacy_input_name << " " << legacy_input_vars.size() << "["
+             << legacy_input_vars << "]";
 
     if (legacy_input_vars.empty() && mutable_attributes != nullptr &&
         mutable_attributes->count(info.name) != 0) {
@@ -613,45 +611,14 @@ void OpTranscriber::RecordOpResultMapping(ir::IrContext* ctx,
                                          const OpDesc& op_desc,
                                          ir::Operation* operation,
                                          const OpOutputMapping& arg_to_idx) {
-  for (const auto& n : op_desc.Outputs()) {
-    auto& name = n.first;
+  for (const auto& [arg_name, idx] : arg_to_idx) {
     VLOG(10) << "[output recording]"
-             << "[" << op_desc.Type() << "]" << name;
-    const auto& args = n.second;
-    size_t idx_in_vector = 0;
-    for (const auto& arg_name : args) {
-      if (arg_name == kEmptyVarName) {
-        idx_in_vector++;
-        continue;
-      }
-      auto idx_iter = arg_to_idx.find(arg_name);
-      if (idx_iter == arg_to_idx.end()) {
-        VLOG(4) << "[output recording]"
-                << "[" << op_desc.Type() << "][skip]" << arg_name;
-        continue;
-      }
-      auto idx = idx_iter->second;
-      VLOG(10) << "[output recording]"
-               << "[" << op_desc.Type() << "]" << arg_name << " " << idx;
-
-      ir::OpResult value = operation->result(idx);
-      bool generated_by_vector = value.type().isa<ir::VectorType>();
-
-      // Specially process TensorArray, this because we cannot distinguish it
-      // with Vector by other conditions but we cannot support it
-      // like Vector
-      if (args.size() == 1) {
-        VarDesc* var = op_desc.Block()->FindVarRecursive(args[0]);
-        if (var->GetType() ==
-            paddle::framework::proto::VarType::LOD_TENSOR_ARRAY) {
-          generated_by_vector = false;
-        }
-      }
+             << "[" << op_desc.Type() << "]" << arg_name << " " << idx;
+    ir::OpResult value = operation->result(idx);
+    bool generated_by_vector = value.type().isa<ir::VectorType>();
 
-      (*param_map)[arg_name] = VariableDefiningInfo(
-          value, generated_by_vector, generated_by_vector ? idx_in_vector : -1);
-      idx_in_vector++;
-    }
+    (*param_map)[arg_name] = VariableDefiningInfo(
+        value, generated_by_vector, generated_by_vector ? idx : -1);
   }
 }
diff --git a/paddle/fluid/ir_adaptor/translator/program_translator.cc b/paddle/fluid/ir_adaptor/translator/program_translator.cc
index e7f2966c29401..cc076dd30ba21 100644
--- a/paddle/fluid/ir_adaptor/translator/program_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/program_translator.cc
@@ -148,7 +148,7 @@ void ProgramTranslator::GetParameterForSingleBlock(const BlockDesc& block) {
       ir::Operation* op = InsertGetParamaterOp(ctx_, var_desc);
       program_->block()->push_back(op);
       param_map_[var_name] = VariableDefiningInfo(op->result(0));
-      VLOG(10) << "[op translated][get parameter]" << op;
+      VLOG(10) << "[op translated][get parameter]" << var_name;
 
       program_->SetParameter(var_name, nullptr);
       parameter_visited_.insert(var_name);
@@ -224,7 +224,7 @@ void ProgramTranslator::SetParameterFromSingleBlock(const BlockDesc& block) {
         insert_pos++;
 
         block->insert(insert_pos, op);
-        VLOG(10) << "[op translated][set parameter]" << op;
+        VLOG(10) << "[op translated][set parameter]" << var_name;
 
         program_->SetParameter(var_name, nullptr);
         parameter_visited_.insert(var_name);
diff --git a/paddle/fluid/ir_adaptor/translator/utils.cc b/paddle/fluid/ir_adaptor/translator/utils.cc
index 75bcebd16d734..592d248f0d14b 100644
--- a/paddle/fluid/ir_adaptor/translator/utils.cc
+++ b/paddle/fluid/ir_adaptor/translator/utils.cc
@@ -18,6 +18,7 @@
 
 #include "paddle/ir/core/builtin_attribute.h"
 #include "paddle/ir/core/builtin_type.h"
+#include "paddle/ir/core/utils.h"
 
 namespace paddle {
 namespace translator {
@@ -46,5 +47,15 @@ ir::Operation* InsertSliceOperationForTarget(
   return operation;
 }
 
+std::ostream& operator<<(std::ostream& os,
+                         const std::vector<std::string>& vec_str) {
+  ir::PrintInterleave(
+      vec_str.begin(),
+      vec_str.end(),
+      [&os](std::string s) { os << s; },
+      [&os]() { os << ", "; });
+  return os;
+}
+
 }  // namespace translator
 }  // namespace paddle
diff --git a/paddle/fluid/ir_adaptor/translator/utils.h b/paddle/fluid/ir_adaptor/translator/utils.h
index a6e5a8fd20969..a207e727ed8f6 100644
--- a/paddle/fluid/ir_adaptor/translator/utils.h
+++ b/paddle/fluid/ir_adaptor/translator/utils.h
@@ -31,5 +31,8 @@ ir::Operation* InsertSliceOperationForTarget(
     const VariableDefiningInfo& defining_info,
     const std::string& arg_name);
 
+std::ostream& operator<<(std::ostream& os,
+                         const std::vector<std::string>& vec_str);
+
 }  // namespace translator
 }  // namespace paddle
diff --git a/python/paddle/static/input.py b/python/paddle/static/input.py
index 4f856227bda71..3fae1a657316f 100644
--- a/python/paddle/static/input.py
+++ b/python/paddle/static/input.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
+
 import paddle
 from paddle.fluid import Variable, core
 from paddle.fluid.data_feeder import check_type
@@ -96,7 +98,7 @@ def data(name, shape, dtype=None, lod_level=0):
             shape[i] = -1
 
     if dtype:
-        return helper.create_global_variable(
+        out = helper.create_global_variable(
             name=name,
             shape=shape,
             dtype=dtype,
@@ -107,7 +109,7 @@ def data(name, shape, dtype=None, lod_level=0):
             need_check_feed=True,
         )
     else:
-        return helper.create_global_variable(
+        out = helper.create_global_variable(
             name=name,
             shape=shape,
             dtype=paddle.get_default_dtype(),
@@ -118,6 +120,21 @@ def data(name, shape, dtype=None, lod_level=0):
             need_check_feed=True,
         )
 
+    if os.environ.get("FLAGS_enable_new_ir_in_executor", None):
+        helper = LayerHelper('data', **locals())
+        helper.append_op(
+            type='data',
+            inputs={},
+            outputs={'out': out},
+            attrs={
+                'index': 0,
+                'dtype': 0,
+                'place': 0,
+                'name': name,
+            },
+        )
+    return out
+
 
 class InputSpec:
     """

From 722f615c7b36e3f1e576dea9d491551efa779dd8 Mon Sep 17 00:00:00 2001
From: kangguangli
Date: Wed, 16 Aug 2023 03:19:08 +0000
Subject: [PATCH 3/3] fix bug: wrong idx in vector

---
 .../ir_adaptor/translator/op_translator.cc | 31 ++++++++++++-------
 test/ir/new_ir/test_ir_vjp.py              |  2 +-
 test/prim/new_ir_prim/test_decomp_op.py    |  2 +-
 3 files changed, 21 insertions(+), 14 deletions(-)

diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc
index 5465ad5e445c8..f27c35ca37d05 100644
--- a/paddle/fluid/ir_adaptor/translator/op_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc
@@ -49,7 +49,9 @@ namespace translator {
 
 namespace {
 
-using ResultIdx = size_t;
+using IdxInOp = size_t;
+using IdxInVector = size_t;
+using ResultIdx = std::tuple<IdxInOp, IdxInVector>;
 using OpDesc = paddle::framework::OpDesc;
 using BlockDesc = paddle::framework::BlockDesc;
 using VarDesc = paddle::framework::VarDesc;
@@ -505,7 +507,7 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx,
         ir::Type translated_var_type =
             type_translator[var->GetType()](ctx, *var);
         op_output_types.push_back(translated_var_type);
-        arg_to_idx[var->Name()] = cur_output_idx;
+        arg_to_idx[var->Name()] = {cur_output_idx, 0};
         continue;
       }
     }
@@ -533,7 +535,7 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx,
       ir::Type translated_var_type =
           type_translator[var->GetType()](ctx, *var);
 
-      arg_to_idx[var_name] = cur_output_idx;
+      arg_to_idx[var_name] = {cur_output_idx, 0};
       op_output_types.push_back(translated_var_type);
 
       // if src type is Vector
@@ -542,10 +544,12 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx,
               << "[" << op_desc.Type() << "]" << info.name << " :"
               << info.type_name << " var: " << legacy_output_name;
       std::vector<ir::Type> types;
-      for (const auto& var_name : legacy_output_vars) {
+      for (IdxInVector idx_in_vec = 0; idx_in_vec < legacy_output_vars.size();
+           idx_in_vec++) {
+        const auto& var_name = legacy_output_vars[idx_in_vec];
         if (var_name == kEmptyVarName) {
           types.emplace_back(nullptr);
-          arg_to_idx[var_name] = cur_output_idx;
+          arg_to_idx[var_name] = {cur_output_idx, idx_in_vec};
           continue;
         }
         VarDesc* var = block->FindVarRecursive(var_name);
@@ -555,7 +559,7 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx,
         ir::Type translated_var_type =
             type_translator[var->GetType()](ctx, *var);
         types.push_back(translated_var_type);
-        arg_to_idx[var_name] = cur_output_idx;
+        arg_to_idx[var_name] = {cur_output_idx, idx_in_vec};
       }
       ir::Type vec_type = ir::VectorType::get(ctx, types);
       op_output_types.push_back(vec_type);
@@ -612,13 +616,15 @@ void OpTranscriber::RecordOpResultMapping(ir::IrContext* ctx,
                                          ir::Operation* operation,
                                          const OpOutputMapping& arg_to_idx) {
   for (const auto& [arg_name, idx] : arg_to_idx) {
+    const auto& [idx_in_op, idx_in_vec] = idx;
     VLOG(10) << "[output recording]"
-             << "[" << op_desc.Type() << "]" << arg_name << " " << idx;
-    ir::OpResult value = operation->result(idx);
+             << "[" << op_desc.Type() << "]" << arg_name << " " << idx_in_op
+             << " " << idx_in_vec;
+    ir::OpResult value = operation->result(idx_in_op);
     bool generated_by_vector = value.type().isa<ir::VectorType>();
 
     (*param_map)[arg_name] = VariableDefiningInfo(
-        value, generated_by_vector, generated_by_vector ? idx : -1);
+        value, generated_by_vector, generated_by_vector ? idx_in_vec : -1);
   }
 }
 
@@ -1388,9 +1394,10 @@ struct ElementwiseGradTranscriber : public OpTranscriber {
     if (idx_iter == arg_to_idx.end()) {
       IR_THROW("op[%s] should have got its y_grad", op_desc.Type());
    }
-    auto idx = idx_iter->second;
+    auto [idx_in_op, idx_in_vec] = idx_iter->second;
     VLOG(10) << "[output recording]"
-             << "[" << op_desc.Type() << "]" << y_grad_var_name << " " << idx;
+             << "[" << op_desc.Type() << "]" << y_grad_var_name << " "
+             << idx_in_op << " " << idx_in_vec;
 
     auto y_names = op_desc.Input("Y", true);
     auto y_name = y_names[0];
@@ -1414,7 +1421,7 @@ struct ElementwiseGradTranscriber : public OpTranscriber {
         y_type.dyn_cast<dialect::DenseTensorType>();
     std::vector<int64_t> y_shape = phi::vectorize(y_tensor_type.dims());
 
-    ir::OpResult value = operation->result(idx);
+    ir::OpResult value = operation->result(idx_in_op);
     ir::Builder builder(ctx, operation->GetParent());
     auto reshape_op = builder.Build<dialect::ReshapeOp>(value, y_shape);
     (*param_map)[y_grad_var_name] =
diff --git a/test/ir/new_ir/test_ir_vjp.py b/test/ir/new_ir/test_ir_vjp.py
index 45da7162664e4..595715aa5f22f 100644
--- a/test/ir/new_ir/test_ir_vjp.py
+++ b/test/ir/new_ir/test_ir_vjp.py
@@ -106,7 +106,7 @@ def test_mean_vjp1(self):
             .source()
             .get_defining_op()
             .name(),
-            "builtin.get_parameter",
+            "pd.data",
         )
         self.assertEqual(
             grad_outs[0][0]
diff --git a/test/prim/new_ir_prim/test_decomp_op.py b/test/prim/new_ir_prim/test_decomp_op.py
index bb6ae6d69ef1d..ecf9c85907999 100644
--- a/test/prim/new_ir_prim/test_decomp_op.py
+++ b/test/prim/new_ir_prim/test_decomp_op.py
@@ -47,7 +47,7 @@ def test_build_op(self):
         self.assertEqual(
             op_name_list,
             [
-                'builtin.get_parameter',
+                'pd.data',
                 'pd.matmul',
                 'pd.add',
                 'pd.full_int_array',
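
The key change in PATCH 3/3 is that `arg_to_idx` stops mapping an output variable name to a single `size_t` and instead maps it to a pair of indices: the position of the result in the op, and the position of the element inside a vector result. `RecordOpResultMapping` then records the in-vector position rather than the result index. Below is a minimal, self-contained sketch of that indexing scheme; only the three aliases mirror the patch, while the map contents and everything else are illustrative and not Paddle code.

// Standalone sketch of the ResultIdx change in PATCH 3/3 (C++17).
// Only IdxInOp / IdxInVector / ResultIdx come from the patch; the op,
// variable names, and main() are hypothetical.
#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <tuple>

using IdxInOp = std::size_t;      // which result of the op
using IdxInVector = std::size_t;  // which element inside a vector result
using ResultIdx = std::tuple<IdxInOp, IdxInVector>;

int main() {
  // Hypothetical op with two results: result 0 is a plain tensor "out",
  // result 1 is a vector whose elements are named x0, x1, x2.
  std::map<std::string, ResultIdx> arg_to_idx = {
      {"out", {0, 0}}, {"x0", {1, 0}}, {"x1", {1, 1}}, {"x2", {1, 2}}};

  for (const auto& [arg_name, idx] : arg_to_idx) {
    const auto& [idx_in_op, idx_in_vec] = idx;
    std::cout << arg_name << " -> result " << idx_in_op << ", element "
              << idx_in_vec << "\n";
  }
  return 0;
}

Under the pre-patch `using ResultIdx = size_t;`, the names x0/x1/x2 above would all collapse to the bare result index 1 and the element position inside the vector would be lost, which is the "wrong idx in vector" that the commit message describes.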