From 3611e2663127b412ade6559faa2363a4feac3c6f Mon Sep 17 00:00:00 2001
From: kangguangli
Date: Wed, 10 Jan 2024 20:26:53 +0800
Subject: [PATCH] fix ttfnet_darknet53_1x_coco in pir mode (#60663)

---
 .../instruction/instruction_util.cc           |  4 +-
 .../translator/attribute_translator.cc        | 17 ++++
 .../ir_adaptor/translator/op_translator.cc    | 45 ++++++----
 .../pir/dialect/op_generator/ops_api_gen.py   |  1 +
 .../pir/dialect/operator/ir/manual_op.cc      | 88 ++++++++++++++++++-
 .../fluid/pir/dialect/operator/ir/manual_op.h | 35 ++++++++
 .../pir/transforms/pd_op_to_kernel_pass.cc    |  8 +-
 paddle/pir/core/builtin_op.cc                 | 10 +--
 paddle/pir/core/dialect.cc                    |  2 +-
 paddle/pir/core/interface_support.h           |  4 +-
 paddle/pir/core/ir_context.cc                 | 40 ++++-----
 paddle/pir/core/op_info_impl.cc               | 18 ++--
 paddle/pir/core/op_trait.cc                   | 14 +--
 paddle/pir/core/operation.cc                  | 10 +--
 paddle/pir/core/storage_manager.cc            | 29 +++---
 paddle/pir/core/value_impl.cc                 | 12 +--
 16 files changed, 245 insertions(+), 92 deletions(-)

diff --git a/paddle/fluid/framework/new_executor/instruction/instruction_util.cc b/paddle/fluid/framework/new_executor/instruction/instruction_util.cc
index 9a28eeb39f9bc..04a2126acc24f 100644
--- a/paddle/fluid/framework/new_executor/instruction/instruction_util.cc
+++ b/paddle/fluid/framework/new_executor/instruction/instruction_util.cc
@@ -181,7 +181,9 @@ OpFuncType AnalyseOpFuncType(pir::Operation* op, const platform::Place& place) {
     return OpFuncType::kGpuSync;
   }
 
-  if (platform::is_gpu_place(place) && op_name == "pd_op.memcpy_d2h") {
+  if (platform::is_gpu_place(place) &&
+      (op_name == "pd_op.memcpy_d2h" ||
+       op_name == "pd_op.memcpy_d2h_multi_io")) {
     return OpFuncType::kGpuSync;
   }
 
diff --git a/paddle/fluid/ir_adaptor/translator/attribute_translator.cc b/paddle/fluid/ir_adaptor/translator/attribute_translator.cc
index 928087c8cb8d8..99af9a45b6dc8 100644
--- a/paddle/fluid/ir_adaptor/translator/attribute_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/attribute_translator.cc
@@ -187,6 +187,21 @@ class Int64AttributeVisitor : public AttributeVisitor {
   }
 };
 
+class Int32ArrayAttributeVisitor : public AttributeVisitor {
+ public:
+  using AttributeVisitor::AttributeVisitor;
+
+  pir::Attribute operator()(const std::vector<int64_t>& i64s) override {
+    VLOG(10) << "translating vector size: " << i64s.size();
+    std::vector<pir::Attribute> attrs;
+    attrs.reserve(i64s.size());
+    for (const auto& v : i64s) {
+      attrs.push_back(pir::Int32Attribute::get(ctx, v));
+    }
+    return pir::ArrayAttribute::get(ctx, attrs);
+  }
+};
+
 class IntArrayAttributeVisitor : public AttributeVisitor {
  public:
   using AttributeVisitor::AttributeVisitor;
@@ -240,6 +255,8 @@ AttributeTranslator::AttributeTranslator() {
       new PlaceAttributeVisitor();
   special_visitors["pir::ArrayAttribute<pir::Int64Attribute>"] =
       new Int64ArrayAttributeVisitor();
+  special_visitors["pir::ArrayAttribute<pir::Int32Attribute>"] =
+      new Int32ArrayAttributeVisitor();
   special_visitors["pir::Int64Attribute"] = new Int64AttributeVisitor();
 }
 
diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc
index c646d78e7e03e..9c7e713963912 100644
--- a/paddle/fluid/ir_adaptor/translator/op_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc
@@ -19,6 +19,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -37,12 +38,16 @@
 #include "paddle/fluid/pir/dialect/operator/ir/op_type.h"
 #include "paddle/fluid/pir/dialect/operator/utils/utils.h"
 #include "paddle/phi/core/utils/data_type.h"
+#include "paddle/pir/core/attribute.h"
 #include "paddle/pir/core/builder.h"
+#include "paddle/pir/core/builtin_attribute.h" #include "paddle/pir/core/builtin_op.h" #include "paddle/pir/core/builtin_type.h" #include "paddle/pir/core/ir_context.h" #include "paddle/pir/core/operation.h" +#include "paddle/pir/core/utils.h" #include "paddle/pir/core/value.h" +#include "paddle/utils/blank.h" #ifdef PADDLE_WITH_DNNL #include "paddle/fluid/pir/dialect/operator/ir/pd_onednn_op.h" @@ -982,25 +987,28 @@ struct AssignValueOpTranscriber : public OpTranscriber { ctx, phi::Place(phi::AllocationType::UNDEFINED)); attribute_map["place"] = attr_place; - if (op_desc.HasAttr("bool_values")) { - legacy_attr = op_desc.GetAttr("bool_values"); - } else if (op_desc.HasAttr("fp32_values")) { - legacy_attr = op_desc.GetAttr("fp32_values"); - } else if (op_desc.HasAttr("int32_values")) { - legacy_attr = op_desc.GetAttr("int32_values"); - } else if (op_desc.HasAttr("int64_values")) { - legacy_attr = op_desc.GetAttr("int64_values"); - } else if (op_desc.HasAttr("values")) { - legacy_attr = op_desc.GetAttr("values"); - } else { - IR_THROW( - "Op assign_value should have attribute `**_values` or `values` but " - "not find"); + const std::vector possible_attrs = { + "bool_values", "fp32_values", "int32_values", "int64_values", "values"}; + for (const auto& attr_name : possible_attrs) { + if (!op_desc.HasAttr(attr_name)) { + continue; + } + legacy_attr = op_desc.GetAttr(attr_name); + pir::Attribute attr_values = attribute_translator( + attr_info_maps.at("values").type_name, legacy_attr); + if (attr_values && attr_values.isa() && + !attr_values.dyn_cast().empty()) { + attribute_map["values"] = attr_values; + VLOG(10) << "[op assign_value][values]" << attr_name << " " + << attr_values; + break; + } } - pir::Attribute attr_values = attribute_translator( - attr_info_maps.at("values").type_name, legacy_attr); - attribute_map["values"] = attr_values; + IR_ENFORCE( + attribute_map.find("values") != attribute_map.end(), + "Op assign_value should have attribute `**_values` or `values` but " + "not find"); TranslateOpDistAttribute(op_desc, &attribute_map); @@ -2062,7 +2070,8 @@ struct SelectInputOpTranscriber : public OpTranscriber { auto& attribute_translator = AttributeTranslator::instance(); undefine_value.defining_op()->set_attribute( "shape", - attribute_translator(common::vectorize(undefined_var_type.dims()))); + attribute_translator("pir::ArrayAttribute", + common::vectorize(undefined_var_type.dims()))); } auto dim1 = input1.dyn_cast().dims(); auto dim2 = input2.dyn_cast().dims(); diff --git a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py index 653c653c7ff7f..04da5c4be230c 100644 --- a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py @@ -83,6 +83,7 @@ 'generate_sequence_xpu', 'layer_norm_act_xpu', 'memcpy', + 'memcpy_d2h_multi_io', 'batch_norm_', 'multi_encoder_xpu', 'multihead_matmul', diff --git a/paddle/fluid/pir/dialect/operator/ir/manual_op.cc b/paddle/fluid/pir/dialect/operator/ir/manual_op.cc index 36d82b7bb9ed6..cc03b61d354d7 100644 --- a/paddle/fluid/pir/dialect/operator/ir/manual_op.cc +++ b/paddle/fluid/pir/dialect/operator/ir/manual_op.cc @@ -23,7 +23,8 @@ paddle::dialect::AddNOp, paddle::dialect::AddN_Op, paddle::dialect::SliceArrayOp, paddle::dialect::SliceArrayDenseOp, paddle::dialect::AssignArray_Op, paddle::dialect::ArrayToTensorOp, paddle::dialect::SelectInputOp, paddle::dialect::IncrementOp, - paddle::dialect::Increment_Op, paddle::dialect::ShapeBroadcastOp + 
+    paddle::dialect::Increment_Op, paddle::dialect::ShapeBroadcastOp,
+    paddle::dialect::MemcpyD2hMultiIoOp
 
 #else
 
 #include "paddle/fluid/pir/dialect/operator/ir/manual_op.h"
@@ -1686,7 +1687,7 @@ OpInfoTuple ArrayWrite_Op::GetOpInfo() {
       "array_write",
       {"array", "x", "i"},
       {"array"},
-      {},
+      {"array"},
      {{"out", "array"}},
      {});
 
@@ -3034,6 +3035,88 @@ bool ShapeBroadcastOp::InferSymbolicShape(
   return true;
 }
 
+const char *MemcpyD2hMultiIoOp::attributes_name[1] = {"dst_place_type"};
+
+OpInfoTuple MemcpyD2hMultiIoOp::GetOpInfo() {
+  std::vector<paddle::dialect::OpInputInfo> inputs = {
+      paddle::dialect::OpInputInfo("x",
+                                   "paddle::dialect::DenseTensorArrayType",
+                                   false,
+                                   false,
+                                   false,
+                                   false)};
+  std::vector<paddle::dialect::OpAttributeInfo> attributes = {
+      paddle::dialect::OpAttributeInfo(
+          "dst_place_type", "pir::Int32Attribute", "")};
+  std::vector<paddle::dialect::OpOutputInfo> outputs = {
+      paddle::dialect::OpOutputInfo(
+          "out", "paddle::dialect::DenseTensorArrayType", false, false)};
+  paddle::dialect::OpRunTimeInfo run_time_info =
+      paddle::dialect::OpRunTimeInfo("UnchangedInferMeta",
+                                     {"x"},
+                                     "memcpy_d2h_multi_io",
+                                     {"x", "dst_place_type"},
+                                     {},
+                                     {},
+                                     {},
+                                     {});
+  return std::make_tuple(
+      inputs, attributes, outputs, run_time_info, "memcpy_d2h_multi_io");
+}
+
+void MemcpyD2hMultiIoOp::VerifySig() {
+  VLOG(4) << "Start Verifying inputs, outputs and attributes for: "
+             "MemcpyD2hMultiIoOp.";
+  VLOG(4) << "Verifying inputs:";
+  {
+    auto input_size = num_operands();
+    IR_ENFORCE(input_size == 1u,
+               "The size %d of inputs must be equal to 1.",
+               input_size);
+
+    IR_ENFORCE((*this)
+                   ->operand_source(0)
+                   .type()
+                   .isa<paddle::dialect::DenseTensorArrayType>(),
+               "Type validation failed for the 0th input, got %s.",
+               (*this)->operand_source(0).type());
+  }
+  VLOG(4) << "Verifying attributes:";
+  {
+    auto &attributes = this->attributes();
+    IR_ENFORCE(attributes.count("dst_place_type") > 0,
+               "dst_place_type does not exist.");
+    IR_ENFORCE(
+        attributes.at("dst_place_type").isa<pir::Int32Attribute>(),
+        "Type of attribute: dst_place_type is not pir::Int32Attribute.");
+  }
+  VLOG(4) << "Verifying outputs:";
+  {
+    auto output_size = num_results();
+    IR_ENFORCE(output_size == 1u,
+               "The size %d of outputs must be equal to 1.",
+               output_size);
+    auto output_0_type = (*this)->result(0).type();
+
+    IR_ENFORCE(output_0_type.isa<paddle::dialect::DenseTensorArrayType>(),
+               "Type validation failed for the 0th output.");
+  }
+  VLOG(4) << "End Verifying for: MemcpyD2hMultiIoOp.";
+}
+
+void MemcpyD2hMultiIoOp::InferMeta(phi::InferMetaContext *infer_meta) {
+  auto fn = PD_INFER_META(phi::UnchangedArrayInferMeta);
+  fn(infer_meta);
+}
+
+phi::DataType MemcpyD2hMultiIoOp::GetKernelTypeForVar(
+    const std::string &var_name,
+    const phi::DataType &tensor_dtype,
+    const phi::DataType &expected_kernel_dtype) {
+  VLOG(4) << "Get KernelType for Var of op: MemcpyD2hMultiIoOp";
+
+  return expected_kernel_dtype;
+}
+
 }  // namespace dialect
 }  // namespace paddle
 
@@ -3057,5 +3140,6 @@
 IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::ExpandOp)
 IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::SelectInputOp)
 IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::IncrementOp)
 IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::Increment_Op)
+IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::MemcpyD2hMultiIoOp)
 IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::ShapeBroadcastOp)
 #endif
diff --git a/paddle/fluid/pir/dialect/operator/ir/manual_op.h b/paddle/fluid/pir/dialect/operator/ir/manual_op.h
index 99722f1306549..afa45de5c4c5c 100644
--- a/paddle/fluid/pir/dialect/operator/ir/manual_op.h
+++ b/paddle/fluid/pir/dialect/operator/ir/manual_op.h
@@ -555,6 +555,40 @@ class Increment_Op
       const std::vector<std::vector<bool>> &stop_gradients);
 };
 
+class MemcpyD2hMultiIoOp
+    : public pir::Op<MemcpyD2hMultiIoOp,
+                     paddle::dialect::OpYamlInfoInterface,
+                     paddle::dialect::InferMetaInterface,
+                     paddle::dialect::GetKernelTypeForVarInterface> {
+ public:
+  using Op::Op;
+  static const char *name() { return "pd_op.memcpy_d2h_multi_io"; }
+  static const char *attributes_name[1];
+  static constexpr uint32_t attributes_num = 1;
+  static OpInfoTuple GetOpInfo();
+  static void Build(pir::Builder &builder,             // NOLINT
+                    pir::OperationArgument &argument,  // NOLINT
+                    pir::Value x_,
+                    int dst_place_type);
+
+  static void Build(pir::Builder &builder,             // NOLINT
+                    pir::OperationArgument &argument,  // NOLINT
+                    pir::Value x_,
+                    pir::AttributeMap attributes);
+
+  void VerifySig();
+
+  static phi::DataType GetKernelTypeForVar(
+      const std::string &var_name,
+      const phi::DataType &tensor_dtype,
+      const phi::DataType &expected_kernel_dtype);
+
+  pir::Value x() { return operand_source(0); }
+  pir::OpResult out() { return result(0); }
+
+  static void InferMeta(phi::InferMetaContext *infer_meta);
+};
+
 class IR_API ShapeBroadcastOp
     : public pir::Op<ShapeBroadcastOp,
                      paddle::dialect::InferSymbolicShapeInterface,
                      paddle::dialect::InferMetaInterface> {
@@ -601,3 +635,4 @@
 IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::SelectInputOp)
 IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::IncrementOp)
 IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::Increment_Op)
 IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::ShapeBroadcastOp)
+IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::MemcpyD2hMultiIoOp)
diff --git a/paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc b/paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc
index 73e26ca1b1b09..eb7d5f9e19de8 100644
--- a/paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc
+++ b/paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc
@@ -325,9 +325,13 @@ static pir::OpResult AddPlaceTransferOp(pir::Value in,
   } else if ((src_place.GetType() == phi::AllocationType::GPU) &&
              (dst_place.GetType() == phi::AllocationType::CPU)) {
     copy_kernel_key.set_backend(phi::Backend::GPU);
+    std::string copy_kernel_name = "memcpy_d2h";
+    if (in.type().isa<AllocatedDenseTensorArrayType>()) {
+      copy_kernel_name = "memcpy_d2h_multi_io";
+    }
     op_attribute = {
-        {"op_name", pir::StrAttribute::get(ctx, "pd_op.memcpy_d2h")},
-        {"kernel_name", pir::StrAttribute::get(ctx, "memcpy_d2h")},
+        {"op_name", pir::StrAttribute::get(ctx, "pd_op." + copy_kernel_name)},
+        {"kernel_name", pir::StrAttribute::get(ctx, copy_kernel_name)},
         {"kernel_key", KernelAttribute::get(ctx, copy_kernel_key)},
         {"dst_place_type", pir::Int32Attribute::get(ctx, 0)}};
   } else {
diff --git a/paddle/pir/core/builtin_op.cc b/paddle/pir/core/builtin_op.cc
index 59f0e3cd856cf..c183993d27709 100644
--- a/paddle/pir/core/builtin_op.cc
+++ b/paddle/pir/core/builtin_op.cc
@@ -22,7 +22,7 @@ namespace pir {
 const char *ModuleOp::attributes_name[attributes_num] = {"program"};  // NOLINT
 
 void PassStopGradientsDefaultly(OperationArgument &argument) {  // NOLINT
-  VLOG(4) << "Builder construction stop gradient for OpResults.";
+  VLOG(10) << "Builder construction stop gradient for OpResults.";
   bool stop_gradient = true;
   for (auto value : argument.inputs) {
     auto attr = value.attribute<BoolAttribute>(kStopGradientAttrName);
@@ -90,7 +90,7 @@ void ModuleOp::Destroy() {
 }
 
 void ModuleOp::VerifySig() const {
-  VLOG(4) << "Verifying inputs, outputs and attributes for: ModuleOp.";
+  VLOG(10) << "Verifying inputs, outputs and attributes for: ModuleOp.";
   // Verify inputs:
   IR_ENFORCE(num_operands() == 0u, "The size of inputs must be equal to 0.");
 
@@ -129,7 +129,7 @@ std::string ParameterOp::param_name() const {
   return attribute<StrAttribute>("parameter_name").AsString();
 }
 void ParameterOp::VerifySig() const {
-  VLOG(4) << "Verifying inputs, outputs and attributes for: ParameterOp.";
+  VLOG(10) << "Verifying inputs, outputs and attributes for: ParameterOp.";
   // Verify inputs:
   IR_ENFORCE(num_operands() == 0u, "The size of inputs must be equal to 0.");
 
@@ -155,7 +155,7 @@ void SetParameterOp::Build(Builder &builder,  // NOLINT
                pir::StrAttribute::get(builder.ir_context(), name));
 }
 void SetParameterOp::VerifySig() const {
-  VLOG(4) << "Verifying inputs, outputs and attributes for: SetParameterOp.";
+  VLOG(10) << "Verifying inputs, outputs and attributes for: SetParameterOp.";
   // Verify inputs:
   IR_ENFORCE(num_operands() == 1, "The size of outputs must be equal to 1.");
 
@@ -181,7 +181,7 @@ void ShadowOutputOp::Build(Builder &builder,  // NOLINT
                pir::StrAttribute::get(builder.ir_context(), name));
 }
 void ShadowOutputOp::VerifySig() const {
-  VLOG(4) << "Verifying inputs, outputs and attributes for: ShadowOutputOp.";
+  VLOG(10) << "Verifying inputs, outputs and attributes for: ShadowOutputOp.";
   // Verify inputs:
   IR_ENFORCE(num_operands() == 1, "The size of outputs must be equal to 1.");
 
diff --git a/paddle/pir/core/dialect.cc b/paddle/pir/core/dialect.cc
index e6831e977fa31..92fa971bb6fcb 100644
--- a/paddle/pir/core/dialect.cc
+++ b/paddle/pir/core/dialect.cc
@@ -21,7 +21,7 @@ Dialect::Dialect(std::string name, pir::IrContext *context, pir::TypeId id)
 Dialect::~Dialect() = default;
 
 void Dialect::RegisterInterface(std::unique_ptr<DialectInterface> interface) {
-  VLOG(4) << "Register interface into dialect" << std::endl;
+  VLOG(9) << "Register interface into dialect" << std::endl;
   registered_interfaces_.emplace(interface->interface_id(),
                                  std::move(interface));
 }
diff --git a/paddle/pir/core/interface_support.h b/paddle/pir/core/interface_support.h
index 60211a9437d7b..6f7d0a6aca731 100644
--- a/paddle/pir/core/interface_support.h
+++ b/paddle/pir/core/interface_support.h
@@ -45,14 +45,14 @@ class ConstructInterfacesOrTraits {
     IR_ENFORCE(suceess,
                "Interface: id[%u] is already registered. inset failed",
                TypeId::get<T>());
-    VLOG(6) << "New a interface: id[" << TypeId::get<T>() << "].";
+    VLOG(10) << "New a interface: id[" << TypeId::get<T>() << "].";
   }
 
   /// Placement new trait.
   template <typename T>
   static void PlacementConstrctTrait(pir::TypeId *&p_trait) {  // NOLINT
     *p_trait = TypeId::get<T>();
-    VLOG(6) << "New a trait: id[" << *p_trait << "].";
+    VLOG(10) << "New a trait: id[" << *p_trait << "].";
     ++p_trait;
   }
 };
diff --git a/paddle/pir/core/ir_context.cc b/paddle/pir/core/ir_context.cc
index fb8c4c05a64a7..dfedff414655a 100644
--- a/paddle/pir/core/ir_context.cc
+++ b/paddle/pir/core/ir_context.cc
@@ -56,9 +56,9 @@ class IrContextImpl {
   void RegisterAbstractType(pir::TypeId type_id, AbstractType *abstract_type) {
     std::lock_guard<pir::SpinLock> guard(registed_abstract_types_lock_);
-    VLOG(6) << "Register an abstract_type of: [TypeId_hash="
-            << std::hash<pir::TypeId>()(type_id)
-            << ", AbstractType_ptr=" << abstract_type << "].";
+    VLOG(10) << "Register an abstract_type of: [TypeId_hash="
+             << std::hash<pir::TypeId>()(type_id)
+             << ", AbstractType_ptr=" << abstract_type << "].";
     registed_abstract_types_.emplace(type_id, abstract_type);
   }
 
@@ -66,9 +66,9 @@
     std::lock_guard<pir::SpinLock> guard(registed_abstract_types_lock_);
     auto iter = registed_abstract_types_.find(type_id);
     if (iter != registed_abstract_types_.end()) {
-      VLOG(6) << "Found a cached abstract_type of: [TypeId_hash="
-              << std::hash<pir::TypeId>()(type_id)
-              << ", AbstractType_ptr=" << iter->second << "].";
+      VLOG(10) << "Found a cached abstract_type of: [TypeId_hash="
+               << std::hash<pir::TypeId>()(type_id)
+               << ", AbstractType_ptr=" << iter->second << "].";
       return iter->second;
     }
     LOG(WARNING) << "No cache found abstract_type of: [TypeId_hash="
@@ -79,9 +79,9 @@
   void RegisterAbstractAttribute(pir::TypeId type_id,
                                  AbstractAttribute *abstract_attribute) {
     std::lock_guard<pir::SpinLock> guard(registed_abstract_attributes_lock_);
-    VLOG(6) << "Register an abstract_attribute of: [TypeId_hash="
-            << std::hash<pir::TypeId>()(type_id)
-            << ", AbstractAttribute_ptr=" << abstract_attribute << "].";
+    VLOG(10) << "Register an abstract_attribute of: [TypeId_hash="
+             << std::hash<pir::TypeId>()(type_id)
+             << ", AbstractAttribute_ptr=" << abstract_attribute << "].";
     registed_abstract_attributes_.emplace(type_id, abstract_attribute);
   }
 
@@ -89,9 +89,9 @@
     std::lock_guard<pir::SpinLock> guard(registed_abstract_attributes_lock_);
     auto iter = registed_abstract_attributes_.find(type_id);
     if (iter != registed_abstract_attributes_.end()) {
-      VLOG(4) << "Found a cached abstract_attribute of: [TypeId_hash="
-              << std::hash<pir::TypeId>()(type_id)
-              << ", AbstractAttribute_ptr=" << iter->second << "].";
+      VLOG(10) << "Found a cached abstract_attribute of: [TypeId_hash="
+               << std::hash<pir::TypeId>()(type_id)
+               << ", AbstractAttribute_ptr=" << iter->second << "].";
       return iter->second;
     }
     LOG(WARNING) << "No cache found abstract_attribute of: [TypeId_hash="
@@ -105,8 +105,8 @@
   void RegisterOpInfo(const std::string &name, OpInfo info) {
     std::lock_guard<pir::SpinLock> guard(registed_op_infos_lock_);
-    VLOG(6) << "Register an operation of: [Name=" << name
-            << ", OpInfo ptr=" << info << "].";
+    VLOG(10) << "Register an operation of: [Name=" << name
+             << ", OpInfo ptr=" << info << "].";
     registed_op_infos_.emplace(name, info);
   }
 
@@ -191,9 +191,9 @@ IrContext *IrContext::Instance() {
 IrContext::~IrContext() { delete impl_; }
 
 IrContext::IrContext() : impl_(new IrContextImpl()) {
-  VLOG(4) << "BuiltinDialect registered into IrContext. ===>";
+  VLOG(10) << "BuiltinDialect registered into IrContext. ===>";
===>"; GetOrRegisterDialect(); - VLOG(4) << "=============================================="; + VLOG(10) << "=============================================="; impl_->bfp16_type = TypeManager::get(this); impl_->fp16_type = TypeManager::get(this); @@ -247,11 +247,11 @@ AbstractAttribute *IrContext::GetRegisteredAbstractAttribute(TypeId id) { Dialect *IrContext::GetOrRegisterDialect( const std::string &dialect_name, std::function constructor) { - VLOG(4) << "Try to get or register a Dialect of: [name=" << dialect_name - << "]."; + VLOG(10) << "Try to get or register a Dialect of: [name=" << dialect_name + << "]."; if (!impl().IsDialectRegistered(dialect_name)) { - VLOG(4) << "Create and register a new Dialect of: [name=" << dialect_name - << "]."; + VLOG(10) << "Create and register a new Dialect of: [name=" << dialect_name + << "]."; impl().RegisterDialect(dialect_name, constructor()); } return impl().GetDialect(dialect_name); diff --git a/paddle/pir/core/op_info_impl.cc b/paddle/pir/core/op_info_impl.cc index 0ef97b521bee1..692860ae9b39c 100644 --- a/paddle/pir/core/op_info_impl.cc +++ b/paddle/pir/core/op_info_impl.cc @@ -28,8 +28,8 @@ void OpInfoImpl::AttachInterface(InterfaceValue &&interface_value) { IR_ENFORCE(suceess, "Interface: id[%u] is already registered. inset failed", interface_value.type_id()); - VLOG(6) << "Attach a interface: id[" << interface_value.type_id() << "]. to " - << op_name_; + VLOG(10) << "Attach a interface: id[" << interface_value.type_id() << "]. to " + << op_name_; } OpInfoImpl::OpInfoImpl(std::set &&interface_set, @@ -62,13 +62,13 @@ OpInfo OpInfoImpl::Create(Dialect *dialect, VerifyPtr verify_region) { // (1) Malloc memory for traits, opinfo_impl. size_t traits_num = trait_set.size(); - VLOG(6) << "Create OpInfoImpl with: " << interface_set.size() - << " interfaces, " << traits_num << " traits, " << attributes_num - << " attributes."; + VLOG(10) << "Create OpInfoImpl with: " << interface_set.size() + << " interfaces, " << traits_num << " traits, " << attributes_num + << " attributes."; size_t base_size = sizeof(TypeId) * traits_num + sizeof(OpInfoImpl); char *base_ptr = static_cast(::operator new(base_size)); - VLOG(6) << "Malloc " << base_size << " Bytes at " - << static_cast(base_ptr); + VLOG(10) << "Malloc " << base_size << " Bytes at " + << static_cast(base_ptr); if (traits_num > 0) { auto p_first_trait = reinterpret_cast(base_ptr); memcpy(base_ptr, trait_set.data(), sizeof(TypeId) * traits_num); @@ -76,8 +76,8 @@ OpInfo OpInfoImpl::Create(Dialect *dialect, base_ptr += traits_num * sizeof(TypeId); } // Construct OpInfoImpl. 
- VLOG(6) << "Construct OpInfoImpl at " << reinterpret_cast(base_ptr) - << " ......"; + VLOG(10) << "Construct OpInfoImpl at " << reinterpret_cast(base_ptr) + << " ......"; OpInfo op_info = OpInfo(new (base_ptr) OpInfoImpl(std::move(interface_set), dialect, op_id, diff --git a/paddle/pir/core/op_trait.cc b/paddle/pir/core/op_trait.cc index 506af3177e671..0a092714f49bb 100644 --- a/paddle/pir/core/op_trait.cc +++ b/paddle/pir/core/op_trait.cc @@ -19,7 +19,7 @@ namespace { void VerifySameOperandsShapeTrait(pir::Operation *op) { - VLOG(4) << "Verify SameOperandsShapeTrait for : " << op->name(); + VLOG(10) << "Verify SameOperandsShapeTrait for : " << op->name(); IR_ENFORCE(op->num_operands() > 0, "Op %s with SameOperandsShapeTrait requires at least 1 operands, " @@ -40,7 +40,7 @@ void VerifySameOperandsShapeTrait(pir::Operation *op) { } void VerifySameOperandsAndResultShapeTrait(pir::Operation *op) { - VLOG(4) << "Verify SameOperandsAndResultShapeTrait for : " << op->name(); + VLOG(10) << "Verify SameOperandsAndResultShapeTrait for : " << op->name(); IR_ENFORCE(op->num_operands() > 0, "Op %s with SameOperandsAndResultShapeTrait requires at least 1 " @@ -74,7 +74,7 @@ void VerifySameOperandsAndResultShapeTrait(pir::Operation *op) { } void VerifySameOperandsElementTypeTrait(pir::Operation *op) { - VLOG(4) << "Verify SameOperandsElementTypeTrait for : " << op->name(); + VLOG(10) << "Verify SameOperandsElementTypeTrait for : " << op->name(); IR_ENFORCE(op->num_operands() > 0, "Op %s with SameOperandsElementTypeTrait requires at least 1 " @@ -92,8 +92,8 @@ void VerifySameOperandsElementTypeTrait(pir::Operation *op) { } void VerifySameOperandsAndResultElementTypeTrait(pir::Operation *op) { - VLOG(4) << "Verify SameOperandsAndResultElementTypeTrait for : " - << op->name(); + VLOG(10) << "Verify SameOperandsAndResultElementTypeTrait for : " + << op->name(); IR_ENFORCE(op->num_operands() > 0, "Op %s with SameOperandsAndResultElementTypeTrait requires at " @@ -127,7 +127,7 @@ void VerifySameOperandsAndResultElementTypeTrait(pir::Operation *op) { } void VerifySameOperandsAndResultTypeTrait(pir::Operation *op) { - VLOG(4) << "Verify SameOperandsAndResultTypeTrait for : " << op->name(); + VLOG(10) << "Verify SameOperandsAndResultTypeTrait for : " << op->name(); IR_ENFORCE(op->num_operands() > 0, "Op %s with SameOperandsAndResultTypeTrait requires at least 1 " @@ -170,7 +170,7 @@ void VerifySameOperandsAndResultTypeTrait(pir::Operation *op) { } void VerifySameTypeOperandsTrait(pir::Operation *op) { - VLOG(4) << "Verify SameTypeOperandsTrait for : " << op->name(); + VLOG(10) << "Verify SameTypeOperandsTrait for : " << op->name(); // For zero or only one operand. unsigned operand_nums = op->num_operands(); diff --git a/paddle/pir/core/operation.cc b/paddle/pir/core/operation.cc index 00db9ce80fc14..c3839eb12996d 100644 --- a/paddle/pir/core/operation.cc +++ b/paddle/pir/core/operation.cc @@ -78,9 +78,9 @@ Operation *Operation::Create(const std::vector &inputs, char *base_ptr = reinterpret_cast(aligned_malloc(base_size, 8)); auto name = op_info ? op_info.name() : ""; - VLOG(6) << "Create Operation [" << name - << "]: {ptr = " << static_cast(base_ptr) - << ", size = " << base_size << "} done."; + VLOG(10) << "Create Operation [" << name + << "]: {ptr = " << static_cast(base_ptr) + << ", size = " << base_size << "} done."; // 3.1. Construct OpResults. 
   for (size_t idx = num_results; idx > 0; idx--) {
     if (idx > max_inline_result_num) {
@@ -221,8 +221,8 @@ void Operation::Destroy() {
           : sizeof(detail::OpInlineResultImpl) * num_results_;
   void *aligned_ptr = reinterpret_cast<char *>(this) - result_mem_size;
-  VLOG(6) << "Destroy Operation [" << name() << "]: {ptr = " << aligned_ptr
-          << ", size = " << result_mem_size << "} done.";
+  VLOG(10) << "Destroy Operation [" << name() << "]: {ptr = " << aligned_ptr
+           << ", size = " << result_mem_size << "} done.";
   aligned_free(aligned_ptr);
 }
 
diff --git a/paddle/pir/core/storage_manager.cc b/paddle/pir/core/storage_manager.cc
index bcfdf34a231e8..9244cf7d533c0 100644
--- a/paddle/pir/core/storage_manager.cc
+++ b/paddle/pir/core/storage_manager.cc
@@ -44,8 +44,9 @@ struct ParametricStorageManager {
     auto pr = parametric_instances_.equal_range(hash_value);
     while (pr.first != pr.second) {
       if (equal_func(pr.first->second)) {
-        VLOG(6) << "Found a cached parametric storage of: [param_hash="
-                << hash_value << ", storage_ptr=" << pr.first->second << "].";
+        VLOG(10) << "Found a cached parametric storage of: [param_hash="
+                 << hash_value << ", storage_ptr=" << pr.first->second
+                 << "].";
         return pr.first->second;
       }
       ++pr.first;
     }
     StorageBase *storage = constructor();
     parametric_instances_.emplace(hash_value, storage);
-    VLOG(6) << "No cache found, construct and cache a new parametric storage "
-               "of: [param_hash="
-            << hash_value << ", storage_ptr=" << storage << "].";
+    VLOG(10) << "No cache found, construct and cache a new parametric storage "
+                "of: [param_hash="
+             << hash_value << ", storage_ptr=" << storage << "].";
     return storage;
   }
 
@@ -76,9 +77,9 @@ StorageManager::StorageBase *StorageManager::GetParametricStorageImpl(
     std::function<bool(const StorageBase *)> equal_func,
    std::function<StorageBase *()> constructor) {
   std::lock_guard<pir::SpinLock> guard(parametric_instance_lock_);
-  VLOG(6) << "Try to get a parametric storage of: [TypeId_hash="
-          << std::hash<TypeId>()(type_id) << ", param_hash=" << hash_value
-          << "].";
+  VLOG(10) << "Try to get a parametric storage of: [TypeId_hash="
+           << std::hash<TypeId>()(type_id) << ", param_hash=" << hash_value
+           << "].";
   if (parametric_instance_.find(type_id) == parametric_instance_.end()) {
     IR_THROW("The input data pointer is null.");
   }
@@ -89,8 +90,8 @@ StorageManager::StorageBase *StorageManager::GetParameterlessStorageImpl(
     TypeId type_id) {
   std::lock_guard<pir::SpinLock> guard(parameterless_instance_lock_);
-  VLOG(6) << "Try to get a parameterless storage of: [TypeId_hash="
-          << std::hash<TypeId>()(type_id) << "].";
+  VLOG(10) << "Try to get a parameterless storage of: [TypeId_hash="
+           << std::hash<TypeId>()(type_id) << "].";
   if (parameterless_instance_.find(type_id) == parameterless_instance_.end())
     IR_THROW("TypeId not found in IrContext.");
   StorageBase *parameterless_instance = parameterless_instance_[type_id];
@@ -100,8 +101,8 @@ void StorageManager::RegisterParametricStorageImpl(
     TypeId type_id, std::function<void(StorageBase *)> destroy) {
   std::lock_guard<pir::SpinLock> guard(parametric_instance_lock_);
-  VLOG(6) << "Register a parametric storage of: [TypeId_hash="
-          << std::hash<TypeId>()(type_id) << "].";
+  VLOG(10) << "Register a parametric storage of: [TypeId_hash="
+           << std::hash<TypeId>()(type_id) << "].";
   parametric_instance_.emplace(
       type_id, std::make_unique<ParametricStorageManager>(destroy));
 }
 
@@ -109,8 +110,8 @@ void StorageManager::RegisterParameterlessStorageImpl(
     TypeId type_id, std::function<StorageBase *()> constructor) {
   std::lock_guard<pir::SpinLock> guard(parameterless_instance_lock_);
-  VLOG(6) << "Register a parameterless storage of: [TypeId_hash="
-          << std::hash<TypeId>()(type_id) << "].";
+  VLOG(10) << "Register a parameterless storage of: [TypeId_hash="
+           << std::hash<TypeId>()(type_id) << "].";
   if (parameterless_instance_.find(type_id) != parameterless_instance_.end())
     IR_THROW("storage class already registered");
   parameterless_instance_.emplace(type_id, constructor());
diff --git a/paddle/pir/core/value_impl.cc b/paddle/pir/core/value_impl.cc
index 999a78e24063d..db659262c497e 100644
--- a/paddle/pir/core/value_impl.cc
+++ b/paddle/pir/core/value_impl.cc
@@ -20,9 +20,9 @@ void ValueImpl::set_first_use(OpOperandImpl *first_use) {
   uint32_t offset = kind();
   first_use_offseted_by_kind_ = reinterpret_cast<OpOperandImpl *>(
       reinterpret_cast<uintptr_t>(first_use) + offset);
-  VLOG(4) << "The index of this value is " << offset
-          << ". Offset and set first use: " << first_use << " -> "
-          << first_use_offseted_by_kind_ << ".";
+  VLOG(10) << "The index of this value is " << offset
+           << ". Offset and set first use: " << first_use << " -> "
+           << first_use_offseted_by_kind_ << ".";
 }
 
 std::string ValueImpl::PrintUdChain() {
@@ -48,9 +48,9 @@ ValueImpl::ValueImpl(Type type, uint32_t kind) {
   type_ = type;
   first_use_offseted_by_kind_ = reinterpret_cast<OpOperandImpl *>(
       reinterpret_cast<uintptr_t>(nullptr) + kind);
-  VLOG(4) << "Construct a ValueImpl whose's kind is " << kind
-          << ". The offset first_use address is: "
-          << first_use_offseted_by_kind_;
+  VLOG(10) << "Construct a ValueImpl whose's kind is " << kind
+           << ". The offset first_use address is: "
+           << first_use_offseted_by_kind_;
 }
 }  // namespace detail
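Note (illustration, not part of the patch): the AssignValueOpTranscriber hunk above replaces an if/else chain with a priority-ordered scan over candidate attribute names, keeping the first candidate that translates to a non-empty array. The self-contained C++ sketch below shows only that selection pattern; `AttrMap` and `PickValuesAttr` are hypothetical stand-ins, not Paddle APIs, and a plain std::map replaces the real OpDesc attribute storage.

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Hypothetical stand-in for the legacy attribute map carried by an OpDesc.
using AttrMap = std::map<std::string, std::vector<int>>;

// Return the first candidate attribute that is present AND non-empty,
// mirroring the fallback order used for assign_value in the patch.
const std::vector<int>* PickValuesAttr(const AttrMap& attrs) {
  static const std::vector<std::string> candidates = {
      "bool_values", "fp32_values", "int32_values", "int64_values", "values"};
  for (const auto& name : candidates) {
    auto it = attrs.find(name);
    if (it != attrs.end() && !it->second.empty()) {
      return &it->second;  // first non-empty candidate wins
    }
  }
  return nullptr;  // caller must enforce that some candidate matched
}

int main() {
  // "bool_values" exists but is empty, so the scan falls through to
  // "int32_values" -- the behavior the old if/else chain did not have.
  AttrMap attrs{{"bool_values", {}}, {"int32_values", {1, 2, 3}}};
  const auto* picked = PickValuesAttr(attrs);
  std::cout << (picked ? picked->size() : 0) << " values picked\n";  // 3
  return 0;
}

The design point carried over from the hunk: the old chain stopped at the first attribute that merely existed, so an empty `bool_values` could shadow a populated `int32_values`; scanning for the first non-empty candidate (with a final enforcement that something matched) fixes that.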