From efd5dbaa220fa57fb3ab5918f18d069ce1f1aaf8 Mon Sep 17 00:00:00 2001 From: phlrain Date: Wed, 7 Jun 2023 10:28:28 +0000 Subject: [PATCH 01/11] add kernel dialect --- paddle/fluid/ir/dialect/pd_kernel_dialect.cc | 62 +++++++++++++ paddle/fluid/ir/dialect/pd_kernel_dialect.h | 37 ++++++++ paddle/fluid/ir/dialect/pd_kernel_op.cc | 35 +++++++ paddle/fluid/ir/dialect/pd_kernel_op.h | 35 +++++++ paddle/fluid/ir/dialect/pd_kernel_type.cc | 45 +++++++++ paddle/fluid/ir/dialect/pd_kernel_type.h | 46 +++++++++ .../fluid/ir/dialect/pd_kernel_type_storage.h | 93 +++++++++++++++++++ paddle/fluid/ir/dialect/pd_type_storage.h | 9 ++ test/cpp/ir/core/CMakeLists.txt | 10 ++ test/cpp/ir/core/ir_phi_kernel_op_test.cc | 67 +++++++++++++ 10 files changed, 439 insertions(+) create mode 100644 paddle/fluid/ir/dialect/pd_kernel_dialect.cc create mode 100644 paddle/fluid/ir/dialect/pd_kernel_dialect.h create mode 100644 paddle/fluid/ir/dialect/pd_kernel_op.cc create mode 100644 paddle/fluid/ir/dialect/pd_kernel_op.h create mode 100644 paddle/fluid/ir/dialect/pd_kernel_type.cc create mode 100644 paddle/fluid/ir/dialect/pd_kernel_type.h create mode 100644 paddle/fluid/ir/dialect/pd_kernel_type_storage.h create mode 100644 test/cpp/ir/core/ir_phi_kernel_op_test.cc diff --git a/paddle/fluid/ir/dialect/pd_kernel_dialect.cc b/paddle/fluid/ir/dialect/pd_kernel_dialect.cc new file mode 100644 index 0000000000000..a18ba986d57ec --- /dev/null +++ b/paddle/fluid/ir/dialect/pd_kernel_dialect.cc @@ -0,0 +1,62 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/ir/dialect/pd_kernel_dialect.h" +#include "paddle/fluid/ir/dialect/pd_attribute.h" +#include "paddle/fluid/ir/dialect/pd_kernel_op.h" +// NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in +// paddle/fluid/ir/dialect/CMakeLists.txt. 
+#include "paddle/fluid/framework/convert_utils.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/ir/dialect/pd_kernel_type.h" +#include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h" +#include "paddle/fluid/ir/dialect/pd_op.h" +#include "paddle/fluid/ir/dialect/utils.h" +#include "paddle/ir/core/dialect_interface.h" +#include "paddle/phi/core/dense_tensor.h" + +namespace paddle { +namespace dialect { + +PaddleKernelDialect::PaddleKernelDialect(ir::IrContext *context) + : ir::Dialect(name(), context, ir::TypeId::get()) { + initialize(); +} + +void PaddleKernelDialect::initialize() { + RegisterTypes(); + RegisterOps(); + + // RegisterAttributes(); +} + +void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) { + AllocatedDenseTensorType tensor_type = + type.dyn_cast(); + + os << phi::AllocationTypeStr(tensor_type.place().GetType()) << "_"; + os << "tensor<"; + for (auto d : phi::vectorize(tensor_type.dims())) { + os << d; + os << "x"; + } + tensor_type.dtype().Print(os); + os << ">"; +} + +} // namespace dialect +} // namespace paddle diff --git a/paddle/fluid/ir/dialect/pd_kernel_dialect.h b/paddle/fluid/ir/dialect/pd_kernel_dialect.h new file mode 100644 index 0000000000000..e3e4e329be89a --- /dev/null +++ b/paddle/fluid/ir/dialect/pd_kernel_dialect.h @@ -0,0 +1,37 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/framework/variable.h" +#include "paddle/ir/core/dialect.h" +#include "paddle/ir/core/parameter.h" + +namespace paddle { +namespace dialect { + +class PaddleKernelDialect : public ir::Dialect { + public: + explicit PaddleKernelDialect(ir::IrContext* context); + + static const char* name() { return "pd_kernel"; } + + void PrintType(ir::Type type, std::ostream& os); + + private: + void initialize(); +}; + +} // namespace dialect +} // namespace paddle diff --git a/paddle/fluid/ir/dialect/pd_kernel_op.cc b/paddle/fluid/ir/dialect/pd_kernel_op.cc new file mode 100644 index 0000000000000..0a04284642f43 --- /dev/null +++ b/paddle/fluid/ir/dialect/pd_kernel_op.cc @@ -0,0 +1,35 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/ir/dialect/pd_kernel_op.h" + +namespace paddle { +namespace dialect { + +const char *PhiKernelOp::attributes_name[attributes_num] = { + "base_op", "infermeta_fn", "kernel_fn"}; + +void PhiKernelOp::Verify(const std::vector &inputs, + const std::vector &outputs, + const ir::AttributeMap &attributes) { + VLOG(4) << "Verifying inputs, outputs and attributes for: SetParameterOp."; + // Verify inputs type: + + // Verify if attributes contain attribute name in attributes_name: + // if (!attributes.at("parameter_name").isa()) { + // throw("Type of attribute: parameter_name is not right."); +} + +} // namespace dialect +} // namespace paddle diff --git a/paddle/fluid/ir/dialect/pd_kernel_op.h b/paddle/fluid/ir/dialect/pd_kernel_op.h new file mode 100644 index 0000000000000..4fc297e8d656e --- /dev/null +++ b/paddle/fluid/ir/dialect/pd_kernel_op.h @@ -0,0 +1,35 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/ir/core/builder.h" +#include "paddle/ir/core/op_base.h" + +namespace paddle { +namespace dialect { + +class PhiKernelOp : public ir::Op { + public: + using Op::Op; + static const char *name() { return "phi.kernel"; } + static constexpr uint32_t attributes_num = 3; + static const char *attributes_name[attributes_num]; + static void Verify(const std::vector &inputs, + const std::vector &outputs, + const ir::AttributeMap &attributes); +}; + +} // namespace dialect +} // namespace paddle diff --git a/paddle/fluid/ir/dialect/pd_kernel_type.cc b/paddle/fluid/ir/dialect/pd_kernel_type.cc new file mode 100644 index 0000000000000..38a2406e5e16c --- /dev/null +++ b/paddle/fluid/ir/dialect/pd_kernel_type.cc @@ -0,0 +1,45 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/ir/dialect/pd_kernel_type.h" + +namespace paddle { +namespace dialect { + +const phi::Place& AllocatedDenseTensorType::place() const { + return storage()->place_; +} + +const ir::Type& AllocatedDenseTensorType::dtype() const { + return storage()->dense_tensor_storage_.dtype_; +} + +const phi::DDim& AllocatedDenseTensorType::dims() const { + return storage()->dense_tensor_storage_.dims_; +} + +const phi::DataLayout& AllocatedDenseTensorType::data_layout() const { + return storage()->dense_tensor_storage_.layout_; +} + +const phi::LoD& AllocatedDenseTensorType::lod() const { + return storage()->dense_tensor_storage_.lod_; +} + +const size_t& AllocatedDenseTensorType::offset() const { + return storage()->dense_tensor_storage_.offset_; +} + +} // namespace dialect +} // namespace paddle diff --git a/paddle/fluid/ir/dialect/pd_kernel_type.h b/paddle/fluid/ir/dialect/pd_kernel_type.h new file mode 100644 index 0000000000000..8e98479a02ab3 --- /dev/null +++ b/paddle/fluid/ir/dialect/pd_kernel_type.h @@ -0,0 +1,46 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h" +#include "paddle/ir/core/type.h" + +namespace paddle { +namespace dialect { +/// +/// \brief Define built-in parametric types. +/// +class AllocatedDenseTensorType : public ir::Type { + public: + using Type::Type; + + DECLARE_TYPE_UTILITY_FUNCTOR(AllocatedDenseTensorType, + AllocatedDenseTensorTypeStorage); + + const phi::Place &place() const; + + const ir::Type &dtype() const; + + const phi::DDim &dims() const; + + const phi::DataLayout &data_layout() const; + + const phi::LoD &lod() const; + + const size_t &offset() const; +}; + +} // namespace dialect +} // namespace paddle diff --git a/paddle/fluid/ir/dialect/pd_kernel_type_storage.h b/paddle/fluid/ir/dialect/pd_kernel_type_storage.h new file mode 100644 index 0000000000000..e3b488a73d01c --- /dev/null +++ b/paddle/fluid/ir/dialect/pd_kernel_type_storage.h @@ -0,0 +1,93 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include "paddle/fluid/ir/dialect/pd_type_storage.h" +#include "paddle/ir/core/type.h" +#include "paddle/ir/core/utils.h" +#include "paddle/phi/core/tensor_meta.h" + +namespace paddle { +namespace dialect { +/// +/// \brief Define Parametric TypeStorage for AllocatedDenseTensorType. 
+/// +/// NOTE(zhangbo9674): The derived TypeStorage class needs to implement the +/// following methods: (1)declare ParamKey, (2)define Construction method, +/// (3)define HashValue method, (4)overload operator==. +/// +struct AllocatedDenseTensorTypeStorage : public ir::TypeStorage { + using Place = phi::Place; + /// + /// \brief Declare ParamKey according to parameter type. + /// + using ParamKey = std::tuple; + + AllocatedDenseTensorTypeStorage(phi::Place place, + dialect::DenseTensorTypeStorage storage) + : place_(place), dense_tensor_storage_(storage) {} + + /// + /// \brief Each derived TypeStorage must define a Construct method, which + /// StorageManager uses to construct a derived TypeStorage. + /// + static AllocatedDenseTensorTypeStorage *Construct(ParamKey key) { + return new AllocatedDenseTensorTypeStorage(std::get<0>(key), + std::get<1>(key)); + } + + /// + /// \brief Each derived TypeStorage must provide a HashValue method. + /// + static std::size_t HashValue(const ParamKey &key) { + std::size_t hash_value = 0; + // hash place + hash_value = ir::hash_combine(hash_value, std::get<0>(key).HashValue()); + + // hash dtype + auto type_storage = std::get<1>(key); + hash_value = ir::hash_combine( + hash_value, + dialect::DenseTensorTypeStorage::HashValue( + dialect::DenseTensorTypeStorage::ParamKey(type_storage.dtype_, + type_storage.dims_, + type_storage.layout_, + type_storage.lod_, + type_storage.offset_))); + + return hash_value; + } + + /// + /// \brief Each derived TypeStorage needs to overload operator==. + /// + bool operator==(const ParamKey &key) const { + return ParamKey(place_, dense_tensor_storage_) == key; + } + + ParamKey GetAsKey() const { return ParamKey(place_, dense_tensor_storage_); } + + /// + /// \brief AllocatedDenseTensorTypeStorage include five parameters: place, + /// DenseTensorTypeStorage + /// + phi::Place place_; + dialect::DenseTensorTypeStorage dense_tensor_storage_; +}; + +} // namespace dialect +} // namespace paddle diff --git a/paddle/fluid/ir/dialect/pd_type_storage.h b/paddle/fluid/ir/dialect/pd_type_storage.h index dbdb3b374e4d2..c2de288f2a592 100644 --- a/paddle/fluid/ir/dialect/pd_type_storage.h +++ b/paddle/fluid/ir/dialect/pd_type_storage.h @@ -112,6 +112,15 @@ struct DenseTensorTypeStorage : public ir::TypeStorage { return ParamKey(dtype_, dims_, layout_, lod_, offset_) == key; } + bool operator==(const DenseTensorTypeStorage &storage) const { + return ParamKey(dtype_, dims_, layout_, lod_, offset_) == + ParamKey(storage.dtype_, + storage.dims_, + storage.layout_, + storage.lod_, + storage.offset_); + } + ParamKey GetAsKey() const { return ParamKey(dtype_, dims_, layout_, lod_, offset_); } diff --git a/test/cpp/ir/core/CMakeLists.txt b/test/cpp/ir/core/CMakeLists.txt index 111da5c3e29ef..f2d119742e168 100644 --- a/test/cpp/ir/core/CMakeLists.txt +++ b/test/cpp/ir/core/CMakeLists.txt @@ -12,6 +12,16 @@ cc_test_old( phi gtest) +cc_test_old( + ir_phi_kernel_op_test + SRCS + ir_phi_kernel_op_test.cc + DEPS + new_ir + pd_dialect + phi + gtest) + cc_test_old( ir_infershape_test SRCS diff --git a/test/cpp/ir/core/ir_phi_kernel_op_test.cc b/test/cpp/ir/core/ir_phi_kernel_op_test.cc new file mode 100644 index 0000000000000..613cc0fe553db --- /dev/null +++ b/test/cpp/ir/core/ir_phi_kernel_op_test.cc @@ -0,0 +1,67 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "paddle/fluid/ir/dialect/pd_kernel_dialect.h" +#include "paddle/fluid/ir/dialect/pd_kernel_op.h" +#include "paddle/fluid/ir/dialect/pd_kernel_type.h" +#include "paddle/fluid/ir/dialect/utils.h" +#include "paddle/fluid/ir/interface/op_yaml_info.h" +#include "paddle/ir/core/block.h" +#include "paddle/ir/core/builtin_attribute.h" +#include "paddle/ir/core/builtin_dialect.h" +#include "paddle/ir/core/builtin_op.h" +#include "paddle/ir/core/ir_context.h" +#include "paddle/ir/core/program.h" +#include "paddle/ir/core/utils.h" +#include "paddle/phi/core/meta_tensor.h" +#include "paddle/phi/infermeta/binary.h" +#include "paddle/phi/kernels/elementwise_add_kernel.h" + +TEST(program_test, program) { + // (1) Init environment. + ir::IrContext *ctx = ir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + + // (2) Create an empty program object + ir::Program program(ctx); + + // (3) Create a float32 DenseTensor Parameter and save into Program + phi::Place place(phi::AllocationType::CPU); + ir::Type fp32_dtype = ir::Float32Type::get(ctx); + phi::DDim dims = {2, 2}; + phi::DataLayout data_layout = phi::DataLayout::NCHW; + phi::LoD lod = {{0, 1, 2}}; + size_t offset = 0; + + std::string op1_name = paddle::dialect::PhiKernelOp::name(); + + ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); + + std::unordered_map op1_attribute{ + {"parameter_name", ir::StrAttribute::get(ctx, "a")}}; + + ir::Type allocated_dense_tensor_dtype = + paddle::dialect::AllocatedDenseTensorType::get( + ctx, + place, + paddle::dialect::DenseTensorTypeStorage( + fp32_dtype, dims, data_layout, lod, offset)); + + ir::Operation *op1 = ir::Operation::Create( + {}, op1_attribute, {allocated_dense_tensor_dtype}, op1_info); + + ASSERT_EQ(op1 != nullptr, true); +} From cdb0f51bc29d45f5fe50e644d5df4bdbb405dee5 Mon Sep 17 00:00:00 2001 From: phlrain Date: Wed, 7 Jun 2023 14:58:18 +0000 Subject: [PATCH 02/11] change DenseTensorTypeStorage to DenseTensorType --- paddle/fluid/ir/dialect/pd_kernel_type.cc | 10 +++--- paddle/fluid/ir/dialect/pd_kernel_type.h | 22 ++++++++++++ .../fluid/ir/dialect/pd_kernel_type_storage.h | 35 +++++++++---------- paddle/ir/core/storage_manager.cc | 4 ++- test/cpp/ir/core/ir_phi_kernel_op_test.cc | 7 ++-- 5 files changed, 50 insertions(+), 28 deletions(-) diff --git a/paddle/fluid/ir/dialect/pd_kernel_type.cc b/paddle/fluid/ir/dialect/pd_kernel_type.cc index 38a2406e5e16c..48fcca97d01c7 100644 --- a/paddle/fluid/ir/dialect/pd_kernel_type.cc +++ b/paddle/fluid/ir/dialect/pd_kernel_type.cc @@ -22,23 +22,23 @@ const phi::Place& AllocatedDenseTensorType::place() const { } const ir::Type& AllocatedDenseTensorType::dtype() const { - return storage()->dense_tensor_storage_.dtype_; + return storage()->dense_tensor_type_.dtype(); } const phi::DDim& AllocatedDenseTensorType::dims() const { - return storage()->dense_tensor_storage_.dims_; + return storage()->dense_tensor_type_.dims(); } const phi::DataLayout& AllocatedDenseTensorType::data_layout() const { - return storage()->dense_tensor_storage_.layout_; + return storage()->dense_tensor_type_.data_layout(); } const phi::LoD& 
AllocatedDenseTensorType::lod() const { - return storage()->dense_tensor_storage_.lod_; + return storage()->dense_tensor_type_.lod(); } const size_t& AllocatedDenseTensorType::offset() const { - return storage()->dense_tensor_storage_.offset_; + return storage()->dense_tensor_type_.offset(); } } // namespace dialect diff --git a/paddle/fluid/ir/dialect/pd_kernel_type.h b/paddle/fluid/ir/dialect/pd_kernel_type.h index 8e98479a02ab3..f0e80648fcb9c 100644 --- a/paddle/fluid/ir/dialect/pd_kernel_type.h +++ b/paddle/fluid/ir/dialect/pd_kernel_type.h @@ -15,6 +15,7 @@ #pragma once #include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h" +#include "paddle/fluid/ir/dialect/pd_type.h" #include "paddle/ir/core/type.h" namespace paddle { @@ -29,6 +30,27 @@ class AllocatedDenseTensorType : public ir::Type { DECLARE_TYPE_UTILITY_FUNCTOR(AllocatedDenseTensorType, AllocatedDenseTensorTypeStorage); + static AllocatedDenseTensorType get(ir::IrContext *ctx, + phi::Place place, + dialect::DenseTensorType type) { + return ir::TypeManager::template get( + ctx, place, type); + } + + static AllocatedDenseTensorType get(ir::IrContext *ctx, + phi::Place place, + ir::Type dtype, + phi::DDim dims, + phi::DataLayout layout, + phi::LoD lod, + size_t offset) { + dialect::DenseTensorType dense_tensor_type = + dialect::DenseTensorType::get(ctx, dtype, dims, layout, lod, offset); + + return ir::TypeManager::template get( + ctx, place, dense_tensor_type); + } + const phi::Place &place() const; const ir::Type &dtype() const; diff --git a/paddle/fluid/ir/dialect/pd_kernel_type_storage.h b/paddle/fluid/ir/dialect/pd_kernel_type_storage.h index e3b488a73d01c..dbee926754506 100644 --- a/paddle/fluid/ir/dialect/pd_kernel_type_storage.h +++ b/paddle/fluid/ir/dialect/pd_kernel_type_storage.h @@ -16,7 +16,7 @@ #include -#include "paddle/fluid/ir/dialect/pd_type_storage.h" +#include "paddle/fluid/ir/dialect/pd_type.h" #include "paddle/ir/core/type.h" #include "paddle/ir/core/utils.h" #include "paddle/phi/core/tensor_meta.h" @@ -35,11 +35,11 @@ struct AllocatedDenseTensorTypeStorage : public ir::TypeStorage { /// /// \brief Declare ParamKey according to parameter type. /// - using ParamKey = std::tuple; + using ParamKey = std::tuple; AllocatedDenseTensorTypeStorage(phi::Place place, - dialect::DenseTensorTypeStorage storage) - : place_(place), dense_tensor_storage_(storage) {} + dialect::DenseTensorType type) + : place_(place), dense_tensor_type_(type) {} /// /// \brief Each derived TypeStorage must define a Construct method, which @@ -59,16 +59,15 @@ struct AllocatedDenseTensorTypeStorage : public ir::TypeStorage { hash_value = ir::hash_combine(hash_value, std::get<0>(key).HashValue()); // hash dtype - auto type_storage = std::get<1>(key); - hash_value = ir::hash_combine( - hash_value, - dialect::DenseTensorTypeStorage::HashValue( - dialect::DenseTensorTypeStorage::ParamKey(type_storage.dtype_, - type_storage.dims_, - type_storage.layout_, - type_storage.lod_, - type_storage.offset_))); - + auto dense_tensor_type = std::get<1>(key); + hash_value = ir::hash_combine(hash_value, + dialect::DenseTensorTypeStorage::HashValue( + dialect::DenseTensorTypeStorage::ParamKey( + dense_tensor_type.dtype(), + dense_tensor_type.dims(), + dense_tensor_type.data_layout(), + dense_tensor_type.lod(), + dense_tensor_type.offset()))); return hash_value; } @@ -76,17 +75,17 @@ struct AllocatedDenseTensorTypeStorage : public ir::TypeStorage { /// \brief Each derived TypeStorage needs to overload operator==. 
/// bool operator==(const ParamKey &key) const { - return ParamKey(place_, dense_tensor_storage_) == key; + return ParamKey(place_, dense_tensor_type_) == key; } - ParamKey GetAsKey() const { return ParamKey(place_, dense_tensor_storage_); } + ParamKey GetAsKey() const { return ParamKey(place_, dense_tensor_type_); } /// /// \brief AllocatedDenseTensorTypeStorage include five parameters: place, - /// DenseTensorTypeStorage + /// DenseTensorType /// phi::Place place_; - dialect::DenseTensorTypeStorage dense_tensor_storage_; + dialect::DenseTensorType dense_tensor_type_; }; } // namespace dialect diff --git a/paddle/ir/core/storage_manager.cc b/paddle/ir/core/storage_manager.cc index ff985f8e537d1..e7f89dc1d4052 100644 --- a/paddle/ir/core/storage_manager.cc +++ b/paddle/ir/core/storage_manager.cc @@ -75,8 +75,10 @@ StorageManager::StorageBase *StorageManager::GetParametricStorageImpl( VLOG(4) << "Try to get a parametric storage of: [TypeId_hash=" << std::hash()(type_id) << ", param_hash=" << hash_value << "]."; - if (parametric_instance_.find(type_id) == parametric_instance_.end()) + if (parametric_instance_.find(type_id) == parametric_instance_.end()) { + VLOG(4) << "throw here"; throw("The input data pointer is null."); + } ParametricStorageManager ¶metric_storage = *parametric_instance_[type_id]; return parametric_storage.GetOrCreate(hash_value, equal_func, constructor); } diff --git a/test/cpp/ir/core/ir_phi_kernel_op_test.cc b/test/cpp/ir/core/ir_phi_kernel_op_test.cc index 613cc0fe553db..85539d2331397 100644 --- a/test/cpp/ir/core/ir_phi_kernel_op_test.cc +++ b/test/cpp/ir/core/ir_phi_kernel_op_test.cc @@ -14,6 +14,7 @@ #include +#include "paddle/fluid/ir/dialect/pd_dialect.h" #include "paddle/fluid/ir/dialect/pd_kernel_dialect.h" #include "paddle/fluid/ir/dialect/pd_kernel_op.h" #include "paddle/fluid/ir/dialect/pd_kernel_type.h" @@ -34,6 +35,7 @@ TEST(program_test, program) { // (1) Init environment. 
ir::IrContext *ctx = ir::IrContext::Instance(); ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); // (2) Create an empty program object ir::Program program(ctx); @@ -55,10 +57,7 @@ TEST(program_test, program) { ir::Type allocated_dense_tensor_dtype = paddle::dialect::AllocatedDenseTensorType::get( - ctx, - place, - paddle::dialect::DenseTensorTypeStorage( - fp32_dtype, dims, data_layout, lod, offset)); + ctx, place, fp32_dtype, dims, data_layout, lod, offset); ir::Operation *op1 = ir::Operation::Create( {}, op1_attribute, {allocated_dense_tensor_dtype}, op1_info); From 406bc1cfbc59293b1be2dd1e36367ea0dfa9dcfe Mon Sep 17 00:00:00 2001 From: phlrain Date: Thu, 8 Jun 2023 01:21:29 +0000 Subject: [PATCH 03/11] add test case` --- paddle/ir/core/storage_manager.cc | 1 - test/cpp/ir/core/ir_phi_kernel_op_test.cc | 14 ++++++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/paddle/ir/core/storage_manager.cc b/paddle/ir/core/storage_manager.cc index e7f89dc1d4052..41a52e85b3048 100644 --- a/paddle/ir/core/storage_manager.cc +++ b/paddle/ir/core/storage_manager.cc @@ -76,7 +76,6 @@ StorageManager::StorageBase *StorageManager::GetParametricStorageImpl( << std::hash()(type_id) << ", param_hash=" << hash_value << "]."; if (parametric_instance_.find(type_id) == parametric_instance_.end()) { - VLOG(4) << "throw here"; throw("The input data pointer is null."); } ParametricStorageManager ¶metric_storage = *parametric_instance_[type_id]; diff --git a/test/cpp/ir/core/ir_phi_kernel_op_test.cc b/test/cpp/ir/core/ir_phi_kernel_op_test.cc index 85539d2331397..8bb37b8e922be 100644 --- a/test/cpp/ir/core/ir_phi_kernel_op_test.cc +++ b/test/cpp/ir/core/ir_phi_kernel_op_test.cc @@ -13,6 +13,7 @@ // limitations under the License. #include +#include #include "paddle/fluid/ir/dialect/pd_dialect.h" #include "paddle/fluid/ir/dialect/pd_kernel_dialect.h" @@ -34,7 +35,8 @@ TEST(program_test, program) { // (1) Init environment. 
ir::IrContext *ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); + auto kernel_dialect = + ctx->GetOrRegisterDialect(); ctx->GetOrRegisterDialect(); // (2) Create an empty program object @@ -55,9 +57,17 @@ TEST(program_test, program) { std::unordered_map op1_attribute{ {"parameter_name", ir::StrAttribute::get(ctx, "a")}}; - ir::Type allocated_dense_tensor_dtype = + auto allocated_dense_tensor_dtype = paddle::dialect::AllocatedDenseTensorType::get( ctx, place, fp32_dtype, dims, data_layout, lod, offset); + std::stringstream ss; + kernel_dialect->PrintType(allocated_dense_tensor_dtype, ss); + ASSERT_EQ(ss.str() == "cpu_tensor<2x2xf32>", true); + ASSERT_EQ(allocated_dense_tensor_dtype.place() == place, true); + ASSERT_EQ(allocated_dense_tensor_dtype.dims() == dims, true); + ASSERT_EQ(allocated_dense_tensor_dtype.data_layout() == data_layout, true); + ASSERT_EQ(allocated_dense_tensor_dtype.lod() == lod, true); + ASSERT_EQ(allocated_dense_tensor_dtype.offset() == 0, true); ir::Operation *op1 = ir::Operation::Create( {}, op1_attribute, {allocated_dense_tensor_dtype}, op1_info); From bfc101e04564de7a54e458e5474c0318396d3d8f Mon Sep 17 00:00:00 2001 From: phlrain Date: Thu, 8 Jun 2023 05:51:04 +0000 Subject: [PATCH 04/11] add first pd_op to kernel dialect --- paddle/fluid/ir/CMakeLists.txt | 1 + paddle/fluid/ir/dialect/pd_kernel_op.cc | 2 +- paddle/fluid/ir/pass/CMakeLists.txt | 7 ++ paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc | 27 +++++ paddle/fluid/ir/pass/pd_op_to_kernel_pass.h | 24 ++++ test/cpp/ir/CMakeLists.txt | 1 + test/cpp/ir/kernel_dialect/CMakeLists.txt | 10 ++ .../ir_kernel_dialect_pass_test.cc | 111 ++++++++++++++++++ 8 files changed, 182 insertions(+), 1 deletion(-) create mode 100644 paddle/fluid/ir/pass/CMakeLists.txt create mode 100644 paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc create mode 100644 paddle/fluid/ir/pass/pd_op_to_kernel_pass.h create mode 100644 test/cpp/ir/kernel_dialect/CMakeLists.txt create mode 100644 test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc diff --git a/paddle/fluid/ir/CMakeLists.txt b/paddle/fluid/ir/CMakeLists.txt index 919f78dc17ce5..19c5c5d7310be 100644 --- a/paddle/fluid/ir/CMakeLists.txt +++ b/paddle/fluid/ir/CMakeLists.txt @@ -1,2 +1,3 @@ add_subdirectory(interface) add_subdirectory(dialect) +add_subdirectory(pass) diff --git a/paddle/fluid/ir/dialect/pd_kernel_op.cc b/paddle/fluid/ir/dialect/pd_kernel_op.cc index 0a04284642f43..1220b08f9f449 100644 --- a/paddle/fluid/ir/dialect/pd_kernel_op.cc +++ b/paddle/fluid/ir/dialect/pd_kernel_op.cc @@ -23,7 +23,7 @@ const char *PhiKernelOp::attributes_name[attributes_num] = { void PhiKernelOp::Verify(const std::vector &inputs, const std::vector &outputs, const ir::AttributeMap &attributes) { - VLOG(4) << "Verifying inputs, outputs and attributes for: SetParameterOp."; + VLOG(4) << "Verifying inputs, outputs and attributes for: PhiKernelOp."; // Verify inputs type: // Verify if attributes contain attribute name in attributes_name: diff --git a/paddle/fluid/ir/pass/CMakeLists.txt b/paddle/fluid/ir/pass/CMakeLists.txt new file mode 100644 index 0000000000000..0d1214afe10c0 --- /dev/null +++ b/paddle/fluid/ir/pass/CMakeLists.txt @@ -0,0 +1,7 @@ +# All source files of pd_dialect, except for the source file of op, which is generated in the compilation directory. 
+file(GLOB PD_PASS_SRCS "*.cc") + +cc_library( + pd_op_to_kernel_pass + SRCS ${PD_PASS_SRCS} + DEPS new_ir phi_utils) diff --git a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc new file mode 100644 index 0000000000000..8b86eed905cdd --- /dev/null +++ b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc @@ -0,0 +1,27 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h" + +namespace paddle { +namespace dialect { + +std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog) { + auto program = std::make_unique(ir::IrContext::Instance()); + + return program; +} + +} // namespace dialect +} // namespace paddle diff --git a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.h b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.h new file mode 100644 index 0000000000000..415ce18bb0756 --- /dev/null +++ b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.h @@ -0,0 +1,24 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once + +#include "paddle/ir/core/program.h" + +namespace paddle { +namespace dialect { + +std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog); + +} // namespace dialect +} // namespace paddle diff --git a/test/cpp/ir/CMakeLists.txt b/test/cpp/ir/CMakeLists.txt index c5524ee38754b..a94503c0e5a1e 100644 --- a/test/cpp/ir/CMakeLists.txt +++ b/test/cpp/ir/CMakeLists.txt @@ -5,3 +5,4 @@ endif() add_subdirectory(core) add_subdirectory(pass) add_subdirectory(pattern_rewrite) +add_subdirectory(kernel_dialect) diff --git a/test/cpp/ir/kernel_dialect/CMakeLists.txt b/test/cpp/ir/kernel_dialect/CMakeLists.txt new file mode 100644 index 0000000000000..d15725f68c2f7 --- /dev/null +++ b/test/cpp/ir/kernel_dialect/CMakeLists.txt @@ -0,0 +1,10 @@ +cc_test_old( + ir_kernel_dialect_pass_test + SRCS + ir_kernel_dialect_pass_test.cc + DEPS + pd_op_to_kernel_pass + new_ir + pd_dialect + phi + gtest) diff --git a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc new file mode 100644 index 0000000000000..311ca93a97589 --- /dev/null +++ b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc @@ -0,0 +1,111 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "paddle/fluid/ir/dialect/pd_dialect.h" +#include "paddle/fluid/ir/dialect/pd_op.h" +#include "paddle/fluid/ir/dialect/pd_type.h" +#include "paddle/fluid/ir/dialect/utils.h" +#include "paddle/fluid/ir/interface/op_yaml_info.h" +#include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h" +#include "paddle/ir/core/builtin_attribute.h" +#include "paddle/ir/core/builtin_dialect.h" +#include "paddle/ir/core/builtin_op.h" +#include "paddle/ir/core/ir_context.h" +#include "paddle/ir/core/program.h" +#include "paddle/ir/core/utils.h" +#include "paddle/phi/core/meta_tensor.h" +#include "paddle/phi/infermeta/binary.h" +#include "paddle/phi/kernels/elementwise_add_kernel.h" + +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/framework/variable.h" +#include "paddle/fluid/framework/variable_helper.h" + +#include "paddle/phi/common/place.h" +#include "paddle/phi/core/kernel_context.h" +#include "paddle/phi/core/kernel_factory.h" + +#include "paddle/fluid/platform/init.h" + +#include "paddle/fluid/ir/dialect/pd_attribute.h" + +TEST(program_test, program) { + // (1) Init environment. + ir::IrContext* ctx = ir::IrContext::Instance(); + ir::Program program((ctx)); + + ctx->GetOrRegisterDialect(); + + ir::Block* block = program.block(); + ir::Type fp32_dtype = ir::Float32Type::get(ctx); + + paddle::dialect::DenseTensorTypeStorage::Dim dims = {2, 2}; + paddle::dialect::DenseTensorTypeStorage::DataLayout data_layout = + paddle::dialect::DenseTensorTypeStorage::DataLayout::NCHW; + paddle::dialect::DenseTensorTypeStorage::LoD lod = {}; + size_t offset = 0; + ir::Type dense_tensor_dtype = paddle::dialect::DenseTensorType::get( + ctx, fp32_dtype, dims, data_layout, lod, offset); + + // (1) Def a = GetParameterOp("a") + std::string op1_name = std::string(paddle::dialect::UniformOp::name()); + ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); + + // ir::Attribute shape_1 = ir::ArrayAttribute::get(ctx, {ten} ); + ir::Attribute shape_1 = paddle::dialect::IntArrayAttribute::get( + ctx, std::vector({2, 2})); + ir::Attribute data_type = + paddle::dialect::DataTypeAttribute::get(ctx, phi::DataType::FLOAT32); + ir::Attribute min = ir::FloatAttribute::get(ctx, 0.0); + ir::Attribute max = ir::FloatAttribute::get(ctx, 1.0); + ir::Attribute seed = ir::Int32_tAttribute::get(ctx, 2); + ir::Attribute uni_place = paddle::dialect::PlaceAttribute::get( + ctx, phi::Place(phi::AllocationType::CPU)); + std::unordered_map op1_attribute{ + {"shape", shape_1}, + {"dtype", data_type}, + {"min", min}, + {"max", max}, + {"seed", seed}, + {"place", uni_place}}; + ir::Operation* op1 = + ir::Operation::Create({}, op1_attribute, {dense_tensor_dtype}, op1_info); + + block->push_back(op1); + + // (2) Def b = GetParameterOp("b") + std::string op2_name = std::string(paddle::dialect::UniformOp::name()); + ir::OpInfo op2_info = ctx->GetRegisteredOpInfo(op2_name); + ir::Attribute ten2 = 
ir::Int32_tAttribute::get(ctx, 3); + std::unordered_map op2_attribute{{"shape", ten2}}; + ir::Operation* op2 = + ir::Operation::Create({}, op1_attribute, {dense_tensor_dtype}, op2_info); + block->push_back(op2); + + // (3) Def out = AddOp(a, b) + std::string add_op_name = std::string(paddle::dialect::AddOp::name()); + ir::OpInfo add_op_info = ctx->GetRegisteredOpInfo(add_op_name); + ir::Operation* add_op = ir::Operation::Create( + {op1->GetResultByIndex(0), op2->GetResultByIndex(0)}, + {}, + {dense_tensor_dtype}, + add_op_info); + block->push_back(add_op); + + paddle::dialect::PdOpLowerToKernelPass(&program); +} From 40a5366eb15d6ea4b5ec9de1eeba3576b080786c Mon Sep 17 00:00:00 2001 From: phlrain Date: Thu, 8 Jun 2023 08:43:42 +0000 Subject: [PATCH 05/11] lower pd op to kernel dialect --- paddle/fluid/ir/dialect/CMakeLists.txt | 32 +++++++++---------- ...pd_kernel_dialect.cc => kernel_dialect.cc} | 8 ++--- .../{pd_kernel_dialect.h => kernel_dialect.h} | 0 .../dialect/{pd_kernel_op.cc => kernel_op.cc} | 2 +- .../dialect/{pd_kernel_op.h => kernel_op.h} | 0 .../{pd_kernel_type.cc => kernel_type.cc} | 2 +- .../{pd_kernel_type.h => kernel_type.h} | 14 ++++---- ...l_type_storage.h => kernel_type_storage.h} | 0 paddle/fluid/ir/dialect/op_gen.py | 11 ++++++- paddle/fluid/ir/dialect/pd_type_storage.h | 9 ------ paddle/fluid/ir/dialect/utils.h | 7 ++-- paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc | 9 ++++++ test/cpp/ir/core/ir_phi_kernel_op_test.cc | 6 ++-- .../ir_kernel_dialect_pass_test.cc | 12 +++---- 14 files changed, 60 insertions(+), 52 deletions(-) rename paddle/fluid/ir/dialect/{pd_kernel_dialect.cc => kernel_dialect.cc} (90%) rename paddle/fluid/ir/dialect/{pd_kernel_dialect.h => kernel_dialect.h} (100%) rename paddle/fluid/ir/dialect/{pd_kernel_op.cc => kernel_op.cc} (96%) rename paddle/fluid/ir/dialect/{pd_kernel_op.h => kernel_op.h} (100%) rename paddle/fluid/ir/dialect/{pd_kernel_type.cc => kernel_type.cc} (96%) rename paddle/fluid/ir/dialect/{pd_kernel_type.h => kernel_type.h} (81%) rename paddle/fluid/ir/dialect/{pd_kernel_type_storage.h => kernel_type_storage.h} (100%) diff --git a/paddle/fluid/ir/dialect/CMakeLists.txt b/paddle/fluid/ir/dialect/CMakeLists.txt index 9d180a23bd286..3c5d2b4f301a8 100644 --- a/paddle/fluid/ir/dialect/CMakeLists.txt +++ b/paddle/fluid/ir/dialect/CMakeLists.txt @@ -27,22 +27,22 @@ set(op_source_file ${PD_DIALECT_BINARY_DIR}/pd_op.cc) set(op_header_file_tmp ${op_header_file}.tmp) set(op_source_file_tmp ${op_source_file}.tmp) -add_custom_command( - OUTPUT ${op_header_file} ${op_source_file} - COMMAND - ${PYTHON_EXECUTABLE} ${op_gen_file} --op_yaml_files ${op_yaml_files} - --op_compat_yaml_file ${op_compat_yaml_file} --namespaces ${op_namespace} - --dialect_name ${dialect_name} --op_def_h_file ${op_header_file_tmp} - --op_def_cc_file ${op_source_file_tmp} - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${op_header_file_tmp} - ${op_header_file} - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${op_source_file_tmp} - ${op_source_file} - COMMENT "copy_if_different ${op_header_file} ${op_source_file}" - DEPENDS ${op_gen_file} ${op_forward_yaml_file1} ${op_forward_yaml_file2} - ${op_backward_yaml_file1} ${op_backward_yaml_file2} - ${op_compat_yaml_file} - VERBATIM) +# add_custom_command( +# OUTPUT ${op_header_file} ${op_source_file} +# COMMAND +# ${PYTHON_EXECUTABLE} ${op_gen_file} --op_yaml_files ${op_yaml_files} +# --op_compat_yaml_file ${op_compat_yaml_file} --namespaces ${op_namespace} +# --dialect_name ${dialect_name} --op_def_h_file ${op_header_file_tmp} +# 
--op_def_cc_file ${op_source_file_tmp} +# COMMAND ${CMAKE_COMMAND} -E copy_if_different ${op_header_file_tmp} +# ${op_header_file} +# COMMAND ${CMAKE_COMMAND} -E copy_if_different ${op_source_file_tmp} +# ${op_source_file} +# COMMENT "copy_if_different ${op_header_file} ${op_source_file}" +# DEPENDS ${op_gen_file} ${op_forward_yaml_file1} ${op_forward_yaml_file2} +# ${op_backward_yaml_file1} ${op_backward_yaml_file2} +# ${op_compat_yaml_file} +# VERBATIM) # All source files of pd_dialect, except for the source file of op, which is generated in the compilation directory. file(GLOB PD_DIALECT_SRCS "*.cc") diff --git a/paddle/fluid/ir/dialect/pd_kernel_dialect.cc b/paddle/fluid/ir/dialect/kernel_dialect.cc similarity index 90% rename from paddle/fluid/ir/dialect/pd_kernel_dialect.cc rename to paddle/fluid/ir/dialect/kernel_dialect.cc index a18ba986d57ec..f1463861ce27c 100644 --- a/paddle/fluid/ir/dialect/pd_kernel_dialect.cc +++ b/paddle/fluid/ir/dialect/kernel_dialect.cc @@ -12,15 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/dialect/pd_kernel_dialect.h" +#include "paddle/fluid/ir/dialect/kernel_dialect.h" +#include "paddle/fluid/ir/dialect/kernel_op.h" #include "paddle/fluid/ir/dialect/pd_attribute.h" -#include "paddle/fluid/ir/dialect/pd_kernel_op.h" // NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in // paddle/fluid/ir/dialect/CMakeLists.txt. #include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/framework/data_type.h" -#include "paddle/fluid/ir/dialect/pd_kernel_type.h" -#include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h" +#include "paddle/fluid/ir/dialect/kernel_type.h" +#include "paddle/fluid/ir/dialect/kernel_type_storage.h" #include "paddle/fluid/ir/dialect/pd_op.h" #include "paddle/fluid/ir/dialect/utils.h" #include "paddle/ir/core/dialect_interface.h" diff --git a/paddle/fluid/ir/dialect/pd_kernel_dialect.h b/paddle/fluid/ir/dialect/kernel_dialect.h similarity index 100% rename from paddle/fluid/ir/dialect/pd_kernel_dialect.h rename to paddle/fluid/ir/dialect/kernel_dialect.h diff --git a/paddle/fluid/ir/dialect/pd_kernel_op.cc b/paddle/fluid/ir/dialect/kernel_op.cc similarity index 96% rename from paddle/fluid/ir/dialect/pd_kernel_op.cc rename to paddle/fluid/ir/dialect/kernel_op.cc index f36c654020bee..eacf534708645 100644 --- a/paddle/fluid/ir/dialect/pd_kernel_op.cc +++ b/paddle/fluid/ir/dialect/kernel_op.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/dialect/pd_kernel_op.h" +#include "paddle/fluid/ir/dialect/kernel_op.h" namespace paddle { namespace dialect { diff --git a/paddle/fluid/ir/dialect/pd_kernel_op.h b/paddle/fluid/ir/dialect/kernel_op.h similarity index 100% rename from paddle/fluid/ir/dialect/pd_kernel_op.h rename to paddle/fluid/ir/dialect/kernel_op.h diff --git a/paddle/fluid/ir/dialect/pd_kernel_type.cc b/paddle/fluid/ir/dialect/kernel_type.cc similarity index 96% rename from paddle/fluid/ir/dialect/pd_kernel_type.cc rename to paddle/fluid/ir/dialect/kernel_type.cc index 48fcca97d01c7..2aa4b32137dce 100644 --- a/paddle/fluid/ir/dialect/pd_kernel_type.cc +++ b/paddle/fluid/ir/dialect/kernel_type.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/ir/dialect/pd_kernel_type.h" +#include "paddle/fluid/ir/dialect/kernel_type.h" namespace paddle { namespace dialect { diff --git a/paddle/fluid/ir/dialect/pd_kernel_type.h b/paddle/fluid/ir/dialect/kernel_type.h similarity index 81% rename from paddle/fluid/ir/dialect/pd_kernel_type.h rename to paddle/fluid/ir/dialect/kernel_type.h index f0e80648fcb9c..a0a6be1960558 100644 --- a/paddle/fluid/ir/dialect/pd_kernel_type.h +++ b/paddle/fluid/ir/dialect/kernel_type.h @@ -14,7 +14,7 @@ #pragma once -#include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h" +#include "paddle/fluid/ir/dialect/kernel_type_storage.h" #include "paddle/fluid/ir/dialect/pd_type.h" #include "paddle/ir/core/type.h" @@ -31,18 +31,18 @@ class AllocatedDenseTensorType : public ir::Type { AllocatedDenseTensorTypeStorage); static AllocatedDenseTensorType get(ir::IrContext *ctx, - phi::Place place, + const phi::Place &place, dialect::DenseTensorType type) { return ir::TypeManager::template get( ctx, place, type); } static AllocatedDenseTensorType get(ir::IrContext *ctx, - phi::Place place, - ir::Type dtype, - phi::DDim dims, - phi::DataLayout layout, - phi::LoD lod, + const phi::Place &place, + const ir::Type &dtype, + const phi::DDim &dims, + const phi::DataLayout &layout, + const phi::LoD &lod, size_t offset) { dialect::DenseTensorType dense_tensor_type = dialect::DenseTensorType::get(ctx, dtype, dims, layout, lod, offset); diff --git a/paddle/fluid/ir/dialect/pd_kernel_type_storage.h b/paddle/fluid/ir/dialect/kernel_type_storage.h similarity index 100% rename from paddle/fluid/ir/dialect/pd_kernel_type_storage.h rename to paddle/fluid/ir/dialect/kernel_type_storage.h diff --git a/paddle/fluid/ir/dialect/op_gen.py b/paddle/fluid/ir/dialect/op_gen.py index 106229e141ecb..a433fa751b0b4 100644 --- a/paddle/fluid/ir/dialect/op_gen.py +++ b/paddle/fluid/ir/dialect/op_gen.py @@ -108,7 +108,7 @@ class {op_name} : public ir::Op<{op_name}{interfaces}{traits}> {{ std::vector inputs = {{ {inputs} }}; std::vector attributes = {{ {attributes} }}; std::vector outputs = {{ {outputs} }}; - paddle::dialect::OpRunTimeInfo run_time_info = OpRunTimeInfo("{infer_meta_func}", {{"{infer_meta_param}"}}, {{"{kernel_func}"}}, {{"{kernel_param}"}}); + paddle::dialect::OpRunTimeInfo run_time_info = OpRunTimeInfo("{infer_meta_func}", {{"{infer_meta_param}"}}, {{"{kernel_func}"}}, {{"{kernel_param}"}}, {{"{kernel_key_dtype}"}} ); return std::make_tuple(inputs, attributes, outputs, run_time_info); }} """ @@ -1006,6 +1006,7 @@ def OpGenerator( op_info_items.append( OpInfoParser(op, op_compat_parser.get_compat(op['name'])) ) + # break # (3) CodeGen: Traverse op_info_items and generate ops_name_list = [] # all op class name store in this list @@ -1259,9 +1260,14 @@ def OpGenerator( infer_meta_param_str = '", "'.join(op_infer_meta_map['param']) kernel_func_str = "" kernel_param_str = "" + kernel_key_dtype = "" if op_kernel_map is not None: kernel_func_str = '", "'.join(op_kernel_map['func']) kernel_param_str = '", "'.join(op_kernel_map['param']) + if 'data_type' in op_kernel_map and op_kernel_map['data_type']: + kernel_key_dtype = '", "'.join( + op_kernel_map['data_type']['candidates'] + ) op_info_func_str = OP_INFO_TEMPLATE.format( op_name=op_class_name, @@ -1272,6 +1278,7 @@ def OpGenerator( infer_meta_param=infer_meta_param_str, kernel_func=kernel_func_str, kernel_param=kernel_param_str, + kernel_key_dtype=kernel_key_dtype, ) # generate op verify function: inputs_type_check_str @@ -1419,6 +1426,8 @@ def OpGenerator( 
ops_defined_list.append(op_verify_str) ops_defined_list.append(op_infer_shape_str) + # break + # (4) Generate head file str op_namespaces_prev = "" for name in namespaces: diff --git a/paddle/fluid/ir/dialect/pd_type_storage.h b/paddle/fluid/ir/dialect/pd_type_storage.h index c2de288f2a592..dbdb3b374e4d2 100644 --- a/paddle/fluid/ir/dialect/pd_type_storage.h +++ b/paddle/fluid/ir/dialect/pd_type_storage.h @@ -112,15 +112,6 @@ struct DenseTensorTypeStorage : public ir::TypeStorage { return ParamKey(dtype_, dims_, layout_, lod_, offset_) == key; } - bool operator==(const DenseTensorTypeStorage &storage) const { - return ParamKey(dtype_, dims_, layout_, lod_, offset_) == - ParamKey(storage.dtype_, - storage.dims_, - storage.layout_, - storage.lod_, - storage.offset_); - } - ParamKey GetAsKey() const { return ParamKey(dtype_, dims_, layout_, lod_, offset_); } diff --git a/paddle/fluid/ir/dialect/utils.h b/paddle/fluid/ir/dialect/utils.h index 65f68db2c6a58..15c5cacee522c 100644 --- a/paddle/fluid/ir/dialect/utils.h +++ b/paddle/fluid/ir/dialect/utils.h @@ -141,14 +141,17 @@ struct OpRunTimeInfo { std::vector infer_meta_param; std::vector kernel_func; std::vector kernel_param; + std::vector kernel_key_dtype; OpRunTimeInfo(std::string infer_meta_func, std::vector infer_meta_param, std::vector kernel_func, - std::vector kernel_param) + std::vector kernel_param, + std::vector dtype) : infer_meta_func(infer_meta_func), infer_meta_param(infer_meta_param), kernel_func(kernel_func), - kernel_param(kernel_param) {} + kernel_param(kernel_param), + kernel_key_dtype(dtype) {} }; } // namespace dialect diff --git a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc index 8b86eed905cdd..2c58aafe41363 100644 --- a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc +++ b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc @@ -12,7 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#include + #include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h" +#include "paddle/phi/common/place.h" namespace paddle { namespace dialect { @@ -20,6 +23,12 @@ namespace dialect { std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog) { auto program = std::make_unique(ir::IrContext::Instance()); + auto block = prog->block(); + phi::Place cpu_place(phi::AllocationType::CPU); + for (auto it = block->begin(); it != block->end(); ++it) { + std::cerr << (*it)->name() << std::endl; + } + return program; } diff --git a/test/cpp/ir/core/ir_phi_kernel_op_test.cc b/test/cpp/ir/core/ir_phi_kernel_op_test.cc index 8bb37b8e922be..2322ca08369b5 100644 --- a/test/cpp/ir/core/ir_phi_kernel_op_test.cc +++ b/test/cpp/ir/core/ir_phi_kernel_op_test.cc @@ -15,10 +15,10 @@ #include #include +#include "paddle/fluid/ir/dialect/kernel_dialect.h" +#include "paddle/fluid/ir/dialect/kernel_op.h" +#include "paddle/fluid/ir/dialect/kernel_type.h" #include "paddle/fluid/ir/dialect/pd_dialect.h" -#include "paddle/fluid/ir/dialect/pd_kernel_dialect.h" -#include "paddle/fluid/ir/dialect/pd_kernel_op.h" -#include "paddle/fluid/ir/dialect/pd_kernel_type.h" #include "paddle/fluid/ir/dialect/utils.h" #include "paddle/fluid/ir/interface/op_yaml_info.h" #include "paddle/ir/core/block.h" diff --git a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc index 311ca93a97589..895eb879c4500 100644 --- a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc +++ b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc @@ -63,7 +63,7 @@ TEST(program_test, program) { ctx, fp32_dtype, dims, data_layout, lod, offset); // (1) Def a = GetParameterOp("a") - std::string op1_name = std::string(paddle::dialect::UniformOp::name()); + std::string op1_name = std::string(paddle::dialect::Full_Op::name()); ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); // ir::Attribute shape_1 = ir::ArrayAttribute::get(ctx, {ten} ); @@ -71,17 +71,13 @@ TEST(program_test, program) { ctx, std::vector({2, 2})); ir::Attribute data_type = paddle::dialect::DataTypeAttribute::get(ctx, phi::DataType::FLOAT32); - ir::Attribute min = ir::FloatAttribute::get(ctx, 0.0); - ir::Attribute max = ir::FloatAttribute::get(ctx, 1.0); - ir::Attribute seed = ir::Int32_tAttribute::get(ctx, 2); + ir::Attribute value = ir::FloatAttribute::get(ctx, 1.0); ir::Attribute uni_place = paddle::dialect::PlaceAttribute::get( ctx, phi::Place(phi::AllocationType::CPU)); std::unordered_map op1_attribute{ {"shape", shape_1}, + {"value", value}, {"dtype", data_type}, - {"min", min}, - {"max", max}, - {"seed", seed}, {"place", uni_place}}; ir::Operation* op1 = ir::Operation::Create({}, op1_attribute, {dense_tensor_dtype}, op1_info); @@ -89,7 +85,7 @@ TEST(program_test, program) { block->push_back(op1); // (2) Def b = GetParameterOp("b") - std::string op2_name = std::string(paddle::dialect::UniformOp::name()); + std::string op2_name = std::string(paddle::dialect::FullOp::name()); ir::OpInfo op2_info = ctx->GetRegisteredOpInfo(op2_name); ir::Attribute ten2 = ir::Int32_tAttribute::get(ctx, 3); std::unordered_map op2_attribute{{"shape", ten2}}; From d42faebff04c023c949b5e3dede959f57c201026 Mon Sep 17 00:00:00 2001 From: phlrain Date: Fri, 9 Jun 2023 06:19:24 +0000 Subject: [PATCH 06/11] update --- paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc | 193 +++++++++++++++++++ 1 file changed, 193 insertions(+) diff --git a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc index 
2c58aafe41363..c2fa274f0806c 100644 --- a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc +++ b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc @@ -14,21 +14,214 @@ #include +#include "paddle/fluid/ir/dialect/kernel_dialect.h" +#include "paddle/fluid/ir/dialect/kernel_op.h" +#include "paddle/fluid/ir/dialect/kernel_type.h" +#include "paddle/fluid/ir/dialect/pd_attribute.h" +#include "paddle/fluid/ir/dialect/utils.h" +#include "paddle/fluid/ir/interface/op_yaml_info.h" #include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h" +#include "paddle/phi/api/lib/kernel_dispatch.h" #include "paddle/phi/common/place.h" +#include "paddle/phi/core/compat/convert_utils.h" +#include "paddle/phi/core/kernel_factory.h" namespace paddle { namespace dialect { +phi::DataType convetIrType2DataType(ir::Type type) { + if (type.isa()) { + return phi::DataType::FLOAT32; + } else if (type.isa()) { + return phi::DataType::FLOAT16; + } else if (type.isa()) { + return phi::DataType::FLOAT64; + } else if (type.isa()) { + return phi::DataType::BFLOAT16; + } else if (type.isa()) { + return phi::DataType::INT32; + } else { + PADDLE_THROW("not shupport type for now", type); + } +} + +phi::KernelKey GetKernelKey( + ir::Operation* op, + const phi::Place& place, + const std::unordered_map& map_value_pair) { + paddle::dialect::OpYamlInfoInterface op_info_interface = + op->dyn_cast(); + auto op_info_res = op_info_interface.GetOpInfo(); + + auto input_info = std::get<0>(op_info_res); + + // only suppurt non vector input for now + std::map input_map; + int index = 0; + for (auto& t : input_info) { + // todo filter attribute tensor + input_map[t.name] = index++; + } + + std::cerr << "11" << std::endl; + std::map attr_type_map; + auto attr_info = std::get<1>(op_info_res); + for (auto& t : attr_info) { + VLOG(6) << t.name << "\t" << t.type_name; + attr_type_map[t.name] = t.type_name; + } + auto runtime_info = std::get<3>(op_info_res); + + std::cerr << "12" << std::endl; + // get dtype infomation + phi::Backend kernel_backend = phi::Backend::UNDEFINED; + phi::DataLayout kernel_layout = phi::DataLayout::UNDEFINED; + phi::DataType kernel_data_type = phi::DataType::UNDEFINED; + + auto attr_map = op->attributes(); + auto data_type_info = runtime_info.kernel_key_dtype; + if (data_type_info.size() > 0 && data_type_info[0] != "") { + // only support single input and attribute + auto slot_name = data_type_info[0]; + if (input_map.count(slot_name)) { + // parse from input + int in_index = input_map.at(slot_name); + + dialect::AllocatedDenseTensorType type = + op->GetOperandByIndex(in_index) + .source() + .type() + .dyn_cast(); + kernel_data_type = type.dyn_cast().data(); + } else { + PADDLE_ENFORCE_EQ(attr_type_map.count(slot_name), + true, + "[%s] MUST in attr map", + slot_name); + kernel_data_type = attr_map.at(slot_name) + .dyn_cast() + .data(); + } + } + + std::cerr << "13" << std::endl; + // parse all the input tensor + + std::cerr << "input size " << input_map.size() << std::endl; + if (input_map.size() == 0 || op->name() == "pd.full_") { + // all the information have to get from attribute and context + kernel_backend = paddle::experimental::ParseBackend(place); + + } else { + std::cerr << "1.1" << std::endl; + paddle::experimental::detail::KernelKeyParser kernel_key_parser; + + for (size_t i = 0; i < input_info.size(); ++i) { + // todo filter attribute tensor + std::cerr << "1.1.0.1" << std::endl; + auto input_tmp = op->GetOperandByIndex(i).source(); + auto new_input_tmp = map_value_pair.at(input_tmp); + dialect::AllocatedDenseTensorType type 
= + new_input_tmp.type().dyn_cast(); + std::cerr << "1.1.0.2" << std::endl; + // fake tensor here + phi::Place cpu_place(phi::AllocationType::CPU); + auto ptr = new phi::Allocation(nullptr, 0, cpu_place); + std::cerr << "1.1.0.3" << std::endl; + std::shared_ptr holder(ptr); + std::cerr << "1.1.0" << std::endl; + + auto dtype = convetIrType2DataType(type.dtype()); + std::cerr << "dtype " << dtype << std::endl; + phi::DenseTensorMeta meta( + dtype, type.dims(), type.data_layout(), type.lod(), type.offset()); + std::cerr << "1.1.2" << std::endl; + phi::DenseTensor fake_tensor(holder, meta); + + std::cerr << "1.1.1" << std::endl; + kernel_key_parser.AssignKernelKeySet(fake_tensor); + } + + std::cerr << "1.2" << std::endl; + auto kernel_key_set = kernel_key_parser.key_set; + + auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey(); + + if (kernel_backend == phi::Backend::UNDEFINED) { + kernel_backend = kernel_key.backend(); + } + if (kernel_layout == phi::DataLayout::UNDEFINED) { + kernel_layout = kernel_key.layout(); + } + if (kernel_data_type == phi::DataType::UNDEFINED) { + kernel_data_type = kernel_key.dtype(); + } + } + + std::cerr << "find res " << kernel_backend << "\t" << kernel_layout << "\t" + << kernel_data_type << std::endl; + phi::KernelKey res(kernel_backend, kernel_layout, kernel_data_type); + return res; +} + std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog) { auto program = std::make_unique(ir::IrContext::Instance()); + prog->Print(std::cout); auto block = prog->block(); phi::Place cpu_place(phi::AllocationType::CPU); + + ir::IrContext* ctx = ir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + + std::unordered_map map_op_pair; + std::unordered_map map_value_pair; + + std::string op1_name = paddle::dialect::PhiKernelOp::name(); + + ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); + + std::unordered_map op1_attribute{ + {"parameter_name", ir::StrAttribute::get(ctx, "a")}}; + for (auto it = block->begin(); it != block->end(); ++it) { std::cerr << (*it)->name() << std::endl; + + auto kernel_key = GetKernelKey(*it, cpu_place, map_value_pair); + + // create new Op + + // only for single output + auto allocated_dense_tensor_dtype = + paddle::dialect::AllocatedDenseTensorType::get( + ctx, + phi::TransToPhiPlace(kernel_key.backend()), + (*it) + ->GetResultByIndex(0) + .type() + .dyn_cast()); + + // constuct input + std::vector vec_inputs; + if ((*it)->name() != "pd.full_" && (*it)->num_operands() > 0) { + for (size_t i = 0; i < (*it)->num_operands(); ++i) { + auto cur_in = (*it)->GetOperandByIndex(i).source(); + auto new_in = map_value_pair.at(cur_in); + + vec_inputs.push_back(new_in); + } + } + + ir::Operation* op1 = ir::Operation::Create( + vec_inputs, op1_attribute, {allocated_dense_tensor_dtype}, op1_info); + + map_op_pair[*it] = op1; + map_value_pair[(*it)->GetResultByIndex(0)] = op1->GetResultByIndex(0); + + program->block()->push_back(op1); } + program->Print(std::cout); return program; } From b7899b77b41efa9716dd89177d9beb1dfd4f0b54 Mon Sep 17 00:00:00 2001 From: phlrain Date: Fri, 9 Jun 2023 14:13:08 +0000 Subject: [PATCH 07/11] update --- paddle/fluid/ir/dialect/CMakeLists.txt | 32 ++++---- paddle/fluid/ir/dialect/kernel_attribute.h | 38 +++++++++ .../ir/dialect/kernel_attribute_storage.h | 49 ++++++++++++ paddle/fluid/ir/dialect/kernel_dialect.cc | 16 ++-- paddle/fluid/ir/dialect/kernel_dialect.h | 4 +- paddle/fluid/ir/interface/infershape.h | 14 ++-- paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc | 42 +++++----- 
test/cpp/ir/core/phi_kernel_adaptor.h | 78 +++++++++++++++---- .../ir_kernel_dialect_pass_test.cc | 77 +++++++----------- 9 files changed, 239 insertions(+), 111 deletions(-) create mode 100644 paddle/fluid/ir/dialect/kernel_attribute.h create mode 100644 paddle/fluid/ir/dialect/kernel_attribute_storage.h diff --git a/paddle/fluid/ir/dialect/CMakeLists.txt b/paddle/fluid/ir/dialect/CMakeLists.txt index 3c5d2b4f301a8..9d180a23bd286 100644 --- a/paddle/fluid/ir/dialect/CMakeLists.txt +++ b/paddle/fluid/ir/dialect/CMakeLists.txt @@ -27,22 +27,22 @@ set(op_source_file ${PD_DIALECT_BINARY_DIR}/pd_op.cc) set(op_header_file_tmp ${op_header_file}.tmp) set(op_source_file_tmp ${op_source_file}.tmp) -# add_custom_command( -# OUTPUT ${op_header_file} ${op_source_file} -# COMMAND -# ${PYTHON_EXECUTABLE} ${op_gen_file} --op_yaml_files ${op_yaml_files} -# --op_compat_yaml_file ${op_compat_yaml_file} --namespaces ${op_namespace} -# --dialect_name ${dialect_name} --op_def_h_file ${op_header_file_tmp} -# --op_def_cc_file ${op_source_file_tmp} -# COMMAND ${CMAKE_COMMAND} -E copy_if_different ${op_header_file_tmp} -# ${op_header_file} -# COMMAND ${CMAKE_COMMAND} -E copy_if_different ${op_source_file_tmp} -# ${op_source_file} -# COMMENT "copy_if_different ${op_header_file} ${op_source_file}" -# DEPENDS ${op_gen_file} ${op_forward_yaml_file1} ${op_forward_yaml_file2} -# ${op_backward_yaml_file1} ${op_backward_yaml_file2} -# ${op_compat_yaml_file} -# VERBATIM) +add_custom_command( + OUTPUT ${op_header_file} ${op_source_file} + COMMAND + ${PYTHON_EXECUTABLE} ${op_gen_file} --op_yaml_files ${op_yaml_files} + --op_compat_yaml_file ${op_compat_yaml_file} --namespaces ${op_namespace} + --dialect_name ${dialect_name} --op_def_h_file ${op_header_file_tmp} + --op_def_cc_file ${op_source_file_tmp} + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${op_header_file_tmp} + ${op_header_file} + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${op_source_file_tmp} + ${op_source_file} + COMMENT "copy_if_different ${op_header_file} ${op_source_file}" + DEPENDS ${op_gen_file} ${op_forward_yaml_file1} ${op_forward_yaml_file2} + ${op_backward_yaml_file1} ${op_backward_yaml_file2} + ${op_compat_yaml_file} + VERBATIM) # All source files of pd_dialect, except for the source file of op, which is generated in the compilation directory. file(GLOB PD_DIALECT_SRCS "*.cc") diff --git a/paddle/fluid/ir/dialect/kernel_attribute.h b/paddle/fluid/ir/dialect/kernel_attribute.h new file mode 100644 index 0000000000000..d22bc9ff94927 --- /dev/null +++ b/paddle/fluid/ir/dialect/kernel_attribute.h @@ -0,0 +1,38 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include "paddle/fluid/ir/dialect/kernel_attribute_storage.h" +#include "paddle/ir/core/attribute.h" +#include "paddle/phi/core/enforce.h" + +namespace paddle { +namespace dialect { + +class KernelAttribute : public ir::Attribute { + public: + using Attribute::Attribute; + + DECLARE_ATTRIBUTE_UTILITY_FUNCTOR(KernelAttribute, KernelAttributeStorage); + + bool operator<(const KernelAttribute &right) const { + return storage() < right.storage(); + } + + phi::KernelKey data() const { return storage()->GetAsKey(); } +}; + +} // namespace dialect +} // namespace paddle diff --git a/paddle/fluid/ir/dialect/kernel_attribute_storage.h b/paddle/fluid/ir/dialect/kernel_attribute_storage.h new file mode 100644 index 0000000000000..e1b17d6092fb2 --- /dev/null +++ b/paddle/fluid/ir/dialect/kernel_attribute_storage.h @@ -0,0 +1,49 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/ir/core/attribute.h" +#include "paddle/ir/core/utils.h" +#include "paddle/phi/common/data_type.h" +#include "paddle/phi/core/kernel_factory.h" + +namespace paddle { +namespace dialect { + +struct KernelAttributeStorage : public ir::AttributeStorage { + using ParamKey = phi::KernelKey; + + explicit KernelAttributeStorage(const ParamKey &key) { kernel_key_ = key; } + + static KernelAttributeStorage *Construct(ParamKey key) { + return new KernelAttributeStorage(key); + } + + static std::size_t HashValue(const ParamKey &key) { + auto t = phi::KernelKey::Hash()(key); + std::cerr << "hash value " << t << std::endl; + return t; + } + + bool operator==(const ParamKey &key) const { return kernel_key_ == key; } + + ParamKey GetAsKey() const { return kernel_key_; } + + private: + phi::KernelKey kernel_key_; +}; + +} // namespace dialect +} // namespace paddle diff --git a/paddle/fluid/ir/dialect/kernel_dialect.cc b/paddle/fluid/ir/dialect/kernel_dialect.cc index f1463861ce27c..ecf2e3996a0a1 100644 --- a/paddle/fluid/ir/dialect/kernel_dialect.cc +++ b/paddle/fluid/ir/dialect/kernel_dialect.cc @@ -19,6 +19,7 @@ // paddle/fluid/ir/dialect/CMakeLists.txt. 
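The KernelAttributeStorage added above follows the usual storage-uniquing pattern: a ParamKey (here phi::KernelKey), a static HashValue, equality against the key, and a Construct factory, so the context can intern one storage object per distinct kernel key. A minimal standalone sketch of that idea follows; the FakeKernelKey/FakeKernelKeyStorage/GetOrCreate names are illustrative stand-ins, not Paddle's ir::AttributeStorage or IrContext API.

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

// Stand-in key: plays the role of phi::KernelKey in this sketch.
struct FakeKernelKey {
  std::string backend, layout, dtype;
  bool operator==(const FakeKernelKey& o) const {
    return backend == o.backend && layout == o.layout && dtype == o.dtype;
  }
};

// Stand-in storage: ParamKey + HashValue + equality + Construct, as in the patch.
struct FakeKernelKeyStorage {
  using ParamKey = FakeKernelKey;
  explicit FakeKernelKeyStorage(const ParamKey& key) : key_(key) {}
  static FakeKernelKeyStorage* Construct(const ParamKey& key) {
    return new FakeKernelKeyStorage(key);
  }
  static std::size_t HashValue(const ParamKey& key) {
    std::hash<std::string> h;
    return h(key.backend) ^ (h(key.layout) << 1) ^ (h(key.dtype) << 2);
  }
  bool operator==(const ParamKey& key) const { return key_ == key; }
  ParamKey GetAsKey() const { return key_; }
 private:
  ParamKey key_;
};

struct KeyHash {
  std::size_t operator()(const FakeKernelKey& k) const {
    return FakeKernelKeyStorage::HashValue(k);
  }
};

// Toy "context": returns the same storage pointer for equal keys.
// Interned storages intentionally live for the whole program.
FakeKernelKeyStorage* GetOrCreate(const FakeKernelKey& key) {
  static std::unordered_map<FakeKernelKey, FakeKernelKeyStorage*, KeyHash> pool;
  auto it = pool.find(key);
  if (it != pool.end()) return it->second;
  auto* storage = FakeKernelKeyStorage::Construct(key);
  pool.emplace(key, storage);
  return storage;
}

int main() {
  FakeKernelKey k{"CPU", "ALL_LAYOUT", "float32"};
  std::cout << std::boolalpha << (GetOrCreate(k) == GetOrCreate(k)) << "\n";  // true
}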
#include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/ir/dialect/kernel_attribute.h" #include "paddle/fluid/ir/dialect/kernel_type.h" #include "paddle/fluid/ir/dialect/kernel_type_storage.h" #include "paddle/fluid/ir/dialect/pd_op.h" @@ -38,13 +39,10 @@ void PaddleKernelDialect::initialize() { RegisterTypes(); RegisterOps(); - // RegisterAttributes(); + RegisterAttributes(); } -void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) { +void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) const { AllocatedDenseTensorType tensor_type = type.dyn_cast(); @@ -58,5 +56,13 @@ void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) { os << ">"; } +void PaddleKernelDialect::PrintAttribute(ir::Attribute attr, + std::ostream &os) const { + phi::KernelKey kernel = attr.dyn_cast().data(); + + os << ""; +} + } // namespace dialect } // namespace paddle diff --git a/paddle/fluid/ir/dialect/kernel_dialect.h b/paddle/fluid/ir/dialect/kernel_dialect.h index e3e4e329be89a..2cbbee316d75a 100644 --- a/paddle/fluid/ir/dialect/kernel_dialect.h +++ b/paddle/fluid/ir/dialect/kernel_dialect.h @@ -27,7 +27,9 @@ class PaddleKernelDialect : public ir::Dialect { static const char* name() { return "pd_kernel"; } - void PrintType(ir::Type type, std::ostream& os); + void PrintType(ir::Type type, std::ostream& os) const override; + + void PrintAttribute(ir::Attribute attr, std::ostream& os) const override; private: void initialize(); diff --git a/paddle/fluid/ir/interface/infershape.h b/paddle/fluid/ir/interface/infershape.h index 7a723ea03777e..4c5b72b6127ad 100644 --- a/paddle/fluid/ir/interface/infershape.h +++ b/paddle/fluid/ir/interface/infershape.h @@ -19,19 +19,15 @@ class InferShapeInterface : public ir::OpInterfaceBase { public: struct Concept { - explicit Concept(void (*infer_shape)(ir::Operation *, - phi::InferMetaContext *)) + explicit Concept(void (*infer_shape)(phi::InferMetaContext *)) : infer_shape_(infer_shape) {} - void (*infer_shape_)(ir::Operation *, phi::InferMetaContext *); + void (*infer_shape_)(phi::InferMetaContext *); }; template struct Model : public Concept { - static void InferShape(ir::Operation *op, - phi::InferMetaContext *infer_meta) { - ConcreteOp concret_op = op->dyn_cast(); - if (concret_op == nullptr) throw("concret_op is nullptr"); - concret_op.InferShape(infer_meta); + static void InferShape(phi::InferMetaContext *infer_meta) { + return ConcreteOp::InferShape(infer_meta); } Model() : Concept(InferShape) {} @@ -41,7 +37,7 @@ class InferShapeInterface : public ir::OpInterfaceBase { : ir::OpInterfaceBase(op), impl_(impl) {} void InferShape(phi::InferMetaContext *infer_meta) { - impl_->infer_shape_(operation(), infer_meta); + impl_->infer_shape_(infer_meta); } private: diff --git a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc index c2fa274f0806c..330e6e22daf8c 100644 --- a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc +++ b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc @@ -14,6 +14,7 @@ #include +#include "paddle/fluid/ir/dialect/kernel_attribute.h" #include "paddle/fluid/ir/dialect/kernel_dialect.h" #include "paddle/fluid/ir/dialect/kernel_op.h" #include "paddle/fluid/ir/dialect/kernel_type.h" @@ -25,7 +26,6 @@ #include "paddle/phi/common/place.h" #include "paddle/phi/core/compat/convert_utils.h" #include "paddle/phi/core/kernel_factory.h" - namespace paddle { namespace dialect { @@ -63,7 +63,6 @@ phi::KernelKey GetKernelKey( 
input_map[t.name] = index++; } - std::cerr << "11" << std::endl; std::map attr_type_map; auto attr_info = std::get<1>(op_info_res); for (auto& t : attr_info) { @@ -72,7 +71,6 @@ phi::KernelKey GetKernelKey( } auto runtime_info = std::get<3>(op_info_res); - std::cerr << "12" << std::endl; // get dtype infomation phi::Backend kernel_backend = phi::Backend::UNDEFINED; phi::DataLayout kernel_layout = phi::DataLayout::UNDEFINED; @@ -104,45 +102,38 @@ phi::KernelKey GetKernelKey( } } - std::cerr << "13" << std::endl; // parse all the input tensor - std::cerr << "input size " << input_map.size() << std::endl; if (input_map.size() == 0 || op->name() == "pd.full_") { // all the information have to get from attribute and context kernel_backend = paddle::experimental::ParseBackend(place); } else { - std::cerr << "1.1" << std::endl; paddle::experimental::detail::KernelKeyParser kernel_key_parser; for (size_t i = 0; i < input_info.size(); ++i) { // todo filter attribute tensor - std::cerr << "1.1.0.1" << std::endl; auto input_tmp = op->GetOperandByIndex(i).source(); auto new_input_tmp = map_value_pair.at(input_tmp); dialect::AllocatedDenseTensorType type = new_input_tmp.type().dyn_cast(); - std::cerr << "1.1.0.2" << std::endl; + // fake tensor here phi::Place cpu_place(phi::AllocationType::CPU); auto ptr = new phi::Allocation(nullptr, 0, cpu_place); - std::cerr << "1.1.0.3" << std::endl; + std::shared_ptr holder(ptr); - std::cerr << "1.1.0" << std::endl; auto dtype = convetIrType2DataType(type.dtype()); - std::cerr << "dtype " << dtype << std::endl; + phi::DenseTensorMeta meta( dtype, type.dims(), type.data_layout(), type.lod(), type.offset()); - std::cerr << "1.1.2" << std::endl; + phi::DenseTensor fake_tensor(holder, meta); - std::cerr << "1.1.1" << std::endl; kernel_key_parser.AssignKernelKeySet(fake_tensor); } - std::cerr << "1.2" << std::endl; auto kernel_key_set = kernel_key_parser.key_set; auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey(); @@ -158,8 +149,6 @@ phi::KernelKey GetKernelKey( } } - std::cerr << "find res " << kernel_backend << "\t" << kernel_layout << "\t" - << kernel_data_type << std::endl; phi::KernelKey res(kernel_backend, kernel_layout, kernel_data_type); return res; } @@ -181,9 +170,6 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog) { ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); - std::unordered_map op1_attribute{ - {"parameter_name", ir::StrAttribute::get(ctx, "a")}}; - for (auto it = block->begin(); it != block->end(); ++it) { std::cerr << (*it)->name() << std::endl; @@ -192,6 +178,7 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog) { // create new Op // only for single output + // need update new kernel key layout and data tyep auto allocated_dense_tensor_dtype = paddle::dialect::AllocatedDenseTensorType::get( ctx, @@ -212,6 +199,23 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog) { } } + paddle::dialect::OpYamlInfoInterface op_info_interface = + (*it)->dyn_cast(); + auto op_info_res = op_info_interface.GetOpInfo(); + auto runtime_info = std::get<3>(op_info_res); + + std::unordered_map op1_attribute{ + {"op_name", ir::StrAttribute::get(ctx, (*it)->name())}, + {"kernel_name", + ir::StrAttribute::get(ctx, runtime_info.kernel_func[0])}, + {"kernel_key", dialect::KernelAttribute::get(ctx, kernel_key)}}; + + auto op_attr_map = (*it)->attributes(); + + for (auto it1 = op_attr_map.begin(); it1 != op_attr_map.end(); ++it1) { + op1_attribute.emplace(it1->first, it1->second); + } + ir::Operation* op1 = ir::Operation::Create( 
vec_inputs, op1_attribute, {allocated_dense_tensor_dtype}, op1_info); diff --git a/test/cpp/ir/core/phi_kernel_adaptor.h b/test/cpp/ir/core/phi_kernel_adaptor.h index 2e44ff55848e3..1c45defd1f0f1 100644 --- a/test/cpp/ir/core/phi_kernel_adaptor.h +++ b/test/cpp/ir/core/phi_kernel_adaptor.h @@ -18,6 +18,7 @@ #include "paddle/fluid/ir/dialect/pd_op.h" #include "paddle/fluid/ir/dialect/pd_type.h" #include "paddle/fluid/ir/dialect/utils.h" +#include "paddle/fluid/ir/interface/infershape.h" #include "paddle/fluid/ir/interface/op_yaml_info.h" #include "paddle/ir/core/builtin_attribute.h" #include "paddle/ir/core/builtin_dialect.h" @@ -40,6 +41,7 @@ #include "paddle/fluid/platform/init.h" +#include "paddle/fluid/ir/dialect/kernel_attribute.h" #include "paddle/fluid/ir/dialect/pd_attribute.h" #include "glog/logging.h" @@ -92,14 +94,11 @@ template void build_context(ir::Operation* op, const std::unordered_map& name_map, paddle::framework::Scope* scope, + const OpInfoTuple& op_yaml_info, T* ctx, bool is_infer_meta = true) { - paddle::dialect::OpYamlInfoInterface op_info_interface = - op->dyn_cast(); - auto op_info_res = op_info_interface.GetOpInfo(); - // inputs include input and mutable attributes - auto input_info = std::get<0>(op_info_res); + auto input_info = std::get<0>(op_yaml_info); std::map input_index_map; std::map mutable_attr_type_map; int input_index = 0; @@ -111,7 +110,7 @@ void build_context(ir::Operation* op, } } - auto attr_info = std::get<1>(op_info_res); + auto attr_info = std::get<1>(op_yaml_info); std::map attr_type_map; for (auto& t : attr_info) { VLOG(6) << t.name << "\t" << t.type_name; @@ -119,7 +118,7 @@ void build_context(ir::Operation* op, } auto attr_map = op->attributes(); - auto runtime_info = std::get<3>(op_info_res); + auto runtime_info = std::get<3>(op_yaml_info); // int input_index = 0; std::vector vec_param_list; @@ -203,17 +202,18 @@ class PhiKernelAdaptor { auto attr_map = (*it)->attributes(); + paddle::dialect::OpYamlInfoInterface op_info_interface = + (*it)->dyn_cast(); + auto op_info_res = op_info_interface.GetOpInfo(); + InferShapeInterface interface = (*it)->dyn_cast(); phi::InferMetaContext ctx; - build_context((*it), name_map, scope_, &ctx); + build_context( + (*it), name_map, scope_, op_info_res, &ctx); interface.InferShape(&ctx); - paddle::dialect::OpYamlInfoInterface op_info_interface = - (*it)->dyn_cast(); - auto op_info_res = op_info_interface.GetOpInfo(); - auto runtime_info = std::get<3>(op_info_res); auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap( @@ -237,7 +237,7 @@ class PhiKernelAdaptor { phi::KernelContext kernel_ctx(dev_ctx); build_context( - (*it), name_map, scope_, &kernel_ctx, false); + (*it), name_map, scope_, op_info_res, &kernel_ctx, false); found_it->second(&kernel_ctx); auto out_value = (*it)->GetResultByIndex(0); @@ -246,6 +246,58 @@ class PhiKernelAdaptor { } } + void run_kernel_prog(ir::Program* program) { + auto block = program->block(); + std::unordered_map name_map; + build_scope(block, scope_, &name_map); + ir::IrContext* ctx = ir::IrContext::Instance(); + + ctx->GetOrRegisterDialect(); + + auto* dev_ctx = phi::DeviceContextPool::Instance().Get(phi::CPUPlace()); + phi::Place cpu_place(phi::AllocationType::CPU); + for (auto it = block->begin(); it != block->end(); ++it) { + auto attr_map = (*it)->attributes(); + + auto op_name = attr_map.at("op_name").dyn_cast().data(); + + ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op_name); + + auto impl = + op1_info.GetInterfaceImpl(); + auto yaml_info = 
impl->get_op_info_(); + + auto attr_info = std::get<1>(yaml_info); + + auto infer_shape_impl = op1_info.GetInterfaceImpl(); + + phi::InferMetaContext ctx; + + build_context( + (*it), name_map, scope_, yaml_info, &ctx); + + infer_shape_impl->infer_shape_(&ctx); + + auto kernel_name = + attr_map.at("kernel_name").dyn_cast().data(); + auto kernel_key = attr_map.at("kernel_key") + .dyn_cast() + .data(); + + auto kernel_fn = + phi::KernelFactory::Instance().SelectKernel(kernel_name, kernel_key); + + phi::KernelContext kernel_ctx(dev_ctx); + + build_context( + (*it), name_map, scope_, yaml_info, &kernel_ctx, false); + kernel_fn(&kernel_ctx); + + auto out_value = (*it)->GetResultByIndex(0); + out_name = name_map[out_value]; + } + } + std::string out_name; private: diff --git a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc index 895eb879c4500..62fac2629d9bf 100644 --- a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc +++ b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc @@ -16,7 +16,6 @@ #include #include "paddle/fluid/ir/dialect/pd_dialect.h" -#include "paddle/fluid/ir/dialect/pd_op.h" #include "paddle/fluid/ir/dialect/pd_type.h" #include "paddle/fluid/ir/dialect/utils.h" #include "paddle/fluid/ir/interface/op_yaml_info.h" @@ -43,6 +42,14 @@ #include "paddle/fluid/platform/init.h" #include "paddle/fluid/ir/dialect/pd_attribute.h" +#include "test/cpp/ir/core/phi_kernel_adaptor.h" + +#include "paddle/phi/core/kernel_registry.h" + +PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(full_int_array, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(uniform, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT); TEST(program_test, program) { // (1) Init environment. @@ -52,56 +59,30 @@ TEST(program_test, program) { ctx->GetOrRegisterDialect(); ir::Block* block = program.block(); - ir::Type fp32_dtype = ir::Float32Type::get(ctx); - - paddle::dialect::DenseTensorTypeStorage::Dim dims = {2, 2}; - paddle::dialect::DenseTensorTypeStorage::DataLayout data_layout = - paddle::dialect::DenseTensorTypeStorage::DataLayout::NCHW; - paddle::dialect::DenseTensorTypeStorage::LoD lod = {}; - size_t offset = 0; - ir::Type dense_tensor_dtype = paddle::dialect::DenseTensorType::get( - ctx, fp32_dtype, dims, data_layout, lod, offset); - - // (1) Def a = GetParameterOp("a") - std::string op1_name = std::string(paddle::dialect::Full_Op::name()); - ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); - - // ir::Attribute shape_1 = ir::ArrayAttribute::get(ctx, {ten} ); - ir::Attribute shape_1 = paddle::dialect::IntArrayAttribute::get( - ctx, std::vector({2, 2})); - ir::Attribute data_type = - paddle::dialect::DataTypeAttribute::get(ctx, phi::DataType::FLOAT32); - ir::Attribute value = ir::FloatAttribute::get(ctx, 1.0); - ir::Attribute uni_place = paddle::dialect::PlaceAttribute::get( - ctx, phi::Place(phi::AllocationType::CPU)); - std::unordered_map op1_attribute{ - {"shape", shape_1}, - {"value", value}, - {"dtype", data_type}, - {"place", uni_place}}; - ir::Operation* op1 = - ir::Operation::Create({}, op1_attribute, {dense_tensor_dtype}, op1_info); + + ir::Builder builder = ir::Builder::AtBlockEnd(ctx, program.block()); + + paddle::dialect::FullOp op1 = builder.Build( + std::vector{2, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); block->push_back(op1); - // (2) Def b = GetParameterOp("b") - std::string op2_name = std::string(paddle::dialect::FullOp::name()); - ir::OpInfo op2_info = ctx->GetRegisteredOpInfo(op2_name); - 
ir::Attribute ten2 = ir::Int32_tAttribute::get(ctx, 3); - std::unordered_map op2_attribute{{"shape", ten2}}; - ir::Operation* op2 = - ir::Operation::Create({}, op1_attribute, {dense_tensor_dtype}, op2_info); + paddle::dialect::FullOp op2 = builder.Build( + std::vector{2, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); block->push_back(op2); - // (3) Def out = AddOp(a, b) - std::string add_op_name = std::string(paddle::dialect::AddOp::name()); - ir::OpInfo add_op_info = ctx->GetRegisteredOpInfo(add_op_name); - ir::Operation* add_op = ir::Operation::Create( - {op1->GetResultByIndex(0), op2->GetResultByIndex(0)}, - {}, - {dense_tensor_dtype}, - add_op_info); - block->push_back(add_op); - - paddle::dialect::PdOpLowerToKernelPass(&program); + paddle::dialect::AddOp add = builder.Build( + op1->GetResultByIndex(0), op2->GetResultByIndex(0)); + block->push_back(add); + + auto kernel_program = paddle::dialect::PdOpLowerToKernelPass(&program); + + paddle::framework::Scope scope; + PhiKernelAdaptor phi_kernel_adaptor(&scope); + phi_kernel_adaptor.run_kernel_prog(kernel_program.get()); + + auto out_tensor = + scope.Var(phi_kernel_adaptor.out_name)->Get(); + + std::cerr << "op result" << out_tensor << std::endl; } From 615644f69adb1d5be76e91e74715508ac9c4b635 Mon Sep 17 00:00:00 2001 From: phlrain Date: Fri, 9 Jun 2023 14:41:04 +0000 Subject: [PATCH 08/11] remove useless code --- .../ir/dialect/kernel_attribute_storage.h | 1 - paddle/fluid/ir/dialect/op_gen.py | 3 --- paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc | 4 ---- test/cpp/ir/core/ir_exe_test.cc | 5 +---- .../ir_kernel_dialect_pass_test.cc | 22 +++++++++++-------- 5 files changed, 14 insertions(+), 21 deletions(-) diff --git a/paddle/fluid/ir/dialect/kernel_attribute_storage.h b/paddle/fluid/ir/dialect/kernel_attribute_storage.h index e1b17d6092fb2..634e2f2dfff17 100644 --- a/paddle/fluid/ir/dialect/kernel_attribute_storage.h +++ b/paddle/fluid/ir/dialect/kernel_attribute_storage.h @@ -33,7 +33,6 @@ struct KernelAttributeStorage : public ir::AttributeStorage { static std::size_t HashValue(const ParamKey &key) { auto t = phi::KernelKey::Hash()(key); - std::cerr << "hash value " << t << std::endl; return t; } diff --git a/paddle/fluid/ir/dialect/op_gen.py b/paddle/fluid/ir/dialect/op_gen.py index b8a239ecebdf7..24feb3af80e7a 100644 --- a/paddle/fluid/ir/dialect/op_gen.py +++ b/paddle/fluid/ir/dialect/op_gen.py @@ -1211,7 +1211,6 @@ def OpGenerator( op_info_items.append( OpInfoParser(op, op_compat_parser.get_compat(op['name'])) ) - # break # (3) CodeGen: Traverse op_info_items and generate ops_name_list = [] # all op class name store in this list @@ -1643,8 +1642,6 @@ def OpGenerator( ops_defined_list.append(op_verify_str) ops_defined_list.append(op_infer_shape_str) - # break - # (4) Generate head file str op_namespaces_prev = "" for name in namespaces: diff --git a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc index 330e6e22daf8c..21dfe02338292 100644 --- a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc +++ b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc @@ -156,7 +156,6 @@ phi::KernelKey GetKernelKey( std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog) { auto program = std::make_unique(ir::IrContext::Instance()); - prog->Print(std::cout); auto block = prog->block(); phi::Place cpu_place(phi::AllocationType::CPU); @@ -171,8 +170,6 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog) { ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); for (auto it = block->begin(); it != 
block->end(); ++it) { - std::cerr << (*it)->name() << std::endl; - auto kernel_key = GetKernelKey(*it, cpu_place, map_value_pair); // create new Op @@ -225,7 +222,6 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog) { program->block()->push_back(op1); } - program->Print(std::cout); return program; } diff --git a/test/cpp/ir/core/ir_exe_test.cc b/test/cpp/ir/core/ir_exe_test.cc index 7c7dc76a5f476..89735e0e714c4 100644 --- a/test/cpp/ir/core/ir_exe_test.cc +++ b/test/cpp/ir/core/ir_exe_test.cc @@ -108,10 +108,7 @@ TEST(program_test, program) { bool res1 = simple_cmp(out_tensor.data()[1], 1.70047); bool res2 = simple_cmp(out_tensor.data()[2], 1.56764); bool res3 = simple_cmp(out_tensor.data()[3], 1.85063); - std::cerr << out_tensor.data()[0] << "\t" - << out_tensor.data()[1] << "\t" - << out_tensor.data()[2] << "\t" - << out_tensor.data()[3] << std::endl; + EXPECT_EQ(res0, true); EXPECT_EQ(res1, true); EXPECT_EQ(res2, true); diff --git a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc index 62fac2629d9bf..2b748ff1f21b8 100644 --- a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc +++ b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc @@ -51,6 +51,8 @@ PD_DECLARE_KERNEL(full_int_array, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(uniform, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT); +bool simple_cmp(float a, float b) { return std::abs((a - b) / a) < 1e-5; } + TEST(program_test, program) { // (1) Init environment. ir::IrContext* ctx = ir::IrContext::Instance(); @@ -58,22 +60,16 @@ TEST(program_test, program) { ctx->GetOrRegisterDialect(); - ir::Block* block = program.block(); - ir::Builder builder = ir::Builder::AtBlockEnd(ctx, program.block()); paddle::dialect::FullOp op1 = builder.Build( std::vector{2, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); - block->push_back(op1); - paddle::dialect::FullOp op2 = builder.Build( std::vector{2, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); - block->push_back(op2); - paddle::dialect::AddOp add = builder.Build( - op1->GetResultByIndex(0), op2->GetResultByIndex(0)); - block->push_back(add); + builder.Build(op1->GetResultByIndex(0), + op2->GetResultByIndex(0)); auto kernel_program = paddle::dialect::PdOpLowerToKernelPass(&program); @@ -84,5 +80,13 @@ TEST(program_test, program) { auto out_tensor = scope.Var(phi_kernel_adaptor.out_name)->Get(); - std::cerr << "op result" << out_tensor << std::endl; + bool res0 = simple_cmp(out_tensor.data()[0], 2.0); + bool res1 = simple_cmp(out_tensor.data()[1], 2.0); + bool res2 = simple_cmp(out_tensor.data()[2], 2.0); + bool res3 = simple_cmp(out_tensor.data()[3], 2.0); + + EXPECT_EQ(res0, true); + EXPECT_EQ(res1, true); + EXPECT_EQ(res2, true); + EXPECT_EQ(res3, true); } From 8bf22ec9b2297453d3dc3f97f95fb9779f64b940 Mon Sep 17 00:00:00 2001 From: phlrain Date: Sat, 10 Jun 2023 13:41:34 +0000 Subject: [PATCH 09/11] add attrite print test --- .../ir_kernel_dialect_pass_test.cc | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc index 2b748ff1f21b8..0a3434586902c 100644 --- a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc +++ b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc @@ -46,6 +46,8 @@ #include "paddle/phi/core/kernel_registry.h" +#include "paddle/fluid/ir/dialect/kernel_dialect.h" + PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT); 
PD_DECLARE_KERNEL(full_int_array, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(uniform, CPU, ALL_LAYOUT); @@ -90,3 +92,23 @@ TEST(program_test, program) { EXPECT_EQ(res2, true); EXPECT_EQ(res3, true); } + +TEST(dialect_attr, attr) { + // (1) Init environment. + ir::IrContext* ctx = ir::IrContext::Instance(); + ir::Program program((ctx)); + + ctx->GetOrRegisterDialect(); + auto kernel_dialect = + ctx->GetOrRegisterDialect(); + + phi::KernelKey kernel_key( + phi::Backend::CPU, phi::DataLayout::ALL_LAYOUT, phi::DataType::FLOAT32); + auto attr = paddle::dialect::KernelAttribute::get(ctx, kernel_key); + + std::stringstream ss; + + kernel_dialect->PrintAttribute(attr, ss); + + std::cerr << ss.str() << std::endl; +} From 62553cd945d5daf73306040d3f4a130991d4ee61 Mon Sep 17 00:00:00 2001 From: phlrain Date: Sat, 10 Jun 2023 14:07:39 +0000 Subject: [PATCH 10/11] fix bug --- paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc | 3 +-- test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc | 4 +++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc index 21dfe02338292..b2ef1d6f7a0c3 100644 --- a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc +++ b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc @@ -119,8 +119,7 @@ phi::KernelKey GetKernelKey( new_input_tmp.type().dyn_cast(); // fake tensor here - phi::Place cpu_place(phi::AllocationType::CPU); - auto ptr = new phi::Allocation(nullptr, 0, cpu_place); + auto ptr = new phi::Allocation(nullptr, 0, type.place()); std::shared_ptr holder(ptr); diff --git a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc index 0a3434586902c..dba6c5a7686fc 100644 --- a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc +++ b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc @@ -110,5 +110,7 @@ TEST(dialect_attr, attr) { kernel_dialect->PrintAttribute(attr, ss); - std::cerr << ss.str() << std::endl; + EXPECT_EQ( + ss.str() == "", + true); } From 819a9818e32d5b90edcc70af9d554840be824e29 Mon Sep 17 00:00:00 2001 From: phlrain Date: Mon, 12 Jun 2023 08:21:29 +0000 Subject: [PATCH 11/11] polish code --- paddle/fluid/ir/dialect/utils.h | 7 +++-- paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc | 29 +++++--------------- 2 files changed, 11 insertions(+), 25 deletions(-) diff --git a/paddle/fluid/ir/dialect/utils.h b/paddle/fluid/ir/dialect/utils.h index 936d902320bb0..e9c4817c3177f 100644 --- a/paddle/fluid/ir/dialect/utils.h +++ b/paddle/fluid/ir/dialect/utils.h @@ -151,15 +151,16 @@ struct OpRunTimeInfo { std::vector infer_meta_param, std::vector kernel_func, std::vector kernel_param, - std::vector dtype) + std::vector dtype, std::vector> inplace, std::vector> view) : infer_meta_func(infer_meta_func), infer_meta_param(infer_meta_param), kernel_func(kernel_func), kernel_param(kernel_param), - kernel_key_dtype(dtype) {} - inplace(inplace), view(view) {} + kernel_key_dtype(dtype), + inplace(inplace), + view(view) {} }; } // namespace dialect diff --git a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc index b2ef1d6f7a0c3..d3e8fbd526de5 100644 --- a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc +++ b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc @@ -14,6 +14,8 @@ #include +#include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h" + #include "paddle/fluid/ir/dialect/kernel_attribute.h" #include "paddle/fluid/ir/dialect/kernel_dialect.h" #include "paddle/fluid/ir/dialect/kernel_op.h" @@ 
-21,7 +23,6 @@ #include "paddle/fluid/ir/dialect/pd_attribute.h" #include "paddle/fluid/ir/dialect/utils.h" #include "paddle/fluid/ir/interface/op_yaml_info.h" -#include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h" #include "paddle/phi/api/lib/kernel_dispatch.h" #include "paddle/phi/common/place.h" #include "paddle/phi/core/compat/convert_utils.h" @@ -29,22 +30,6 @@ namespace paddle { namespace dialect { -phi::DataType convetIrType2DataType(ir::Type type) { - if (type.isa()) { - return phi::DataType::FLOAT32; - } else if (type.isa()) { - return phi::DataType::FLOAT16; - } else if (type.isa()) { - return phi::DataType::FLOAT64; - } else if (type.isa()) { - return phi::DataType::BFLOAT16; - } else if (type.isa()) { - return phi::DataType::INT32; - } else { - PADDLE_THROW("not shupport type for now", type); - } -} - phi::KernelKey GetKernelKey( ir::Operation* op, const phi::Place& place, @@ -92,10 +77,10 @@ phi::KernelKey GetKernelKey( .dyn_cast(); kernel_data_type = type.dyn_cast().data(); } else { - PADDLE_ENFORCE_EQ(attr_type_map.count(slot_name), - true, - "[%s] MUST in attr map", - slot_name); + PADDLE_ENFORCE_EQ( + attr_type_map.count(slot_name), + true, + phi::errors::PreconditionNotMet("[%s] MUST in attr map", slot_name)); kernel_data_type = attr_map.at(slot_name) .dyn_cast() .data(); @@ -123,7 +108,7 @@ phi::KernelKey GetKernelKey( std::shared_ptr holder(ptr); - auto dtype = convetIrType2DataType(type.dtype()); + auto dtype = TransToPhiDataType(type.dtype()); phi::DenseTensorMeta meta( dtype, type.dims(), type.data_layout(), type.lod(), type.offset());
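As in GetKernelKey above, the backend/layout/dtype fields start out UNDEFINED and are filled from whichever source knows them first (the place, a dtype-carrying attribute or input, then the key parsed from the fake input tensors). A stripped-down sketch of that merge step, with toy enums standing in for the phi types:

#include <iostream>

enum class Backend { UNDEFINED, CPU, GPU };
enum class Layout { UNDEFINED, NCHW, NHWC };
enum class DType { UNDEFINED, FLOAT32, INT32 };

struct KernelKey {
  Backend backend = Backend::UNDEFINED;
  Layout layout = Layout::UNDEFINED;
  DType dtype = DType::UNDEFINED;
};

// Fill only the fields that are still UNDEFINED, mirroring the fallback
// order where the key parsed from input tensors is consulted last.
KernelKey Merge(KernelKey picked, const KernelKey& parsed) {
  if (picked.backend == Backend::UNDEFINED) picked.backend = parsed.backend;
  if (picked.layout == Layout::UNDEFINED) picked.layout = parsed.layout;
  if (picked.dtype == DType::UNDEFINED) picked.dtype = parsed.dtype;
  return picked;
}

int main() {
  KernelKey from_attrs;                // the op attribute fixed only the dtype
  from_attrs.dtype = DType::FLOAT32;
  KernelKey from_inputs{Backend::CPU, Layout::NCHW, DType::INT32};
  KernelKey key = Merge(from_attrs, from_inputs);
  std::cout << std::boolalpha
            << (key.dtype == DType::FLOAT32) << " "   // dtype kept from attrs
            << (key.backend == Backend::CPU) << "\n"; // backend taken from inputs
}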