diff --git a/paddle/fluid/ir/CMakeLists.txt b/paddle/fluid/ir/CMakeLists.txt
index 919f78dc17ce57ed032f0abce26dc75f7ca68190..19c5c5d7310bebd137d3cc5a5eef81e8555d95aa 100644
--- a/paddle/fluid/ir/CMakeLists.txt
+++ b/paddle/fluid/ir/CMakeLists.txt
@@ -1,2 +1,3 @@
 add_subdirectory(interface)
 add_subdirectory(dialect)
+add_subdirectory(pass)
diff --git a/paddle/fluid/ir/dialect/kernel_attribute.h b/paddle/fluid/ir/dialect/kernel_attribute.h
new file mode 100644
index 0000000000000000000000000000000000000000..d22bc9ff949270ee1f13716f09f2e81a42be0654
--- /dev/null
+++ b/paddle/fluid/ir/dialect/kernel_attribute.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/fluid/ir/dialect/kernel_attribute_storage.h"
+#include "paddle/ir/core/attribute.h"
+#include "paddle/phi/core/enforce.h"
+
+namespace paddle {
+namespace dialect {
+
+class KernelAttribute : public ir::Attribute {
+ public:
+  using Attribute::Attribute;
+
+  DECLARE_ATTRIBUTE_UTILITY_FUNCTOR(KernelAttribute, KernelAttributeStorage);
+
+  bool operator<(const KernelAttribute &right) const {
+    return storage() < right.storage();
+  }
+
+  phi::KernelKey data() const { return storage()->GetAsKey(); }
+};
+
+} // namespace dialect
+} // namespace paddle
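For context: KernelAttribute is a thin handle over KernelAttributeStorage, so it is obtained through the interned get() helper generated by DECLARE_ATTRIBUTE_UTILITY_FUNCTOR and unpacked back into a phi::KernelKey with data(). A minimal sketch of that round trip, not part of the patch, assuming the dialect that registers this attribute (added below) is already on the context:

    // Sketch only: wrap a phi::KernelKey in a KernelAttribute and read it back.
    #include "paddle/fluid/ir/dialect/kernel_attribute.h"
    #include "paddle/ir/core/ir_context.h"

    void KernelAttributeRoundTrip() {
      ir::IrContext *ctx = ir::IrContext::Instance();

      phi::KernelKey key(
          phi::Backend::CPU, phi::DataLayout::ALL_LAYOUT, phi::DataType::FLOAT32);

      // get() interns the attribute in the context, so equal keys share storage.
      auto attr = paddle::dialect::KernelAttribute::get(ctx, key);

      // Cast back from the generic ir::Attribute handle and unpack the key.
      phi::KernelKey recovered =
          attr.dyn_cast<paddle::dialect::KernelAttribute>().data();
      (void)recovered;
    }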
diff --git a/paddle/fluid/ir/dialect/kernel_attribute_storage.h b/paddle/fluid/ir/dialect/kernel_attribute_storage.h
new file mode 100644
index 0000000000000000000000000000000000000000..634e2f2dfff17767065979b506994e06b6e9f03c
--- /dev/null
+++ b/paddle/fluid/ir/dialect/kernel_attribute_storage.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/ir/core/attribute.h"
+#include "paddle/ir/core/utils.h"
+#include "paddle/phi/common/data_type.h"
+#include "paddle/phi/core/kernel_factory.h"
+
+namespace paddle {
+namespace dialect {
+
+struct KernelAttributeStorage : public ir::AttributeStorage {
+  using ParamKey = phi::KernelKey;
+
+  explicit KernelAttributeStorage(const ParamKey &key) { kernel_key_ = key; }
+
+  static KernelAttributeStorage *Construct(ParamKey key) {
+    return new KernelAttributeStorage(key);
+  }
+
+  static std::size_t HashValue(const ParamKey &key) {
+    auto t = phi::KernelKey::Hash()(key);
+    return t;
+  }
+
+  bool operator==(const ParamKey &key) const { return kernel_key_ == key; }
+
+  ParamKey GetAsKey() const { return kernel_key_; }
+
+ private:
+  phi::KernelKey kernel_key_;
+};
+
+} // namespace dialect
+} // namespace paddle
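The storage type above is what makes that interning work: roughly, the context hashes a ParamKey with HashValue, probes existing storages with operator==, and only calls Construct on a miss, which is the usual contract for these storage classes. A stand-alone sketch of that contract (not part of the patch; in real use the IrContext owns the storage, so nothing created here should be kept):

    // Sketch only: the uniquing contract KernelAttributeStorage satisfies.
    #include <cassert>
    #include <cstddef>

    #include "paddle/fluid/ir/dialect/kernel_attribute_storage.h"

    void StorageContractSketch() {
      phi::KernelKey a(
          phi::Backend::CPU, phi::DataLayout::ALL_LAYOUT, phi::DataType::FLOAT32);
      phi::KernelKey b = a;

      // Equal keys must hash equally, otherwise lookups would miss the cache.
      std::size_t ha = paddle::dialect::KernelAttributeStorage::HashValue(a);
      std::size_t hb = paddle::dialect::KernelAttributeStorage::HashValue(b);
      assert(ha == hb);

      // Construct is only reached when no existing storage compares equal.
      auto *storage = paddle::dialect::KernelAttributeStorage::Construct(a);
      assert(*storage == b);             // operator==(const ParamKey &)
      assert(storage->GetAsKey() == b);  // round-trips the key
    }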
#include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/framework/data_type.h" -#include "paddle/fluid/ir/dialect/pd_kernel_type.h" -#include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h" +#include "paddle/fluid/ir/dialect/kernel_attribute.h" +#include "paddle/fluid/ir/dialect/kernel_type.h" +#include "paddle/fluid/ir/dialect/kernel_type_storage.h" #include "paddle/fluid/ir/dialect/pd_op.h" #include "paddle/fluid/ir/dialect/utils.h" #include "paddle/ir/core/dialect_interface.h" @@ -38,13 +39,10 @@ void PaddleKernelDialect::initialize() { RegisterTypes(); RegisterOps(); - // RegisterAttributes(); + RegisterAttributes(); } -void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) { +void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) const { AllocatedDenseTensorType tensor_type = type.dyn_cast(); @@ -58,5 +56,13 @@ void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) { os << ">"; } +void PaddleKernelDialect::PrintAttribute(ir::Attribute attr, + std::ostream &os) const { + phi::KernelKey kernel = attr.dyn_cast().data(); + + os << ""; +} + } // namespace dialect } // namespace paddle diff --git a/paddle/fluid/ir/dialect/pd_kernel_dialect.h b/paddle/fluid/ir/dialect/kernel_dialect.h similarity index 88% rename from paddle/fluid/ir/dialect/pd_kernel_dialect.h rename to paddle/fluid/ir/dialect/kernel_dialect.h index e3e4e329be89a4de4eb6cc326421d1e2b46b365f..2cbbee316d75aa54e09a1c1bbb26058a932a1c31 100644 --- a/paddle/fluid/ir/dialect/pd_kernel_dialect.h +++ b/paddle/fluid/ir/dialect/kernel_dialect.h @@ -27,7 +27,9 @@ class PaddleKernelDialect : public ir::Dialect { static const char* name() { return "pd_kernel"; } - void PrintType(ir::Type type, std::ostream& os); + void PrintType(ir::Type type, std::ostream& os) const override; + + void PrintAttribute(ir::Attribute attr, std::ostream& os) const override; private: void initialize(); diff --git a/paddle/fluid/ir/dialect/pd_kernel_op.cc b/paddle/fluid/ir/dialect/kernel_op.cc similarity index 90% rename from paddle/fluid/ir/dialect/pd_kernel_op.cc rename to paddle/fluid/ir/dialect/kernel_op.cc index 0a04284642f4384d358c1a00d208b0fe0a25cba7..eacf53470864553d231ac738ede3932d81c47121 100644 --- a/paddle/fluid/ir/dialect/pd_kernel_op.cc +++ b/paddle/fluid/ir/dialect/kernel_op.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/ir/dialect/pd_kernel_op.h" +#include "paddle/fluid/ir/dialect/kernel_op.h" namespace paddle { namespace dialect { @@ -23,7 +23,8 @@ const char *PhiKernelOp::attributes_name[attributes_num] = { void PhiKernelOp::Verify(const std::vector &inputs, const std::vector &outputs, const ir::AttributeMap &attributes) { - VLOG(4) << "Verifying inputs, outputs and attributes for: SetParameterOp."; + VLOG(4) << "Verifying inputs, outputs and attributes for: PhiKernelOp."; + // Verify inputs type: // Verify if attributes contain attribute name in attributes_name: diff --git a/paddle/fluid/ir/dialect/pd_kernel_op.h b/paddle/fluid/ir/dialect/kernel_op.h similarity index 100% rename from paddle/fluid/ir/dialect/pd_kernel_op.h rename to paddle/fluid/ir/dialect/kernel_op.h diff --git a/paddle/fluid/ir/dialect/pd_kernel_type.cc b/paddle/fluid/ir/dialect/kernel_type.cc similarity index 96% rename from paddle/fluid/ir/dialect/pd_kernel_type.cc rename to paddle/fluid/ir/dialect/kernel_type.cc index 48fcca97d01c763f052d703f5e3f49b3ddf5d75a..2aa4b32137dcea3dbf5a60f150d2d7415856397e 100644 --- a/paddle/fluid/ir/dialect/pd_kernel_type.cc +++ b/paddle/fluid/ir/dialect/kernel_type.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/dialect/pd_kernel_type.h" +#include "paddle/fluid/ir/dialect/kernel_type.h" namespace paddle { namespace dialect { diff --git a/paddle/fluid/ir/dialect/pd_kernel_type.h b/paddle/fluid/ir/dialect/kernel_type.h similarity index 81% rename from paddle/fluid/ir/dialect/pd_kernel_type.h rename to paddle/fluid/ir/dialect/kernel_type.h index f0e80648fcb9c8b65f47e82992b4628e52737fe1..a0a6be196055891035264d97293aea7ed485432c 100644 --- a/paddle/fluid/ir/dialect/pd_kernel_type.h +++ b/paddle/fluid/ir/dialect/kernel_type.h @@ -14,7 +14,7 @@ #pragma once -#include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h" +#include "paddle/fluid/ir/dialect/kernel_type_storage.h" #include "paddle/fluid/ir/dialect/pd_type.h" #include "paddle/ir/core/type.h" @@ -31,18 +31,18 @@ class AllocatedDenseTensorType : public ir::Type { AllocatedDenseTensorTypeStorage); static AllocatedDenseTensorType get(ir::IrContext *ctx, - phi::Place place, + const phi::Place &place, dialect::DenseTensorType type) { return ir::TypeManager::template get( ctx, place, type); } static AllocatedDenseTensorType get(ir::IrContext *ctx, - phi::Place place, - ir::Type dtype, - phi::DDim dims, - phi::DataLayout layout, - phi::LoD lod, + const phi::Place &place, + const ir::Type &dtype, + const phi::DDim &dims, + const phi::DataLayout &layout, + const phi::LoD &lod, size_t offset) { dialect::DenseTensorType dense_tensor_type = dialect::DenseTensorType::get(ctx, dtype, dims, layout, lod, offset); diff --git a/paddle/fluid/ir/dialect/pd_kernel_type_storage.h b/paddle/fluid/ir/dialect/kernel_type_storage.h similarity index 100% rename from paddle/fluid/ir/dialect/pd_kernel_type_storage.h rename to paddle/fluid/ir/dialect/kernel_type_storage.h diff --git a/paddle/fluid/ir/dialect/op_gen.py b/paddle/fluid/ir/dialect/op_gen.py index 5a2bc6772aef251d2d82ecb0ad5870c1e23902ac..d5bdca85caf9f71e35d3aaf04b3ffecb3dfa5f6e 100644 --- a/paddle/fluid/ir/dialect/op_gen.py +++ b/paddle/fluid/ir/dialect/op_gen.py @@ -110,7 +110,8 @@ OpInfoTuple {op_name}::GetOpInfo() {{ std::vector inputs = {{ {inputs} }}; std::vector attributes = {{ {attributes} }}; std::vector outputs = {{ {outputs} }}; - paddle::dialect::OpRunTimeInfo run_time_info = 
OpRunTimeInfo("{infer_meta_func}", {{"{infer_meta_param}"}}, {{"{kernel_func}"}}, {{"{kernel_param}"}}, {{{inplace}}}, {{{view}}}); + paddle::dialect::OpRunTimeInfo run_time_info = OpRunTimeInfo("{infer_meta_func}", {{"{infer_meta_param}"}}, {{"{kernel_func}"}}, {{"{kernel_param}"}}, {{"{kernel_key_dtype}"}}, {{{inplace}}}, {{{view}}}); + return std::make_tuple(inputs, attributes, outputs, run_time_info); }} """ @@ -1488,9 +1489,14 @@ def OpGenerator( kernel_func_str = "" kernel_param_str = "" + kernel_key_dtype = "" if op_kernel_map is not None: kernel_func_str = '", "'.join(op_kernel_map['func']) kernel_param_str = '", "'.join(op_kernel_map['param']) + if 'data_type' in op_kernel_map and op_kernel_map['data_type']: + kernel_key_dtype = '", "'.join( + op_kernel_map['data_type']['candidates'] + ) inplace_str = "" view_str = "" @@ -1513,6 +1519,7 @@ def OpGenerator( infer_meta_param=infer_meta_param_str, kernel_func=kernel_func_str, kernel_param=kernel_param_str, + kernel_key_dtype=kernel_key_dtype, inplace=inplace_str, view=view_str, ) diff --git a/paddle/fluid/ir/dialect/pd_type_storage.h b/paddle/fluid/ir/dialect/pd_type_storage.h index c2de288f2a5922e665c7c8e93b205ce6e22489f7..dbdb3b374e4d223b89280e67bf27bf858bad2f81 100644 --- a/paddle/fluid/ir/dialect/pd_type_storage.h +++ b/paddle/fluid/ir/dialect/pd_type_storage.h @@ -112,15 +112,6 @@ struct DenseTensorTypeStorage : public ir::TypeStorage { return ParamKey(dtype_, dims_, layout_, lod_, offset_) == key; } - bool operator==(const DenseTensorTypeStorage &storage) const { - return ParamKey(dtype_, dims_, layout_, lod_, offset_) == - ParamKey(storage.dtype_, - storage.dims_, - storage.layout_, - storage.lod_, - storage.offset_); - } - ParamKey GetAsKey() const { return ParamKey(dtype_, dims_, layout_, lod_, offset_); } diff --git a/paddle/fluid/ir/dialect/utils.h b/paddle/fluid/ir/dialect/utils.h index 5f08ed28d2e8b2cd77373bfbf776d55ac756adf5..e9c4817c3177f9c2003b22b96e1e58ba9f6b0f54 100644 --- a/paddle/fluid/ir/dialect/utils.h +++ b/paddle/fluid/ir/dialect/utils.h @@ -144,18 +144,21 @@ struct OpRunTimeInfo { std::vector infer_meta_param; std::vector kernel_func; std::vector kernel_param; + std::vector kernel_key_dtype; std::vector> inplace; std::vector> view; OpRunTimeInfo(std::string infer_meta_func, std::vector infer_meta_param, std::vector kernel_func, std::vector kernel_param, + std::vector dtype, std::vector> inplace, std::vector> view) : infer_meta_func(infer_meta_func), infer_meta_param(infer_meta_param), kernel_func(kernel_func), kernel_param(kernel_param), + kernel_key_dtype(dtype), inplace(inplace), view(view) {} }; diff --git a/paddle/fluid/ir/interface/infershape.h b/paddle/fluid/ir/interface/infershape.h index 7a723ea03777e221bdf115a81cb749a7265f8e78..4c5b72b6127ad9fc65efa78f749907f7605bb57d 100644 --- a/paddle/fluid/ir/interface/infershape.h +++ b/paddle/fluid/ir/interface/infershape.h @@ -19,19 +19,15 @@ class InferShapeInterface : public ir::OpInterfaceBase { public: struct Concept { - explicit Concept(void (*infer_shape)(ir::Operation *, - phi::InferMetaContext *)) + explicit Concept(void (*infer_shape)(phi::InferMetaContext *)) : infer_shape_(infer_shape) {} - void (*infer_shape_)(ir::Operation *, phi::InferMetaContext *); + void (*infer_shape_)(phi::InferMetaContext *); }; template struct Model : public Concept { - static void InferShape(ir::Operation *op, - phi::InferMetaContext *infer_meta) { - ConcreteOp concret_op = op->dyn_cast(); - if (concret_op == nullptr) throw("concret_op is nullptr"); - 
concret_op.InferShape(infer_meta); + static void InferShape(phi::InferMetaContext *infer_meta) { + return ConcreteOp::InferShape(infer_meta); } Model() : Concept(InferShape) {} @@ -41,7 +37,7 @@ class InferShapeInterface : public ir::OpInterfaceBase { : ir::OpInterfaceBase(op), impl_(impl) {} void InferShape(phi::InferMetaContext *infer_meta) { - impl_->infer_shape_(operation(), infer_meta); + impl_->infer_shape_(infer_meta); } private: diff --git a/paddle/fluid/ir/pass/CMakeLists.txt b/paddle/fluid/ir/pass/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..0d1214afe10c00c18a88a9a406bd2f1427f9e228 --- /dev/null +++ b/paddle/fluid/ir/pass/CMakeLists.txt @@ -0,0 +1,7 @@ +# All source files of pd_dialect, except for the source file of op, which is generated in the compilation directory. +file(GLOB PD_PASS_SRCS "*.cc") + +cc_library( + pd_op_to_kernel_pass + SRCS ${PD_PASS_SRCS} + DEPS new_ir phi_utils) diff --git a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..d3e8fbd526de510d13234f53275c4fc789184272 --- /dev/null +++ b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.cc @@ -0,0 +1,213 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
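The InferShapeInterface change above is what makes the lowered program executable: InferShape is now a static hook that takes only the phi::InferMetaContext, so it can be fetched from a registered OpInfo and invoked without materializing the original pd operation (run_kernel_prog later in this diff relies on that). A rough sketch of what a concrete op provides, not part of the patch; MyOp is hypothetical and the paddle::dialect namespace for the interface is assumed:

    // Sketch only: what an op must provide after this change; generated pd ops
    // expose the same static entry point.
    #include "paddle/fluid/ir/interface/infershape.h"

    struct MyOp {
      static void InferShape(phi::InferMetaContext *infer_meta) {
        // Would forward to the matching phi InferMeta function for this op.
      }
    };

    // The interface's Model<MyOp> forwards straight to MyOp::InferShape, so a
    // caller only needs the function pointer stored in Concept:
    void CallInferShape(paddle::dialect::InferShapeInterface interface,
                        phi::InferMetaContext *infer_meta) {
      interface.InferShape(infer_meta);  // no ir::Operation* required any more
    }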
+ +#include + +#include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h" + +#include "paddle/fluid/ir/dialect/kernel_attribute.h" +#include "paddle/fluid/ir/dialect/kernel_dialect.h" +#include "paddle/fluid/ir/dialect/kernel_op.h" +#include "paddle/fluid/ir/dialect/kernel_type.h" +#include "paddle/fluid/ir/dialect/pd_attribute.h" +#include "paddle/fluid/ir/dialect/utils.h" +#include "paddle/fluid/ir/interface/op_yaml_info.h" +#include "paddle/phi/api/lib/kernel_dispatch.h" +#include "paddle/phi/common/place.h" +#include "paddle/phi/core/compat/convert_utils.h" +#include "paddle/phi/core/kernel_factory.h" +namespace paddle { +namespace dialect { + +phi::KernelKey GetKernelKey( + ir::Operation* op, + const phi::Place& place, + const std::unordered_map& map_value_pair) { + paddle::dialect::OpYamlInfoInterface op_info_interface = + op->dyn_cast(); + auto op_info_res = op_info_interface.GetOpInfo(); + + auto input_info = std::get<0>(op_info_res); + + // only suppurt non vector input for now + std::map input_map; + int index = 0; + for (auto& t : input_info) { + // todo filter attribute tensor + input_map[t.name] = index++; + } + + std::map attr_type_map; + auto attr_info = std::get<1>(op_info_res); + for (auto& t : attr_info) { + VLOG(6) << t.name << "\t" << t.type_name; + attr_type_map[t.name] = t.type_name; + } + auto runtime_info = std::get<3>(op_info_res); + + // get dtype infomation + phi::Backend kernel_backend = phi::Backend::UNDEFINED; + phi::DataLayout kernel_layout = phi::DataLayout::UNDEFINED; + phi::DataType kernel_data_type = phi::DataType::UNDEFINED; + + auto attr_map = op->attributes(); + auto data_type_info = runtime_info.kernel_key_dtype; + if (data_type_info.size() > 0 && data_type_info[0] != "") { + // only support single input and attribute + auto slot_name = data_type_info[0]; + if (input_map.count(slot_name)) { + // parse from input + int in_index = input_map.at(slot_name); + + dialect::AllocatedDenseTensorType type = + op->GetOperandByIndex(in_index) + .source() + .type() + .dyn_cast(); + kernel_data_type = type.dyn_cast().data(); + } else { + PADDLE_ENFORCE_EQ( + attr_type_map.count(slot_name), + true, + phi::errors::PreconditionNotMet("[%s] MUST in attr map", slot_name)); + kernel_data_type = attr_map.at(slot_name) + .dyn_cast() + .data(); + } + } + + // parse all the input tensor + + if (input_map.size() == 0 || op->name() == "pd.full_") { + // all the information have to get from attribute and context + kernel_backend = paddle::experimental::ParseBackend(place); + + } else { + paddle::experimental::detail::KernelKeyParser kernel_key_parser; + + for (size_t i = 0; i < input_info.size(); ++i) { + // todo filter attribute tensor + auto input_tmp = op->GetOperandByIndex(i).source(); + auto new_input_tmp = map_value_pair.at(input_tmp); + dialect::AllocatedDenseTensorType type = + new_input_tmp.type().dyn_cast(); + + // fake tensor here + auto ptr = new phi::Allocation(nullptr, 0, type.place()); + + std::shared_ptr holder(ptr); + + auto dtype = TransToPhiDataType(type.dtype()); + + phi::DenseTensorMeta meta( + dtype, type.dims(), type.data_layout(), type.lod(), type.offset()); + + phi::DenseTensor fake_tensor(holder, meta); + + kernel_key_parser.AssignKernelKeySet(fake_tensor); + } + + auto kernel_key_set = kernel_key_parser.key_set; + + auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey(); + + if (kernel_backend == phi::Backend::UNDEFINED) { + kernel_backend = kernel_key.backend(); + } + if (kernel_layout == phi::DataLayout::UNDEFINED) { + kernel_layout = 
kernel_key.layout(); + } + if (kernel_data_type == phi::DataType::UNDEFINED) { + kernel_data_type = kernel_key.dtype(); + } + } + + phi::KernelKey res(kernel_backend, kernel_layout, kernel_data_type); + return res; +} + +std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog) { + auto program = std::make_unique(ir::IrContext::Instance()); + + auto block = prog->block(); + phi::Place cpu_place(phi::AllocationType::CPU); + + ir::IrContext* ctx = ir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + + std::unordered_map map_op_pair; + std::unordered_map map_value_pair; + + std::string op1_name = paddle::dialect::PhiKernelOp::name(); + + ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); + + for (auto it = block->begin(); it != block->end(); ++it) { + auto kernel_key = GetKernelKey(*it, cpu_place, map_value_pair); + + // create new Op + + // only for single output + // need update new kernel key layout and data tyep + auto allocated_dense_tensor_dtype = + paddle::dialect::AllocatedDenseTensorType::get( + ctx, + phi::TransToPhiPlace(kernel_key.backend()), + (*it) + ->GetResultByIndex(0) + .type() + .dyn_cast()); + + // constuct input + std::vector vec_inputs; + if ((*it)->name() != "pd.full_" && (*it)->num_operands() > 0) { + for (size_t i = 0; i < (*it)->num_operands(); ++i) { + auto cur_in = (*it)->GetOperandByIndex(i).source(); + auto new_in = map_value_pair.at(cur_in); + + vec_inputs.push_back(new_in); + } + } + + paddle::dialect::OpYamlInfoInterface op_info_interface = + (*it)->dyn_cast(); + auto op_info_res = op_info_interface.GetOpInfo(); + auto runtime_info = std::get<3>(op_info_res); + + std::unordered_map op1_attribute{ + {"op_name", ir::StrAttribute::get(ctx, (*it)->name())}, + {"kernel_name", + ir::StrAttribute::get(ctx, runtime_info.kernel_func[0])}, + {"kernel_key", dialect::KernelAttribute::get(ctx, kernel_key)}}; + + auto op_attr_map = (*it)->attributes(); + + for (auto it1 = op_attr_map.begin(); it1 != op_attr_map.end(); ++it1) { + op1_attribute.emplace(it1->first, it1->second); + } + + ir::Operation* op1 = ir::Operation::Create( + vec_inputs, op1_attribute, {allocated_dense_tensor_dtype}, op1_info); + + map_op_pair[*it] = op1; + map_value_pair[(*it)->GetResultByIndex(0)] = op1->GetResultByIndex(0); + + program->block()->push_back(op1); + } + + return program; +} + +} // namespace dialect +} // namespace paddle diff --git a/paddle/fluid/ir/pass/pd_op_to_kernel_pass.h b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.h new file mode 100644 index 0000000000000000000000000000000000000000..415ce18bb0756a9c9b997033b06bbea463c86e36 --- /dev/null +++ b/paddle/fluid/ir/pass/pd_op_to_kernel_pass.h @@ -0,0 +1,24 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
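Taken together, GetKernelKey fills in the phi::KernelKey from three sources: an explicit kernel_key_dtype slot declared in the op's yaml info (read from an input type or an attribute), the KernelKeyParser run over fake DenseTensors built from the operands' AllocatedDenseTensorType, and, for ops without usable inputs such as pd.full_, the backend parsed from the target place. The pass itself is invoked the way the new test does; a sketch of the intended call site, not part of the patch:

    // Sketch only: drive the lowering pass and hand the result to the adaptor.
    #include <memory>

    #include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h"
    #include "paddle/ir/core/program.h"

    std::unique_ptr<ir::Program> LowerForCpu(ir::Program *program) {
      // The pass currently assumes a CPU place internally and rewrites every op
      // into a PhiKernelOp carrying op_name, kernel_name and kernel_key.
      return paddle::dialect::PdOpLowerToKernelPass(program);
    }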
+#pragma once + +#include "paddle/ir/core/program.h" + +namespace paddle { +namespace dialect { + +std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog); + +} // namespace dialect +} // namespace paddle diff --git a/test/cpp/ir/CMakeLists.txt b/test/cpp/ir/CMakeLists.txt index c5524ee38754b14f50342797d10b32249be65769..a94503c0e5a1eab98d510a24a0a30ae9b76c8d67 100644 --- a/test/cpp/ir/CMakeLists.txt +++ b/test/cpp/ir/CMakeLists.txt @@ -5,3 +5,4 @@ endif() add_subdirectory(core) add_subdirectory(pass) add_subdirectory(pattern_rewrite) +add_subdirectory(kernel_dialect) diff --git a/test/cpp/ir/core/ir_exe_test.cc b/test/cpp/ir/core/ir_exe_test.cc index 58ff05c2660a7be2e6ba12825f93192cec2d4ddc..067a735069e4dbe742f2d2e4712f86df7dce45ff 100644 --- a/test/cpp/ir/core/ir_exe_test.cc +++ b/test/cpp/ir/core/ir_exe_test.cc @@ -103,10 +103,7 @@ TEST(program_test, program) { bool res1 = simple_cmp(out_tensor.data()[1], 1.70047); bool res2 = simple_cmp(out_tensor.data()[2], 1.56764); bool res3 = simple_cmp(out_tensor.data()[3], 1.85063); - std::cerr << out_tensor.data()[0] << "\t" - << out_tensor.data()[1] << "\t" - << out_tensor.data()[2] << "\t" - << out_tensor.data()[3] << std::endl; + EXPECT_EQ(res0, true); EXPECT_EQ(res1, true); EXPECT_EQ(res2, true); diff --git a/test/cpp/ir/core/ir_phi_kernel_op_test.cc b/test/cpp/ir/core/ir_phi_kernel_op_test.cc index 8bb37b8e922bedfd0ef5872e63d4d81ae605c71b..2322ca08369b52b9b13f39b98de309ba67193a1a 100644 --- a/test/cpp/ir/core/ir_phi_kernel_op_test.cc +++ b/test/cpp/ir/core/ir_phi_kernel_op_test.cc @@ -15,10 +15,10 @@ #include #include +#include "paddle/fluid/ir/dialect/kernel_dialect.h" +#include "paddle/fluid/ir/dialect/kernel_op.h" +#include "paddle/fluid/ir/dialect/kernel_type.h" #include "paddle/fluid/ir/dialect/pd_dialect.h" -#include "paddle/fluid/ir/dialect/pd_kernel_dialect.h" -#include "paddle/fluid/ir/dialect/pd_kernel_op.h" -#include "paddle/fluid/ir/dialect/pd_kernel_type.h" #include "paddle/fluid/ir/dialect/utils.h" #include "paddle/fluid/ir/interface/op_yaml_info.h" #include "paddle/ir/core/block.h" diff --git a/test/cpp/ir/core/phi_kernel_adaptor.h b/test/cpp/ir/core/phi_kernel_adaptor.h index ded1cebe0c2f1065100e15df6ca96ded6e50f36c..1910f4bf49a8c239565c2f2689358a18039f03bc 100644 --- a/test/cpp/ir/core/phi_kernel_adaptor.h +++ b/test/cpp/ir/core/phi_kernel_adaptor.h @@ -18,6 +18,7 @@ #include "paddle/fluid/ir/dialect/pd_op.h" #include "paddle/fluid/ir/dialect/pd_type.h" #include "paddle/fluid/ir/dialect/utils.h" +#include "paddle/fluid/ir/interface/infershape.h" #include "paddle/fluid/ir/interface/op_yaml_info.h" #include "paddle/ir/core/builtin_attribute.h" #include "paddle/ir/core/builtin_dialect.h" @@ -40,6 +41,7 @@ #include "paddle/fluid/platform/init.h" +#include "paddle/fluid/ir/dialect/kernel_attribute.h" #include "paddle/fluid/ir/dialect/pd_attribute.h" #include "glog/logging.h" @@ -92,14 +94,11 @@ template void build_context(ir::Operation* op, const std::unordered_map& name_map, paddle::framework::Scope* scope, + const OpInfoTuple& op_yaml_info, T* ctx, bool is_infer_meta = true) { - paddle::dialect::OpYamlInfoInterface op_info_interface = - op->dyn_cast(); - auto op_info_res = op_info_interface.GetOpInfo(); - // inputs include input and mutable attributes - auto input_info = std::get<0>(op_info_res); + auto input_info = std::get<0>(op_yaml_info); std::map input_index_map; std::map mutable_attr_type_map; int input_index = 0; @@ -111,7 +110,7 @@ void build_context(ir::Operation* op, } } - auto attr_info = std::get<1>(op_info_res); + 
auto attr_info = std::get<1>(op_yaml_info); std::map attr_type_map; for (auto& t : attr_info) { VLOG(6) << t.name << "\t" << t.type_name; @@ -119,7 +118,7 @@ void build_context(ir::Operation* op, } auto attr_map = op->attributes(); - auto runtime_info = std::get<3>(op_info_res); + auto runtime_info = std::get<3>(op_yaml_info); // int input_index = 0; std::vector vec_param_list; @@ -202,17 +201,18 @@ class PhiKernelAdaptor { auto attr_map = (*it)->attributes(); + paddle::dialect::OpYamlInfoInterface op_info_interface = + (*it)->dyn_cast(); + auto op_info_res = op_info_interface.GetOpInfo(); + InferShapeInterface interface = (*it)->dyn_cast(); phi::InferMetaContext ctx; - build_context((*it), name_map, scope_, &ctx); + build_context( + (*it), name_map, scope_, op_info_res, &ctx); interface.InferShape(&ctx); - paddle::dialect::OpYamlInfoInterface op_info_interface = - (*it)->dyn_cast(); - auto op_info_res = op_info_interface.GetOpInfo(); - auto runtime_info = std::get<3>(op_info_res); auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap( @@ -236,7 +236,7 @@ class PhiKernelAdaptor { phi::KernelContext kernel_ctx(dev_ctx); build_context( - (*it), name_map, scope_, &kernel_ctx, false); + (*it), name_map, scope_, op_info_res, &kernel_ctx, false); found_it->second(&kernel_ctx); auto out_value = (*it)->result(0); @@ -245,6 +245,58 @@ class PhiKernelAdaptor { } } + void run_kernel_prog(ir::Program* program) { + auto block = program->block(); + std::unordered_map name_map; + build_scope(block, scope_, &name_map); + ir::IrContext* ctx = ir::IrContext::Instance(); + + ctx->GetOrRegisterDialect(); + + auto* dev_ctx = phi::DeviceContextPool::Instance().Get(phi::CPUPlace()); + phi::Place cpu_place(phi::AllocationType::CPU); + for (auto it = block->begin(); it != block->end(); ++it) { + auto attr_map = (*it)->attributes(); + + auto op_name = attr_map.at("op_name").dyn_cast().data(); + + ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op_name); + + auto impl = + op1_info.GetInterfaceImpl(); + auto yaml_info = impl->get_op_info_(); + + auto attr_info = std::get<1>(yaml_info); + + auto infer_shape_impl = op1_info.GetInterfaceImpl(); + + phi::InferMetaContext ctx; + + build_context( + (*it), name_map, scope_, yaml_info, &ctx); + + infer_shape_impl->infer_shape_(&ctx); + + auto kernel_name = + attr_map.at("kernel_name").dyn_cast().data(); + auto kernel_key = attr_map.at("kernel_key") + .dyn_cast() + .data(); + + auto kernel_fn = + phi::KernelFactory::Instance().SelectKernel(kernel_name, kernel_key); + + phi::KernelContext kernel_ctx(dev_ctx); + + build_context( + (*it), name_map, scope_, yaml_info, &kernel_ctx, false); + kernel_fn(&kernel_ctx); + + auto out_value = (*it)->GetResultByIndex(0); + out_name = name_map[out_value]; + } + } + std::string out_name; private: diff --git a/test/cpp/ir/kernel_dialect/CMakeLists.txt b/test/cpp/ir/kernel_dialect/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..d15725f68c2f7449348675002cc209eb2097a1f0 --- /dev/null +++ b/test/cpp/ir/kernel_dialect/CMakeLists.txt @@ -0,0 +1,10 @@ +cc_test_old( + ir_kernel_dialect_pass_test + SRCS + ir_kernel_dialect_pass_test.cc + DEPS + pd_op_to_kernel_pass + new_ir + pd_dialect + phi + gtest) diff --git a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..dba6c5a7686fc3c418bc347a63351c324a42ea0f --- /dev/null +++ 
b/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc @@ -0,0 +1,116 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "paddle/fluid/ir/dialect/pd_dialect.h" +#include "paddle/fluid/ir/dialect/pd_type.h" +#include "paddle/fluid/ir/dialect/utils.h" +#include "paddle/fluid/ir/interface/op_yaml_info.h" +#include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h" +#include "paddle/ir/core/builtin_attribute.h" +#include "paddle/ir/core/builtin_dialect.h" +#include "paddle/ir/core/builtin_op.h" +#include "paddle/ir/core/ir_context.h" +#include "paddle/ir/core/program.h" +#include "paddle/ir/core/utils.h" +#include "paddle/phi/core/meta_tensor.h" +#include "paddle/phi/infermeta/binary.h" +#include "paddle/phi/kernels/elementwise_add_kernel.h" + +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/framework/variable.h" +#include "paddle/fluid/framework/variable_helper.h" + +#include "paddle/phi/common/place.h" +#include "paddle/phi/core/kernel_context.h" +#include "paddle/phi/core/kernel_factory.h" + +#include "paddle/fluid/platform/init.h" + +#include "paddle/fluid/ir/dialect/pd_attribute.h" +#include "test/cpp/ir/core/phi_kernel_adaptor.h" + +#include "paddle/phi/core/kernel_registry.h" + +#include "paddle/fluid/ir/dialect/kernel_dialect.h" + +PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(full_int_array, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(uniform, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT); + +bool simple_cmp(float a, float b) { return std::abs((a - b) / a) < 1e-5; } + +TEST(program_test, program) { + // (1) Init environment. + ir::IrContext* ctx = ir::IrContext::Instance(); + ir::Program program((ctx)); + + ctx->GetOrRegisterDialect(); + + ir::Builder builder = ir::Builder::AtBlockEnd(ctx, program.block()); + + paddle::dialect::FullOp op1 = builder.Build( + std::vector{2, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); + + paddle::dialect::FullOp op2 = builder.Build( + std::vector{2, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); + + builder.Build(op1->GetResultByIndex(0), + op2->GetResultByIndex(0)); + + auto kernel_program = paddle::dialect::PdOpLowerToKernelPass(&program); + + paddle::framework::Scope scope; + PhiKernelAdaptor phi_kernel_adaptor(&scope); + phi_kernel_adaptor.run_kernel_prog(kernel_program.get()); + + auto out_tensor = + scope.Var(phi_kernel_adaptor.out_name)->Get(); + + bool res0 = simple_cmp(out_tensor.data()[0], 2.0); + bool res1 = simple_cmp(out_tensor.data()[1], 2.0); + bool res2 = simple_cmp(out_tensor.data()[2], 2.0); + bool res3 = simple_cmp(out_tensor.data()[3], 2.0); + + EXPECT_EQ(res0, true); + EXPECT_EQ(res1, true); + EXPECT_EQ(res2, true); + EXPECT_EQ(res3, true); +} + +TEST(dialect_attr, attr) { + // (1) Init environment. 
+  ir::IrContext* ctx = ir::IrContext::Instance();
+  ir::Program program((ctx));
+
+  ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();
+  auto kernel_dialect =
+      ctx->GetOrRegisterDialect<paddle::dialect::PaddleKernelDialect>();
+
+  phi::KernelKey kernel_key(
+      phi::Backend::CPU, phi::DataLayout::ALL_LAYOUT, phi::DataType::FLOAT32);
+  auto attr = paddle::dialect::KernelAttribute::get(ctx, kernel_key);
+
+  std::stringstream ss;
+
+  kernel_dialect->PrintAttribute(attr, ss);
+
+  EXPECT_EQ(
+      ss.str() == "",
+      true);
+}
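For reference, the attribute trio the pass writes is exactly what run_kernel_prog consumes per op: op_name to look up the registered OpInfo and its yaml/infer-shape interfaces, and kernel_name plus kernel_key to pick the phi kernel. A condensed sketch of that consumption path, not part of the patch, with scope and context building elided:

    // Sketch only: how a lowered PhiKernelOp's attributes select the phi kernel.
    #include "paddle/fluid/ir/dialect/kernel_attribute.h"
    #include "paddle/ir/core/builtin_attribute.h"
    #include "paddle/ir/core/operation.h"
    #include "paddle/phi/core/kernel_context.h"
    #include "paddle/phi/core/kernel_factory.h"

    void RunOneLoweredOp(ir::Operation *op, phi::KernelContext *kernel_ctx) {
      auto attrs = op->attributes();

      auto kernel_name =
          attrs.at("kernel_name").dyn_cast<ir::StrAttribute>().data();
      auto kernel_key = attrs.at("kernel_key")
                            .dyn_cast<paddle::dialect::KernelAttribute>()
                            .data();

      // (name, key) -> concrete kernel; kernel_ctx is assumed to have been
      // filled by build_context beforehand, as in PhiKernelAdaptor.
      auto kernel_fn =
          phi::KernelFactory::Instance().SelectKernel(kernel_name, kernel_key);
      kernel_fn(kernel_ctx);
    }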