diff --git a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc index f9f054a4772525a5dd85d1f2168dfd1639609d39..9eb131f49e5d7c1f38ddffdd5e277dba562e6617 100644 --- a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc +++ b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc @@ -967,8 +967,8 @@ void BuildOpFuncList( auto attr_info = std::get<1>(yaml_info); - op_func_node.infer_shape_interface_ = - op_info.GetInterfaceImpl(); + op_func_node.infer_meta_interface_ = + op_info.GetInterfaceImpl(); VLOG(6) << "op name" << op_func_node.phi_op_name_; diff --git a/paddle/fluid/framework/new_executor/new_executor_defs.cc b/paddle/fluid/framework/new_executor/new_executor_defs.cc index eeb5142546f225ed9c3066c13253c8918b267d6a..94eab7722659f3bd23556f6db918bdca7f90358d 100644 --- a/paddle/fluid/framework/new_executor/new_executor_defs.cc +++ b/paddle/fluid/framework/new_executor/new_executor_defs.cc @@ -161,7 +161,7 @@ Instruction::Instruction(size_t id, is_artificial_ = true; } - if (op_func_node_.infer_shape_interface_ != nullptr) { + if (op_func_node_.infer_meta_interface_ != nullptr) { pre_define_context_ = true; } PADDLE_ENFORCE_GE(id, diff --git a/paddle/fluid/framework/new_executor/new_executor_defs.h b/paddle/fluid/framework/new_executor/new_executor_defs.h index 0742568e939528bb0f480c4c8e4223a215ab49f2..73d99eb63d94cd175d5eba93d505c025a4d58b86 100644 --- a/paddle/fluid/framework/new_executor/new_executor_defs.h +++ b/paddle/fluid/framework/new_executor/new_executor_defs.h @@ -20,7 +20,7 @@ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/variable_helper.h" -#include "paddle/fluid/ir/interface/infershape.h" +#include "paddle/fluid/ir/interface/infermeta.h" #include "paddle/fluid/platform/device_event_base.h" #include "paddle/fluid/platform/event.h" #include "paddle/phi/core/utils/rw_lock.h" @@ -177,8 +177,7 @@ struct OpFuncNode { phi::KernelContext kernel_context_; phi::InferMetaContext infer_meta_context_; std::string phi_op_name_; - paddle::dialect::InferShapeInterface::Concept* infer_shape_interface_{ - nullptr}; + paddle::dialect::InferMetaInterface::Concept* infer_meta_interface_{nullptr}; }; class Instruction { diff --git a/paddle/fluid/framework/new_executor/new_ir_interpreter.cc b/paddle/fluid/framework/new_executor/new_ir_interpreter.cc index fdb8e26e4e4abd1e4b728e3fb38eedb56ff1f604..09875712bd732653626ea7b347f0fa0ab7622aed 100644 --- a/paddle/fluid/framework/new_executor/new_ir_interpreter.cc +++ b/paddle/fluid/framework/new_executor/new_ir_interpreter.cc @@ -963,7 +963,7 @@ void NewIRInterpreter::RunInstruction(const Instruction& instr_node) { VLOG(5) << "run new ir selected kernel"; auto op_func_node = const_cast((instr_node.OpFunc())); VLOG(5) << "begin to run op " << op_func_node->phi_op_name_; - op_func_node->infer_shape_interface_->infer_shape_( + op_func_node->infer_meta_interface_->infer_meta_( &(op_func_node->infer_meta_context_)); VLOG(5) << "after run infer meta"; (*(op_func_node->phi_kernel_))(&(op_func_node->kernel_context_)); diff --git a/paddle/fluid/ir/dialect/CMakeLists.txt b/paddle/fluid/ir/dialect/CMakeLists.txt index 8fa488fc14720fb0d18c6d5caa1ff115164b726d..9cb024be507e10503c269c5691ffdb2a6a9ee57e 100644 --- a/paddle/fluid/ir/dialect/CMakeLists.txt +++ b/paddle/fluid/ir/dialect/CMakeLists.txt @@ -17,8 +17,10 @@ set(op_backward_yaml_file2 
${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parsed_ops/legacy_backward_ops.parsed.yaml ) set(op_yaml_file3 ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/pd_op.yaml) +set(op_yaml_file4 + ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/pd_legacy_op.yaml) set(op_yaml_files - ${op_forward_yaml_file1},${op_forward_yaml_file2},${op_backward_yaml_file1},${op_backward_yaml_file2},${op_yaml_file3} + ${op_forward_yaml_file1},${op_forward_yaml_file2},${op_backward_yaml_file1},${op_backward_yaml_file2},${op_yaml_file3},${op_yaml_file4} ) set(op_namespace paddle,dialect) set(dialect_name pd) diff --git a/paddle/fluid/ir/dialect/kernel_op.cc b/paddle/fluid/ir/dialect/kernel_op.cc index 30a2a24d07fd3dd6fd2c77fc6528cc50ba59b4a2..34bce0f176dd6ff5796f6233b49ee4f27c65f8ed 100644 --- a/paddle/fluid/ir/dialect/kernel_op.cc +++ b/paddle/fluid/ir/dialect/kernel_op.cc @@ -13,21 +13,57 @@ // limitations under the License. #include "paddle/fluid/ir/dialect/kernel_op.h" +#include "paddle/fluid/ir/dialect/kernel_attribute.h" +#include "paddle/ir/core/builtin_attribute.h" +#include "paddle/phi/core/enforce.h" namespace paddle { namespace dialect { -const char *PhiKernelOp::attributes_name[attributes_num] = { - "base_op", "infermeta_fn", "kernel_fn"}; +const char* PhiKernelOp::attributes_name[attributes_num] = { + "op_name", "kernel_name", "kernel_key"}; void PhiKernelOp::Verify() { VLOG(4) << "Verifying inputs, outputs and attributes for: PhiKernelOp."; - // Verify inputs type: + auto& attributes = this->attributes(); - // Verify if attributes contain attribute name in attributes_name: - // if (!attributes.at("parameter_name").isa()) { - // throw("Type of attribute: parameter_name is not right."); + PADDLE_ENFORCE(attributes.count("op_name") > 0 && + attributes.at("op_name").isa(), + phi::errors::PreconditionNotMet( + "Type of attribute: op_name is not right.")); + + PADDLE_ENFORCE(attributes.count("kernel_name") > 0 && + attributes.at("kernel_name").isa(), + phi::errors::PreconditionNotMet( + "Type of attribute: kernel_name is not right.")); + + PADDLE_ENFORCE(attributes.count("kernel_key") > 0 && + attributes.at("kernel_key").isa(), + phi::errors::PreconditionNotMet( + "Type of attribute: kernel_key is not right.")); +} + +const std::string PhiKernelOp::op_name() { + return operation() + ->attributes() + .at("op_name") + .dyn_cast() + .data(); +} +const std::string PhiKernelOp::kernel_name() { + return operation() + ->attributes() + .at("kernel_name") + .dyn_cast() + .data(); +} +phi::KernelKey PhiKernelOp::kernel_key() { + return operation() + ->attributes() + .at("kernel_key") + .dyn_cast() + .data(); } } // namespace dialect diff --git a/paddle/fluid/ir/dialect/kernel_op.h b/paddle/fluid/ir/dialect/kernel_op.h index 34fe2590267ca0060aeee6e47441429b569835dd..c3a15e3be056d397e9dc06908a4f10f68b1b3f5d 100644 --- a/paddle/fluid/ir/dialect/kernel_op.h +++ b/paddle/fluid/ir/dialect/kernel_op.h @@ -16,6 +16,7 @@ #include "paddle/ir/core/builder.h" #include "paddle/ir/core/op_base.h" +#include "paddle/phi/core/kernel_factory.h" namespace paddle { namespace dialect { @@ -26,6 +27,9 @@ class PhiKernelOp : public ir::Op { static const char *name() { return "phi.kernel"; } static constexpr uint32_t attributes_num = 3; static const char *attributes_name[attributes_num]; + const std::string op_name(); + const std::string kernel_name(); + phi::KernelKey kernel_key(); void Verify(); }; diff --git a/paddle/fluid/ir/dialect/op_gen.py b/paddle/fluid/ir/dialect/op_gen.py index 
7aa49f583f45ea07df82acf6f3bdcbf803876b68..71e124ba01b90b93fe1ae07474de811c7f2e6182 100644 --- a/paddle/fluid/ir/dialect/op_gen.py +++ b/paddle/fluid/ir/dialect/op_gen.py @@ -39,7 +39,7 @@ H_FILE_TEMPLATE = """#ifdef GET_OP_LIST #include "paddle/fluid/ir/dialect/utils.h" #include "paddle/fluid/ir/dialect/op_yaml_info_util.h" #include "paddle/fluid/ir/interface/op_yaml_info.h" -#include "paddle/fluid/ir/interface/infershape.h" +#include "paddle/fluid/ir/interface/infermeta.h" #include "paddle/fluid/framework/infershape_utils.h" #include "paddle/phi/core/infermeta_utils.h" @@ -143,7 +143,7 @@ void {op_name}::Build({build_args}) {{ }} """ OP_INFER_SHAPE_TEMPLATE = """ -void {op_name}::InferShape( phi::InferMetaContext *infer_meta ) {{ +void {op_name}::InferMeta( phi::InferMetaContext *infer_meta ) {{ auto fn = PD_INFER_META(phi::{infer_meta_func}); fn(infer_meta); }} @@ -298,9 +298,9 @@ class OpInfoParser: self.infer_meta_map = self.parse_infer_meta_map() self.kernel_map = self.parse_kernel_map() if 'infer_meta' in self.op_yaml_item: - self.infer_shape_func = self.op_yaml_item['infer_meta']["func"] + self.infer_meta_func = self.op_yaml_item['infer_meta']["func"] else: - self.infer_shape_func = None + self.infer_meta_func = None # parse inplace && view self.inplace_map = self.parse_op_inplace_info() @@ -1218,10 +1218,10 @@ def OpGenerator( op_traits = [] exclusive_interface_str = "" - if op_info.infer_shape_func: - op_interfaces += ["InferShapeInterface"] + if op_info.infer_meta_func: + op_interfaces += ["InferMetaInterface"] exclusive_interface_str += ( - " static void InferShape( phi::InferMetaContext *infer_meta );" + " static void InferMeta( phi::InferMetaContext *infer_meta );" ) # If op has inplace info, we will generate inplace op and non-inplace op. 
@@ -1472,11 +1472,11 @@ def OpGenerator( op_output_optional_list, ) - op_infer_shape_str = "" - if op_info.infer_shape_func: - op_infer_shape_str = OP_INFER_SHAPE_TEMPLATE.format( + op_infer_meta_str = "" + if op_info.infer_meta_func: + op_infer_meta_str = OP_INFER_SHAPE_TEMPLATE.format( op_name=op_class_name, - infer_meta_func=op_info.infer_shape_func, + infer_meta_func=op_info.infer_meta_func, ) ops_name_list.append(op_class_name) @@ -1487,7 +1487,7 @@ def OpGenerator( if len(op_mutable_attribute_name_list) > 0: ops_defined_list.append(build_func_with_muta_attr_is_input) ops_defined_list.append(op_verify_str) - ops_defined_list.append(op_infer_shape_str) + ops_defined_list.append(op_infer_meta_str) # (4) Generate head file str op_namespaces_prev = "" diff --git a/paddle/fluid/ir/dialect/pd_legacy_op.yaml b/paddle/fluid/ir/dialect/pd_legacy_op.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9aa96732c87ebb0a996d5ff8968f8e1d27ededa1 --- /dev/null +++ b/paddle/fluid/ir/dialect/pd_legacy_op.yaml @@ -0,0 +1,32 @@ +- name: elementwise_add + inputs: + - typename: Tensor + name: x + optional: false + no_need_buffer: false + data_transform: {} + - typename: Tensor + name: y + optional: false + no_need_buffer: false + data_transform: {} + attrs: + - {typename: int, name: axis} + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + infer_meta: + func: ElementwiseInferMeta + param: [x, y] + kernel: + func: [add_raw] + param: [x, y] + backend: null + layout: null + data_type: null + dispatch: {add: null} + force_backend: null + inplace: {out: x} + view: null + backward: add_grad diff --git a/paddle/fluid/ir/interface/infershape.h b/paddle/fluid/ir/interface/infermeta.h similarity index 58% rename from paddle/fluid/ir/interface/infershape.h rename to paddle/fluid/ir/interface/infermeta.h index 5b4f430413d1e6dc3fc367a98a146c4467ef5e75..ba3d54c59439bde099aef6a08131c47f5952d7c4 100644 --- a/paddle/fluid/ir/interface/infershape.h +++ b/paddle/fluid/ir/interface/infermeta.h @@ -18,28 +18,28 @@ namespace paddle { namespace dialect { -class InferShapeInterface : public ir::OpInterfaceBase { +class InferMetaInterface : public ir::OpInterfaceBase { public: struct Concept { - explicit Concept(void (*infer_shape)(phi::InferMetaContext *)) - : infer_shape_(infer_shape) {} - void (*infer_shape_)(phi::InferMetaContext *); + explicit Concept(void (*infer_meta)(phi::InferMetaContext *)) + : infer_meta_(infer_meta) {} + void (*infer_meta_)(phi::InferMetaContext *); }; template struct Model : public Concept { - static void InferShape(phi::InferMetaContext *infer_meta) { - return ConcreteOp::InferShape(infer_meta); + static void InferMeta(phi::InferMetaContext *infer_meta) { + return ConcreteOp::InferMeta(infer_meta); } - Model() : Concept(InferShape) {} + Model() : Concept(InferMeta) {} }; - InferShapeInterface(ir::Operation *op, Concept *impl) - : ir::OpInterfaceBase(op), impl_(impl) {} + InferMetaInterface(ir::Operation *op, Concept *impl) + : ir::OpInterfaceBase(op), impl_(impl) {} - void InferShape(phi::InferMetaContext *infer_meta) { - impl_->infer_shape_(infer_meta); + void InferMeta(phi::InferMetaContext *infer_meta) { + impl_->infer_meta_(infer_meta); } private: @@ -49,4 +49,4 @@ class InferShapeInterface : public ir::OpInterfaceBase { } // namespace dialect } // namespace paddle -IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::InferShapeInterface) +IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::InferMetaInterface) 
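For reference, the renamed interface above is consumed exactly the way InferShapeInterface used to be, only under the new names; op_gen.py now emits a static InferMeta method per op. A minimal sketch, assuming a generated op class and a caller that has already filled the context (the helper name RunInferMeta is illustrative; the dyn_cast pattern mirrors ir_infershape_test.cc later in this diff):

// Generated per OP_INFER_SHAPE_TEMPLATE for an op whose YAML entry carries
// infer_meta.func, e.g. ElementwiseInferMeta from pd_legacy_op.yaml above:
//   void SomeOp::InferMeta(phi::InferMetaContext *infer_meta) {
//     auto fn = PD_INFER_META(phi::ElementwiseInferMeta);
//     fn(infer_meta);
//   }

#include "paddle/fluid/ir/interface/infermeta.h"

// Sketch only: "op" is any ir::Operation* created from such a generated op;
// inputs/attrs/outputs are assumed to have been emplaced into the context
// already (e.g. by ir::BuildInferMetaContext).
void RunInferMeta(ir::Operation *op, phi::InferMetaContext *infer_meta_ctx) {
  paddle::dialect::InferMetaInterface interface =
      op->dyn_cast<paddle::dialect::InferMetaInterface>();
  interface.InferMeta(infer_meta_ctx);
}
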
diff --git a/paddle/fluid/ir/interface/interface.cc b/paddle/fluid/ir/interface/interface.cc index 6d2cd0ae17bf629be5ba9202d6956e069e280de2..442be02e2f23565e3b162e300791140e52730bcb 100644 --- a/paddle/fluid/ir/interface/interface.cc +++ b/paddle/fluid/ir/interface/interface.cc @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/interface/infershape.h" +#include "paddle/fluid/ir/interface/infermeta.h" #include "paddle/fluid/ir/interface/op_yaml_info.h" -IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::InferShapeInterface) +IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::InferMetaInterface) IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::OpYamlInfoInterface) diff --git a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h index 9d3393d965d174e509e49216ee597741d76d77e3..a45260fe2ac1f47d038a922ba21a712881bde2bc 100644 --- a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h +++ b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h @@ -19,7 +19,7 @@ #include "paddle/fluid/ir/dialect/pd_op.h" #include "paddle/fluid/ir/dialect/pd_type.h" #include "paddle/fluid/ir/dialect/utils.h" -#include "paddle/fluid/ir/interface/infershape.h" +#include "paddle/fluid/ir/interface/infermeta.h" #include "paddle/fluid/ir/interface/op_yaml_info.h" #include "paddle/ir/core/builtin_attribute.h" #include "paddle/ir/core/builtin_dialect.h" @@ -52,59 +52,6 @@ class PhiKernelAdaptor { public: explicit PhiKernelAdaptor(paddle::framework::Scope* scope) : scope_(scope) {} - void run(ir::Program* program) { - auto block = program->block(); - std::unordered_map name_map; - - ir::BuildScope(block, scope_, &name_map); - - auto* dev_ctx = phi::DeviceContextPool::Instance().Get(phi::CPUPlace()); - phi::Place cpu_place(phi::AllocationType::CPU); - for (auto it = block->begin(); it != block->end(); ++it) { - VLOG(6) << "begin to run op " << (*it)->name(); - - auto attr_map = (*it)->attributes(); - - paddle::dialect::OpYamlInfoInterface op_info_interface = - (*it)->dyn_cast(); - auto op_info_res = op_info_interface.GetOpInfo(); - - paddle::dialect::InferShapeInterface interface = - (*it)->dyn_cast(); - phi::InferMetaContext ctx; - - ir::BuildInferMetaContext((*it), name_map, scope_, op_info_res, &ctx); - - interface.InferShape(&ctx); - - auto runtime_info = std::get<3>(op_info_res); - - auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap( - runtime_info.kernel_func[0]); - - phi::KernelKey kernel_key(phi::TransToPhiBackend(cpu_place), - phi::DataLayout::ANY, - phi::DataType::FLOAT32); - if (runtime_info.kernel_func[0] == "full_int_array") { - kernel_key.set_dtype(phi::DataType::INT64); - } - auto found_it = phi_kernels.find(kernel_key); - if (found_it == phi_kernels.end()) { - PADDLE_THROW(paddle::platform::errors::NotFound( - "can not found kerenl for [%s]", (*it)->name())); - } else { - phi::KernelContext kernel_ctx(dev_ctx); - - ir::BuildPhiKernelContext( - (*it), name_map, scope_, op_info_res, &kernel_ctx); - found_it->second(&kernel_ctx); - - auto out_value = (*it)->result(0); - out_name = name_map[out_value]; - } - } - } - void run_kernel_prog(ir::Program* program) { auto block = program->block(); std::unordered_map name_map; @@ -128,14 +75,14 @@ class PhiKernelAdaptor { auto attr_info = std::get<1>(yaml_info); - auto infer_shape_impl = - op1_info.GetInterfaceImpl(); + auto infer_meta_impl = + op1_info.GetInterfaceImpl(); phi::InferMetaContext ctx; 
ir::BuildInferMetaContext((*it), name_map, scope_, yaml_info, &ctx); - infer_shape_impl->infer_shape_(&ctx); + infer_meta_impl->infer_meta_(&ctx); auto kernel_name = attr_map.at("kernel_name").dyn_cast().data(); diff --git a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc index 76c3848343d8f69ac08669d8131c0b82027f0bcd..1d9f29fedb32ab198f55b571e569a5f1c896e760 100644 --- a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc +++ b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc @@ -34,6 +34,7 @@ #include "paddle/fluid/framework/string_array.h" #include "paddle/fluid/framework/tensor_ref_array.h" #include "paddle/fluid/ir/dialect/kernel_attribute.h" +#include "paddle/fluid/ir/dialect/kernel_type.h" #include "paddle/fluid/ir/dialect/pd_attribute.h" #include "paddle/phi/core/enforce.h" @@ -74,8 +75,6 @@ void BuildScope(ir::Block* block, // TODO(phlrain): need to update here, support StringTensor auto out_tensor = var->GetMutable(); - name_map->emplace(ptr, name); - auto feed_var = scope->Var("feed"); int index = (*it)->attributes().at("col").dyn_cast().data(); @@ -117,6 +116,8 @@ void BuildScope(ir::Block* block, continue; } + // TODO(zhangbo): support builtin.slice + if (input_num > 0) { for (size_t i = 0; i < input_num; ++i) { auto ptr = (*it)->operand(i); @@ -145,9 +146,29 @@ void BuildScope(ir::Block* block, name_map->emplace(ptr, name); } auto var = scope->Var(name); - - // need to update here, only support DenseTensor - var->GetMutable(); + // Only support DenseTensor or Vector + if (ptr.type().isa()) { + var->GetMutable(); + } else if (ptr.type().isa()) { + auto tensor_array = + var->GetMutable(); + for (size_t i = 0; i < ptr.type().dyn_cast().size(); + i++) { + PADDLE_ENFORCE( + ptr.type() + .dyn_cast()[i] + .isa(), + paddle::platform::errors::Fatal( + "Element of VectorType output only support " + "DenseTensorType")); + std::string name_i = "inner_var_" + std::to_string(count++); + auto var_i = scope->Var(name_i); + tensor_array->emplace_back(var_i->GetMutable()); + } + } else { + PADDLE_THROW(phi::errors::PreconditionNotMet( + "Output only support DenseTensorType or VectorType")); + } } } } diff --git a/test/cpp/ir/core/CMakeLists.txt b/test/cpp/ir/core/CMakeLists.txt index 4a85007a6230960e95c1857bcca156464f9cd162..1ec6436ad0623bef749af131b2654947de2871df 100644 --- a/test/cpp/ir/core/CMakeLists.txt +++ b/test/cpp/ir/core/CMakeLists.txt @@ -13,16 +13,6 @@ cc_test_old( phi gtest) -cc_test_old( - ir_phi_kernel_op_test - SRCS - ir_phi_kernel_op_test.cc - DEPS - pd_dialect - ir - phi - gtest) - cc_test_old( ir_infershape_test SRCS @@ -38,6 +28,7 @@ cc_test_old( SRCS ir_exe_test.cc DEPS + pd_op_to_kernel_pass pd_dialect phi_kernel_adaptor ir diff --git a/test/cpp/ir/core/ir_exe_test.cc b/test/cpp/ir/core/ir_exe_test.cc index ad7ebd2da7b596a517c39391f792877ae1529669..3c49fa0595edae07ad20912ae0da18f4867be1b1 100644 --- a/test/cpp/ir/core/ir_exe_test.cc +++ b/test/cpp/ir/core/ir_exe_test.cc @@ -42,6 +42,7 @@ #include "paddle/fluid/ir/dialect/pd_attribute.h" +#include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h" #include "paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h" #include "paddle/phi/core/kernel_registry.h" @@ -93,9 +94,10 @@ TEST(program_test, program) { EXPECT_EQ(block->size(), 9u); // Execute program + auto kernel_program = paddle::dialect::PdOpLowerToKernelPass(&program); paddle::framework::Scope scope; PhiKernelAdaptor phi_kernel_adaptor(&scope); - phi_kernel_adaptor.run(&program); + 
phi_kernel_adaptor.run_kernel_prog(kernel_program.get()); auto out_tensor = scope.Var(phi_kernel_adaptor.out_name)->Get(); @@ -159,9 +161,10 @@ TEST(program_test, mutable_attribute) { EXPECT_EQ(block->size(), 6u); // Execute program + auto kernel_program = paddle::dialect::PdOpLowerToKernelPass(&program); paddle::framework::Scope scope; PhiKernelAdaptor phi_kernel_adaptor(&scope); - phi_kernel_adaptor.run(&program); + phi_kernel_adaptor.run_kernel_prog(kernel_program.get()); auto out_tensor = scope.Var(phi_kernel_adaptor.out_name)->Get(); diff --git a/test/cpp/ir/core/ir_infershape_test.cc b/test/cpp/ir/core/ir_infershape_test.cc index 0053cd77d898b8332dd0c8f1a51f42bf05017a6e..36121cfef7594b6ef3aa532b1d2f063b4634e4d8 100644 --- a/test/cpp/ir/core/ir_infershape_test.cc +++ b/test/cpp/ir/core/ir_infershape_test.cc @@ -32,21 +32,21 @@ #include "paddle/phi/core/kernel_context.h" #include "paddle/phi/core/kernel_factory.h" -#include "paddle/fluid/ir/interface/infershape.h" +#include "paddle/fluid/ir/interface/infermeta.h" #include "paddle/fluid/platform/init.h" #include "paddle/phi/core/infermeta_utils.h" #include "paddle/phi/infermeta/nullary.h" // Define op class OperationTest - : public ir::Op { + : public ir::Op { public: using Op::Op; static const char *name() { return "test.operation2"; } static constexpr uint32_t attributes_num = 2; static const char *attributes_name[attributes_num]; static void Verify() {} - static void InferShape(phi::InferMetaContext *infer_meta) { + static void InferMeta(phi::InferMetaContext *infer_meta) { auto fn = PD_INFER_META(phi::CreateInferMeta); fn(infer_meta); } @@ -87,15 +87,15 @@ TEST(infershape_test, infershape_test) { ir::Operation *op = ir::Operation::Create(op_inputs, {}, op_output_types, op_info); - paddle::dialect::InferShapeInterface interface = - op->dyn_cast(); + paddle::dialect::InferMetaInterface interface = + op->dyn_cast(); phi::InferMetaContext infer_meta_ctx; infer_meta_ctx.EmplaceBackAttr(phi::IntArray({5, 6})); infer_meta_ctx.EmplaceBackAttr(phi::DataType::FLOAT32); phi::DenseTensor tensor; infer_meta_ctx.EmplaceBackOutput(phi::MetaTensor(&tensor)); - interface.InferShape(&infer_meta_ctx); + interface.InferMeta(&infer_meta_ctx); EXPECT_EQ(tensor.dims().size(), 2); EXPECT_EQ(tensor.dims()[0], 5); diff --git a/test/cpp/ir/core/ir_phi_kernel_op_test.cc b/test/cpp/ir/core/ir_phi_kernel_op_test.cc deleted file mode 100644 index b9fea029d2856d2ff8d277cb29d4e412b36f304d..0000000000000000000000000000000000000000 --- a/test/cpp/ir/core/ir_phi_kernel_op_test.cc +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include -#include - -#include "paddle/fluid/ir/dialect/kernel_dialect.h" -#include "paddle/fluid/ir/dialect/kernel_op.h" -#include "paddle/fluid/ir/dialect/kernel_type.h" -#include "paddle/fluid/ir/dialect/op_yaml_info_util.h" -#include "paddle/fluid/ir/dialect/pd_dialect.h" -#include "paddle/fluid/ir/dialect/utils.h" -#include "paddle/fluid/ir/interface/op_yaml_info.h" -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/utils.h" -#include "paddle/phi/core/meta_tensor.h" -#include "paddle/phi/infermeta/binary.h" -#include "paddle/phi/kernels/elementwise_add_kernel.h" - -TEST(program_test, program) { - // (1) Init environment. - ir::IrContext *ctx = ir::IrContext::Instance(); - auto kernel_dialect = - ctx->GetOrRegisterDialect(); - ctx->GetOrRegisterDialect(); - - // (2) Create an empty program object - ir::Program program(ctx); - - // (3) Create a float32 DenseTensor Parameter and save into Program - phi::Place place(phi::AllocationType::CPU); - ir::Type fp32_dtype = ir::Float32Type::get(ctx); - phi::DDim dims = {2, 2}; - phi::DataLayout data_layout = phi::DataLayout::NCHW; - phi::LoD lod = {{0, 1, 2}}; - size_t offset = 0; - - std::string op1_name = paddle::dialect::PhiKernelOp::name(); - - ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); - - std::unordered_map op1_attribute{ - {"parameter_name", ir::StrAttribute::get(ctx, "a")}}; - - auto allocated_dense_tensor_dtype = - paddle::dialect::AllocatedDenseTensorType::get( - ctx, place, fp32_dtype, dims, data_layout, lod, offset); - std::stringstream ss; - kernel_dialect->PrintType(allocated_dense_tensor_dtype, ss); - ASSERT_EQ(ss.str() == "cpu_tensor<2x2xf32>", true); - ASSERT_EQ(allocated_dense_tensor_dtype.place() == place, true); - ASSERT_EQ(allocated_dense_tensor_dtype.dims() == dims, true); - ASSERT_EQ(allocated_dense_tensor_dtype.data_layout() == data_layout, true); - ASSERT_EQ(allocated_dense_tensor_dtype.lod() == lod, true); - ASSERT_EQ(allocated_dense_tensor_dtype.offset() == 0, true); - - ir::Operation *op1 = ir::Operation::Create( - {}, op1_attribute, {allocated_dense_tensor_dtype}, op1_info); - - ASSERT_EQ(op1 != nullptr, true); -} diff --git a/test/cpp/ir/core/phi_kernel_adaptor.h b/test/cpp/ir/core/phi_kernel_adaptor.h deleted file mode 100644 index b82572a15f3510135f60f2f51eb62670c89fbe3f..0000000000000000000000000000000000000000 --- a/test/cpp/ir/core/phi_kernel_adaptor.h +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#pragma once - -#include "paddle/fluid/ir/dialect/pd_dialect.h" -#include "paddle/fluid/ir/dialect/pd_op.h" -#include "paddle/fluid/ir/dialect/pd_type.h" -#include "paddle/fluid/ir/dialect/utils.h" -#include "paddle/fluid/ir/interface/infershape.h" -#include "paddle/fluid/ir/interface/op_yaml_info.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/utils.h" -#include "paddle/phi/core/meta_tensor.h" -#include "paddle/phi/infermeta/binary.h" -#include "paddle/phi/kernels/elementwise_add_kernel.h" - -#include "paddle/fluid/framework/scope.h" -#include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/framework/variable.h" -#include "paddle/fluid/framework/variable_helper.h" - -#include "paddle/phi/common/place.h" -#include "paddle/phi/core/kernel_context.h" -#include "paddle/phi/core/kernel_factory.h" - -#include "paddle/fluid/platform/init.h" - -#include "paddle/fluid/ir/dialect/kernel_attribute.h" -#include "paddle/fluid/ir/dialect/pd_attribute.h" - -#include "glog/logging.h" - -void BuildScope(ir::Block* block, - paddle::framework::Scope* scope, - std::unordered_map* name_map) { - std::unordered_map map_test; - - int count = 0; - for (auto it = block->begin(); it != block->end(); ++it) { - int input = (*it)->num_operands(); - if (input > 0) { - for (int i = 0; i < input; ++i) { - auto ptr = (*it)->operand(i); - std::string name; - if (name_map->find(ptr) != name_map->end()) { - name = name_map->at(ptr); - } else { - name = "var_" + std::to_string(count++); - name_map->emplace(ptr, name); - } - auto var = scope->Var(name); - // need to update here, only support DenseTensor - var->GetMutable(); - } - } - - int out_num = (*it)->num_results(); - - if (out_num > 0) { - for (int i = 0; i < out_num; ++i) { - ir::Value ptr = (*it)->result(i); - std::string name; - if (name_map->find(ptr) != name_map->end()) { - name = name_map->at(ptr); - } else { - name = "var_" + std::to_string(count++); - name_map->emplace(ptr, name); - } - auto var = scope->Var(name); - - var->GetMutable(); - } - } - } -} - -template -void build_context(ir::Operation* op, - const std::unordered_map& name_map, - paddle::framework::Scope* scope, - const OpInfoTuple& op_yaml_info, - T* ctx, - bool is_infer_meta = true) { - // inputs include input and mutable attributes - auto input_info = std::get<0>(op_yaml_info); - std::map input_index_map; - std::map mutable_attr_type_map; - int input_index = 0; - for (auto& t : input_info) { - VLOG(6) << t.name << "\t" << t.type_name; - input_index_map[t.name] = input_index++; - if (t.is_mutable_attribute) { - mutable_attr_type_map[t.name] = t.type_name; - } - } - - auto attr_info = std::get<1>(op_yaml_info); - std::map attr_type_map; - for (auto& t : attr_info) { - VLOG(6) << t.name << "\t" << t.type_name; - attr_type_map[t.name] = t.type_name; - } - - auto attr_map = op->attributes(); - auto runtime_info = std::get<3>(op_yaml_info); - - // int input_index = 0; - std::vector vec_param_list; - if (is_infer_meta) { - vec_param_list = runtime_info.infer_meta_param; - } else { - vec_param_list = runtime_info.kernel_param; - } - for (auto& t : vec_param_list) { - if (input_index_map.count(t)) { - // get information from input - ir::Value ptr = op->operand(input_index_map[t]); - auto in_var_name = name_map.at(ptr); - - if (mutable_attr_type_map.count(t)) { - VLOG(6) << "ctx->EmplaceBack mutable attr: " 
<< t << "\t" - << in_var_name; - if (mutable_attr_type_map[t] == "paddle::dialect::IntArrayAttribute") { - ctx->EmplaceBackAttr(phi::IntArray( - *(scope->Var(in_var_name)->GetMutable()))); - } else if (mutable_attr_type_map[t] == - "paddle::dialect::ScalarAttribute") { - ctx->EmplaceBackAttr(phi::Scalar( - *(scope->Var(in_var_name)->GetMutable()))); - } else { - PADDLE_THROW(phi::errors::Unimplemented("attr type not support [%s] ", - mutable_attr_type_map[t])); - } - - } else { - VLOG(6) << "ctx->EmplaceBackInput: " << t << "\t" << in_var_name; - ctx->EmplaceBackInput( - scope->Var(in_var_name)->GetMutable()); - } - } - - if (attr_type_map.count(t)) { - auto type_name = attr_type_map[t]; - if (type_name == "paddle::dialect::IntArrayAttribute") { - ctx->EmplaceBackAttr( - attr_map[t].dyn_cast().data()); - } else if (type_name == "paddle::dialect::DataTypeAttribute") { - ctx->EmplaceBackAttr( - attr_map[t].dyn_cast().data()); - } else if (type_name == "ir::Int32Attribute") { - ctx->EmplaceBackAttr(attr_map[t].dyn_cast().data()); - } else if (type_name == "paddle::dialect::PlaceAttribute") { - ctx->EmplaceBackAttr( - attr_map[t].dyn_cast().data()); - } else if (type_name == "paddle::dialect::ScalarAttribute") { - ctx->EmplaceBackAttr( - attr_map[t].dyn_cast().data()); - } else { - PADDLE_THROW(phi::errors::Unimplemented("attr type not support [%s] ", - type_name)); - } - VLOG(6) << "ctx->EmplaceBackAttr: " << t; - } - } - - ir::Value out_ptr = op->result(0); - auto name = name_map.at(out_ptr); - - ctx->EmplaceBackOutput(scope->Var(name)->GetMutable()); -} - -class PhiKernelAdaptor { - public: - explicit PhiKernelAdaptor(paddle::framework::Scope* scope) : scope_(scope) {} - - void run(ir::Program* program) { - auto block = program->block(); - std::unordered_map name_map; - BuildScope(block, scope_, &name_map); - - auto* dev_ctx = phi::DeviceContextPool::Instance().Get(phi::CPUPlace()); - phi::Place cpu_place(phi::AllocationType::CPU); - for (auto it = block->begin(); it != block->end(); ++it) { - VLOG(6) << "begin to run op " << (*it)->name(); - - auto attr_map = (*it)->attributes(); - - paddle::dialect::OpYamlInfoInterface op_info_interface = - (*it)->dyn_cast(); - auto op_info_res = op_info_interface.GetOpInfo(); - - InferShapeInterface interface = (*it)->dyn_cast(); - phi::InferMetaContext ctx; - - build_context( - (*it), name_map, scope_, op_info_res, &ctx); - - interface.InferShape(&ctx); - - auto runtime_info = std::get<3>(op_info_res); - - auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap( - runtime_info.kernel_func[0]); - - phi::KernelKey kernel_key(phi::TransToPhiBackend(cpu_place), - phi::DataLayout::ANY, - phi::DataType::FLOAT32); - if (runtime_info.kernel_func[0] == "full_int_array") { - kernel_key.set_dtype(phi::DataType::INT64); - } - auto found_it = phi_kernels.find(kernel_key); - if (found_it == phi_kernels.end()) { - std::cerr << "kernel name " << runtime_info.kernel_func[0] << std::endl; - std::cerr << "kernel key " << kernel_key.backend() << "\t" - << kernel_key.dtype() << "\t" << kernel_key.layout() - << std::endl; - PADDLE_THROW(paddle::platform::errors::NotFound( - "can not found kerenl for [%s]", (*it)->name())); - } else { - phi::KernelContext kernel_ctx(dev_ctx); - - build_context( - (*it), name_map, scope_, op_info_res, &kernel_ctx, false); - found_it->second(&kernel_ctx); - - auto out_value = (*it)->result(0); - out_name = name_map[out_value]; - } - } - } - - void run_kernel_prog(ir::Program* program) { - auto block = program->block(); - 
std::unordered_map name_map; - BuildScope(block, scope_, &name_map); - ir::IrContext* ctx = ir::IrContext::Instance(); - - ctx->GetOrRegisterDialect(); - - auto* dev_ctx = phi::DeviceContextPool::Instance().Get(phi::CPUPlace()); - phi::Place cpu_place(phi::AllocationType::CPU); - for (auto it = block->begin(); it != block->end(); ++it) { - auto attr_map = (*it)->attributes(); - - auto op_name = attr_map.at("op_name").dyn_cast().data(); - - ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op_name); - - auto impl = - op1_info.GetInterfaceImpl(); - auto yaml_info = impl->get_op_info_(); - - auto attr_info = std::get<1>(yaml_info); - - auto infer_shape_impl = op1_info.GetInterfaceImpl(); - - phi::InferMetaContext ctx; - - build_context( - (*it), name_map, scope_, yaml_info, &ctx); - - infer_shape_impl->infer_shape_(&ctx); - - auto kernel_name = - attr_map.at("kernel_name").dyn_cast().data(); - auto kernel_key = attr_map.at("kernel_key") - .dyn_cast() - .data(); - - auto kernel_fn = - phi::KernelFactory::Instance().SelectKernel(kernel_name, kernel_key); - - phi::KernelContext kernel_ctx(dev_ctx); - - build_context( - (*it), name_map, scope_, yaml_info, &kernel_ctx, false); - kernel_fn(&kernel_ctx); - - auto out_value = (*it)->result(0); - out_name = name_map[out_value]; - } - } - - std::string out_name; - - private: - paddle::framework::Scope* scope_; -};
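
The three attributes that PhiKernelOp::Verify now enforces (op_name, kernel_name, kernel_key) are exactly what PHI kernel selection needs. A hedged sketch of how a consumer might use the new accessors, assuming the same SelectKernel lookup that run_kernel_prog performs (the helper name SelectKernelFor is illustrative, not part of this patch):

#include "paddle/fluid/ir/dialect/kernel_op.h"
#include "paddle/phi/core/kernel_factory.h"

// Sketch only: reads the verified attributes off a phi.kernel op and looks up
// the corresponding PHI kernel, mirroring the lookup in run_kernel_prog above.
phi::Kernel SelectKernelFor(paddle::dialect::PhiKernelOp kernel_op) {
  const std::string op_name = kernel_op.op_name();          // original pd op name
  const std::string kernel_name = kernel_op.kernel_name();  // PHI kernel to run
  phi::KernelKey kernel_key = kernel_op.kernel_key();       // backend/layout/dtype
  (void)op_name;  // kept for logging/debugging in real code
  return phi::KernelFactory::Instance().SelectKernel(kernel_name, kernel_key);
}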
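
With PhiKernelAdaptor::run removed, both ir_exe_test.cc cases follow the same two-step path: lower the pd program to a kernel program, then execute the lowered program. A minimal sketch under that assumption (program construction omitted; LowerAndRun is an illustrative name):

#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h"
#include "paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h"

// Sketch only: "program" is a pd-dialect ir::Program built as in ir_exe_test.cc.
void LowerAndRun(ir::Program *program) {
  // Lower pd ops to phi.kernel ops carrying op_name/kernel_name/kernel_key.
  auto kernel_program = paddle::dialect::PdOpLowerToKernelPass(program);
  // Execute the lowered program with the remaining adaptor entry point.
  paddle::framework::Scope scope;
  PhiKernelAdaptor phi_kernel_adaptor(&scope);
  phi_kernel_adaptor.run_kernel_prog(kernel_program.get());
}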