diff --git a/paddle/infrt/dialect/infrt/common_type.cc b/paddle/infrt/dialect/infrt/common_type.cc
index 5cbd7b2cd6153f3724bc357811bdb0894eeb64ba..00684c505268c09e97d262a3526c946d1bc3095c 100644
--- a/paddle/infrt/dialect/infrt/common_type.cc
+++ b/paddle/infrt/dialect/infrt/common_type.cc
@@ -43,46 +43,49 @@ llvm::Optional<PrecisionType> GetPrecisionType(llvm::StringRef key) {
   return llvm::None;
 }
 
-llvm::raw_ostream &operator<<(llvm::raw_ostream &os, TargetType type) {
+llvm::StringRef GetString(TargetType type) {
+  llvm::StringRef str;
   switch (type) {
     case (TargetType::CPU):
-      os << "CPU";
+      str = "CPU";
       break;
     case (TargetType::GPU):
-      os << "GPU";
+      str = "GPU";
       break;
     default:
-      os << "Unsupported";
+      str = "Unsupported";
   }
-  return os;
+  return str;
 }
 
-llvm::raw_ostream &operator<<(llvm::raw_ostream &os, LayoutType type) {
+llvm::StringRef GetString(LayoutType type) {
+  llvm::StringRef str;
   switch (type) {
     case (LayoutType::NCHW):
-      os << "NCHW";
+      str = "NCHW";
       break;
     case (LayoutType::NHWC):
-      os << "NHWC";
+      str = "NHWC";
       break;
     default:
-      os << "Unsupported";
+      str = "Unsupported";
   }
-  return os;
+  return str;
 }
 
-llvm::raw_ostream &operator<<(llvm::raw_ostream &os, PrecisionType type) {
+llvm::StringRef GetString(PrecisionType type) {
+  llvm::StringRef str;
   switch (type) {
     case (PrecisionType::FLOAT32):
-      os << "FP32";
+      str = "FP32";
       break;
     case (PrecisionType::FLOAT16):
-      os << "FP16";
+      str = "FP16";
       break;
     default:
-      os << "Unsupported";
+      str = "Unsupported";
   }
-  return os;
+  return str;
 }
 
 }  // namespace infrt
diff --git a/paddle/infrt/dialect/infrt/common_type.h b/paddle/infrt/dialect/infrt/common_type.h
index 436e7920ca5c66d944fa766f52656339499912c0..2ebe2b8ccdba6943d81e46ec747144fd0835d7e0 100644
--- a/paddle/infrt/dialect/infrt/common_type.h
+++ b/paddle/infrt/dialect/infrt/common_type.h
@@ -54,8 +54,13 @@ llvm::Optional<TargetType> GetTargetType(llvm::StringRef key);
 llvm::Optional<LayoutType> GetLayoutType(llvm::StringRef key);
 llvm::Optional<PrecisionType> GetPrecisionType(llvm::StringRef key);
 
-llvm::raw_ostream &operator<<(llvm::raw_ostream &os, TargetType type);
-llvm::raw_ostream &operator<<(llvm::raw_ostream &os, LayoutType type);
-llvm::raw_ostream &operator<<(llvm::raw_ostream &os, PrecisionType type);
+llvm::StringRef GetString(TargetType type);
+llvm::StringRef GetString(LayoutType type);
+llvm::StringRef GetString(PrecisionType type);
+
+template <typename T>
+llvm::raw_ostream &operator<<(llvm::raw_ostream &os, T type) {
+  os << GetString(type);
+  return os;
+}
 
 }  // end namespace infrt
diff --git a/paddle/infrt/dialect/infrt/infrt_dialect.cc b/paddle/infrt/dialect/infrt/infrt_dialect.cc
index abb60016f90233cae68dc99e95885042517e9212..400e4921c944491e0ce8cded38fec9435f4ad0bd 100644
--- a/paddle/infrt/dialect/infrt/infrt_dialect.cc
+++ b/paddle/infrt/dialect/infrt/infrt_dialect.cc
@@ -14,6 +14,7 @@
 
 #include "paddle/infrt/dialect/infrt/infrt_dialect.h"
 
+#include
 #include
 #include
 #include
diff --git a/paddle/infrt/dialect/infrt/infrt_ops_base.td b/paddle/infrt/dialect/infrt/infrt_ops_base.td
index f19912dc0cd59674b4b45448266b1ff4bed530b5..8a6eb766567dccd9aa0fa083aa0c3e9e9eb5984d 100644
--- a/paddle/infrt/dialect/infrt/infrt_ops_base.td
+++ b/paddle/infrt/dialect/infrt/infrt_ops_base.td
@@ -10,16 +10,59 @@ def Infrt_Dialect : Dialect {
   let name = "infrt";
   let cppNamespace = "::infrt";
+  let useDefaultAttributePrinterParser = 1;
 }
 
 // Type definitions
-
 // Base class for Infrt dialect types.
 class Infrt_Type<string name, list<Trait> traits = [],
                  string baseCppClass = "::mlir::Type">
     : TypeDef<Infrt_Dialect, name, traits, baseCppClass> {
 }
 
+class Infrt_EnumParam<string cppEnumType, string stringToSymbolFnName, string symbolToStringFnName>
+    : TypeParameter<cppEnumType, "enum type"> {
+  let parser = [{[&]() -> ::mlir::FailureOr<}] # cppEnumType # [{> {
+    ::llvm::StringRef enumKeyword;
+    if (::mlir::failed($_parser.parseKeyword(&enumKeyword)))
+      return ::mlir::failure();
+    auto maybeEnum = }] # stringToSymbolFnName # [{(enumKeyword);
+    if (maybeEnum)
+      return *maybeEnum;
+    llvm_unreachable("}] # cppEnumType # [{ can not be found.");
+    return {};
+  }()}];
+  let printer = "$_printer << " # symbolToStringFnName # "($_self)";
+}
+
+def TargetParam : Infrt_EnumParam<"::infrt::TargetType", "GetTargetType", "GetString">;
+def PrecisionParam : Infrt_EnumParam<"::infrt::PrecisionType", "GetPrecisionType", "GetString">;
+def LayoutParam : Infrt_EnumParam<"::infrt::LayoutType", "GetLayoutType", "GetString">;
+
+def TargetAttr : AttrDef<Infrt_Dialect, "Target"> {
+  let mnemonic = "target";
+  let parameters = (ins
+    TargetParam:$target
+  );
+  let assemblyFormat = "`<` $target `>`";
+}
+
+def PrecisionAttr : AttrDef<Infrt_Dialect, "Precision"> {
+  let mnemonic = "precision";
+  let parameters = (ins
+    PrecisionParam:$precision
+  );
+  let assemblyFormat = "`<` $precision `>`";
+}
+
+def LayoutAttr : AttrDef<Infrt_Dialect, "Layout"> {
+  let mnemonic = "layout";
+  let parameters = (ins
+    LayoutParam:$layout
+  );
+  let assemblyFormat = "`<` $layout `>`";
+}
+
 def LoDTensor : Infrt_Type<"LoDTensor"> {
   let summary = "infrt lod tensor";
   let description = [{lod_tensor<3x64x3x3xf32, 3>}];
@@ -37,7 +80,6 @@ def DenseTensor : Infrt_Type<"DenseTensor"> {
     "::infrt::TargetType":$target,
     "::infrt::PrecisionType":$precision,
     "::infrt::LayoutType":$layout
-
   );
 }
diff --git a/paddle/infrt/dialect/phi/ir/infrt_phi_base.td b/paddle/infrt/dialect/phi/ir/infrt_phi_base.td
index 907f912d9e638ba76e5010d5442381d1aa053bc2..e9591e7f6d7e7d3bffdbef3d1fa3b81e53d9fc57 100644
--- a/paddle/infrt/dialect/phi/ir/infrt_phi_base.td
+++ b/paddle/infrt/dialect/phi/ir/infrt_phi_base.td
@@ -2,6 +2,7 @@
 #define PHI_BASE
 
 include "mlir/IR/OpBase.td"
+include "paddle/infrt/dialect/infrt_base.td"
 
 def PHI_Dialect : Dialect {
   let name = "phi";
@@ -11,27 +12,28 @@
   }];
 
   let cppNamespace = "::infrt::phi";
-}
-
-class AllocatorTypeOf<string place, list<Trait> traits=[]>:
-    TypeDef<PHI_Dialect, place # "Allocator", traits> {
-  let summary = !strconcat("!phi.allocator_", place, " type");
-}
-
-class ContextTypeOf<string place, list<Trait> traits=[]>:
-    TypeDef<PHI_Dialect, place # "Context", traits> {
-  let summary = !strconcat("!phi.context_", place, " type");
+  let useDefaultTypePrinterParser = 1;
 }
 
 def PhiOpTrait : NativeOpTrait<"PhiOpTrait">;
 
-def CPU_Allocator : AllocatorTypeOf<"CPU">;
-def GPU_Allocator : AllocatorTypeOf<"GPU">;
-
-def CPU_Context : ContextTypeOf<"CPU">;
-def GPU_Context : ContextTypeOf<"GPU">;
-
-def Allocator : AnyTypeOf<[CPU_Allocator, GPU_Allocator], "Allocator type">;
-def Context : AnyTypeOf<[CPU_Context, GPU_Context], "Context type">;
+class PHI_Type<string type, list<Trait> traits = []> : TypeDef<PHI_Dialect, type, traits> {}
+
+def Allocator : PHI_Type<"Allocator"> {
+  let mnemonic = "allocator";
+  let parameters = (ins
+    TargetParam:$target
+  );
+  let assemblyFormat = "`<` $target `>`";
+}
+
+def Context : PHI_Type<"Context"> {
+  let mnemonic = "context";
+  let parameters = (ins
+    TargetParam:$target
+  );
+  let assemblyFormat = "`<` $target `>`";
+}
 
 #endif
diff --git a/paddle/infrt/dialect/phi/ir/infrt_phi_tensor.td b/paddle/infrt/dialect/phi/ir/infrt_phi_tensor.td
index 39677871ff8fedcb9268066d8b10b8cf823ed5f9..3399c408d9b5a0ff207c6947d2d3b3b4d8ca0a96 100644
--- a/paddle/infrt/dialect/phi/ir/infrt_phi_tensor.td
+++ b/paddle/infrt/dialect/phi/ir/infrt_phi_tensor.td
@@ -23,7 +23,7 @@ class PDT_Op<string mnemonic, list<OpTrait> traits = []> : Op
 
 class CreateDenseTensorOp<string place, string dtype, string layout>
     : PDT_Op<"create_dense_tensor." # place # "." # dtype # "." # layout, [NoSideEffect]> {
-  let arguments = (ins CPU_Allocator:$allocator, I64ArrayAttr:$dims, I64ArrayAttr:$lod);
+  let arguments = (ins Allocator:$allocator, I64ArrayAttr:$dims, I64ArrayAttr:$lod);
   let results = (outs DenseTensor:$output);
 }
@@ -47,13 +47,13 @@ class PrintDenseTensorOp:
 
 class CreateCPUAllocatorOp : PDT_Op<"create_allocator." # "cpu", [NoSideEffect]> {
   let arguments = (ins);
-  let results = (outs CPU_Allocator:$output);
+  let results = (outs Allocator:$output);
 }
 
 class CreateCPUContextOp : PDT_Op<"create_context." # "cpu", [NoSideEffect]> {
-  let arguments = (ins CPU_Allocator:$input);
-  let results = (outs CPU_Context:$output);
+  let arguments = (ins Allocator:$input);
+  let results = (outs Context:$output);
 }
 
 def PDT_CreateDenseTensorOp_cpu_f32_nchw : CreateDenseTensorOp<"cpu", "f32", "nchw">;
@@ -63,7 +63,7 @@ def PDT_CreateContextOp_cpu : CreateCPUContextOp;
 def PDT_PrintDenseTensor_cpu : PrintDenseTensorOp;
 
 def FakeKernelOp : PDT_Op<"fake_phi_kernel"> {
-  let arguments = (ins CPU_Context:$dev_ctx, DenseTensor:$x, DenseTensor:$y, BoolAttr:$transpose_x, BoolAttr:$transpose_y);
+  let arguments = (ins Context:$dev_ctx, DenseTensor:$x, DenseTensor:$y, BoolAttr:$transpose_x, BoolAttr:$transpose_y);
   let results = (outs DenseTensor:$output);
 }
diff --git a/paddle/infrt/dialect/phi/ir/phi_base.cc b/paddle/infrt/dialect/phi/ir/phi_base.cc
index 7a6b3f3f0a404043f49a6df3e5bdcb873dd442c9..d8095d7f3f13fcfbf9b2ccab6db182850633d632 100644
--- a/paddle/infrt/dialect/phi/ir/phi_base.cc
+++ b/paddle/infrt/dialect/phi/ir/phi_base.cc
@@ -14,6 +14,7 @@
 
 #include "paddle/infrt/dialect/phi/ir/phi_base.h"
 
+#include
 #include
 #include
 #include
@@ -27,27 +28,6 @@ namespace infrt {
 namespace phi {
 
-void PHIDialect::printType(::mlir::Type type,
-                           mlir::DialectAsmPrinter& os) const {
-  if (type.isa<CPUAllocatorType>()) {
-    os << "CPU_Allocator";
-    return;
-  }
-  if (type.isa<GPUAllocatorType>()) {
-    os << "GPU_Allocator";
-    return;
-  }
-  if (type.isa<CPUContextType>()) {
-    os << "CPU_Context";
-    return;
-  }
-  if (type.isa<GPUContextType>()) {
-    os << "GPU_Context";
-    return;
-  }
-  llvm_unreachable("unexpected 'allocator/context' type kind");
-}
-
 void PHIDialect::initialize() {
   addOperations<
 #define GET_OP_LIST
@@ -59,24 +39,6 @@ void PHIDialect::initialize() {
       >();
 }
 
-mlir::Type PHIDialect::parseType(mlir::DialectAsmParser& parser) const {
-  llvm::StringRef keyword;
-  if (parser.parseKeyword(&keyword)) return mlir::Type();
-  if (keyword == "CPU_allocator") {
-    return CPUAllocatorType::get(parser.getContext());
-  } else if (keyword == "GPU_allocator") {
-    return GPUAllocatorType::get(parser.getContext());
-  } else if (keyword == "CPU_context") {
-    return CPUContextType::get(parser.getContext());
-  } else if (keyword == "GPU_context") {
-    return GPUContextType::get(parser.getContext());
-  } else {
-    llvm_unreachable("unexpected 'allocator/context' type kind");
-  }
-
-  return mlir::Type();
-}
-
 }  // namespace phi
 }  // namespace infrt
diff --git a/paddle/infrt/dialect/phi/ir/phi_base.h b/paddle/infrt/dialect/phi/ir/phi_base.h
index a08d8229fccf53225311b451e941f99e8a3d0e8a..0ea1973a7331b8a34bf2a286cb55e19a4d09118b 100644
--- a/paddle/infrt/dialect/phi/ir/phi_base.h
+++ b/paddle/infrt/dialect/phi/ir/phi_base.h
@@ -18,12 +18,10 @@
 #include
 #include
 
+#include "paddle/infrt/dialect/infrt/common_type.h"
 #include "paddle/infrt/dialect/phi/ir/infrt_phi_baseDialect.h.inc"
 
-#define GET_TYPEDEF_CLASSES
-#include "paddle/infrt/dialect/phi/ir/infrt_phi_baseTypes.h.inc"
-
 #define GET_OP_CLASSES
 #include "paddle/infrt/dialect/phi/ir/infrt_phi_base.h.inc"
"paddle/infrt/dialect/phi/ir/infrt_phi_base.h.inc" @@ -41,6 +39,9 @@ class PhiOpTrait : public OpTrait::TraitBase { } // namespace OpTrait } // namespace mlir +#define GET_TYPEDEF_CLASSES +#include "paddle/infrt/dialect/phi/ir/infrt_phi_baseTypes.h.inc" + namespace infrt { namespace phi {} // namespace phi } // namespace infrt diff --git a/paddle/infrt/host_context/kernel_frame.cc b/paddle/infrt/host_context/kernel_frame.cc index 14e88be4b96bb58df87db3191db8bae444c4cc3d..266c145f47839afb31d708d0863e2e90905253ee 100644 --- a/paddle/infrt/host_context/kernel_frame.cc +++ b/paddle/infrt/host_context/kernel_frame.cc @@ -30,28 +30,21 @@ std::ostream& operator<<(std::ostream& os, const KernelFrame& frame) { std::string KernelFrame::DumpArgTypes() const { std::stringstream ss; for (auto* value : GetValues(0, GetNumElements())) { - if (value->is_type()) { - ss << "bool (" << &value->get() << "), "; - } else if (value->is_type()) { - ss << "DenseHostTensor(" << &value->get() - << "), "; - } else if (value->is_type()) { - ss << "float(" << &value->get() << "), "; - } else if (value->is_type()) { - ss << "int(" << &value->get() << "), "; - } else if (value->is_type()) { - ss << "phi::DenseTensor(" << &value->get() << "), "; - } else if (value->is_type()) { - ss << "phi::MetaTensor(" << &value->get() << "), "; - } else if (value->is_type<::phi::CPUContext>()) { - ss << "phi::CPUContext(" << &value->get<::phi::CPUContext>() << "), "; - } else if (value->is_type()) { - ss << "none(" << &value->get() << "), "; - } else if (value->is_type()) { - ss << "CpuPhiContext(" << &value->get() << "), "; - } else { - ss << "typeid: " << value->index() << ", "; - } +#define DUMP(type_name) \ + if (value->is_type()) { \ + ss << #type_name << &value->get() << "), "; \ + } + DUMP(bool); + DUMP(tensor::DenseHostTensor); + DUMP(float); + DUMP(int); + DUMP(::phi::DenseTensor); + DUMP(::phi::MetaTensor); + DUMP(::phi::CPUContext); + DUMP(host_context::None); + DUMP(backends::CpuPhiContext); +#undef DUMP + ss << "typeid: " << value->index() << ", "; } return ss.str(); } diff --git a/paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h b/paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h index a0a5b391ea669b1358b14098e32750d709e52fe2..75c9e554778dcf1488289c6e9e46fb9652f677dd 100644 --- a/paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h +++ b/paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h @@ -24,26 +24,6 @@ namespace infrt { namespace kernel { -static void FakePhiInferShape(const ::phi::MetaTensor& a, - const ::phi::MetaTensor& b, - bool arg_0, - bool arg_1, - ::phi::MetaTensor* c) { - LOG(INFO) << "the ptr of c: " << c; - LOG(INFO) << "c->numel(): " << c->numel(); -} - -static void FakePhiKernel(const ::phi::CPUContext& /*Context*/, - const ::phi::DenseTensor& a, - const ::phi::DenseTensor& b, - bool arg_0, - bool arg_1, - ::phi::DenseTensor* c) { - std::cout << "@FakePhiKernel@" << std::endl; - LOG(INFO) << "the ptr of c: " << c; - LOG(INFO) << "c->numel(): " << c->numel(); -} - template AddKernel("phi_dt.print_tensor", INFRT_KERNEL(infrt::kernel::phi::PrintDenseTensor)); - registry->AddKernel( - "phi_dt.fake_phi_kernel", - std::bind(&KernelLauncherFunc, - KernelLauncher(), - std::placeholders::_1)); } } // namespace kernel diff --git a/paddle/infrt/tests/dialect/pten/dense_tensor.mlir b/paddle/infrt/tests/dialect/pten/dense_tensor.mlir index 695143c93b3cf13839429cc8d48fa9f7d255c033..586af7a9c50c2dc84583f6d4fc5375708ab370f1 100644 --- a/paddle/infrt/tests/dialect/pten/dense_tensor.mlir +++ 
+++ b/paddle/infrt/tests/dialect/pten/dense_tensor.mlir
@@ -2,11 +2,11 @@
 
 // CHECK-LABEL: @sign_any_float32_execute
 func @sign_any_float32_execute() {
-  %allocator = "phi_dt.create_allocator.cpu" (): () -> !phi.CPU_allocator
-  %ctx = "phi_dt.create_context.cpu" (%allocator): (!phi.CPU_allocator) -> !phi.CPU_context
-  %t = "phi_dt.create_dense_tensor.cpu.f32.nchw" (%allocator) {dims=[1:i64], lod=[1:i64]}: (!phi.CPU_allocator) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+  %allocator = "phi_dt.create_allocator.cpu" (): () -> !phi.allocator<CPU>
+  %ctx = "phi_dt.create_context.cpu" (%allocator): (!phi.allocator<CPU>) -> !phi.context<CPU>
+  %t = "phi_dt.create_dense_tensor.cpu.f32.nchw" (%allocator) {dims=[1:i64], lod=[1:i64]}: (!phi.allocator<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
   "phi_dt.fill_dense_tensor.f32"(%t) {value=[3.8:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-  %e = "phi_cpu.sign.any.float32"(%ctx, %t) : (!phi.CPU_context, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+  %e = "phi_cpu.sign.any.float32"(%ctx, %t) : (!phi.context<CPU>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
 
   // CHECK: dense_tensor: shape=shape[1], values=[1]
   "phi_dt.print_tensor" (%e) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
diff --git a/tools/infrt/generate_phi_kernel_dialect.py b/tools/infrt/generate_phi_kernel_dialect.py
index 80cf3958b156d79b4b1253d198042c63458663b7..8efa03306fb1de4a4a39256c881d87479a8ac25a 100644
--- a/tools/infrt/generate_phi_kernel_dialect.py
+++ b/tools/infrt/generate_phi_kernel_dialect.py
@@ -95,7 +95,7 @@ def generate_inputs_info(input_info):
 
 def generate_arguments_info(op_name, input_info, attr_info):
     input_args = generate_inputs_info(input_info)
    attr_args = generate_attrs_info(op_name, attr_info)
-    context_args = "CPU_Context:$dev_ctx"
+    context_args = "Context:$dev_ctx"
     argument_ = "{},{},{}".format(context_args, input_args, attr_args)
     return (("let arguments = (ins {});".format(argument_.strip(","))))
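
For reference, a minimal usage sketch of the GetString helpers and the templated operator<< declared in common_type.h above. The standalone main() and the use of llvm::outs() are illustrative assumptions and not part of this patch; only the infrt enum and function names come from the diff.

    // Illustrative only: exercises GetString(...) and the templated operator<<
    // from paddle/infrt/dialect/infrt/common_type.h as introduced in this patch.
    #include <llvm/Support/raw_ostream.h>
    #include "paddle/infrt/dialect/infrt/common_type.h"

    int main() {
      infrt::TargetType target = infrt::TargetType::CPU;
      infrt::PrecisionType precision = infrt::PrecisionType::FLOAT32;
      infrt::LayoutType layout = infrt::LayoutType::NCHW;

      // GetString maps each enum value to its keyword ("CPU", "FP32", "NCHW").
      llvm::outs() << infrt::GetString(target) << "\n";

      // The templated operator<< simply forwards GetString(type) to the stream;
      // the TableGen printers above stream GetString($_self) the same way.
      llvm::outs() << precision << " " << layout << "\n";
      return 0;
    }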