diff --git a/paddle/infrt/dialect/infrt/common/types.cc b/paddle/infrt/dialect/infrt/common/types.cc index 62419a196288bb052a9f240ecc25f34c102a5b35..c10679b01342f03b35e816bf290f71790f541ee2 100644 --- a/paddle/infrt/dialect/infrt/common/types.cc +++ b/paddle/infrt/dialect/infrt/common/types.cc @@ -30,6 +30,8 @@ llvm::Optional<LayoutType> GetLayoutType(llvm::StringRef key) { return LayoutType::NCHW; else if (key.equals_insensitive("NHWC")) return LayoutType::NHWC; + else if (key.equals_insensitive("ANY")) + return LayoutType::ANY; else return llvm::None; } @@ -39,6 +41,8 @@ llvm::Optional<PrecisionType> GetPrecisionType(llvm::StringRef key) { return PrecisionType::FLOAT32; else if (key.equals_insensitive("FP16")) return PrecisionType::FLOAT16; + else if (key.equals_insensitive("UNK")) + return PrecisionType::UNK; + else return llvm::None; } @@ -67,6 +71,9 @@ llvm::StringRef GetString(LayoutType type) { case (LayoutType::NHWC): str = "NHWC"; break; + case (LayoutType::ANY): + str = "ANY"; + break; default: str = "Unsupported"; } @@ -82,6 +89,9 @@ llvm::StringRef GetString(PrecisionType type) { case (PrecisionType::FLOAT16): str = "FP16"; break; + case (PrecisionType::UNK): + str = "UNK"; + break; default: str = "Unsupported"; } diff --git a/paddle/infrt/host_context/paddle_mlir.cc b/paddle/infrt/host_context/paddle_mlir.cc index 4e7de9e2df10d3efccd29f13d758183d8a325edb..29328520212fd4d020afc28c1e48d2db604414bc 100644 --- a/paddle/infrt/host_context/paddle_mlir.cc +++ b/paddle/infrt/host_context/paddle_mlir.cc @@ -91,11 +91,15 @@ llvm::SmallVector<mlir::Type, 4> MLIRModelGenImpl::GetModelInputsType( if (var_desc.name() == input_var_name) { std::vector<int64_t> dims = RepeatedToVector<int64_t>( var_desc.type().lod_tensor().tensor().dims()); - mlir::Type precision_; - ConvertDataType(var_desc.type().lod_tensor().tensor().data_type(), - builder_, - &precision_); - mlir::Type type_ = mlir::RankedTensorType::get(dims, precision_); + infrt::PrecisionType precision_; + ConvertDataTypeToPhi( + 
var_desc.type().lod_tensor().tensor().data_type(), &precision_); + mlir::Type type_ = + infrt::DenseTensorType::get(context_, + infrt::TargetType::CPU, + precision_, + infrt::LayoutType::ANY); + operandTypes.push_back(type_); } } @@ -117,11 +121,14 @@ llvm::SmallVector<mlir::Type, 4> MLIRModelGenImpl::GetModelOutputsType( if (var_desc.name() == input_var_name) { std::vector<int64_t> dims = RepeatedToVector<int64_t>( var_desc.type().lod_tensor().tensor().dims()); - mlir::Type precision_; - ConvertDataType(var_desc.type().lod_tensor().tensor().data_type(), - builder_, - &precision_); - mlir::Type type_ = mlir::RankedTensorType::get(dims, precision_); + infrt::PrecisionType precision_; + ConvertDataTypeToPhi( + var_desc.type().lod_tensor().tensor().data_type(), &precision_); + mlir::Type type_ = + infrt::DenseTensorType::get(context_, + infrt::TargetType::CPU, + precision_, + infrt::LayoutType::ANY); resultTypes.push_back(type_); } } @@ -168,15 +175,11 @@ void MLIRModelGenImpl::UpdateModelParams( auto name = builder_.getStringAttr(var_desc.name()); std::vector<int64_t> dims = RepeatedToVector<int64_t>( var_desc.type().lod_tensor().tensor().dims()); - mlir::Type precision_; - ConvertDataType(var_desc.type().lod_tensor().tensor().data_type(), - builder_, - &precision_); - mlir::Type type_ = - infrt::DenseTensorType::get(context_, - infrt::TargetType::CPU, - infrt::PrecisionType::FLOAT32, - infrt::LayoutType::NCHW); + infrt::PrecisionType precision_; + ConvertDataTypeToPhi(var_desc.type().lod_tensor().tensor().data_type(), + &precision_); + mlir::Type type_ = infrt::DenseTensorType::get( + context_, infrt::TargetType::CPU, precision_, infrt::LayoutType::ANY); auto op = builder_.create( mlir::UnknownLoc::get(context_), type_, map, name); params_map_.insert(std::pair<std::string, mlir::Value>( @@ -262,11 +265,13 @@ llvm::SmallVector<mlir::Type, 4> MLIRModelGenImpl::GetOpOutputType( if (var_desc.name() == var_name) { std::vector<int64_t> dims = RepeatedToVector<int64_t>( var_desc.type().lod_tensor().tensor().dims()); - mlir::Type precision_; - 
ConvertDataType(var_desc.type().lod_tensor().tensor().data_type(), - builder_, - &precision_); - mlir::Type type_ = mlir::RankedTensorType::get(dims, precision_); + infrt::PrecisionType precision_; + ConvertDataTypeToPhi(var_desc.type().lod_tensor().tensor().data_type(), + &precision_); + mlir::Type type_ = infrt::DenseTensorType::get(context_, + infrt::TargetType::CPU, + precision_, + infrt::LayoutType::ANY); resultTypes.push_back(type_); } } @@ -403,3 +408,38 @@ bool ConvertDataType(infrt::paddle::framework_proto::VarType::Type dtype, return false; } } + +bool ConvertDataTypeToPhi(infrt::paddle::framework_proto::VarType::Type dtype, + infrt::PrecisionType *type) { + switch (dtype) { + case infrt::paddle::framework_proto::VarType::Type::VarType_Type_FP16: + *type = infrt::PrecisionType::FLOAT16; + return true; + case infrt::paddle::framework_proto::VarType::Type::VarType_Type_FP32: + *type = infrt::PrecisionType::FLOAT32; + return true; + case infrt::paddle::framework_proto::VarType::Type::VarType_Type_FP64: + *type = infrt::PrecisionType::FLOAT64; + return true; + case infrt::paddle::framework_proto::VarType::Type::VarType_Type_BOOL: + *type = infrt::PrecisionType::BOOL; + return true; + case infrt::paddle::framework_proto::VarType::Type::VarType_Type_INT8: + *type = infrt::PrecisionType::INT8; + return true; + case infrt::paddle::framework_proto::VarType::Type::VarType_Type_INT16: + *type = infrt::PrecisionType::INT16; + return true; + case infrt::paddle::framework_proto::VarType::Type::VarType_Type_INT32: + *type = infrt::PrecisionType::INT32; + return true; + case infrt::paddle::framework_proto::VarType::Type::VarType_Type_INT64: + *type = infrt::PrecisionType::INT64; + return true; + case infrt::paddle::framework_proto::VarType::Type::VarType_Type_UINT8: + *type = infrt::PrecisionType::UINT8; + return true; + default: + return false; + } +} diff --git a/paddle/infrt/host_context/paddle_mlir.h b/paddle/infrt/host_context/paddle_mlir.h index 
d5f1209b9925b6f2bb916cdd99024a5782485365..a351b5cf80e2356a6481ccd302a544dcfe595e05 100644 --- a/paddle/infrt/host_context/paddle_mlir.h +++ b/paddle/infrt/host_context/paddle_mlir.h @@ -102,4 +102,7 @@ inline std::vector RepeatedToVector( bool ConvertDataType(infrt::paddle::framework_proto::VarType::Type dtype, mlir::Builder builder, mlir::Type *type); +bool ConvertDataTypeToPhi(infrt::paddle::framework_proto::VarType::Type dtype, + infrt::PrecisionType *type); + #endif // PADDLE_INFRT_HOST_CONTEXT_PADDLE_MLIR_H_ diff --git a/paddle/scripts/infrt_build.sh b/paddle/scripts/infrt_build.sh index 850d4015abf7a8164add9d4896d5a9bdfa26989d..1b259023f94df7279066533bb6c182a644b4e9c2 100755 --- a/paddle/scripts/infrt_build.sh +++ b/paddle/scripts/infrt_build.sh @@ -45,6 +45,7 @@ function update_pd_ops() { python3 generate_pd_op_dialect_from_paddle_op_maker.py python3 generate_phi_kernel_dialect.py # generate test model + cd ${PADDLE_ROOT} python3 paddle/infrt/tests/model/abs_model.py ${PADDLE_ROOT}/build/paddle/infrt/tests/abs } diff --git a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py index 8855e1eee38717a6cffc14e9c1762af36e94fa84..b0e420da64aa280b71859b27334a2abeaaacc53b 100644 --- a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py +++ b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py @@ -191,6 +191,21 @@ def generate_all_ops_inputs_outputs_map(op_descs): ops_inputs_outputs_head_file.write(cpp_style_ops_outputs_map_str) +def get_constraint(op_type, op_proto): + # 2.3.1 inputs + constraint = "NoSideEffect" + + optional_input_num_ = 0 + for input_ in op_proto[INPUTS]: + if op_proto[INPUTS][input_][EXTRA] != True and op_proto[INPUTS][input_][ + INTERMEDIATE] != True and op_proto[INPUTS][input_][ + DISPENSABLE] == True: + optional_input_num_ += 1 + if optional_input_num_ > 1: + constraint += ", AttrSizedOperandSegments" + return constraint + + # funtion to generate paddle op dialect 
file def convert_op_proto_into_mlir(op_descs): dst_dialect_file = "../../paddle/infrt/dialect/pd/ir/pd_ops.td" @@ -237,9 +252,11 @@ def convert_op_proto_into_mlir(op_descs): if (op_type in skipped_op_list) or (op_type not in original_ops_): continue automatically_generated_op_dialect.append(op_type) + constraint_ = get_constraint(op_type, op_proto) # 2.1 OpDef - HEAD = 'def PD_{op_type_capitalize}Op : PD_Op<"{op_type}", [NoSideEffect]> {left_brace}\n'.format( + HEAD = 'def PD_{op_type_capitalize}Op : PD_Op<"{op_type}", [{constraint}]> {left_brace}\n'.format( op_type_capitalize=op_type.capitalize(), + constraint=constraint_, op_type=op_type, left_brace="{") SUMMARY = ' let summary = "{} op";\n'.format(op_type) @@ -256,14 +273,22 @@ def convert_op_proto_into_mlir(op_descs): ARGUMENTS = "" if (len(op_proto[INPUTS]) > 0 or len(op_proto[ATTRS]) > 0): ARGUMENTS = " let arguments = (ins " + # 2.3.1 inputs for input_ in op_proto[INPUTS]: if op_proto[INPUTS][input_][EXTRA] != True and op_proto[INPUTS][ input_][INTERMEDIATE] != True: - if op_proto[INPUTS][input_][DUPLICABLE] != "true": - ARGUMENTS = ARGUMENTS + " PD_Tensor:$" + input_ + "," + if op_proto[INPUTS][input_][DISPENSABLE] != True: + if op_proto[INPUTS][input_][DUPLICABLE] != True: + ARGUMENTS = ARGUMENTS + " PD_Tensor:$" + input_ + "," + else: + ARGUMENTS = ARGUMENTS + " PD_Tensor_Array:$" + input_ + "," else: - ARGUMENTS = ARGUMENTS + " PD_Tensor_Array:$" + input_ + "," + if op_proto[INPUTS][input_][DUPLICABLE] != True: + ARGUMENTS = ARGUMENTS + " Optional<PD_Tensor>:$" + input_ + "," + else: + ARGUMENTS = ARGUMENTS + " Optional<PD_Tensor_Array>:$" + input_ + "," + # unsupported: BLOCK = 8; BLOCKS = 10; attr_mlir_converter = { 0: 'SI32Attr', @@ -332,7 +357,7 @@ for output_ in op_proto[OUTPUTS]: if op_proto[OUTPUTS][output_][EXTRA] != True and op_proto[ OUTPUTS][output_][INTERMEDIATE] != True: - if op_proto[OUTPUTS][output_][DUPLICABLE] != "true": + if op_proto[OUTPUTS][output_][DUPLICABLE] != True:
outputs = outputs + "PD_Tensor:${},".format(output_) else: outputs = outputs + "PD_Tensor_Array:${},".format( diff --git a/tools/infrt/generate_phi_kernel_dialect.py b/tools/infrt/generate_phi_kernel_dialect.py index 36561d4e71da8b669f1e06b0240a4d4b3b2ca92e..f632c9a9dba504d209946e494e55eb970e727629 100644 --- a/tools/infrt/generate_phi_kernel_dialect.py +++ b/tools/infrt/generate_phi_kernel_dialect.py @@ -43,7 +43,8 @@ precision_type_converter = { "float64": "FLOAT64", "complex64": "COMPLEX64", "complex128": "COMPLEX128", - "bool": "BOOL" + "bool": "BOOL", + "Undefined": "UNK" } kernel_types_info_file = "./kernels.json"