From f37a23a774b9e8ba6f1cadd408168496626a82dc Mon Sep 17 00:00:00 2001
From: huzhiqiang <912790387@qq.com>
Date: Wed, 19 Jan 2022 14:13:45 +0800
Subject: [PATCH] convert paddle op definitions into pd dialect in infrt
 (#38708)

---
 paddle/infrt/dialect/pd_op_base.td            |   1 +
 paddle/infrt/dialect/pd_ops.cc                |   5 +-
 paddle/infrt/dialect/pd_ops.td                | 212 --------------
 paddle/infrt/dialect/rewrite.td               |   4 +-
 paddle/scripts/infrt_build.sh                 |  51 +++-
 tools/infrt/custom_pdop.td                    |  57 ++++
 ...rate_pd_op_dialect_from_paddle_op_maker.py | 266 ++++++++++++++++++
 7 files changed, 370 insertions(+), 226 deletions(-)
 delete mode 100644 paddle/infrt/dialect/pd_ops.td
 create mode 100644 tools/infrt/custom_pdop.td
 create mode 100644 tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py

diff --git a/paddle/infrt/dialect/pd_op_base.td b/paddle/infrt/dialect/pd_op_base.td
index a3e3c4ae59..40795eb9d2 100644
--- a/paddle/infrt/dialect/pd_op_base.td
+++ b/paddle/infrt/dialect/pd_op_base.td
@@ -73,5 +73,6 @@ def PD_ElementType : Type<Or<[PD_Float.predicate,
 
 def PD_Tensor : TensorOf<[PD_ElementType]>;
+def PD_Tensor_Array : VectorOf<[PD_Tensor]>;
 
 #endif // PD_OP_BASE

diff --git a/paddle/infrt/dialect/pd_ops.cc b/paddle/infrt/dialect/pd_ops.cc
index fe38996883..6e32abbe2a 100644
--- a/paddle/infrt/dialect/pd_ops.cc
+++ b/paddle/infrt/dialect/pd_ops.cc
@@ -25,6 +25,7 @@
 
 namespace mlir {
 namespace pd {
+
 PaddleDialect::PaddleDialect(MLIRContext *context)
     : Dialect("pd", context, TypeID::get<PaddleDialect>()) {
   addOperations<
@@ -69,7 +70,7 @@ mlir::OpFoldResult ConstantOp::fold(
     ::llvm::ArrayRef<mlir::Attribute> operands) {
   return value();
 }
-
+/*
 LogicalResult ElementwiseAdd::inferReturnTypes(
     MLIRContext *context,
     Optional<Location> location,
@@ -165,7 +166,7 @@ void FusedRepeatedFCRelu::getCanonicalizationPatterns(
 void BatchNormOp::getCanonicalizationPatterns(
     mlir::OwningRewritePatternList &results, mlir::MLIRContext *context) {
   results.insert<FuseBatchNormWithConvPattern>(context);
-}
+}*/
 
 }  // namespace pd
 }  // namespace mlir

diff --git a/paddle/infrt/dialect/pd_ops.td b/paddle/infrt/dialect/pd_ops.td
deleted file mode 100644
index 3addf15082..0000000000
--- a/paddle/infrt/dialect/pd_ops.td
+++ /dev/null
@@ -1,212 +0,0 @@
-#ifndef PD_OPS
-#define PD_OPS
-
-include "mlir/Interfaces/InferTypeOpInterface.td"
-include "mlir/Interfaces/LoopLikeInterface.td"
-include "mlir/IR/OpBase.td"
-include "paddle/infrt/dialect/pd_op_base.td"
-
-def PD_FeedOp : PD_Op<"feed"> {
-  let summary = "Feed Op";
-
-  let description = [{
-    Feed a tensor into the model.
-  }];
-
-  let arguments = (ins StrAttr:$name);
-  let results = (outs PD_Tensor:$out);
-
-  let assemblyFormat = [{
-      `(` `)` attr-dict `:` type($out)
-  }];
-}
-
-def PD_FetchOp : PD_Op<"fetch", [Terminator]> {
-  let summary = "fetch Op";
-
-  let description = [{
-    Return the output tensor from the subgraph.
-  }];
-
-  let arguments = (ins PD_Tensor :$inputs, StrAttr:$name);
-}
-
-def PD_ReturnOp : PD_Op<"return", [Terminator]> {
-  let summary = "return Op";
-
-  let description = [{
-    Fetch tensor from the graph.
-  }];
-
-  let arguments = (ins Variadic<PD_Tensor>:$inputs);
-}
-
-def PD_GraphOp : PD_Op<"graph", [SingleBlockImplicitTerminator<"ReturnOp">]> {
-  let summary = "paddle graph Op";
-  let description = [{
-    Describe a paddle graph or subgraph.
-  }];
-  let regions = (region SizedRegion<1>:$body);
-  let arguments = (ins Variadic<PD_Tensor>:$inputs);
-  let results = (outs Variadic<PD_Tensor>:$outputs);
-}
-
-def PD_ConstantOp : PD_Op<"constant", [NoSideEffect, ConstantLike, DeclareOpInterfaceMethods<InferTypeOpInterface>, AllTypesMatch<["value", "output"]>]> {
-  let summary = "constant Op";
-  let description = [{}];
-
-  let arguments = (ins ElementsAttr:$value);
-  let results = (outs PD_Tensor:$output);
-  let hasFolder = 1;
-
-  let builders = [
-    OpBuilder<(ins "Attribute":$value)>,
-  ];
-}
-
-def PD_AbsOp : PD_Op<"abs", [NoSideEffect, SameOperandsAndResultType]> {
-  let summary = "Computes the absolute value of a tensor";
-
-  let description = [{
-  }];
-
-  let arguments = (ins PD_Tensor:$x);
-  let results = (outs PD_Tensor:$y);
-}
-
-def PD_SqrtOp : PD_Op<"sqrt", [NoSideEffect, SameOperandsAndResultType]> {
-  let summary = "Computes the sqrt value of a tensor";
-
-  let description = [{
-  }];
-
-  let arguments = (ins PD_Tensor:$x);
-  let results = (outs PD_Tensor:$y);
-}
-
-def PD_ReluOp : PD_Op<"relu", [NoSideEffect, SameOperandsAndResultType]> {
-  let summary = "Computes the Relu of a tensor";
-
-  let description = [{
-  }];
-
-  let arguments = (ins PD_Tensor:$x);
-  let results = (outs PD_Tensor:$y);
-  let hasCanonicalizer = 1;
-}
-
-def PD_Relu6Op : PD_Op<"relu6", [NoSideEffect, SameOperandsAndResultType]> {
-  let summary = "Computes the Relu6 of a tensor";
-
-  let description = [{
-  }];
-
-  let arguments = (ins PD_Tensor:$x);
-  let results = (outs PD_Tensor:$y);
-}
-
-def PD_ElementwiseAdd : PD_Op<"elementwise_add", [NoSideEffect, Commutative, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
-  let summary = "ElementwiseAdd Op";
-  let description = [{
-  }];
-
-  let arguments = (ins PD_Tensor:$x, PD_Tensor:$y, DefaultValuedAttr<I32Attr, "-1">:$axis);
-  let results = (outs PD_Tensor:$out);
-  let hasCanonicalizer = 1;
-  let hasFolder = 1;
-}
-
-def PD_ElementwiseSub : PD_Op<"elementwise_sub", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
-  let summary = "ElementwiseSub Op";
-  let description = [{
-  }];
-
-  let arguments = (ins PD_Tensor:$x, PD_Tensor:$y, DefaultValuedAttr<I32Attr, "-1">:$axis);
-  let results = (outs PD_Tensor:$out);
-}
-
-def PD_ElementwiseMul : PD_Op<"elementwise_mul", [NoSideEffect, Commutative, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
-  let summary = "ElementwiseMul Op";
-  let description = [{
-  }];
-
-  let arguments = (ins PD_Tensor:$x, PD_Tensor:$y, DefaultValuedAttr<I32Attr, "-1">:$axis);
-  let results = (outs PD_Tensor:$out);
-}
-
-def PD_ElementwiseDiv : PD_Op<"elementwise_div", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
-  let summary = "ElementwiseDiv Op";
-  let description = [{
-  }];
-
-  let arguments = (ins PD_Tensor:$x, PD_Tensor:$y, DefaultValuedAttr<I32Attr, "-1">:$axis);
-  let results = (outs PD_Tensor:$out);
-}
-
-def PD_MatmulOp : PD_Op<"matmul", [NoSideEffect]> {
-  let summary = "Computes the matrix mulplication result of two tensors";
-  let description = [{
-  }];
-
-  let arguments = (ins PD_Tensor:$x, PD_Tensor:$y,
-                   DefaultValuedAttr<BoolAttr, "false">:$transpose_x,
-                   DefaultValuedAttr<BoolAttr, "false">:$transpose_y,
-                   DefaultValuedAttr<F32Attr, "1.0">:$alpha);
-  let results = (outs PD_Tensor:$out);
-
-  //let hasCanonicalizer = 1;
-}
-
-def PD_MulOp : PD_Op<"mul", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
-  let summary = "paddle mul op";
-  let description = [{}];
-
-  let arguments = (ins PD_Tensor:$x, PD_Tensor:$y);
-  let results = (outs PD_Tensor:$out);
-
-  //let hasCanonicalizer = 1;
-}
-
-def PD_Conv2dOp : PD_Op<"conv2d", [NoSideEffect]> {
-  let summary = "paddle conv2d operation";
-  let description = [{
-  }];
-
-  let arguments = (ins PD_Tensor:$Input, PD_Tensor:$Filter, PD_Tensor:$Bias);
-  let results = (outs PD_Tensor:$Output);
-
-  //let hasCanonicalizer = 1;
-}
-
-def PD_BatchNormOp : PD_Op<"batch_norm", [NoSideEffect]> {
-  let summary = "paddle batch_norm operation";
-  let description = [{
-  }];
-
-  let arguments = (ins PD_Tensor:$X, PD_Tensor:$Scale, PD_Tensor:$Bias,
-                   PD_Tensor:$Mean, PD_Tensor:$Variance,
-                   DefaultValuedAttr<F32Attr, "1e-5">:$epsilon);
-  let results = (outs PD_Tensor:$Y);
-
-  let hasCanonicalizer = 1;
-}
-
-def PD_FusedFC : PD_Op<"fc", [NoSideEffect]> {
-  let summary = "Computes the Fully Connected result of two tensors";
-  let description = [{
-  }];
-
-  let arguments = (ins PD_Tensor:$input, PD_Tensor:$w, PD_Tensor:$bias, DefaultValuedAttr<I32Attr, "1">:$in_num_col_dims);
-  let results = (outs PD_Tensor:$out);
-}
-
-def PD_FusedRepeatedFCRelu : PD_Op<"fusion_repeated_fc_relu", [SameVariadicOperandSize, NoSideEffect]> {
-  let summary = "";
-  let description = [{ }];
-
-  let arguments = (ins PD_Tensor:$input, Variadic<PD_Tensor>:$w, Variadic<PD_Tensor>:$bias);
-  let results = (outs PD_Tensor:$out);
-  let hasCanonicalizer = 1;
-}
-
-#endif // PD_OPS

diff --git a/paddle/infrt/dialect/rewrite.td b/paddle/infrt/dialect/rewrite.td
index b5b7cf0667..db75ba041b 100644
--- a/paddle/infrt/dialect/rewrite.td
+++ b/paddle/infrt/dialect/rewrite.td
@@ -4,7 +4,7 @@ include "paddle/infrt/dialect/infrt_base.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
 include "paddle/infrt/dialect/pd_ops.td"
-
+/*
 //===----------------------------------------------------------------------===//
 // This is to fuse the composition: 'Matmul o ElementwiseAdd' into 'PD_FusedFC'.
 //
@@ -89,5 +89,5 @@ def FuseBatchNormWithConvPattern: Pat<
                 $coefficientW),
             (INFRT_createI32Attr<"1">)))
 >;
-
+*/
 #endif // INFRT_REWRITE

diff --git a/paddle/scripts/infrt_build.sh b/paddle/scripts/infrt_build.sh
index 74f690da76..e34bf7cff8 100644
--- a/paddle/scripts/infrt_build.sh
+++ b/paddle/scripts/infrt_build.sh
@@ -18,7 +18,7 @@
 # Utils
 #=================================================
 
-set -ex
+set -e
 
 if [ -z ${BRANCH} ]; then
   BRANCH="develop"
@@ -27,6 +27,22 @@ fi
 EXIT_CODE=0;
 tmp_dir=`mktemp -d`
 
+function update_pd_ops() {
+    PADDLE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../../" && pwd )"
+    # compile and install paddle
+    rm -rf ${PADDLE_ROOT}/build && mkdir -p ${PADDLE_ROOT}/build
+    cd ${PADDLE_ROOT}/build
+    cmake .. -DWITH_PYTHON=ON -DWITH_GPU=OFF -DPYTHON_EXECUTABLE=`which python3`
+    make -j8
+    cd ${PADDLE_ROOT}/build
+    cd python/dist/
+    python3 -m pip uninstall -y paddlepaddle
+    python3 -m pip install *whl
+    # update pd_ops.td
+    cd ${PADDLE_ROOT}/tools/infrt/
+    python3 generate_pd_op_dialect_from_paddle_op_maker.py
+}
+
 function init() {
     RED='\033[0;31m'
     BLUE='\033[0;34m'
@@ -62,7 +78,11 @@ function infrt_gen_and_build() {
     fi
     startTime_s=`date +%s`
     set +e
+    mkdir -p ${PADDLE_ROOT}/build
+    # step1. reinstall paddle and generate pd_ops.td
+    update_pd_ops
+    # step2. compile infrt
     cd ${PADDLE_ROOT}/build
     rm -f infrt_summary.txt
     cmake .. -DWITH_MKL=OFF -DWITH_GPU=OFF -DWITH_CRYPTO=OFF -DCMAKE_BUILD_TYPE=Release -DWITH_INFRT=ON -DWITH_PYTHON=OFF -DWITH_TESTING==${WITH_TESTING:-ON}; build_error=$?
@@ -104,9 +124,19 @@ EOF
 }
 
 function main() {
-    local CMD=$1
+    local CMD=$1
     local parallel_number=$2
+
+    if [ -z "$1" ]; then
+        echo "Usage:"
+        echo "    (1)bash infrt_build.sh build_and_test"
+        echo "    (2)bash infrt_build.sh build_only"
+        echo "    (3)bash infrt_build.sh test_only"
+        echo "    optional command: --update_pd_ops : pd_ops.td will be updated according to paddle's code."
+        exit 0
+    fi
+
     init
+
     case $CMD in
       build_and_test)
         infrt_gen_and_build ${parallel_number}
         test_infrt
         ;;
       build_only)
         infrt_gen_and_build ${parallel_number}
         ;;
       test_only)
         test_infrt
         ;;
       *)
         print_usage
         exit 1
         ;;
-  esac
-  set +x
-  if [[ -f ${PADDLE_ROOT}/build/infrt_summary.txt ]];then
-    echo "=====================build summary======================"
-    cat ${PADDLE_ROOT}/build/infrt_summary.txt
-    echo "========================================================"
-  fi
-  echo "paddle_build script finished as expected!"
+    esac
+
+    set +x
+    if [[ -f ${PADDLE_ROOT}/build/infrt_summary.txt ]];then
+      echo "=====================build summary======================"
+      cat ${PADDLE_ROOT}/build/infrt_summary.txt
+      echo "========================================================"
+    fi
+    echo "paddle_build script finished as expected!"
 }
 
 main $@

diff --git a/tools/infrt/custom_pdop.td b/tools/infrt/custom_pdop.td
new file mode 100644
index 0000000000..83e2957831
--- /dev/null
+++ b/tools/infrt/custom_pdop.td
@@ -0,0 +1,57 @@
+def PD_FeedOp : PD_Op<"feed"> {
+  let summary = "Feed Op";
+
+  let description = [{
+    Feed a tensor into the model.
+  }];
+
+  let arguments = (ins StrAttr:$name);
+  let results = (outs PD_Tensor:$out);
+
+  let assemblyFormat = [{
+      `(` `)` attr-dict `:` type($out)
+  }];
+}
+
+def PD_FetchOp : PD_Op<"fetch", [Terminator]> {
+  let summary = "fetch Op";
+
+  let description = [{
+    Return the output tensor from the subgraph.
+  }];
+
+  let arguments = (ins PD_Tensor :$inputs, StrAttr:$name);
+}
+
+def PD_ReturnOp : PD_Op<"return", [Terminator]> {
+  let summary = "return Op";
+
+  let description = [{
+    Fetch tensor from the graph.
+  }];
+
+  let arguments = (ins Variadic<PD_Tensor>:$inputs);
+}
+
+def PD_GraphOp : PD_Op<"graph", [SingleBlockImplicitTerminator<"ReturnOp">]> {
+  let summary = "paddle graph Op";
+  let description = [{
+    Describe a paddle graph or subgraph.
+  }];
+  let regions = (region SizedRegion<1>:$body);
+  let arguments = (ins Variadic<PD_Tensor>:$inputs);
+  let results = (outs Variadic<PD_Tensor>:$outputs);
+}
+
+def PD_ConstantOp : PD_Op<"constant", [NoSideEffect, ConstantLike, DeclareOpInterfaceMethods<InferTypeOpInterface>, AllTypesMatch<["value", "output"]>]> {
+  let summary = "constant Op";
+  let description = [{}];
+
+  let arguments = (ins ElementsAttr:$value);
+  let results = (outs PD_Tensor:$output);
+  let hasFolder = 1;
+
+  let builders = [
+    OpBuilder<(ins "Attribute":$value)>,
+  ];
+}

diff --git a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
new file mode 100644
index 0000000000..2688efcf63
--- /dev/null
+++ b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
@@ -0,0 +1,266 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.fluid.framework as framework
+from paddle.fluid import core
+from paddle import compat as cpt
+
+
+# collect original ops: ops that have both an inference and a grad definition
+def get_original_ops():
+    all_ops, _, _ = core.op_supported_infos('CPU', core.VarDesc.VarType.FP16)
+    grad_ops = []
+    original_ops = []
+
+    for op in all_ops:
+        if op.endswith("_grad"):
+            if op.endswith("_grad_grad"):
+                continue
+            grad_ops.append(op)
+    for op in all_ops:
+        if str(op + "_grad") in grad_ops:
+            original_ops.append(op)
+
+    print("Grad ops num: " + str(len(grad_ops)))
+    print("Corresponding original ops num: " + str(len(original_ops)))
+    return original_ops
+
+
+# functions for parsing the Paddle op proto
+INPUTS = "Inputs"
+OUTPUTS = "Outputs"
+ATTRS = "Attrs"
+COMMENT = "Comment"
+
+DUPLICABLE = "duplicable"
+INTERMEDIATE = "intermediate"
+DISPENSABLE = "dispensable"
+
+TYPE = "type"
+GENERATED = "generated"
+DEFAULT_VALUE = "default_value"
+
+EXTRA = "extra"
+QUANT = "quant"
+
+
+def get_attr_default_value(op_name):
+    return core.get_op_attrs_default_value(cpt.to_bytes(op_name))
+
+
+def get_vars_info(op_vars_proto):
+    vars_info = {}
+    for var_proto in op_vars_proto:
+        name = str(var_proto.name)
+        vars_info[name] = {}
+        vars_info[name][DUPLICABLE] = var_proto.duplicable
+        vars_info[name][DISPENSABLE] = var_proto.dispensable
+        vars_info[name][INTERMEDIATE] = var_proto.intermediate
+        vars_info[name][EXTRA] = var_proto.extra
+        vars_info[name][QUANT] = var_proto.quant
+    return vars_info
+
+
+def get_attrs_info(op_proto, op_attrs_proto):
+    attrs_info = {}
+    attrs_default_values = get_attr_default_value(op_proto.type)
+    for attr_proto in op_attrs_proto:
+        attr_name = str(attr_proto.name)
+        attrs_info[attr_name] = {}
+        attrs_info[attr_name][TYPE] = attr_proto.type
+        attrs_info[attr_name][GENERATED] = attr_proto.generated
+        attrs_info[attr_name][DEFAULT_VALUE] = attrs_default_values[
+            attr_name] if attr_name in attrs_default_values else None
+        attrs_info[attr_name][EXTRA] = attr_proto.extra
+        attrs_info[attr_name][QUANT] = attr_proto.quant
+    return attrs_info
+
+
+def get_op_desc(op_proto):
+    op_info = {}
+    op_info[INPUTS] = get_vars_info(op_proto.inputs)
+    op_info[OUTPUTS] = get_vars_info(op_proto.outputs)
+    op_info[ATTRS] = get_attrs_info(op_proto, op_proto.attrs)
+    op_info[COMMENT] = op_proto.comment
+    return op_info
+
+
+def get_all_ops_desc():
+    all_op_protos_dict = {}
+    all_op_protos = framework.get_all_op_protos()
+    for op_proto in all_op_protos:
+        op_type = str(op_proto.type)
+        all_op_protos_dict[op_type] = get_op_desc(op_proto)
+    return all_op_protos_dict
+
+
+# function to generate the paddle op dialect file
+def convert_op_proto_into_mlir(op_descs):
+    dst_dialect_file = "../../paddle/infrt/dialect/pd_ops.td"
+    custom_dialect_file = "custom_pdop.td"
+
+    # 1. Header of the generated file
+    comment_ = "/*===- TableGen'source file -----------------------------------------------===*\\\n\
+|*                                                                            *|\n\
+|* Op Definitions                                                             *|\n\
+|*                                                                            *|\n\
+|* Automatically generated file, do not edit!                                 *|\n\
+|* Generated by tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py   *|\n\
+|*                                                                            *|\n\
+\*===----------------------------------------------------------------------===*/\n"
+
+    start_ = comment_ + "#ifndef PD_OPS\n#define PD_OPS\ninclude \"mlir/Interfaces/InferTypeOpInterface.td\"\ninclude \"mlir/Interfaces/LoopLikeInterface.td\"\ninclude \"mlir/IR/OpBase.td\"\ninclude \"paddle/infrt/dialect/pd_op_base.td\"\n\n"
+    with open(dst_dialect_file, 'w') as ops_mlir_file:
+        ops_mlir_file.write(start_)
+
+    # 2. Op dialect
+    # skip list (ops whose dialect cannot be generated automatically are recorded here)
+    skipped_op_list = [
+        "cos_sim", "fused_embedding_seq_pool", "cosh", "kron", "recurrent",
+        "while", "conditional_block", "set_value", "run_program"
+    ]
+    skipped_attr_list = [
+        "trainable_statistics", "use_global_stats", "is_test", "use_mkldnn",
+        "use_cudnn"
+    ]
+    original_ops_ = get_original_ops()
+    automatically_generated_op_dialect = []
+    for op_type, op_proto in op_descs.items():
+        if (op_type in skipped_op_list) or (op_type not in original_ops_):
+            continue
+        automatically_generated_op_dialect.append(op_type)
+        # 2.1 OpDef
+        HEAD = "def PD_" + op_type.capitalize(
+        ) + "Op : PD_Op<\"" + op_type + "\", [NoSideEffect]> {\n"
+        SUMMARY = " let summary = \"" + op_type + " op\";\n"
+
+        # 2.2 Description
+        DESCRIPTION = " let description = [{\n"
+        contents = (op_proto[COMMENT]).split("\n")
+        for line_ in contents:
+            DESCRIPTION = DESCRIPTION + "    " + line_ + "\n"
+        DESCRIPTION += " }];\n"
+
+        # 2.3 arguments info
+        ARGUMENTS = ""
+        if (len(op_proto[INPUTS]) > 0 or len(op_proto[ATTRS]) > 0):
+            ARGUMENTS = " let arguments = (ins "
+            # 2.3.1 inputs
+            for input_ in op_proto[INPUTS]:
+                if op_proto[INPUTS][input_][EXTRA] != True and op_proto[INPUTS][
+                        input_][INTERMEDIATE] != True:
+                    if op_proto[INPUTS][input_][DUPLICABLE] != True:
+                        ARGUMENTS = ARGUMENTS + " PD_Tensor:$" + input_ + ","
+                    else:
+                        ARGUMENTS = ARGUMENTS + " PD_Tensor_Array:$" + input_ + ","
+            # unsupported: BLOCK = 8;  BLOCKS = 10;
+            attr_mlir_converter = {
+                0: 'SI32Attr',
+                1: 'F32Attr',
+                2: 'StrAttr',
+                3: 'I32ArrayAttr',
+                4: 'F32ArrayAttr',
+                5: 'StrArrayAttr',
+                6: 'BoolAttr',
+                7: 'BoolArrayAttr',
+                9: 'SI64Attr',
+                11: 'I64ArrayAttr'
+            }
+
+            # 2.3.2 attributes
+            for attr in op_proto[ATTRS]:
+                if (op_proto[ATTRS][attr][EXTRA] == True) or (
+                        attr in skipped_attr_list):
+                    continue
+                if op_proto[ATTRS][attr][DEFAULT_VALUE] != None:
+                    if op_proto[ATTRS][attr][TYPE] in attr_mlir_converter:
+                        default_value = str(op_proto[ATTRS][attr][
+                            DEFAULT_VALUE])
+                        if (attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] in
+                            [
+                                'I32ArrayAttr', 'F32ArrayAttr', 'StrArrayAttr',
+                                'BoolArrayAttr', 'I64ArrayAttr'
+                            ]):
+                            default_value = default_value.replace(
+                                '[', '{').replace(']', '}')
+                        if (attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] in
+                                ['BoolAttr', 'BoolArrayAttr']):
+                            default_value = default_value.lower()
+                        elif (attr_mlir_converter[op_proto[ATTRS][attr][TYPE]]
+                              in ['StrAttr', 'StrArrayAttr']):
+                            default_value = default_value.replace('\'', '\\\"')
+                            if attr_mlir_converter[op_proto[ATTRS][attr][
+                                    TYPE]] == "StrAttr":
+                                default_value = '\\\"' + default_value + '\\\"'
+                        attr_list = " DefaultValuedAttr<" + attr_mlir_converter[
+                            op_proto[ATTRS][attr]
+                            [TYPE]] + ", \"" + default_value + "\">:$" + attr + ","
+                        ARGUMENTS += attr_list
+                    else:
+                        print("Error:" + op_type + ":" + attr + ":" + str(
+                            op_proto[ATTRS][attr][TYPE]))
+                else:
+                    if op_proto[ATTRS][attr][TYPE] in attr_mlir_converter:
+                        attr_type_ = attr_mlir_converter[op_proto[ATTRS][attr][
+                            TYPE]]
+                        if (attr_type_ in [
+                                'I32ArrayAttr', 'F32ArrayAttr', 'StrArrayAttr',
+                                'BoolArrayAttr', 'I64ArrayAttr'
+                        ]):
+                            attr_list = attr_type_ + ":$" + attr + ","
+                            ARGUMENTS += attr_list
+                    else:
+                        print(" ouch Error:" + op_type + ":" + attr + ":" + str(
+                            op_proto[ATTRS][attr][TYPE]))
+            ARGUMENTS = ARGUMENTS[:-1] + ");\n"
+
+        # 2.4 results info
+        RESULTS = ""
+        if (len(op_proto[OUTPUTS]) > 0):
+            RESULTS = "\n let results = (outs "
+            for output_ in op_proto[OUTPUTS]:
+                if op_proto[OUTPUTS][output_][EXTRA] != True and op_proto[
+                        OUTPUTS][output_][INTERMEDIATE] != True:
+                    if op_proto[OUTPUTS][output_][DUPLICABLE] != True:
+                        RESULTS = RESULTS + "PD_Tensor:$" + output_ + ","
+                    else:
+                        RESULTS = RESULTS + "PD_Tensor_Array:$" + output_ + ","
+                        print(HEAD + " PD_Tensor_Array:$" + output_ + ",")
+            RESULTS = RESULTS[:-1] + ");\n"
+
+        with open(dst_dialect_file, 'a') as ops_mlir_file:
+            ops_mlir_file.write(HEAD)
+            ops_mlir_file.write(SUMMARY)
+            ops_mlir_file.write(DESCRIPTION)
+            ops_mlir_file.write(ARGUMENTS)
+            ops_mlir_file.write(RESULTS)
+            ops_mlir_file.write("}\n")
+
+    print("Skipped ops num: " + str(len(skipped_op_list)))
+    print("Automatically generated op dialects num: " + str(
+        len(automatically_generated_op_dialect)))
+
+    # 3. custom op dialect and end of file
+    with open(dst_dialect_file, 'a') as ops_mlir_file:
+        with open(custom_dialect_file, 'r') as custom_ops_file:
+            custom_ops = custom_ops_file.readlines()
+            ops_mlir_file.writelines(custom_ops)
+
+        end_ = "\n#endif // PD_OPS"
+        ops_mlir_file.write(end_)
+
+
+if __name__ == "__main__":
+    all_op_protos_dict = get_all_ops_desc()
+    convert_op_proto_into_mlir(all_op_protos_dict)
-- 
GitLab
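
Note: for a sense of what the script above emits into pd_ops.td, the definition for a single op looks roughly like the sketch below. This example is hand-written for illustration, assuming the `relu` OpMaker proto with input `X`, output `Out`, and no non-extra attributes (relu's `use_mkldnn`/`use_cudnn` fall into the skip list); it is not verbatim generator output, and the description body is filled from the proto's comment:

def PD_ReluOp : PD_Op<"relu", [NoSideEffect]> {
 let summary = "relu op";
 let description = [{
    (comment text taken from the relu OpMaker proto)
 }];
 let arguments = (ins  PD_Tensor:$X);
 let results = (outs PD_Tensor:$Out);
}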