Unverified commit f37a23a7, authored by huzhiqiang, committed by GitHub

convert paddle op definitions into pd dialect in infrt (#38708)

Parent commit: c7de7440
@@ -73,5 +73,6 @@ def PD_ElementType : Type<Or<[PD_Float.predicate,
def PD_Tensor : TensorOf<[PD_ElementType]>;
def PD_Tensor_Array : VectorOf<[PD_Tensor]>;
#endif // PD_OP_BASE
@@ -25,6 +25,7 @@
namespace mlir {
namespace pd {
PaddleDialect::PaddleDialect(MLIRContext *context)
: Dialect("pd", context, TypeID::get<PaddleDialect>()) {
addOperations<
@@ -69,7 +70,7 @@ mlir::OpFoldResult ConstantOp::fold(
::llvm::ArrayRef<mlir::Attribute> operands) {
return value();
}
/*
LogicalResult ElementwiseAdd::inferReturnTypes(
MLIRContext *context,
Optional<Location> location,
@@ -165,7 +166,7 @@ void FusedRepeatedFCRelu::getCanonicalizationPatterns(
void BatchNormOp::getCanonicalizationPatterns(
mlir::OwningRewritePatternList &results, mlir::MLIRContext *context) {
results.insert<FuseBatchNormWithConvPattern>(context);
}
}*/
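// NOTE: the hand-written inferReturnTypes / canonicalization implementations
// above are disabled by this commit; pd_ops.td is now generated automatically
// by tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py.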
} // namespace pd
} // namespace mlir
#ifndef PD_OPS
#define PD_OPS
include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/Interfaces/LoopLikeInterface.td"
include "mlir/IR/OpBase.td"
include "paddle/infrt/dialect/pd_op_base.td"
def PD_FeedOp : PD_Op<"feed"> {
let summary = "Feed Op";
let description = [{
Feed a tensor into the model.
}];
let arguments = (ins StrAttr:$name);
let results = (outs PD_Tensor:$out);
let assemblyFormat = [{
`(` `)` attr-dict `:` type($out)
}];
}
def PD_FetchOp : PD_Op<"fetch", [Terminator]> {
let summary = "fetch Op";
let description = [{
Fetch an output tensor from the subgraph.
}];
let arguments = (ins PD_Tensor:$inputs, StrAttr:$name);
}
def PD_ReturnOp : PD_Op<"return", [Terminator]> {
let summary = "return Op";
let description = [{
Return tensors from the enclosing graph op.
}];
let arguments = (ins Variadic<PD_Tensor>:$inputs);
}
def PD_GraphOp : PD_Op<"graph", [SingleBlockImplicitTerminator<"ReturnOp">]> {
let summary = "paddle graph Op";
let description = [{
Describe a paddle graph or subgraph.
}];
let regions = (region SizedRegion<1>:$body);
let arguments = (ins Variadic<PD_Tensor>:$inputs);
let results = (outs Variadic<PD_Tensor>:$outputs);
}
def PD_ConstantOp : PD_Op<"constant", [NoSideEffect, ConstantLike, DeclareOpInterfaceMethods<InferTypeOpInterface>, AllTypesMatch<["value", "output"]>]> {
let summary = "constant Op";
let description = [{}];
let arguments = (ins ElementsAttr:$value);
let results = (outs PD_Tensor:$output);
let hasFolder = 1;
let builders = [
OpBuilder<(ins "Attribute":$value)>,
];
}
def PD_AbsOp : PD_Op<"abs", [NoSideEffect, SameOperandsAndResultType]> {
let summary = "Computes the absolute value of a tensor";
let description = [{
}];
let arguments = (ins PD_Tensor:$x);
let results = (outs PD_Tensor:$y);
}
def PD_SqrtOp : PD_Op<"sqrt", [NoSideEffect, SameOperandsAndResultType]> {
let summary = "Computes the sqrt value of a tensor";
let description = [{
}];
let arguments = (ins PD_Tensor:$x);
let results = (outs PD_Tensor:$y);
}
def PD_ReluOp : PD_Op<"relu", [NoSideEffect, SameOperandsAndResultType]> {
let summary = "Computes the Relu of a tensor";
let description = [{
}];
let arguments = (ins PD_Tensor:$x);
let results = (outs PD_Tensor:$y);
let hasCanonicalizer = 1;
}
def PD_Relu6Op : PD_Op<"relu6", [NoSideEffect, SameOperandsAndResultType]> {
let summary = "Computes the Relu6 of a tensor";
let description = [{
}];
let arguments = (ins PD_Tensor:$x);
let results = (outs PD_Tensor:$y);
}
def PD_ElementwiseAdd : PD_Op<"elementwise_add", [NoSideEffect, Commutative, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
let summary = "ElementwiseAdd Op";
let description = [{
}];
let arguments = (ins PD_Tensor:$x, PD_Tensor:$y, DefaultValuedAttr<I32Attr, "-1">:$axis);
let results = (outs PD_Tensor:$out);
let hasCanonicalizer = 1;
let hasFolder = 1;
}
def PD_ElementwiseSub : PD_Op<"elementwise_sub", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
let summary = "ElementwiseSub Op";
let description = [{
}];
let arguments = (ins PD_Tensor:$x, PD_Tensor:$y, DefaultValuedAttr<I32Attr, "-1">:$axis);
let results = (outs PD_Tensor:$out);
}
def PD_ElementwiseMul : PD_Op<"elementwise_mul", [NoSideEffect, Commutative, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
let summary = "ElementwiseMul Op";
let description = [{
}];
let arguments = (ins PD_Tensor:$x, PD_Tensor:$y, DefaultValuedAttr<I32Attr, "-1">:$axis);
let results = (outs PD_Tensor:$out);
}
def PD_ElementwiseDiv : PD_Op<"elementwise_div", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
let summary = "ElementwiseDiv Op";
let description = [{
}];
let arguments = (ins PD_Tensor:$x, PD_Tensor:$y, DefaultValuedAttr<I32Attr, "-1">:$axis);
let results = (outs PD_Tensor:$out);
}
def PD_MatmulOp : PD_Op<"matmul", [NoSideEffect]> {
let summary = "Computes the matrix mulplication result of two tensors";
let description = [{
}];
let arguments = (ins PD_Tensor:$x, PD_Tensor:$y,
DefaultValuedAttr<BoolAttr, "false">:$transpose_x,
DefaultValuedAttr<BoolAttr, "false">:$transpose_y,
DefaultValuedAttr<F32Attr, "1.0">:$alpha);
let results = (outs PD_Tensor:$out);
//let hasCanonicalizer = 1;
}
def PD_MulOp : PD_Op<"mul", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
let summary = "paddle mul op";
let description = [{}];
let arguments = (ins PD_Tensor:$x, PD_Tensor:$y);
let results = (outs PD_Tensor:$out);
//let hasCanonicalizer = 1;
}
def PD_Conv2dOp : PD_Op<"conv2d", [NoSideEffect]> {
let summary = "paddle conv2d operation";
let description = [{
}];
let arguments = (ins PD_Tensor:$Input, PD_Tensor:$Filter, PD_Tensor:$Bias);
let results = (outs PD_Tensor:$Output);
//let hasCanonicalizer = 1;
}
def PD_BatchNormOp : PD_Op<"batch_norm", [NoSideEffect]> {
let summary = "paddle batch_norm operation";
let description = [{
}];
let arguments = (ins PD_Tensor:$X, PD_Tensor:$Scale, PD_Tensor:$Bias,
PD_Tensor:$Mean, PD_Tensor:$Variance,
DefaultValuedAttr<F32Attr, "1e-05">:$epsilon);
let results = (outs PD_Tensor:$Y);
let hasCanonicalizer = 1;
}
def PD_FusedFC : PD_Op<"fc", [NoSideEffect]> {
let summary = "Computes the Fully Connected result of two tensors";
let description = [{
}];
let arguments = (ins PD_Tensor:$input, PD_Tensor:$w, PD_Tensor:$bias, DefaultValuedAttr<I32Attr, "1">:$in_num_col_dims);
let results = (outs PD_Tensor:$out);
}
def PD_FusedRepeatedFCRelu : PD_Op<"fusion_repeated_fc_relu", [SameVariadicOperandSize, NoSideEffect]> {
let summary = "";
let description = [{ }];
let arguments = (ins PD_Tensor:$input, Variadic<PD_Tensor>:$w, Variadic<PD_Tensor>:$bias);
let results = (outs PD_Tensor:$out);
let hasCanonicalizer = 1;
}
#endif // PD_OPS
@@ -4,7 +4,7 @@
include "paddle/infrt/dialect/infrt_base.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "paddle/infrt/dialect/pd_ops.td"
/*
//===----------------------------------------------------------------------===//
// This is to fuse the composition: 'Matmul o ElementwiseAdd' into 'PD_FusedFC'.
//
@@ -89,5 +89,5 @@ def FuseBatchNormWithConvPattern: Pat<
$coefficientW),
(INFRT_createI32Attr<"1">)))
>;
*/
#endif // INFRT_REWRITE
@@ -18,7 +18,7 @@
# Utils
#=================================================
set -ex
set -e
if [ -z ${BRANCH} ]; then
BRANCH="develop"
@@ -27,6 +27,22 @@ fi
EXIT_CODE=0;
tmp_dir=`mktemp -d`
function update_pd_ops() {
PADDLE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../../" && pwd )"
# compile and install paddle
rm -rf ${PADDLE_ROOT}/build && mkdir -p ${PADDLE_ROOT}/build
cd ${PADDLE_ROOT}/build
cmake .. -DWITH_PYTHON=ON -DWITH_GPU=OFF -DPYTHON_EXECUTABLE=`which python3`
make -j8
cd ${PADDLE_ROOT}/build
cd python/dist/
python3 -m pip uninstall -y paddlepaddle
python3 -m pip install *.whl
# update pd_ops.td
cd ${PADDLE_ROOT}/tools/infrt/
python3 generate_pd_op_dialect_from_paddle_op_maker.py
}
function init() {
RED='\033[0;31m'
BLUE='\033[0;34m'
@@ -62,7 +78,11 @@ function infrt_gen_and_build() {
fi
startTime_s=`date +%s`
set +e
mkdir -p ${PADDLE_ROOT}/build
# step1. reinstall paddle and generate pd_ops.td
update_pd_ops
# step2. compile infrt
cd ${PADDLE_ROOT}/build
rm -f infrt_summary.txt
cmake .. -DWITH_MKL=OFF -DWITH_GPU=OFF -DWITH_CRYPTO=OFF -DCMAKE_BUILD_TYPE=Release -DWITH_INFRT=ON -DWITH_PYTHON=OFF -DWITH_TESTING=${WITH_TESTING:-ON}; build_error=$?
@@ -104,9 +124,19 @@ EOF
}
function main() {
local CMD=$1
local parallel_number=$2
if [ -z "$1" ]; then
echo "Usage:"
echo " (1)bash infrt_build.sh build_and_test"
echo " (2)bash infrt_build.sh build_only"
echo " (3)bash infrt_build.sh test_only"
echo " optional command: --update_pd_ops : pd_ops.td will be updated according to paddle's code."
exit 0
fi
init
case $CMD in
build_and_test)
infrt_gen_and_build ${parallel_number}
@@ -122,14 +152,15 @@ function main() {
print_usage
exit 1
;;
esac
set +x
if [[ -f ${PADDLE_ROOT}/build/infrt_summary.txt ]];then
echo "=====================build summary======================"
cat ${PADDLE_ROOT}/build/infrt_summary.txt
echo "========================================================"
fi
echo "paddle_build script finished as expected!"
}
main $@
......
def PD_FeedOp : PD_Op<"feed"> {
let summary = "Feed Op";
let description = [{
Feed a tensor into the model.
}];
let arguments = (ins StrAttr:$name);
let results = (outs PD_Tensor:$out);
let assemblyFormat = [{
`(` `)` attr-dict `:` type($out)
}];
}
def PD_FetchOp : PD_Op<"fetch", [Terminator]> {
let summary = "fetch Op";
let description = [{
Fetch an output tensor from the subgraph.
}];
let arguments = (ins PD_Tensor:$inputs, StrAttr:$name);
}
def PD_ReturnOp : PD_Op<"return", [Terminator]> {
let summary = "return Op";
let description = [{
Return tensors from the enclosing graph op.
}];
let arguments = (ins Variadic<PD_Tensor>:$inputs);
}
def PD_GraphOp : PD_Op<"graph", [SingleBlockImplicitTerminator<"ReturnOp">]> {
let summary = "paddle graph Op";
let description = [{
Describe a paddle graph or subgraph.
}];
let regions = (region SizedRegion<1>:$body);
let arguments = (ins Variadic<PD_Tensor>:$inputs);
let results = (outs Variadic<PD_Tensor>:$outputs);
}
def PD_ConstantOp : PD_Op<"constant", [NoSideEffect, ConstantLike, DeclareOpInterfaceMethods<InferTypeOpInterface>, AllTypesMatch<["value", "output"]>]> {
let summary = "constant Op";
let description = [{}];
let arguments = (ins ElementsAttr:$value);
let results = (outs PD_Tensor:$output);
let hasFolder = 1;
let builders = [
OpBuilder<(ins "Attribute":$value)>,
];
}
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid.framework as framework
from paddle.fluid import core
from paddle import compat as cpt
# collect original ops: ops that have both an inference and a grad definition
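# e.g. "relu" counts as an original op because "relu_grad" is registered;
# "relu_grad" itself and "relu_grad_grad" are filtered out.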
def get_original_ops():
all_ops, _, _ = core.op_supported_infos('CPU', core.VarDesc.VarType.FP16)
grad_ops = []
original_ops = []
for op in all_ops:
if op.endswith("_grad"):
if op.endswith("_grad_grad"):
continue
grad_ops.append(op)
for op in all_ops:
if str(op + "_grad") in grad_ops:
original_ops.append(op)
print("Grad ops num: " + str(len(grad_ops)))
print("Responded original ops num: " + str(len(original_ops)))
return original_ops
# functions for parsing the Paddle op proto
INPUTS = "Inputs"
OUTPUTS = "Outputs"
ATTRS = "Attrs"
COMMENT = "Comment"
DUPLICABLE = "duplicable"
INTERMEDIATE = "intermediate"
DISPENSABLE = "dispensable"
TYPE = "type"
GENERATED = "generated"
DEFAULT_VALUE = "default_value"
EXTRA = "extra"
QUANT = "quant"
def get_attr_default_value(op_name):
return core.get_op_attrs_default_value(cpt.to_bytes(op_name))
def get_vars_info(op_vars_proto):
vars_info = {}
for var_proto in op_vars_proto:
name = str(var_proto.name)
vars_info[name] = {}
vars_info[name][DUPLICABLE] = var_proto.duplicable
vars_info[name][DISPENSABLE] = var_proto.dispensable
vars_info[name][INTERMEDIATE] = var_proto.intermediate
vars_info[name][EXTRA] = var_proto.extra
vars_info[name][QUANT] = var_proto.quant
return vars_info
def get_attrs_info(op_proto, op_attrs_proto):
attrs_info = {}
attrs_default_values = get_attr_default_value(op_proto.type)
for attr_proto in op_attrs_proto:
attr_name = str(attr_proto.name)
attrs_info[attr_name] = {}
attrs_info[attr_name][TYPE] = attr_proto.type
attrs_info[attr_name][GENERATED] = attr_proto.generated
attrs_info[attr_name][DEFAULT_VALUE] = attrs_default_values[
attr_name] if attr_name in attrs_default_values else None
attrs_info[attr_name][EXTRA] = attr_proto.extra
attrs_info[attr_name][QUANT] = attr_proto.quant
return attrs_info
def get_op_desc(op_proto):
op_info = {}
op_info[INPUTS] = get_vars_info(op_proto.inputs)
op_info[OUTPUTS] = get_vars_info(op_proto.outputs)
op_info[ATTRS] = get_attrs_info(op_proto, op_proto.attrs)
op_info[COMMENT] = op_proto.comment
return op_info
def get_all_ops_desc():
all_op_protos_dict = {}
all_op_protos = framework.get_all_op_protos()
for op_proto in all_op_protos:
op_type = str(op_proto.type)
all_op_protos_dict[op_type] = get_op_desc(op_proto)
return all_op_protos_dict
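# The resulting dict maps each op type to its parsed proto; roughly
# (a sketch, op name illustrative):
# all_op_protos_dict["relu"] = {
#     "Inputs": {"X": {"duplicable": False, "dispensable": False, ...}},
#     "Outputs": {"Out": {...}},
#     "Attrs": {"use_mkldnn": {"type": ..., "default_value": ...}},
#     "Comment": "...",
# }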
# function to generate the Paddle op dialect file
def convert_op_proto_into_mlir(op_descs):
dst_dialect_file = "../../paddle/infrt/dialect/pd_ops.td"
custom_dialect_file = "custom_pdop.td"
# 1. Head files
comment_ = "/*===- TableGen'source file -----------------------------------------------===*\\\n\
|* *|\n\
|* Op Definitions *|\n\
|* *|\n\
|* Automatically generated file, do not edit! *|\n\
|* Generated by tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py *|\n\
|* *|\n\
\*===----------------------------------------------------------------------===*/\n"
start_ = comment_ + "#ifndef PD_OPS\n#define PD_OPS\ninclude \"mlir/Interfaces/InferTypeOpInterface.td\"\ninclude \"mlir/Interfaces/LoopLikeInterface.td\"\ninclude \"mlir/IR/OpBase.td\"\ninclude \"paddle/infrt/dialect/pd_op_base.td\"\n\n"
with open(dst_dialect_file, 'w') as ops_mlir_file:
ops_mlir_file.write(start_)
# 2. Op dialect
# skip list (ops whose dialect cannot be generated automatically are recorded here)
skipped_op_list = [
"cos_sim", "fused_embedding_seq_pool", "cosh", "kron", "recurrent",
"while", "conditional_block", "set_value", "run_program"
]
skipped_attr_list = [
"trainable_statistics", "use_global_stats", "is_test", "use_mkldnn",
"use_cudnn"
]
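# the attrs above are training- or backend-specific switches that are not
# modeled in the generated dialect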
original_ops_ = get_original_ops()
automatically_generated_op_dialect = []
for op_type, op_proto in op_descs.items():
if (op_type in skipped_op_list) or (op_type not in original_ops_):
continue
automatically_generated_op_dialect.append(op_type)
# 2.1 OpDef
HEAD = "def PD_" + op_type.capitalize(
) + "Op : PD_Op<\"" + op_type + "\", [NoSideEffect]> {\n"
SUMMARY = " let summary = \"" + op_type + " op\";\n"
# 2.2 Description
DESCRIPTION = " let description = [{\n"
contents = (op_proto[COMMENT]).split("\n")
for line_ in contents:
DESCRIPTION = DESCRIPTION + " " + line_ + "\n"
DESCRIPTION += " }];\n"
# 2.3 arguments info
ARGUMENTS = ""
if (len(op_proto[INPUTS]) > 0 or len(op_proto[ATTRS]) > 0):
ARGUMENTS = " let arguments = (ins "
# 2.3.1 inputs
for input_ in op_proto[INPUTS]:
if op_proto[INPUTS][input_][EXTRA] != True and op_proto[INPUTS][
input_][INTERMEDIATE] != True:
if not op_proto[INPUTS][input_][DUPLICABLE]:  # duplicable is a bool; comparing it to the string "true" made this branch unconditional
ARGUMENTS = ARGUMENTS + " PD_Tensor:$" + input_ + ","
else:
ARGUMENTS = ARGUMENTS + " PD_Tensor_Array:$" + input_ + ","
# unsupported: BLOCK = 8; BLOCKS = 10;
attr_mlir_converter = {
0: 'SI32Attr',
1: 'F32Attr',
2: 'StrAttr',
3: 'I32ArrayAttr',
4: 'F32ArrayAttr',
5: 'StrArrayAttr',
6: 'BoolAttr',
7: 'BoolArrayAttr',
9: 'SI64Attr',
11: 'I64ArrayAttr'
}
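# For example (a sketch; the attr name is hypothetical): a STRING attr (type 2)
# named data_format with default 'NCHW' becomes
#     DefaultValuedAttr<StrAttr, "\"NCHW\"">:$data_format
# after the quoting rules applied below.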
# 2.3.2 attributes
for attr in op_proto[ATTRS]:
if (op_proto[ATTRS][attr][EXTRA] == True) or (
attr in skipped_attr_list):
continue
if op_proto[ATTRS][attr][DEFAULT_VALUE] != None:
if op_proto[ATTRS][attr][TYPE] in attr_mlir_converter:
default_value = str(op_proto[ATTRS][attr][
DEFAULT_VALUE])
if (attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] in
[
'I32ArrayAttr', 'F32ArrayAttr', 'StrArrayAttr',
'BoolArrayAttr', 'I64ArrayAttr'
]):
default_value = default_value.replace(
'[', '{').replace(']', '}')
if (attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] in
['BoolAttr', 'BoolArrayAttr']):
default_value = default_value.lower()
elif (attr_mlir_converter[op_proto[ATTRS][attr][TYPE]]
in ['StrAttr', 'StrArrayAttr']):
default_value = default_value.replace('\'', '\\\"')
if attr_mlir_converter[op_proto[ATTRS][attr][
TYPE]] == "StrAttr":
default_value = '\\\"' + default_value + '\\\"'
attr_list = " DefaultValuedAttr<" + attr_mlir_converter[
op_proto[ATTRS][attr]
[TYPE]] + ", \"" + default_value + "\">:$" + attr + ","
ARGUMENTS += attr_list
else:
print("Error:" + op_type + ":" + attr + ":" + str(
op_proto[ATTRS][attr][TYPE]))
else:
if op_proto[ATTRS][attr][TYPE] in attr_mlir_converter:
attr_type_ = attr_mlir_converter[op_proto[ATTRS][attr][
TYPE]]
if (attr_type_ in [
'I32ArrayAttr', 'F32ArrayAttr', 'StrArrayAttr',
'BoolArrayAttr', 'I64ArrayAttr'
]):
attr_list = attr_type_ + ":$" + attr + ","
ARGUMENTS += attr_list
else:
print(" ouch Error:" + op_type + ":" + attr + ":" + str(
op_proto[ATTRS][attr][TYPE]))
ARGUMENTS = ARGUMENTS[:-1] + ");\n"
# 2.4 results info
RESULTS = ""
if (len(op_proto[OUTPUTS]) > 0):
RESULTS = "\n let results = (outs "
for output_ in op_proto[OUTPUTS]:
if op_proto[OUTPUTS][output_][EXTRA] != True and op_proto[
OUTPUTS][output_][INTERMEDIATE] != True:
if not op_proto[OUTPUTS][output_][DUPLICABLE]:  # same bool-vs-string fix as for inputs
RESULTS = RESULTS + "PD_Tensor:$" + output_ + ","
else:
RESULTS = RESULTS + "PD_Tensor_Array:$" + output_ + ","
print(HEAD + " PD_Tensor_Array:$" + output_ + ",")
RESULTS = RESULTS[:-1] + ");\n"
with open(dst_dialect_file, 'a') as ops_mlir_file:
ops_mlir_file.write(HEAD)
ops_mlir_file.write(SUMMARY)
ops_mlir_file.write(DESCRIPTION)
ops_mlir_file.write(ARGUMENTS)
ops_mlir_file.write(RESULTS)
ops_mlir_file.write("}\n")
print("Skipped ops num: " + str(len(skipped_op_list)))
print("Automatically generated op dialects num: " + str(
len(automatically_generated_op_dialect)))
# 3. custom op dialect and end of file
with open(dst_dialect_file, 'a') as ops_mlir_file:
with open(custom_dialect_file, 'r') as custom_ops_file:
custom_ops = custom_ops_file.readlines()
ops_mlir_file.writelines(custom_ops)
end_ = "\n#endif // PD_OPS"
ops_mlir_file.write(end_)
if __name__ == "__main__":
all_op_protos_dict = get_all_ops_desc()
convert_op_proto_into_mlir(all_op_protos_dict)
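For reference, a sketch of the op definition this script emits for a hypothetical op "my_relu" with one input X, one output Out, and no attributes (names are illustrative; real output follows the HEAD/SUMMARY/ARGUMENTS/RESULTS templates above):
def PD_My_reluOp : PD_Op<"my_relu", [NoSideEffect]> {
let summary = "my_relu op";
let description = [{
...
}];
let arguments = (ins PD_Tensor:$X);
let results = (outs PD_Tensor:$Out);
}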