未验证 提交 da5a2584 编写于 作者: J jjyaoao 提交者: GitHub

Clear the infrt-related code (#52273)

* Clear the infrt-related code

* remove tools/infrt
上级 fb5910f4
#!/usr/bin/env bash
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=================================================
# Utils
#=================================================
set -e

# Default to the develop branch when the caller has not exported one.
# BUGFIX: ${BRANCH} was unquoted inside [ -z ... ]; with BRANCH unset the
# test degenerated to `[ -z ]`, which is true only by accident of `test`'s
# one-argument rule and breaks if BRANCH ever contains whitespace.
if [ -z "${BRANCH}" ]; then
    BRANCH="develop"
fi

EXIT_CODE=0
# Scratch directory for per-run temporary files; removed at script exit.
tmp_dir=$(mktemp -d)
#######################################
# Rebuild paddle from scratch, reinstall the wheel, and regenerate the
# infrt description files (kernels.json, kernel_signature.json, pd_ops.td
# and the phi kernel dialect .td files).
# Globals:   PADDLE_ROOT (written)
# Outputs:   JSON/TableGen files under tools/infrt/
#######################################
function update_pd_ops() {
    PADDLE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../../" && pwd )"
    # compile and install paddle (quote every expansion; use $() and
    # `command -v` instead of backticks and `which`)
    rm -rf "${PADDLE_ROOT}/build" && mkdir -p "${PADDLE_ROOT}/build"
    cd "${PADDLE_ROOT}/build"
    cmake .. -DWITH_PYTHON=ON -DWITH_MKL=OFF -DWITH_GPU=OFF -DPYTHON_EXECUTABLE="$(command -v python3)" -DWITH_XBYAK=OFF -DWITH_NCCL=OFF -DWITH_RCCL=OFF -DWITH_CRYPTO=OFF
    make -j8 paddle_python print_phi_kernels kernel_signature_generator
    cd "${PADDLE_ROOT}/build"
    # Dump the kernel-type and kernel-signature metadata consumed by the
    # dialect generators invoked below.
    ./paddle/phi/tools/print_phi_kernels > ../tools/infrt/kernels.json
    ./paddle/fluid/pybind/kernel_signature_generator > ../tools/infrt/kernel_signature.json
    cd python/dist/
    python3 -m pip uninstall -y paddlepaddle
    python3 -m pip install *whl
    # update pd_ops.td
    cd "${PADDLE_ROOT}/tools/infrt/"
    python3 generate_pd_op_dialect_from_paddle_op_maker.py
    python3 generate_phi_kernel_dialect.py
}
#######################################
# Prepare global state shared by the build/test stages: terminal colour
# escape codes, the repository root, and debugging-friendly defaults.
# Globals:   PADDLE_ROOT (written+exported), SCRIPT_NAME, ENABLE_MAKE_CLEAN,
#            FLAGS_call_stack_level (exported)
#######################################
function init() {
    RED='\033[0;31m'
    BLUE='\033[0;34m'
    BOLD='\033[1m'
    NONE='\033[0m'

    # Repository root is two levels above this script's directory.
    PADDLE_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../" && pwd)"
    export PADDLE_ROOT

    # Fall back to $0 when the caller did not provide a script name.
    : "${SCRIPT_NAME:=$0}"

    ENABLE_MAKE_CLEAN=${ENABLE_MAKE_CLEAN:-ON}

    # NOTE(chenweihang): For easy debugging, CI displays the C++ error
    # stacktrace by default
    export FLAGS_call_stack_level=2
}
#######################################
# Configure and build the infrt targets.
# Arguments: $1 - optional make parallelism (forwarded to make -j)
# Globals:   PADDLE_ROOT, WITH_TESTING (read)
# Exits 7 when either the cmake configure step or the build step fails.
#######################################
function infrt_gen_and_build() {
    if [ "$1" != "" ]; then
        parallel_number=$1
    fi
    startTime_s=$(date +%s)
    set +e
    mkdir -p "${PADDLE_ROOT}/build"
    # step1. reinstall paddle and generate pd_ops.td
    update_pd_ops
    # step2. compile infrt
    cd "${PADDLE_ROOT}/build"
    rm -f infrt_summary.txt
    # BUGFIX: was "-DWITH_TESTING==${WITH_TESTING:-ON}" (double '='), which
    # defined the cmake cache variable as the literal string "=ON" instead
    # of "ON".
    cmake .. -DWITH_MKL=OFF -DWITH_GPU=OFF -DWITH_CRYPTO=OFF -DCMAKE_BUILD_TYPE=Release -DWITH_INFRT=ON -DWITH_PYTHON=OFF -DWITH_TESTING="${WITH_TESTING:-ON}"; build_error=$?
    if [ "$build_error" != 0 ];then
        exit 7
    fi
    make -j ${parallel_number} infrt infrtopt infrtexec test_infrt_exec trt-exec phi-exec infrt_lib_dist paddle-mlir-convert; build_error=$?
    if [ "$build_error" != 0 ];then
        exit 7
    fi
    endTime_s=$(date +%s)
    # A caller may pre-record the first build's start time to report the
    # cumulative duration instead.
    [ -n "$startTime_firstBuild" ] && startTime_s=$startTime_firstBuild
    echo "Build Time: $(( endTime_s - startTime_s ))s"
    echo "ipipe_log_param_Infrt_Build_Time: $(( endTime_s - startTime_s ))s" >> "${PADDLE_ROOT}/build/infrt_summary.txt"
}
#######################################
# Reinstall the freshly built paddle wheel and generate the model files
# (multi_fc, abs, resnet50, efficientnet-b4, linear) consumed by the infrt
# unit tests.
# Globals:   PADDLE_ROOT (read)
# Outputs:   model files under ${PADDLE_ROOT}/build
#######################################
function create_fake_models() {
cd ${PADDLE_ROOT}/build
cd python/dist/
# create multi_fc model, this will generate "multi_fc_model"
python3 -m pip uninstall -y paddlepaddle
python3 -m pip install *whl
# generate test model
cd ${PADDLE_ROOT}
mkdir -p ${PADDLE_ROOT}/build/models
python3 paddle/infrt/tests/models/abs_model.py ${PADDLE_ROOT}/build/paddle/infrt/tests/abs
python3 paddle/infrt/tests/models/resnet50_model.py ${PADDLE_ROOT}/build/models/resnet50/model
python3 paddle/infrt/tests/models/efficientnet-b4/model.py ${PADDLE_ROOT}/build/models/efficientnet-b4/model
cd ${PADDLE_ROOT}/build
python3 ${PADDLE_ROOT}/tools/infrt/fake_models/multi_fc.py
python3 ${PADDLE_ROOT}/paddle/infrt/tests/models/linear.py
}
#######################################
# Run the infrt unit tests (when WITH_TESTING=ON) and exit 8 when any
# ctest case fails.
# Globals:   PADDLE_ROOT, WITH_TESTING, tmp_dir (read)
#######################################
function test_infrt() {
    # generate the models consumed by the tests
    create_fake_models
    # install llvm-lit toolkit
    python3 -m pip install lit
    mkdir -p "${PADDLE_ROOT}/build"
    cd "${PADDLE_ROOT}/build"
    if [ "${WITH_TESTING:-ON}" == "ON" ] ; then
    cat <<EOF
========================================
Running infrt unit tests ...
========================================
EOF
    tmpfile_rand=$(date +%s%N)
    tmpfile=$tmp_dir/$tmpfile_rand
    ut_startTime_s=$(date +%s)
    # BUGFIX: the -R pattern must be quoted. Unquoted, `test_infrt*` is a
    # shell glob and in the build directory it can expand to artifact names
    # such as test_infrt_exec before ctest ever sees it.
    ctest --output-on-failure -R 'test_infrt*' | tee "$tmpfile"
    ut_endTime_s=$(date +%s)
    echo "infrt testCase Time: $(( ut_endTime_s - ut_startTime_s ))s"
    exit_code=0
    # grep returns 0 (found) when ctest reported failed cases, so exit_code
    # stays 0 and we abort with code 8 below.
    grep -q 'The following tests FAILED:' "$tmpfile" || exit_code=$?
    if [ $exit_code -eq 0 ]; then
        exit 8
    fi
    fi
}
#######################################
# Entry point. $1 selects the stage to run; $2 optionally sets build
# parallelism. Prints a build summary when one was produced.
#######################################
function main() {
    local CMD=$1
    local parallel_number=$2
    if [ -z "$1" ]; then
        echo "Usage:"
        echo " (1)bash infrt_build.sh build_and_test"
        echo " (2)bash infrt_build.sh build_only"
        echo " (3)bash infrt_build.sh test_only"
        echo " optional command: --update_pd_ops : pd_ops.td will be updated according to paddle's code."
        exit 0
    fi
    init
    case $CMD in
        build_and_test)
            infrt_gen_and_build ${parallel_number}
            test_infrt
            ;;
        build_only)
            infrt_gen_and_build ${parallel_number}
            ;;
        test_only)
            test_infrt
            ;;
        *)
            # BUGFIX: the original called print_usage, which is not defined
            # anywhere in this script, so an unknown sub-command aborted with
            # "command not found" instead of printing help.
            echo "Usage:"
            echo " (1)bash infrt_build.sh build_and_test"
            echo " (2)bash infrt_build.sh build_only"
            echo " (3)bash infrt_build.sh test_only"
            exit 1
            ;;
    esac
    set +x
    if [[ -f ${PADDLE_ROOT}/build/infrt_summary.txt ]];then
        echo "=====================build summary======================"
        cat ${PADDLE_ROOT}/build/infrt_summary.txt
        echo "========================================================"
    fi
    echo "paddle_build script finished as expected!"
}
# BUGFIX: $@ must be quoted ("$@") so arguments containing whitespace are
# forwarded to main() as separate, intact words.
main "$@"
# Remove the scratch directory created at startup.
rm -rf "$tmp_dir"
此差异已折叠。
......@@ -232,14 +232,6 @@ if [ ${HAS_PADDLE_GET} ] && [ "${GIT_PR_ID}" != "" ]; then
check_approval 1 6836917 47554610 22561442
fi
# infrt needs to temporarily use LOG(FATAL) during the debugging period, and will replace it with standard error format in the future.
NO_INFRT_FILES=`git diff --name-only upstream/develop | grep -v "tools/\|paddle/infrt/" || true`
HAS_LOG_FATAL=`git diff -U0 upstream/$BRANCH $NO_INFRT_FILES |grep "^+" |grep -o -m 1 "LOG(FATAL)" || true`
if [ ${NO_INFRT_FILES} ] && [ ${HAS_LOG_FATAL} ] && [ "${GIT_PR_ID}" != "" ]; then
echo_line="LOG(FATAL) is not recommended, because it will throw exception without standard stack information, so please use PADDLE_THROW macro here. If you have to use LOG(FATAL) here, please request chenwhql (Recommend), luotao1 or lanxianghit review and approve.\n"
check_approval 1 6836917 47554610 22561442
fi
FILTER=`git diff --name-only upstream/develop | grep -v "tools/"`
HAS_LEGACY_KERNEL_REGISTRATION=`git diff -U0 upstream/$BRANCH $FILTER | grep '^\+' | grep -oE -m 1 "REGISTER_OP[A-Z_]{1,9}KERNEL[_FUNCTOR|_WITH_CUSTOM_TYPE|_EX]*" || true`
if [ ${HAS_LEGACY_KERNEL_REGISTRATION} ] && [ "${GIT_PR_ID}" != "" ]; then
......
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A fake model with multiple FC layers to test CINN on a more complex model.
"""
import paddle
from paddle import fluid
size = 2
num_layers = 4
paddle.enable_static()
a = paddle.static.data(name="A", shape=[-1, size], dtype='float32')
label = paddle.static.data(name="label", shape=[-1, size], dtype='float32')
fc_out = paddle.static.nn.fc(
x=a,
size=size,
activation="relu",
bias_attr=fluid.ParamAttr(name="fc_bias"),
num_flatten_dims=1,
)
for i in range(num_layers - 1):
fc_out = paddle.static.nn.fc(
x=fc_out,
size=size,
activation="relu",
bias_attr=fluid.ParamAttr(name="fc_bias"),
num_flatten_dims=1,
)
cost = fluid.layers.square_error_cost(fc_out, label)
avg_cost = paddle.mean(cost)
optimizer = fluid.optimizer.SGD(learning_rate=0.001)
optimizer.minimize(avg_cost)
cpu = fluid.core.CPUPlace()
loss = exe = fluid.Executor(cpu)
exe.run(fluid.default_startup_program())
fluid.io.save_inference_model("./multi_fc_model", [a.name], [fc_out], exe)
fluid.io.save_inference_model(
"./multi_fc_model",
[a.name],
[fc_out],
exe,
None,
"fc.pdmodel",
"fc.pdiparams",
)
print('output name', fc_out.name)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid import core, framework
# collect original ops: op which has both inference and grad definition
def get_original_ops():
    """Return the forward op names that either have a registered gradient
    op or are explicitly whitelisted as necessary (e.g. ``scale``)."""
    all_ops, _, _ = core.op_supported_infos('CPU', core.VarDesc.VarType.FP16)
    # Gradient ops, excluding second-order (_grad_grad) registrations.
    grad_ops = [
        name
        for name in all_ops
        if name.endswith("_grad") and not name.endswith("_grad_grad")
    ]
    necessary_ops = ["scale"]
    grad_set = set(grad_ops)
    original_ops = [
        name
        for name in all_ops
        if (name + "_grad") in grad_set or name in necessary_ops
    ]
    print("Grad ops num: " + str(len(grad_ops)))
    print("Responded original ops num: " + str(len(original_ops)))
    return original_ops
# functions of parsing Paddle Proto
# Dictionary keys shared by the op-description structures built below.
INPUTS = "Inputs"
OUTPUTS = "Outputs"
ATTRS = "Attrs"
COMMENT = "Comment"
# Per-variable flags copied from the op maker proto.
DUPLICABLE = "duplicable"
INTERMEDIATE = "intermediate"
DISPENSABLE = "dispensable"
# Per-attribute fields.
TYPE = "type"
GENERATED = "generated"
DEFAULT_VALUE = "default_value"
# Flags marking members registered as extra / quant in the op maker.
EXTRA = "extra"
QUANT = "quant"
def get_attr_default_value(op_name):
    """Return the C++-registered attribute default values for ``op_name``.

    The pybind API expects the op name as bytes, hence the encode().
    """
    return core.get_op_attrs_default_value(op_name.encode())
def get_vars_info(op_vars_proto):
    """Map each variable name in ``op_vars_proto`` to its flag dictionary
    (duplicable / dispensable / intermediate / extra / quant)."""
    vars_info = {}
    for proto in op_vars_proto:
        vars_info[str(proto.name)] = {
            DUPLICABLE: proto.duplicable,
            DISPENSABLE: proto.dispensable,
            INTERMEDIATE: proto.intermediate,
            EXTRA: proto.extra,
            QUANT: proto.quant,
        }
    return vars_info
def get_attrs_info(op_proto, op_attrs_proto):
    """Map each attribute name to its type, generated flag, default value
    (None when the op registers no default) and extra/quant flags."""
    defaults = get_attr_default_value(op_proto.type)
    attrs_info = {}
    for attr_proto in op_attrs_proto:
        name = str(attr_proto.name)
        attrs_info[name] = {
            TYPE: attr_proto.type,
            GENERATED: attr_proto.generated,
            DEFAULT_VALUE: defaults[name] if name in defaults else None,
            EXTRA: attr_proto.extra,
            QUANT: attr_proto.quant,
        }
    return attrs_info
def get_op_desc(op_proto):
    """Assemble the full description dictionary for one op proto."""
    return {
        INPUTS: get_vars_info(op_proto.inputs),
        OUTPUTS: get_vars_info(op_proto.outputs),
        ATTRS: get_attrs_info(op_proto, op_proto.attrs),
        COMMENT: op_proto.comment,
    }
def get_all_ops_desc():
    """Build ``{op_type: description}`` for every registered op proto."""
    return {
        str(proto.type): get_op_desc(proto)
        for proto in framework.get_all_op_protos()
    }
def generate_all_ops_inputs_outputs_map(op_descs):
    """Write pd_ops_info.h: two C++ unordered_maps giving, for every
    original op, the index of each non-extra/non-intermediate input and
    output name."""
    # 1. Collect input and output name information of each Op
    original_ops_ = get_original_ops()
    ops_inputs_map = {}
    ops_outputs_map = {}
    for op_type, op_proto in op_descs.items():
        if op_type not in original_ops_:
            continue
        inputs = []
        outpus = []  # NOTE: original spelling ("outputs") kept as-is
        for input_ in op_proto[INPUTS]:
            if (
                not op_proto[INPUTS][input_][EXTRA]
                and not op_proto[INPUTS][input_][INTERMEDIATE]
            ):
                inputs.append(input_)
        for output_ in op_proto[OUTPUTS]:
            if (
                not op_proto[OUTPUTS][output_][EXTRA]
                and not op_proto[OUTPUTS][output_][INTERMEDIATE]
            ):
                outpus.append(output_)
        ops_inputs_map[op_type] = inputs
        ops_outputs_map[op_type] = outpus
    # 2. Generate Cpp style map str
    cpp_style_ops_inputs_map_str = ""
    start_ = (
        "#include <unordered_map>\n#include <vector>\n#include <string>\n"
        + "const std::unordered_map<std::string, std::unordered_map<std::string, uint8_t>> pd_dialect_inputs_info_map_ = {\n"
    )
    ops_inputs_str = ""
    for ele in ops_inputs_map.items():
        op_name = ele[0]
        op_inputs = ele[1]
        # Render {"name", idx}, pairs; the trailing ", " is trimmed below.
        op_inputs_str = "{"
        input_idx = 0
        for op_input in op_inputs:
            op_input_str = (
                '{left_brace}"{op_input}", {input_idx}{right_brace}, '.format(
                    left_brace="{",
                    op_input=op_input,
                    input_idx=input_idx,
                    right_brace="}",
                )
            )
            input_idx = input_idx + 1
            op_inputs_str = op_inputs_str + op_input_str
        op_inputs_str = op_inputs_str[:-2] + "}"
        pair = '{left_brace}"{op_name}", {op_inputs}{right_brace},\n'.format(
            left_brace="{",
            op_name=op_name,
            op_inputs=op_inputs_str,
            right_brace="}",
        )
        ops_inputs_str = ops_inputs_str + " " + pair
    # Drop the final ",\n" before closing the initializer list.
    ops_inputs_str = ops_inputs_str[:-2]
    cpp_style_ops_inputs_map_str = start_ + ops_inputs_str + "\n};"
    # Same structure for the outputs map.
    cpp_style_ops_outputs_map_str = ""
    start_ = "const std::unordered_map<std::string, std::unordered_map<std::string, uint8_t>> pd_dialect_outputs_info_map_ = {\n"
    ops_outputs_str = ""
    for ele in ops_outputs_map.items():
        op_name = ele[0]
        op_outputs = ele[1]
        op_outputs_str = "{"
        output_idx = 0
        for op_output in op_outputs:
            op_output_str = (
                '{left_brace}"{op_output}", {output_idx}{right_brace}, '.format(
                    left_brace="{",
                    op_output=op_output,
                    output_idx=output_idx,
                    right_brace="}",
                )
            )
            output_idx = output_idx + 1
            op_outputs_str = op_outputs_str + op_output_str
        op_outputs_str = op_outputs_str[:-2] + "}"
        pair = '{left_brace}"{op_name}", {op_outputs}{right_brace},\n'.format(
            left_brace="{",
            op_name=op_name,
            op_outputs=op_outputs_str,
            right_brace="}",
        )
        ops_outputs_str = ops_outputs_str + " " + pair
        ops_outputs_str = ops_outputs_str[:-2]
    cpp_style_ops_outputs_map_str = start_ + ops_outputs_str + "\n};"
    # 3. Write to header file
    dst_head_file = "../../paddle/infrt/dialect/pd/common/pd_ops_info.h"
    with open(dst_head_file, 'w') as ops_inputs_outputs_head_file:
        ops_inputs_outputs_head_file.write(cpp_style_ops_inputs_map_str)
        ops_inputs_outputs_head_file.write("\n\n")
        ops_inputs_outputs_head_file.write(cpp_style_ops_outputs_map_str)
def get_constraint(op_type, op_proto):
    """Return the MLIR trait list for the generated op definition.

    ``AttrSizedOperandSegments`` is appended when the op has more than one
    optional (dispensable, non-extra, non-intermediate) input.
    """
    # 2.3.1 inputs
    constraint = "NoSideEffect"
    inputs = op_proto[INPUTS]
    optional_count = sum(
        1
        for name in inputs
        if not inputs[name][EXTRA]
        and not inputs[name][INTERMEDIATE]
        and inputs[name][DISPENSABLE]
    )
    if optional_count > 1:
        constraint += ", AttrSizedOperandSegments"
    return constraint
# function to generate paddle op dialect file
def convert_op_proto_into_mlir(op_descs):
    """Write pd_ops.td: a TableGen op definition for every original op that
    is not on the skip list, built from the description dictionaries."""
    dst_dialect_file = "../../paddle/infrt/dialect/pd/ir/pd_ops.td"
    # 1. Head files
    comment_ = "/*===- TableGen'source file -----------------------------------------------===*\\\n\
|* *|\n\
|* Op Definitions *|\n\
|* *|\n\
|* Automatically generated file, do not edit! *|\n\
|* Generated by tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py *|\n\
|* *|\n\
\\*===----------------------------------------------------------------------===*/\n"
    lines = [
        "#ifndef PD_OPS",
        "#define PD_OPS",
        "include \"mlir/Interfaces/InferTypeOpInterface.td\"",
        "include \"mlir/Interfaces/LoopLikeInterface.td\"",
        "include \"mlir/IR/OpBase.td\"",
        "include \"paddle/infrt/dialect/pd/ir/pd_op_base.td\"",
        "",
    ]
    start_ = comment_ + "\n".join(lines)
    with open(dst_dialect_file, 'w') as ops_mlir_file:
        ops_mlir_file.write(start_)
    # 2. Op dialect
    # skip list ( ops whose dialect can not be generated automatically will be recorded here)
    skipped_op_list = [
        "cos_sim",
        "fused_embedding_seq_pool",
        "cosh",
        "kron",
        "recurrent",
        "while",
        "conditional_block",
        "set_value",
        "run_program",
    ]
    # Attributes never emitted into the dialect.
    skipped_attr_list = [
        "trainable_statistics",
        "use_global_stats",
        "is_test",
        "use_quantizer",
    ]
    original_ops_ = get_original_ops()
    automatically_generated_op_dialect = []
    for op_type, op_proto in op_descs.items():
        if (op_type in skipped_op_list) or (op_type not in original_ops_):
            continue
        automatically_generated_op_dialect.append(op_type)
        constraint_ = get_constraint(op_type, op_proto)
        # 2.1 OpDef
        HEAD = 'def PD_{op_type_capitalize}Op : PD_Op<"{op_type}", [{constraint}]> {left_brace}\n'.format(
            op_type_capitalize=op_type.capitalize(),
            constraint=constraint_,
            op_type=op_type,
            left_brace="{",
        )
        SUMMARY = ' let summary = "{} op";\n'.format(op_type)
        # 2.2 Description: reuse the op maker's comment verbatim.
        contents = ""
        origin_contents = (op_proto[COMMENT]).split("\n")
        for line_ in origin_contents:
            contents = contents + " {}\n".format(line_)
        DESCRIPTION = " let description = [{left_brace}\n{description} {right_brace}];\n".format(
            left_brace="{", description=contents, right_brace="}"
        )
        # 2.3 arguments info
        ARGUMENTS = ""
        if len(op_proto[INPUTS]) > 0 or len(op_proto[ATTRS]) > 0:
            ARGUMENTS = " let arguments = (ins "
            # 2.3.1 inputs: dispensable inputs become Optional<...> operands.
            for input_ in op_proto[INPUTS]:
                if (
                    not op_proto[INPUTS][input_][EXTRA]
                    and not op_proto[INPUTS][input_][INTERMEDIATE]
                ):
                    if not op_proto[INPUTS][input_][DISPENSABLE]:
                        if not op_proto[INPUTS][input_][DUPLICABLE]:
                            ARGUMENTS = (
                                ARGUMENTS + " PD_Tensor:$" + input_ + ","
                            )
                        else:
                            ARGUMENTS = (
                                ARGUMENTS + " PD_Tensor_Array:$" + input_ + ","
                            )
                    else:
                        if not op_proto[INPUTS][input_][DUPLICABLE]:
                            ARGUMENTS = (
                                ARGUMENTS
                                + " Optional<PD_Tensor>:$"
                                + input_
                                + ","
                            )
                        else:
                            ARGUMENTS = (
                                ARGUMENTS
                                + " Optional<PD_Tensor_Array>:$"
                                + input_
                                + ","
                            )
            # Maps proto attr type enum -> MLIR attribute kind.
            # unsupported: BLOCK = 8; BLOCKS = 10;
            attr_mlir_converter = {
                0: 'SI32Attr',
                1: 'F32Attr',
                2: 'StrAttr',
                3: 'I32ArrayAttr',
                4: 'F32ArrayAttr',
                5: 'StrArrayAttr',
                6: 'BoolAttr',
                7: 'BoolArrayAttr',
                9: 'SI64Attr',
                11: 'I64ArrayAttr',
            }
            # 2.3.2 attributes
            for attr in op_proto[ATTRS]:
                if (op_proto[ATTRS][attr][EXTRA]) or (
                    attr in skipped_attr_list
                ):
                    continue
                if op_proto[ATTRS][attr][DEFAULT_VALUE] is not None:
                    if op_proto[ATTRS][attr][TYPE] in attr_mlir_converter:
                        default_value = str(
                            op_proto[ATTRS][attr][DEFAULT_VALUE]
                        )
                        # Array defaults use C++ brace syntax in TableGen.
                        if attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] in [
                            'I32ArrayAttr',
                            'F32ArrayAttr',
                            'StrArrayAttr',
                            'BoolArrayAttr',
                            'I64ArrayAttr',
                        ]:
                            default_value = default_value.replace(
                                '[', '{'
                            ).replace(']', '}')
                        # Booleans are lowercased; strings are re-quoted.
                        if attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] in [
                            'BoolAttr',
                            'BoolArrayAttr',
                        ]:
                            default_value = default_value.lower()
                        elif attr_mlir_converter[
                            op_proto[ATTRS][attr][TYPE]
                        ] in ['StrAttr', 'StrArrayAttr']:
                            default_value = default_value.replace('\'', '\\\"')
                            if (
                                attr_mlir_converter[op_proto[ATTRS][attr][TYPE]]
                                == "StrAttr"
                            ):
                                default_value = '\\\"' + default_value + '\\\"'
                        attr_list = (
                            " DefaultValuedAttr<"
                            + attr_mlir_converter[op_proto[ATTRS][attr][TYPE]]
                            + ", \""
                            + default_value
                            + "\">:$"
                            + attr
                            + ","
                        )
                        ARGUMENTS += attr_list
                    else:
                        print(
                            "Error:"
                            + op_type
                            + ":"
                            + attr
                            + ":"
                            + str(op_proto[ATTRS][attr][TYPE])
                        )
                else:
                    # No default: only string/array attrs are emitted plain.
                    if op_proto[ATTRS][attr][TYPE] in attr_mlir_converter:
                        attr_type_ = attr_mlir_converter[
                            op_proto[ATTRS][attr][TYPE]
                        ]
                        if attr_type_ in [
                            'StrAttr',
                            'I32ArrayAttr',
                            'F32ArrayAttr',
                            'StrArrayAttr',
                            'BoolArrayAttr',
                            'I64ArrayAttr',
                        ]:
                            attr_list = attr_type_ + ":$" + attr + ","
                            ARGUMENTS += attr_list
                    else:
                        print(
                            " ouch Error:"
                            + op_type
                            + ":"
                            + attr
                            + ":"
                            + str(op_proto[ATTRS][attr][TYPE])
                        )
            # Trim the trailing comma and close the (ins ...) clause.
            ARGUMENTS = ARGUMENTS[:-1] + ");\n"
        # 2.4 results info
        RESULTS = ""
        if len(op_proto[OUTPUTS]) > 0:
            outputs = ""
            for output_ in op_proto[OUTPUTS]:
                if (
                    not op_proto[OUTPUTS][output_][EXTRA]
                    and not op_proto[OUTPUTS][output_][INTERMEDIATE]
                ):
                    if not op_proto[OUTPUTS][output_][DUPLICABLE]:
                        outputs = outputs + "PD_Tensor:${},".format(output_)
                    else:
                        outputs = outputs + "PD_Tensor_Array:${},".format(
                            output_
                        )
            RESULTS = "\n let results = (outs {});\n".format(outputs[:-1])
        with open(dst_dialect_file, 'a') as ops_mlir_file:
            ops_mlir_file.write(HEAD)
            ops_mlir_file.write(SUMMARY)
            ops_mlir_file.write(DESCRIPTION)
            ops_mlir_file.write(ARGUMENTS)
            ops_mlir_file.write(RESULTS)
            ops_mlir_file.write("}\n")
    with open(dst_dialect_file, 'a') as ops_mlir_file:
        ops_mlir_file.write("\n#endif // PD_OPS")
    print("Skipped ops num: " + str(len(skipped_op_list)))
    print(
        "Automatically generated op dialects num: "
        + str(len(automatically_generated_op_dialect))
    )
if __name__ == "__main__":
all_op_protos_dict = get_all_ops_desc()
generate_all_ops_inputs_outputs_map(all_op_protos_dict)
convert_op_proto_into_mlir(all_op_protos_dict)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import yaml
from get_compat_kernel_signature import get_compat_kernels_info
# TODO @DannyIsFunny: more attr types need to be supported.
# Maps Paddle attribute C++ type spellings to MLIR attribute kinds.
attr_type_converter = {
    "int": 'SI32Attr',
    "bool": 'BoolAttr',
    "int64_t": 'SI64Attr',
    "float": 'F32Attr',
    "string": 'StrAttr',
    "vector<int>": 'I32ArrayAttr',
}

# Maps phi backend / layout / precision spellings to dialect enum names.
target_type_converter = {"CPU": "CPU", "GPU": "GPU", "Undefined": "UNK"}
layout_type_converter = {
    "NCHW": "NCHW",
    "NHWC": "NHWC",
    "Undefined(AnyLayout)": "ANY",
}
precision_type_converter = {
    "uint8": "UINT8",
    "int8": "INT8",
    "int16": "INT16",
    "int32": "INT32",
    "int64": "INT64",
    "float16": "FLOAT16",
    "bfloat16": "BFLOAT16",
    "float32": "FLOAT32",
    "float64": "FLOAT64",
    "complex64": "COMPLEX64",
    "complex128": "COMPLEX128",
    "bool": "BOOL",
    "Undefined": "UNK",
}

# Inputs generated by infrt_build.sh (print_phi_kernels and
# kernel_signature_generator) plus a hand-maintained skip list; paths are
# relative to tools/infrt/.
kernel_types_info_file = "./kernels.json"
kernel_signature_info_file = "./kernel_signature.json"
skipped_phi_api_list_file = "./skipped_phi_api.json"
def get_skipped_kernel_list():
    """Collect phi kernel names that must not get a generated dialect:
    kernels of skipped phi apis plus explicitly skipped phi kernels."""
    with open(skipped_phi_api_list_file, 'r') as f:
        skipped_info = json.load(f)
    kernels = []
    for api in get_api_yaml_info("../../"):
        if "kernel" not in api or "infer_meta" not in api:
            continue
        if api["op"] in skipped_info["phi_apis"]:
            kernels.append(api["kernel"]["func"])
    kernels += skipped_info["phi_kernels"]
    return kernels
def get_api_yaml_info(file_path):
    """Load and concatenate the phi api and legacy api yaml manifests
    found under ``file_path``."""
    apis = []
    manifests = (
        "/paddle/phi/api/yaml/api.yaml",
        "/paddle/phi/api/yaml/legacy_api.yaml",
    )
    for relative in manifests:
        with open(file_path + relative, 'r') as f:
            entries = yaml.load(f, Loader=yaml.FullLoader)
        if entries:
            apis.extend(entries)
    return apis
def generate_kernel_name(op_name, place_str):
    """Derive the dialect alias and C++ class name for one kernel.

    ``place_str`` looks like ``(CPU, NCHW, float32)``; its components are
    normalized through the converter tables above.
    """
    raw_target, raw_layout, raw_precision = place_str[1:-1].split(',')
    target = target_type_converter[raw_target.strip()]
    layout = layout_type_converter[raw_layout.strip()]
    precision = precision_type_converter[raw_precision.strip()]
    class_name_ = "{}{}".format(
        op_name.replace("_", "").title(),
        "".join([target.title(), precision, layout.title()]),
    )
    alias_ = "{}.{}".format(op_name, ".".join([target, precision, layout]))
    return alias_, class_name_
def generate_attrs_info(op_name, attrs_info):
    """Render the attribute operands of one kernel as TableGen arguments.

    Returns an empty string when the recorded signature's attribute count
    does not match ``attrs_info``.
    """
    with open(kernel_signature_info_file) as f:
        kernel_attrs_names = json.load(f)
    kernel_attrs_names.update(get_compat_kernels_info())
    rendered = ""
    names = kernel_attrs_names[op_name]["attrs"]
    if len(names) == len(attrs_info):
        for index, type_str in enumerate(attrs_info):
            rendered += '{type_}:${name_},'.format(
                type_=attr_type_converter[type_str], name_=names[index]
            )
    return rendered[:-1]
def generate_inputs_info(input_info):
    """Render the DenseTensor input operands as TableGen arguments."""
    pieces = []
    for index, desc in enumerate(input_info):
        raw_target, raw_layout, raw_precision = desc.split(',')
        # todo: check validity
        pieces.append(
            " DenseTensor<\"{}\",\"{}\",\"{}\">:$in{}".format(
                target_type_converter[raw_target.strip()],
                precision_type_converter[raw_precision.strip()],
                layout_type_converter[raw_layout.strip()],
                str(index),
            )
        )
    return ",".join(pieces)
def generate_arguments_info(op_name, input_info, attr_info):
    """Combine context, inputs and attributes into a `let arguments` clause."""
    pieces = (
        ["Context:$dev_ctx"]
        + generate_inputs_info(input_info).split(",")
        + generate_attrs_info(op_name, attr_info).split(",")
    )
    # Drop empty fragments left by splitting empty strings.
    non_empty = [piece for piece in pieces if piece != ""]
    return "let arguments = (ins {});".format(",".join(non_empty).strip(","))
def generate_results_info(output_info):
    """Render the DenseTensor results as a `let results = (outs ...)` clause."""
    rendered = "let results = (outs "
    for index, desc in enumerate(output_info):
        raw_target, raw_layout, raw_precision = desc.split(',')
        # todo: check validity
        rendered += " DenseTensor<\"{}\",\"{}\",\"{}\">:$out{},".format(
            target_type_converter[raw_target.strip()],
            precision_type_converter[raw_precision.strip()],
            layout_type_converter[raw_layout.strip()],
            str(index),
        )
    # Drop the final comma (or, with no outputs, the trailing space — kept
    # for byte-parity with the original implementation).
    return "{});".format(rendered[:-1])
def generate_supported_kernel_list(load_dict):
    """Return op names whose kernels only use supported attribute types and
    have a recorded signature, minus the explicitly skipped kernels."""
    supported_kernels_list_ = []
    kernel_attrs_names = {}
    with open(kernel_signature_info_file) as f:
        kernel_attrs_names = json.load(f)
    kernel_attrs_names.update(get_compat_kernels_info())
    for op_name in load_dict:
        kernel_list = load_dict[op_name]
        for kernel_info in kernel_list:
            for kernel_alias_ in kernel_info:
                attributes = kernel_info[kernel_alias_]["attribute"]
                # Every attribute type must be convertible to MLIR.
                flag = all(a in attr_type_converter for a in attributes)
                if flag and op_name in kernel_attrs_names:
                    supported_kernels_list_.append(op_name)
    supported_kernels_list_ = list(set(supported_kernels_list_))
    # BUGFIX: the original tested `skipped_kernel in skipped_kernel_list`,
    # which is always true, so remove() raised ValueError for any skipped
    # kernel that was never in the supported list. Check membership in the
    # supported list instead.
    for skipped_kernel in get_skipped_kernel_list():
        if skipped_kernel in supported_kernels_list_:
            supported_kernels_list_.remove(skipped_kernel)
    return supported_kernels_list_
def scan_kernel_info(load_dict):
    """Print the distinct targets, layouts and precisions appearing in the
    kernel alias strings of ``load_dict`` (diagnostic helper)."""
    targets, layouts, precisions = [], [], []
    for op_name in load_dict:
        for kernel_info in load_dict[op_name]:
            for kernel_alias_ in kernel_info:
                # alias looks like "(CPU, NCHW, float32)"
                t_part, l_part, p_part = kernel_alias_[1:-1].split(',')
                targets.append(t_part.strip())
                layouts.append(l_part.strip())
                precisions.append(p_part.strip())
    print(list(set(targets)))
    print(list(set(layouts)))
    print(list(set(precisions)))
def generate_cpu_kernel_dialect(op_name, kernel_alias_, kernel_info):
    """Emit the TableGen definition for one CPU phi kernel."""
    alias, class_name = generate_kernel_name(op_name, kernel_alias_)
    summary = 'let summary = "{name}";'.format(name=alias)

    # alias is op.<target>.<precision>.<layout>; the dialect name keeps
    # op, precision and layout and drops the target component.
    parts = alias.split(".")
    dialect_name = parts[0] + "." + parts[2] + "." + parts[3]

    header = 'def {kernel_name} : PDTCPU_Kernel<"{name}",[NoSideEffect]> {left_brace}'.format(
        kernel_name=class_name, name=dialect_name.lower(), left_brace="{"
    )
    arguments = generate_arguments_info(
        op_name, kernel_info["input"], kernel_info["attribute"]
    )
    results = generate_results_info(kernel_info["output"])
    return '{header_}\n {summary_}\n {arguments_}\n {results_}\n{right_brace}\n'.format(
        header_=header,
        summary_=summary,
        arguments_=arguments,
        results_=results,
        right_brace="}",
    )
def generate_gpu_kernel_dialect(op_name, kernel_alias_, kernel_info):
    """Emit the TableGen definition for one GPU phi kernel."""
    alias, class_name = generate_kernel_name(op_name, kernel_alias_)
    summary = 'let summary = "{name}";'.format(name=alias)
    # alias is op.<target>.<precision>.<layout>; keep op, precision and
    # layout and drop the target component for the dialect name.
    dialect_name = alias.split(".")
    dialect_name = (
        dialect_name[0] + "." + dialect_name[2] + "." + dialect_name[3]
    )
    header = 'def {kernel_name} : PDTGPU_Kernel<"{name}",[NoSideEffect]> {left_brace}'.format(
        kernel_name=class_name, name=dialect_name.lower(), left_brace="{"
    )
    inputs_ = kernel_info["input"]
    attributes = kernel_info["attribute"]
    arguments = generate_arguments_info(op_name, inputs_, attributes)
    outputs = kernel_info["output"]
    results = generate_results_info(outputs)
    kernel_dialect = '{header_}\n {summary_}\n {arguments_}\n {results_}\n{right_brace}\n'.format(
        header_=header,
        summary_=summary,
        arguments_=arguments,
        results_=results,
        right_brace="}",
    )
    return kernel_dialect
def generate_dialect_head():
    """Return the banner comment plus include preamble shared by the
    generated phi kernel .td files."""
    comment_ = "/*===- TableGen'source file -----------------------------------------------===*\\\n\
|* *|\n\
|* Kernel Definitions *|\n\
|* *|\n\
|* Automatically generated file, do not edit! *|\n\
|* Generated by tools/infrt/generate_pten_kernel_dialect.py *|\n\
|* *|\n\
\\*===----------------------------------------------------------------------===*/\n"
    includes_ = "#ifndef PTEN_KERNELS\n\
#define PTEN_KERNELS\n\
include \"mlir/Interfaces/InferTypeOpInterface.td\"\n\
include \"mlir/Interfaces/LoopLikeInterface.td\"\n\
include \"mlir/IR/OpBase.td\"\n\
include \"paddle/infrt/dialect/phi/ir/infrt_phi_kernel.td\""
    return comment_ + includes_
def get_kernel_target(kernel_alias_):
    """Return the target (e.g. ``CPU``) from an alias like
    ``(CPU, NCHW, float32)``; the value is not whitespace-trimmed."""
    return kernel_alias_[1:-1].split(",")[0]
def main():
    """Generate phi_cpu_kernels.td and phi_gpu_kernels.td from the kernel
    type dump, emitting one dialect definition per supported kernel."""
    with open(kernel_types_info_file, "r") as f:
        load_dict = json.load(f)
    head = generate_dialect_head()
    cpu_registry_ = ""
    gpu_registry_ = ""
    supported_kernels = generate_supported_kernel_list(load_dict)
    print("Supported kernels:")
    print(supported_kernels)
    for op_name in load_dict:
        if op_name not in supported_kernels:
            continue
        kernel_list = load_dict[op_name]
        for kernel_info in kernel_list:
            for kernel_alias_ in kernel_info:
                # Route each kernel to the CPU or GPU dialect file.
                if get_kernel_target(kernel_alias_) == "CPU":
                    kernel_registry = generate_cpu_kernel_dialect(
                        op_name, kernel_alias_, kernel_info[kernel_alias_]
                    )
                    cpu_registry_ += kernel_registry
                elif get_kernel_target(kernel_alias_) == "GPU":
                    kernel_registry = generate_gpu_kernel_dialect(
                        op_name, kernel_alias_, kernel_info[kernel_alias_]
                    )
                    gpu_registry_ += kernel_registry
                else:
                    print(
                        "Unsupported backend:"
                        + get_kernel_target(kernel_alias_)
                    )
    end = "#endif // PTEN_KERNELS"
    with open(
        "../../paddle/infrt/dialect/phi/ir/phi_cpu_kernels.td", "w"
    ) as dst:
        dst.write(
            '{start_}\n{dialect_}\n{end_}'.format(
                start_=head, dialect_=cpu_registry_, end_=end
            )
        )
    with open(
        "../../paddle/infrt/dialect/phi/ir/phi_gpu_kernels.td", "w"
    ) as dst:
        dst.write(
            '{start_}\n{dialect_}\n{end_}'.format(
                start_=head, dialect_=gpu_registry_, end_=end
            )
        )
if __name__ == '__main__':
    # Both JSON dumps are produced beforehand by infrt_build.sh; report
    # whichever is missing, and only run when both are present.
    if not os.path.exists(kernel_types_info_file):
        print(
            "Error: '{file_name}' not exist!".format(
                file_name=kernel_types_info_file
            )
        )
    if not os.path.exists(kernel_signature_info_file):
        print(
            "Error: '{file_name}' not exist!".format(
                file_name=kernel_signature_info_file
            )
        )
    if os.path.exists(kernel_types_info_file) and os.path.exists(
        kernel_signature_info_file
    ):
        main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Compat signature files excluded from parsing (handled elsewhere).
skip_list = ["adam_sig.cc", "adamw_sig.cc"]
def is_grad_kernel(kernel_info):
    """True when the signature string names a gradient kernel
    (the name before the first comma ends with ``_grad``)."""
    return kernel_info.split(",")[0].endswith("_grad")
def parse_compat_registry(kernel_info):
    """Split ``name,{ins},{attrs},{outs}`` into ``(name, info_dict)``.

    The trailing ``}`` of each section is stripped before splitting the
    comma-separated member names.
    """
    name, inputs_str, attrs_str, outputs_str = kernel_info.split(",{")
    info = {
        "inputs": inputs_str[:-1].split(","),
        "attrs": attrs_str[:-1].split(","),
        "outputs": outputs_str[:-1].split(","),
    }
    return name, info
def remove_grad_registry(kernels_registry):
    """Return a copy of the registry without entries whose name contains
    ``_grad``."""
    return {
        name: info
        for name, info in kernels_registry.items()
        if "_grad" not in name
    }
def get_compat_kernels_info():
    """Scan paddle/phi/ops/compat/*.cc for KernelSignature registrations.

    Returns:
        dict mapping kernel name -> {"inputs": [...], "attrs": [...],
        "outputs": [...]}, merged across duplicate registrations and with
        gradient kernels removed.
    """
    kernels_info = {}
    # Bug fix: the original called compat_files.remove() while iterating
    # the same list, which skips the element following each removal.
    # Build the filtered list up front instead.
    compat_files = [
        file_
        for file_ in os.listdir("../../paddle/phi/ops/compat")
        if ".cc" in file_
    ]
    for file_ in compat_files:
        if file_ in skip_list:
            continue
        with open("../../paddle/phi/ops/compat/" + file_) as in_file:
            txt = in_file.readlines()
        content = ""
        registry = False
        for line in txt:
            # A registration starts at "KernelSignature(" and ends at the
            # first line containing ';'.
            if "KernelSignature(" in line:
                content = ""
                registry = True
            if registry:
                content += line
            if registry and ";" in line:
                # NOTE: str.strip(s) removes a *character set*, not a
                # literal prefix/suffix; kept as-is to preserve output.
                data = (
                    content.replace("\n", "")
                    .replace(" ", "")
                    .strip("return")
                    .strip("KernelSignature(")
                    .strip(r"\);")
                    .replace("\"", "")
                    .replace("\\", "")
                )
                registry = False
                if is_grad_kernel(data):
                    continue
                name, registry_info = parse_compat_registry(data)
                if name in kernels_info:
                    # Merge duplicate registrations, de-duplicating each
                    # field list (order is not preserved, as before).
                    for key in ("inputs", "attrs", "outputs"):
                        kernels_info[name][key] = list(
                            set(registry_info[key] + kernels_info[name][key])
                        )
                else:
                    kernels_info[name] = registry_info
    return remove_grad_registry(kernels_info)
#!/usr/bin/env bash
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=================================================
# Utils
#=================================================
set -e
#step 1:get kernel registered info
# The shell script has some problem when register with macro, such as in `activation_kernel.c*`
# Scan every phi kernel source (activation kernels are handled separately
# below): cut out PD_REGISTER_*KERNEL(...) blocks with sed, flatten them
# with awk, drop the macro name ($1=""), strip namespace prefixes, and
# write the non-grad registrations into a temp info file.
kernel_register_info_file=`mktemp`
PADDLE_ROOT="$( cd "$( dirname "$0" )/../../" && pwd )"
unset GREP_OPTIONS && find ${PADDLE_ROOT}/paddle/phi/kernels -name "*.c*" | grep -v "activation_kernel.c*" \
  | xargs sed -e '/PD_REGISTER_\(GENERAL_\)\?KERNEL(/,/)/!d' \
  | awk 'BEGIN { RS="{" }{ gsub(/\n /,""); print $0 }' \
  | grep PD_REGISTER \
  | awk -F ",|\(|\)" '{gsub(/ /,"");$1="";print}' \
  | sort -u | awk '{gsub(/phi::/,"");gsub(/paddle::platform::/,"");gsub(/dtype::/,"");gsub(/paddle::/,"");print $0}' \
  | grep -v "_grad" > $kernel_register_info_file
# handle `activation_kernel.cc` case by case.
# The relu kernel is registered directly with PD_REGISTER_KERNEL.
find ${PADDLE_ROOT}/paddle/phi/kernels -name "activation_kernel.cc" | xargs sed -e '/PD_REGISTER_KERNEL(relu/,/)/!d' \
  | awk 'BEGIN { RS="{" }{ gsub(/\n /,""); print $0 }' | grep PD_REGISTER_KERNEL \
  | awk -F ",|\(|\)" '{gsub(/ /,"");$1="";print}' \
  | sort -u | awk '{gsub(/phi::/,"");gsub(/paddle::platform::/,"");gsub(/dtype::/,"");gsub(/paddle::/,"");print $0}' \
  | grep -v "_grad" >> $kernel_register_info_file
# Template row extracted from the PD_REGISTER_KERNEL(name, ...) macro body,
# with the `name`/`func` placeholders still in place.
act_temp=$(find ${PADDLE_ROOT}/paddle/phi/kernels -name "activation_kernel.cc" | xargs sed -e '/PD_REGISTER_KERNEL(name/,/)/!d' \
  | awk 'BEGIN { RS="{" }{ gsub(/\n /,""); print $0 }' | grep -E "PD_REGISTER_(GENERAL_)?KERNEL" \
  | awk -F ",|\(|\)" '{gsub(/ /,"");gsub(/\\/,"");$1="";print}' | sort -u \
  | awk '{gsub(/phi::/,"");gsub(/paddle::platform::/,"");gsub(/dtype::/,"");gsub(/paddle::/,"");print $0}' \
  | grep -v "_grad")
# Every PD_REGISTER_ACTIVATION_KERNEL(name, func) call site (the #define
# of the macro itself is excluded).
all_act_arg=$(find ${PADDLE_ROOT}/paddle/phi/kernels -name "activation_kernel.cc" | xargs sed -e '/PD_REGISTER_ACTIVATION_KERNEL(/,/)/!d' | grep -v '#define' | grep PD_REGISTER_ACTIVATION_KERNEL | awk -F "\(|\)" '{gsub(/ /,"");$1="";print}' | sed -e 's/[ \t]*$//g')
# Instantiate the template once per (name, kernel) pair.
for act in $all_act_arg
do
  name=${act%,*}
  kernel=$(echo ${act#*,} | sed -e 's/\r//g')
  tmp=${act_temp/name/${name}}
  echo "${tmp/func/${kernel}}" >> $kernel_register_info_file
done
# TODO(wilber): We just support cuda, not support rocm.
# handle `activation_kernel.cu` which register with macro.
# - process relu kernel.
find ${PADDLE_ROOT}/paddle/phi/kernels -name "activation_kernel.cu" | xargs sed -e '/PD_REGISTER_KERNEL(relu/,/)/!d' \
  | awk 'BEGIN { RS="{" }{ gsub(/\n /,""); print $0 }' | awk 'NR>2' | grep PD_REGISTER \
  | awk -F ",|\(|\)" '{gsub(/ /,"");$1="";print}' \
  | sort -u | awk '{gsub(/phi::/,"");gsub(/paddle::platform::/,"");gsub(/dtype::/,"");gsub(/paddle::/,"");print $0}' \
  | grep -v "_grad" >> $kernel_register_info_file
# - process PD_REGISTER_ACTIVATION_KERNEL kernels.
# Template row with `name`/`func` placeholders, taken from the
# PD_REGISTER_KERNEL(name, ...) macro body.
act_temp=$(find ${PADDLE_ROOT}/paddle/phi/kernels -name "activation_kernel.cu" | xargs sed -e '/PD_REGISTER_KERNEL(name/,/)/!d' \
  | awk 'BEGIN { RS="{" }{ gsub(/\n /,""); print $0 }' | grep PD_REGISTER \
  | awk -F ",|\(|\)" '{gsub(/ /,"");gsub(/\\/,"");$1="";print}' | sort -u \
  | awk '{gsub(/phi::/,"");gsub(/paddle::platform::/,"");gsub(/dtype::/,"");gsub(/paddle::/,"");print $0}' \
  | grep -v "_grad")
# Every PD_REGISTER_ACTIVATION_KERNEL(name, func) call site.
all_act_arg=$(find ${PADDLE_ROOT}/paddle/phi/kernels -name "activation_kernel.cu" | xargs sed -e '/PD_REGISTER_ACTIVATION_KERNEL(/,/)/!d' | grep -v '#define' | grep PD_REGISTER_ACTIVATION_KERNEL | awk -F "\(|\)" '{gsub(/ /,"");$1="";print}' | sed -e 's/[ \t]*$//g')
# Instantiate the template once per (name, kernel) pair.
for act in $all_act_arg
do
  name=${act%,*}
  kernel=$(echo ${act#*,} | sed -e 's/\r//g')
  tmp=${act_temp/name/${name}}
  echo "${tmp/func/${kernel}}" >> $kernel_register_info_file
done
#step 2:get simple general inferMeta function wrap info
# Generate the wrapped infermeta header/source into a scratch dir, then
# pull out every PD_REGISTER_INFER_META_FN(kernel, Meta) pair into
# wrap_info.txt as "kernel Meta" rows.
temp_path=`mktemp -d`
python3 ${PADDLE_ROOT}/paddle/phi/api/yaml/generator/wrapped_infermeta_gen.py \
  --api_yaml_path ${PADDLE_ROOT}/paddle/phi/api/yaml/api.yaml ${PADDLE_ROOT}/paddle/phi/api/yaml/legacy_api.yaml \
  --wrapped_infermeta_header_path ${temp_path}/generate.h \
  --wrapped_infermeta_source_path ${temp_path}/generate.cc
# NOTE(review): xargs appends the found .cc files after generate.cc, so
# this grep scans generate.cc plus every phi .cc — confirm that scanning
# the extra files is intended.
find ${PADDLE_ROOT}/paddle/phi/ -name "*.cc" | xargs grep PD_REGISTER_INFER_META_FN ${temp_path}/generate.cc \
  | awk -F "\(|,|::|\)" '{print $2, $4}' > ${temp_path}/wrap_info.txt
#step 3:get ir's attr_name.
ir_attr_name_info_file=`mktemp`
# phi_cpu attr
# Collect every CPU kernel ir name declared via PDTCPU_Kernel<"..."> in the
# generated td file, then recover each kernel's attribute names by erasing
# the attr type suffixes (F32Attr, I64Attr, ...) from its Attr list.
all_ir_name=`grep -Eo "PDTCPU_Kernel<.*\"" ${PADDLE_ROOT}/paddle/infrt/dialect/phi/ir/phi_cpu_kernels.td | awk -v FS="<" '{gsub(/\"/,"");print $2}'`
for ir in $all_ir_name
do
  attr_name=`grep "<\"$ir" -A 3 ${PADDLE_ROOT}/paddle/infrt/dialect/phi/ir/phi_cpu_kernels.td | grep -Eo "Attr:.*)" \
    | awk '{gsub(/F32Attr/,"");gsub(/F64Attr/,"");gsub(/StrAttr/,"");gsub(/BoolAttr/,""); \
    gsub(/SI1Attr/,"");gsub(/SI8Attr/,"");gsub(/SI16Attr/,"");gsub(/SI32Attr/,"");gsub(/SI64Attr/,""); \
    gsub(/UI1Attr/,"");gsub(/UI8Attr/,"");gsub(/I16Attr/,"");gsub(/I32Attr/,"");gsub(/I64Attr/,""); \
    gsub(/I1Attr/,"");gsub(/I8Attr/,"");gsub(/UI16Attr/,"");gsub(/UI32Attr/,"");gsub(/UI64Attr/,""); \
    gsub(/I32ArrayAttr/,"");gsub(/SI32ArrayAttr/,""); \
    gsub(/Attr/,"");gsub(/\)/,""); \
    gsub(/[,:]/,"");print $a}'`
  echo phi_cpu.$ir $attr_name >> $ir_attr_name_info_file
done
# phi_gpu attr
# Mirror of the phi_cpu loop above: collect every GPU kernel ir name from
# PDTGPU_Kernel<"..."> declarations, then strip the *Attr type suffixes
# to recover the attribute names for each kernel.
all_ir_name=`grep -Eo "PDTGPU_Kernel<.*\"" ${PADDLE_ROOT}/paddle/infrt/dialect/phi/ir/phi_gpu_kernels.td | awk -v FS="<" '{gsub(/\"/,"");print $2}'`
for ir in $all_ir_name
do
  # Fix: add the missing ';' after gsub(/\)/,"") so the awk program is
  # statement-separated exactly like the phi_cpu variant above (the
  # original relied on awk expression concatenation across the line
  # continuation, which is fragile and inconsistent).
  attr_name=`grep "<\"$ir" -A 3 ${PADDLE_ROOT}/paddle/infrt/dialect/phi/ir/phi_gpu_kernels.td | grep -Eo "Attr:.*)" \
    | awk '{gsub(/F32Attr/,"");gsub(/F64Attr/,"");gsub(/StrAttr/,"");gsub(/BoolAttr/,""); \
    gsub(/SI1Attr/,"");gsub(/SI8Attr/,"");gsub(/SI16Attr/,"");gsub(/SI32Attr/,"");gsub(/SI64Attr/,""); \
    gsub(/UI1Attr/,"");gsub(/UI8Attr/,"");gsub(/I16Attr/,"");gsub(/I32Attr/,"");gsub(/I64Attr/,""); \
    gsub(/I1Attr/,"");gsub(/I8Attr/,"");gsub(/UI16Attr/,"");gsub(/UI32Attr/,"");gsub(/UI64Attr/,""); \
    gsub(/I32ArrayAttr/,"");gsub(/SI32ArrayAttr/,""); \
    gsub(/Attr/,"");gsub(/\)/,""); \
    gsub(/[,:]/,"");print $a}'`
  echo phi_gpu.$ir $attr_name >> "$ir_attr_name_info_file"
done
#step 4: merge all infos
# @input1 => phi kernel information : kernel_name kernel_key(GPU/CPU, precision, layout)
# @input2 => information from api.yaml : kernel_name kernel_function_name inferMeta_function_name
# @input3 => information from wrapped_infermeta_gen : ensure the inferMeta function has
#            same signature with kernel function
# The python tool joins the three inputs and emits the generated C++
# registration source for infrt.
python3 ${PADDLE_ROOT}/tools/infrt/get_phi_kernel_info.py \
  --paddle_root_path ${PADDLE_ROOT} \
  --kernel_info_file $kernel_register_info_file \
  --infermeta_wrap_file ${temp_path}/wrap_info.txt \
  --attr_info_file $ir_attr_name_info_file \
  --generate_file ${PADDLE_ROOT}/paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.cc
#!/bin/python
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
from typing import Dict, List
import yaml
# Repo-relative data files; rebound to absolute paths in __main__ by
# prefixing args.paddle_root_path.
skipped_phi_api_list_file = "/tools/infrt/skipped_phi_api.json"
api_yaml_file = "/paddle/phi/api/yaml/api.yaml"
legacy_api_yaml_file = "/paddle/phi/api/yaml/legacy_api.yaml"
def get_skipped_kernel_list():
    """Collect the kernel names that the infrt codegen must not register.

    Combines the kernels behind the skipped phi apis (resolved through the
    api yaml metadata) with the explicitly skipped phi kernels from the
    json file.
    """
    with open(skipped_phi_api_list_file, 'r') as f:
        skipped_api_info = json.load(f)
    api_entries = []
    for yaml_path in (api_yaml_file, legacy_api_yaml_file):
        loaded = get_api_yaml_info(yaml_path)
        if loaded:
            api_entries.extend(loaded)
    skipped_kernels = []
    for api in api_entries:
        # Only rows that describe both a kernel and an infer_meta matter.
        if "kernel" not in api or "infer_meta" not in api:
            continue
        if api["op"] in skipped_api_info["phi_apis"]:
            skipped_kernels.append(api["kernel"]["func"])
    skipped_kernels += skipped_api_info["phi_kernels"]
    return skipped_kernels
def parse_args():
    """Build and parse the command line for the phi-kernel info gatherer."""
    parser = argparse.ArgumentParser("gather phi kernel and infermeta info")
    # Plain required string options, declared data-driven.
    required_flags = [
        ("--paddle_root_path", "root path of paddle src[WORK_PATH/Paddle]."),
        (
            "--kernel_info_file",
            "kernel info file generated by get_phi_kernel_function.sh.",
        ),
        ("--infermeta_wrap_file", "inferMeta wrap info file."),
        ("--attr_info_file", "attr info file."),
    ]
    for flag, help_text in required_flags:
        parser.add_argument(flag, type=str, required=True, help=help_text)
    # Output path keeps a default even though it is required, matching the
    # original definition.
    parser.add_argument(
        "--generate_file",
        type=str,
        required=True,
        default="../paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.cc",
        help="generated file.",
    )
    return parser.parse_args()
def get_api_yaml_info(file_path):
    """Load and parse one phi api yaml file.

    Fixed: the original opened the file without closing it, leaking the
    descriptor until garbage collection; use a context manager.
    """
    with open(file_path, "r") as f:
        return yaml.load(f.read(), Loader=yaml.FullLoader)
def get_kernel_info(file_path):
    """Read kernel registration rows, deduplicating consecutive rows.

    Each non-empty line looks like ``name backend layout func dtypes...``.
    When two *consecutive* lines share the first two fields (name and
    backend), the later line replaces the earlier one.

    Fixed: the original leaked the file handle; use a context manager.
    """
    ret = []
    prev = []
    with open(file_path, "r") as f:
        for line in f:
            info = line.strip().split()
            if not info:
                continue
            if len(prev) == 0:
                ret.append(line.strip())
                prev = info
                continue
            # Same (name, backend) as the previous row: the new row wins.
            if prev[0] == info[0] and prev[1] == info[1]:
                ret.pop()
            ret.append(line.strip())
            prev = info
    return ret
def get_infermeta_info(file_path):
    """Return the non-empty, stripped lines of the infermeta wrap file.

    Fixed: the original leaked the file handle; use a context manager.
    """
    with open(file_path, "r") as f:
        return [l.strip() for l in f.readlines() if l.strip() != ""]
def get_attr_info(file_path):
    """Parse the attr info file into ``{ir_name: [attr, ...] | None}``.

    Each line is ``<ir_name> $attr1$attr2...``, e.g.::

        phi_gpu.argsort.float64.any $axisBool$descending

    A line without the second column maps to ``None``.
    """
    attr_map = {}
    with open(file_path, 'r') as f:
        for raw_line in f.readlines():
            fields = raw_line.strip().split(' ')
            if len(fields) == 2:
                # split('$') yields a leading '' before the first attr.
                attr_map[fields[0]] = fields[1].split('$')[1:]
            else:
                attr_map[fields[0]] = None
    return attr_map
def merge(infer_meta_data, kernel_data, wrap_data):
    """Attach an inferMeta function name to every kernel row.

    Lookup order: wrapped infermeta info first, then the api yaml mapping,
    then the literal ``unknown``.  Returns a list of token lists.
    """
    meta_map = {
        api["kernel"]["func"]: api["infer_meta"]["func"]
        for api in infer_meta_data
        if "kernel" in api and "infer_meta" in api
    }
    wrap_map = {
        parts[0]: parts[1] for parts in (entry.split() for entry in wrap_data)
    }
    merged = []
    for entry in kernel_data:
        kernel_name = entry.split()[0]
        if kernel_name in wrap_map:
            suffix = wrap_map[kernel_name]
        elif kernel_name in meta_map:
            suffix = meta_map[kernel_name]
        else:
            suffix = "unknown"
        merged.append((entry + " " + suffix).split())
    return merged
def gen_warn_info():
    """Return the header warning comment for the generated C++ source."""
    # Verbatim output template — do not reformat.
    return """// Generated by tools/infrt/gen_phi_kernel_register.py for infrt.
// DO NOT edit or include it within paddle.
"""
def gen_include_headers():
    """Return the #include block for the generated C++ source."""
    # Verbatim output template — do not reformat.
    return """
#include "paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.h"
#include "paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h"
#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/include/kernels.h"
#include "paddle/phi/include/infermeta.h"
#include "paddle/phi/infermeta/generated.h"
"""
def gen_namespace():
    """Return the (opening, closing) namespace snippets for the file."""
    # Verbatim output templates — do not reformat.
    return (
        """
namespace infrt {
namespace kernel {
""",
        """
} // namespace kernel
} // namespace infrt
""",
    )
def gen_context(val):
    """Map a backend tag to (C++ context type, ir prefix).

    Unsupported backends yield ("", ""), which callers use as a skip
    signal.
    """
    context_map = {
        "CPU": ("::phi::CPUContext", "phi_cpu"),
        "GPU": ("::phi::GPUContext", "phi_gpu"),
        # "XPU": ("::phi::XPUContext", "phi_xpu"),
    }
    return context_map.get(val, ("", ""))
def gen_layout(val):
    """Map a layout tag to its ir spelling.

    Only ALL_LAYOUT is handled; anything else raises.
    """
    # TODO(wilber): now only process ALL_LAYOUT
    if val != "ALL_LAYOUT":
        raise Exception(f"Unknown layout type {val}")
    return 'any'
def gen_kernel_func(val, ctx_name, dtype_name):
    """Build the fully-qualified ``&::phi::...`` kernel function reference.

    Non-templated names get ``<dtype, context>`` appended; templated names
    keep their template argument, qualifying it with ``::phi::`` unless it
    already is.
    """
    if '<' not in val or '>' not in val:
        return "&::phi::" + val + "<" + dtype_name + ", " + ctx_name + ">"
    lt = val.index('<')
    gt = val.index('>')
    base_name = val[:lt]
    template_arg = val[lt + 1 : gt]
    if '::phi::' in template_arg:
        return "&::phi::" + val
    return "&::phi::" + base_name + "<::phi::" + template_arg + ">"
def gen_dtype(vals: List[str]):
    """Translate C++ dtype spellings into (ir dtype, origin dtype) lists.

    Tokens containing VA_ARGS (macro residue) are silently skipped; any
    other unrecognized token raises.
    """
    # C++ spelling -> (ir name, phi origin spelling).
    dtype_table = {
        "float": ("float32", "float"),
        "double": ("float64", "double"),
        "float16": ("float16", "phi::float16"),
        "bfloat16": ("bf16", "phi::bfloat16"),
        "bool": ("bool", "bool"),
        "int8_t": ("int8", "int8_t"),
        "uint8_t": ("uint8", "uint8_t"),
        "int16_t": ("int16", "int16_t"),
        "int": ("int32", "int32_t"),
        "int32_t": ("int32", "int32_t"),
        "int64_t": ("int64", "int64_t"),
        "complex<float>": ("complex64", "phi::complex64"),
        "complex64": ("complex64", "phi::complex64"),
        "complex<double>": ("complex128", "phi::complex128"),
        "complex128": ("complex128", "phi::complex128"),
        "pstring": ("pstring", "phi::pstring"),
        "ALL_DTYPE": ("all", "all"),
    }
    ir_dtypes, origin_dtypes = [], []
    for val in vals:
        if val in dtype_table:
            ir_name, origin_name = dtype_table[val]
            ir_dtypes.append(ir_name)
            origin_dtypes.append(origin_name)
        elif "VA_ARGS" in val:
            continue
        else:
            raise Exception(f"Unknown data type {val}")
    return ir_dtypes, origin_dtypes
# Note: Now only process CPUContext and GPUContext.
def gen_register_code_info(item: List[str], attr_data: Dict[str, List[str]]):
    """
    Emit the registry->AddKernel(...) C++ snippet for one merged kernel row.

    item: ['add', 'CPU', 'ALL_LAYOUT', 'AddKernel', 'float', 'double', '...'(varaidic types), 'ElementwiseInferMeta']
    attr_data: {'phi_cpu.arg_min.float32.any': ['axisBool', 'keepdimsBool', 'flatten', 'dtype']}

    Returns "" for unsupported backends or when the inferMeta function is
    unknown; otherwise one AddKernel call per supported dtype.
    """
    ctx_name, ir_ctx_name = gen_context(item[1])
    if ctx_name == "":
        return ""
    # NOTE(review): rewrites item[2] in place, mutating the caller's list.
    item[2] = gen_layout(item[2])
    ir_dtypes, origin_dtypes = gen_dtype(item[4:-1])
    infer_shape_func = "&::phi::" + item[-1]
    res = ""
    if item[-1] == "unknown":
        # TODO(wilber): handle the unknown inferShape func.
        return ""
    skipped_kernel_list = get_skipped_kernel_list()
    for ir_dtype, origin_dtype in zip(ir_dtypes, origin_dtypes):
        kernel_func = gen_kernel_func(item[3], ctx_name, origin_dtype)
        if item[0].lower() in skipped_kernel_list:
            continue
        # ir_name example: phi_cpu.add.float32.any
        ir_name = (
            ir_ctx_name
            + '.'
            + item[0].lower()
            + '.'
            + ir_dtype
            + '.'
            + item[2].lower()
        )
        # Kernels with known attributes also register the attr name list.
        if ir_name in attr_data.keys() and attr_data[ir_name] is not None:
            attr_names = ', '.join(
                ["\"" + a + "\"" for a in attr_data[ir_name]]
            )
            res += f"""
registry->AddKernel("{ir_name}","""
            res += f"""
    &KernelLauncherFunc<decltype({kernel_func}),
    {kernel_func},
    decltype({infer_shape_func}),
    {infer_shape_func}>,
    {{{attr_names}}});
"""
        else:
            res += f"""
registry->AddKernel("{ir_name}","""
            res += f"""
    &KernelLauncherFunc<decltype({kernel_func}),
    {kernel_func},
    decltype({infer_shape_func}),
    {infer_shape_func}>);
"""
    return res
def gen_register_info(
    resources: List[List[str]], attr_data: Dict[str, List[str]]
):
    """
    Build the body of RegisterInferShapeLaunchers from all kernel rows.

    resources: [['add', 'CPU', 'ALL_LAYOUT', 'AddKernel', 'float', 'double', '...'(varaidic types), 'ElementwiseInferMeta'], ...]
    attr_data: {'phi_cpu.arg_min.float32.any': ['axisBool', 'keepdimsBool', 'flatten', 'dtype']}
    """
    res = "void RegisterInferShapeLaunchers(host_context::KernelRegistry* registry) {"
    # register cpu kernels.
    for item in resources:
        # The output string is polluted by C++ macros, here the \ is removed
        update_item = [v.strip('\\') for v in item]
        if update_item[1] != "CPU":
            continue
        # Fix: generate from the cleaned row; the original passed the raw
        # `item`, so the backslash stripping above had no effect.
        code = gen_register_code_info(update_item, attr_data)
        if code == "":
            continue
        res += code
    # register gpu kernels (only compiled when INFRT_WITH_GPU is defined).
    res += "\n#ifdef INFRT_WITH_GPU"
    for item in resources:
        # The output string is polluted by C++ macros, here the \ is removed
        update_item = [v.strip('\\') for v in item]
        if update_item[1] != "GPU":
            continue
        code = gen_register_code_info(update_item, attr_data)
        if code == "":
            continue
        res += code
    res += "#endif // INFRT_WITH_GPU"
    res += "\n}"
    return res
def gen_phi_kernel_register_code(
    resources: List[List[str]],
    attr_data: Dict[str, List[str]],
    src_file_path: str,
):
    """Write the complete generated C++ source to ``src_file_path``."""
    opening, closing = gen_namespace()
    with open(src_file_path, 'w') as out:
        out.write(gen_warn_info())
        out.write(gen_include_headers())
        out.write(opening)
        out.write(gen_register_info(resources, attr_data))
        out.write(closing)
if __name__ == "__main__":
    args = parse_args()
    # Resolve the repo-relative data files against the given paddle root.
    # (These rebind the module-level path globals before any reader runs.)
    skipped_phi_api_list_file = (
        args.paddle_root_path + skipped_phi_api_list_file
    )
    api_yaml_file = args.paddle_root_path + api_yaml_file
    legacy_api_yaml_file = args.paddle_root_path + legacy_api_yaml_file
    # Gather infer_meta metadata from both yaml files (either may be empty).
    infer_meta_data = []
    api_meta_data = get_api_yaml_info(api_yaml_file)
    legacy_api_meta_data = get_api_yaml_info(legacy_api_yaml_file)
    if api_meta_data:
        infer_meta_data.extend(api_meta_data)
    if legacy_api_meta_data:
        infer_meta_data.extend(legacy_api_meta_data)
    # Load kernel rows, infermeta wrappers and ir attr names, merge them,
    # and emit the generated C++ registration source.
    kernel_data = get_kernel_info(args.kernel_info_file)
    info_meta_wrap_data = get_infermeta_info(args.infermeta_wrap_file)
    attr_data = get_attr_info(args.attr_info_file)
    out = merge(infer_meta_data, kernel_data, info_meta_wrap_data)
    gen_phi_kernel_register_code(out, attr_data, args.generate_file)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
skip_list = []
def remove_grad_kernel(kernels):
    """Return the kernel names that are not gradient kernels."""
    return [name for name in kernels if "_grad" not in name]
# Legacy fluid registration macro prefixes scanned by
# get_compat_kernels_info, one per backend.
CPU_KERNEL_REGISTER = "REGISTER_OP_CPU_KERNEL("
GPU_KERNEL_REGISTER = "REGISTER_OP_CUDA_KERNEL("
XPU_KERNEL_REGISTER = "REGISTER_OP_XPU_KERNEL("
def get_compat_kernels_info(register):
    """Collect kernel names registered via the given legacy fluid macro.

    Walks every .cc under paddle/fluid/operators, skipping multi-line
    macro definitions, and extracts the first argument (the kernel name)
    of each matching REGISTER_OP_*_KERNEL(...) invocation.  Gradient
    kernels are removed from the result.

    Args:
        register: macro prefix to match, e.g. "REGISTER_OP_CPU_KERNEL(".
    Returns:
        list of non-gradient kernel names.
    """
    # Fix: dropped the unused `kernels_info` local from the original.
    kernel_names = []
    for dirpath, dirnames, filenames in os.walk("../../paddle/fluid/operators"):
        for file_name in filenames:
            if ".cc" not in file_name:
                continue
            with open(os.path.join(dirpath, file_name)) as f:
                txt = f.readlines()
            content = ""
            registry = False
            is_macro_definition = False
            for line in txt:
                # Skip multi-line #define bodies: every continued line of
                # a macro definition ends with a backslash.
                if line.strip().startswith(
                    "#define"
                ) and line.strip().endswith("\\"):
                    is_macro_definition = True
                    continue
                if is_macro_definition:
                    if not line.strip().endswith("\\"):
                        is_macro_definition = False
                    continue
                if register in line:
                    content = ""
                    registry = True
                if registry:
                    content += line
                if registry and ";" in line:
                    # NOTE: str.strip(register) strips a *character set*,
                    # not a literal prefix; kept as-is for identical output.
                    kernel_name = (
                        content.replace("\n", "")
                        .replace(" ", "")
                        .strip(register)
                        .split(",")
                    )
                    registry = False
                    kernel_names.append(kernel_name[0])
    return remove_grad_kernel(kernel_names)
def show_kernel_statistics(backend, kernels):
    """Print the kernel count and names for one backend."""
    print("=== kernels statistics === ")
    summary = (
        "the number of " + backend + " kernels is: " + str(len(kernels)) + "\n"
    )
    print(summary)
    print(kernels)
    print("\n")
def show_pass_statistics(backend, passes):
    """Print the pass count and names for one backend."""
    print("=== Passes Statistics === ")
    summary = (
        "The number of " + backend + " passes is: " + str(len(passes)) + "\n"
    )
    print(summary)
    print(passes)
    print("\n")
def get_passes_info(register):
    """Extract pass names registered inside the given constructor.

    Scans paddle_pass_builder.cc for the function whose signature contains
    ``register``, captures its body via brace counting, and returns every
    ``"<name>_pass"`` string literal found in it (without the suffix).
    """
    collected = ""
    with open("../../paddle/fluid/inference/api/paddle_pass_builder.cc") as f:
        source_lines = f.readlines()
    brace_stack = []
    inside_func = False
    for line in source_lines:
        if line.strip().startswith("//"):
            continue
        if register in line:
            inside_func = True
        if inside_func:
            collected += line
            # Track nesting; the function ends when braces balance out.
            for ch in line:
                if ch == "{":
                    brace_stack.append(ch)
                if ch == "}":
                    brace_stack.pop()
                    if len(brace_stack) == 0:
                        inside_func = False
    return re.findall("\"(.+?)_pass\"", collected)
if __name__ == "__main__":
    # Collect legacy fluid operator kernels per backend and report them.
    cpu_kernels = get_compat_kernels_info(CPU_KERNEL_REGISTER)
    gpu_kernels = get_compat_kernels_info(GPU_KERNEL_REGISTER)
    xpu_kernels = get_compat_kernels_info(XPU_KERNEL_REGISTER)
    show_kernel_statistics("CPU", cpu_kernels)
    show_kernel_statistics("GPU", gpu_kernels)
    show_kernel_statistics("XPU", xpu_kernels)
    # Collect and report the passes enabled by the default pass strategies.
    cpu_passes = get_passes_info("CpuPassStrategy::CpuPassStrategy()")
    gpu_passes = get_passes_info("GpuPassStrategy::GpuPassStrategy()")
    show_pass_statistics("CPU", cpu_passes)
    show_pass_statistics("GPU", gpu_passes)
{
"phi_apis":["conj", "deformable_conv", "dropout", "expand_as", "nll_loss", "psroi_pool", "roi_align", "roi_pool", "label_smooth", "layer_norm", "instance_norm", "group_norm"],
"phi_kernels":["equal_all"]
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册