Unverified commit a1addeef, authored by Yan Chunwei, committed by GitHub

INFRT/add LLVM lit to infrt (#39149)

Parent 4748486e
@@ -39,3 +39,10 @@ model_test
Testing
tools/__pycache__
# This file is automatically generated.
# TODO(zhiqiang) Move this file to build directory.
paddle/infrt/dialect/pd_ops.td
.lit_test_times.txt
paddle/infrt/tests/dialect/Output
paddle/infrt/tests/lit.cfg.py
@@ -76,6 +76,7 @@ add_subdirectory(tensor)
add_subdirectory(support)
add_subdirectory(external_kernels)
add_subdirectory(paddle)
add_subdirectory(tests)
# MLIR td file generations
@@ -86,13 +87,14 @@ set(infrt_mlir_incs
tensor_shape_inc
dense_tensor_inc
pd_ops_inc
pd_extra_ops_inc
rewrite_inc
trt_ops_inc
)
message(STATUS "infrt srcs:\n${infrt_src}")
cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto pten dense_tensor)
cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto pten dense_tensor)
cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto)
cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto)
add_dependencies(infrt ${infrt_mlir_incs})
add_custom_target(test_infrt_exec DEPENDS ${INFRT_TEST_TARGETS})
@@ -20,6 +20,7 @@ mlir_tablegen_on(tensor_shape DIALECT ts)
mlir_tablegen_on(dense_tensor DIALECT dt)
mlir_tablegen_on(pd_op_base DIALECT pd)
mlir_tablegen_on(pd_ops)
mlir_tablegen_on(pd_extra_ops)
mlir_add_rewriter(rewrite)
# TODO(Superjomn) add a cmake function cc_executable to encapsulate the following code
@@ -30,22 +31,6 @@ add_executable(print-ir print_ir.cc)
target_link_libraries(print-ir infrt ${mlir_libs})
add_dependencies(print-ir pd_ops_inc)
# MLIR opt tests
# %{
set(infrt_opt_path ${CMAKE_CURRENT_BINARY_DIR}/infrtopt)
add_test(test_infrt_mlir_opt_on_basic ${infrt_opt_path} ${CMAKE_CURRENT_SOURCE_DIR}/mlir_tests/basic.mlir)
add_test(test_infrt_mlir_opt_on_tensor_shape ${infrt_opt_path} ${CMAKE_CURRENT_SOURCE_DIR}/mlir_tests/tensor_shape.mlir)
add_test(test_infrt_mlir_opt_on_paddle_op ${infrt_opt_path} ${CMAKE_CURRENT_SOURCE_DIR}/mlir_tests/paddle_ops.mlir)
# %}
cc_test_tiny(test_infrt_mlir_loader SRCS mlir_loader_test.cc DEPS infrt ${MLIR_IR_LIBS})
# execute mlir and run FileCheck
infrt_exec_check(test_infrt_tensor_type mlir_tests/tensor_type.mlir)
infrt_exec_check(test_infrt__basic mlir_tests/basic.mlir)
infrt_exec_check(test_infrt_benchmark mlir_tests/benchmark.mlir)
infrt_exec_check(test_infrt_mlir_dense_tensor mlir_tests/dense_tensor.mlir)
add_subdirectory(tensorrt)
#ifndef PD_EXTRA_OPS
#define PD_EXTRA_OPS
include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/Interfaces/LoopLikeInterface.td"
include "mlir/IR/OpBase.td"
include "paddle/infrt/dialect/pd_op_base.td"
def PD_FusedFC : PD_Op<"FC", [NoSideEffect]> {
let summary = "Computes the Fully Connected result of two tensors";
let description = [{
}];
let arguments = (ins PD_Tensor:$input, PD_Tensor:$w, PD_Tensor:$bias, DefaultValuedAttr<I32Attr, "1">:$in_num_col_dims);
let results = (outs PD_Tensor:$out);
}
#endif
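// Usage sketch (mirroring the canonicalization test added later in this
// change): the fused op is spelled "pd.FC" in textual IR, e.g.
//   %out = "pd.FC"(%x, %w, %bias) {in_num_col_dims = 1 : i32}
//       : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>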
@@ -20,6 +20,8 @@
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/pd_ops.cpp.inc" // NOLINT
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/pd_extra_ops.cpp.inc" // NOLINT
#include "paddle/infrt/dialect/rewrite.hpp.inc" // NOLINT
@@ -31,8 +33,10 @@ PaddleDialect::PaddleDialect(MLIRContext *context)
addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/pd_ops.cpp.inc" // NOLINT
,
#define GET_OP_LIST
#include "paddle/infrt/dialect/pd_extra_ops.cpp.inc" // NOLINT
>();
#undef GET_OP_LIST
}
mlir::Operation *PaddleDialect::materializeConstant(mlir::OpBuilder &builder,
@@ -81,11 +85,14 @@ LogicalResult ElementwiseAdd::inferReturnTypes(
inferredReturnTypes.push_back(operands[0].getType());
return success();
}
void ElementwiseAdd::getCanonicalizationPatterns(
*/
void Elementwise_addOp::getCanonicalizationPatterns(
mlir::OwningRewritePatternList &results, mlir::MLIRContext *context) {
results.insert<FuseMulAdd>(context);
}
/*
mlir::OpFoldResult ElementwiseAdd::fold(
llvm::ArrayRef<mlir::Attribute> operands) {
if (getElementTypeOrSelf(getType()).isa<FloatType>()) {
......
@@ -57,3 +57,5 @@ class PaddleDialect : public Dialect {
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/pd_ops.hpp.inc"
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/pd_extra_ops.hpp.inc"
@@ -4,7 +4,8 @@
include "paddle/infrt/dialect/infrt_base.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "paddle/infrt/dialect/pd_ops.td"
/*
include "paddle/infrt/dialect/pd_extra_ops.td"
//===----------------------------------------------------------------------===//
// This is to fuse the composition: 'Matmul o ElementwiseAdd' into 'PD_FusedFC'.
//
@@ -23,10 +24,11 @@ include "paddle/infrt/dialect/pd_ops.td"
// 1. Make the constraint more complete.
// 2. Consider the case of: out = bias + z
//===----------------------------------------------------------------------===//
def FuseMulAdd : Pat<(PD_ElementwiseAdd (PD_MatmulOp $x, $y, ConstBoolAttrFalse:$_, ConstBoolAttrFalse:$_, $alpha), $bias, $axis),
def FuseMulAdd : Pat<(PD_Elementwise_addOp (PD_Matmul_v2Op $x, $y, ConstBoolAttrFalse:$_, ConstBoolAttrFalse:$_), $bias, $axis),
(PD_FusedFC $x, $y, $bias, (INFRT_createI32Attr<"1">))>;
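// Illustrative before/after IR for this pattern (a sketch; op spellings are
// taken from the rewrite test added later in this change):
//   %c = "pd.matmul_v2"(%x, %y) {transpose_x=false, transpose_y=false}
//       : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
//   %d = "pd.elementwise_add"(%c, %bias) {axis=1:si32}
//       : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
// becomes, after --canonicalize:
//   %d = "pd.FC"(%x, %y, %bias) {in_num_col_dims = 1 : i32}
//       : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>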
/*
//===----------------------------------------------------------------------===//
// This is to fuse the composition: 'FusedFC o Relu' into 'FusedRepeatedFCRelu'.
//
......
@@ -9,5 +9,5 @@ message(STATUS "basic_mlir: ${basic_mlir}")
message(STATUS "external_kernels_lib: ${external_kernels_lib}")
add_test(
NAME run_and_check_external_kernels
COMMAND sh -c "${CMAKE_BINARY_DIR}/infrt/host_context/infrt-exec -i ${basic_mlir} --shared_libs=${external_kernels_lib} | ${LLVM_PATH}/bin/FileCheck ${basic_mlir}"
COMMAND sh -c "${CMAKE_BINARY_DIR}/infrt/host_context/infrtexec -i ${basic_mlir} --shared_libs=${external_kernels_lib} | ${LLVM_PATH}/bin/FileCheck ${basic_mlir}"
)
@@ -21,9 +21,5 @@ cc_test_tiny(test_infrt_op_executable SRCS op_executable_test.cc DEPS infrt ${ML
cc_test_tiny(test_infrt_core_runtime SRCS core_runtime_test.cc DEPS infrt ${MLIR_IR_LIBS})
cc_test_tiny(test_infrt_mlir_to_runtime_translate SRCS mlir_to_runtime_translate_test.cc DEPS infrt ${MLIR_IR_LIBS})
infrt_exec_check(test_infrt_mlir_exec_on_basic mlir_tests/basic.mlir)
infrt_exec_check(test_infrt_mlir_exec_on_shape mlir_tests/shape.mlir)
infrt_exec_check(test_infrt_mlir_exec_on_dense_tensor mlir_tests/dense_tensor.mlir)
add_executable(infrt-exec mlir_exec.cc)
target_link_libraries(infrt-exec infrt ${MLIR_IR_LIBS})
add_executable(infrtexec mlir_exec.cc)
target_link_libraries(infrtexec infrt ${MLIR_IR_LIBS})
@@ -29,9 +29,6 @@
#include "paddle/infrt/tensor/tensor_map.h"
#include "paddle/infrt/tensor/tensor_shape.h"
#include "paddle/pten/backends/cpu/cpu_context.h"
#include "paddle/pten/core/dense_tensor.h"
namespace infrt {
namespace host_context {
@@ -48,8 +45,8 @@ using ValueVariantType = Variant<int16_t,
tensor::DenseHostTensor,
MlirFunctionExecutable*,
tensor::TensorMap,
pten::CPUContext,
pten::DenseTensor,
// pten::CPUContext,
// pten::DenseTensor,
std::vector<int16_t>,
std::vector<int32_t>,
std::vector<int64_t>,
......
@@ -2,7 +2,7 @@ core_gather_headers()
gather_srcs(infrt_src SRCS
basic_kernels.cc
pten_kernels.cc
# pten_kernels.cc
test_kernels.cc
tensor_shape_kernels.cc
tensor_kernels.cc
......
@@ -12,14 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/kernel/pten_kernels.h"
#include <iostream>
#include <string>
#include "paddle/infrt/host_context/kernel_registry.h"
#include "paddle/infrt/host_context/kernel_utils.h"
#include "paddle/infrt/kernel/pten_kernels.h"
#include "paddle/pten/backends/cpu/cpu_context.h"
#include "paddle/pten/kernels/math_kernel.h"
// Disable temporarily.
// #include "paddle/pten/backends/cpu/cpu_context.h"
// #include "paddle/pten/kernels/math_kernel.h"
using infrt::host_context::Attribute;
......
configure_file(lit.cfg.py.in "${CMAKE_SOURCE_DIR}/paddle/infrt/tests/lit.cfg.py")
add_test(NAME test_infrt_by_lit COMMAND sh -c "lit -v ${CMAKE_SOURCE_DIR}/paddle/infrt/tests --filter-out \"disabled_*\""
DEPENDS infrtopt infrtexec)
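# For each *.mlir file under paddle/infrt/tests, lit runs the file's `// RUN:`
# lines with %s substituted by the test's own path, so a test effectively
# executes something like (a sketch):
#   infrtexec -i basic.mlir | FileCheck basic.mlir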
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: @basic_f32
func @basic_f32() -> f32 {
%v0 = infrt.constant.f32 1.0
......
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: @benchmark
func @benchmark() {
// CHECK-LABEL: BM:add.f32:Count: 3
......
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: dense_shape0
func @dense_shape0() {
%shape = ts.build_shape [1:i64, 57:i64]
......
// RUN: infrtopt %s | FileCheck %s
// CHECK-LABEL: @ops
func @ops() {
%a = pd.feed() {name="input0"} : tensor<?xf32>
%b = pd.feed() {name="input1"}: tensor<?xf32>
......
// RUN: infrtopt --canonicalize %s | FileCheck %s
// CHECK-LABEL: @main
func @main() -> tensor<?xf32> {
%a = "pd.feed"() {name="input0"} : () -> tensor<?xf32>
@@ -9,16 +10,19 @@ func @main() -> tensor<?xf32> {
%bias1 = "pd.feed"() {name="input5"} : () -> tensor<?xf32>
%bias2 = "pd.feed"() {name="input6"} : () -> tensor<?xf32>
%c = "pd.matmul"(%a, %b) {transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d = "pd.elementwise_add"(%c, %bias) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
// CHECK: %{{[0-9]+}} = "pd.FC"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%c = "pd.matmul_v2"(%a, %b) {transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d = "pd.elementwise_add"(%c, %bias) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e = "pd.relu6"(%d) {} : (tensor<?xf32>) -> tensor<?xf32>
%c1 = "pd.matmul"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
// CHECK: %{{[0-9]+}} = "pd.FC"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%c1 = "pd.matmul_v2"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e1 = "pd.relu"(%d1) {} : (tensor<?xf32>) -> tensor<?xf32>
%c2 = "pd.matmul"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
// CHECK: %{{[0-9]+}} = "pd.FC"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%c2 = "pd.matmul_v2"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e2 = "pd.relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
"pd.fetch"(%e2) {name="output"} :(tensor<?xf32>)->()
}
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: @build_tensor1
func @build_tensor1() {
%a = ts.build_shape [1:i64, 57:i64, 92:i64]
// CHECK: shape[1,57,92]
ts.print_shape %a
infrt.return
}
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: test_tensor_type
func @test_tensor_type() {
%a = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lit.formats
import os
config.name = "MLIR tests"
config.test_format = lit.formats.ShTest(True)
build_dir = "@CMAKE_BINARY_DIR@"
config.llvm_tools_dir = os.path.join(build_dir, "third_party/install/llvm/bin")
config.llvm_tools_dir = os.path.join(build_dir, "third_party/install/llvm/lib")
infrtopt_bin = os.path.join(build_dir, "paddle/infrt/dialect/")
infrtexec_bin = os.path.join(build_dir, "paddle/infrt/host_context/")
llvm_bin = os.path.join(build_dir, "third_party/install/llvm/bin/")
config.environment['PATH'] = os.path.pathsep.join(
(infrtopt_bin, infrtexec_bin, llvm_bin, config.environment['PATH']))
config.suffixes = ['.mlir']
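# Usage sketch, assuming llvm-lit was installed with `python3 -m pip install lit`
# (as the CI script below does): lit discovers tests by the suffixes above and
# resolves infrtopt, infrtexec, and FileCheck through the PATH set here:
#   lit -v ${CMAKE_SOURCE_DIR}/paddle/infrt/tests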
@@ -32,8 +32,8 @@ function update_pd_ops() {
# compile and install paddle
rm -rf ${PADDLE_ROOT}/build && mkdir -p ${PADDLE_ROOT}/build
cd ${PADDLE_ROOT}/build
cmake .. -DWITH_PYTHON=ON -DWITH_GPU=OFF -DPYTHON_EXECUTABLE=`which python3`
make -j8
cmake .. -DWITH_PYTHON=ON -DWITH_GPU=OFF -DPYTHON_EXECUTABLE=`which python3` -DWITH_XBYAK=OFF -DWITH_NCCL=OFF -DWITH_RCCL=OFF -DWITH_CRYPTO=OFF
make -j8 paddle_python
cd ${PADDLE_ROOT}/build
cd python/dist/
python3 -m pip uninstall -y paddlepaddle
@@ -90,7 +90,7 @@ function infrt_gen_and_build() {
exit 7;
fi
make -j ${parallel_number} infrt infrtopt infrt-exec test_infrt_exec trt-exec infrt_lib_dist;build_error=$?
make -j ${parallel_number} infrt infrtopt infrtexec test_infrt_exec trt-exec infrt_lib_dist;build_error=$?
if [ "$build_error" != 0 ];then
exit 7;
fi
@@ -101,6 +101,9 @@ function infrt_gen_and_build() {
}
function test_infrt() {
# install llvm-lit toolkit
python3 -m pip install lit
mkdir -p ${PADDLE_ROOT}/build
cd ${PADDLE_ROOT}/build
if [ ${WITH_TESTING:-ON} == "ON" ] ; then
......
@@ -16,6 +16,8 @@ import paddle.fluid.framework as framework
from paddle.fluid import core
from paddle import compat as cpt
ops_having_canonicalization = {"elementwise_add", }
# collect original ops: ops that have both an inference and a grad definition
def get_original_ops():
@@ -120,7 +122,18 @@ def convert_op_proto_into_mlir(op_descs):
|* *|\n\
\*===----------------------------------------------------------------------===*/\n"
start_ = comment_ + "#ifndef PD_OPS\n#define PD_OPS\ninclude \"mlir/Interfaces/InferTypeOpInterface.td\"\ninclude \"mlir/Interfaces/LoopLikeInterface.td\"\ninclude \"mlir/IR/OpBase.td\"\ninclude \"paddle/infrt/dialect/pd_op_base.td\"\n\n"
lines = [
"#ifndef PD_OPS",
"#define PD_OPS",
"include \"mlir/Interfaces/InferTypeOpInterface.td\"",
"include \"mlir/Interfaces/LoopLikeInterface.td\"",
"include \"mlir/IR/OpBase.td\"",
"include \"paddle/infrt/dialect/pd_op_base.td\"",
"",
]
start_ = comment_ + "\n".join(lines)
with open(dst_dialect_file, 'w') as ops_mlir_file:
ops_mlir_file.write(start_)
@@ -134,6 +147,7 @@ def convert_op_proto_into_mlir(op_descs):
"trainable_statistics", "use_global_stats", "is_test", "use_mkldnn",
"use_cudnn"
]
original_ops_ = get_original_ops()
automatically_generated_op_dialect = []
for op_type, op_proto in op_descs.items():
@@ -144,6 +158,7 @@ def convert_op_proto_into_mlir(op_descs):
HEAD = "def PD_" + op_type.capitalize(
) + "Op : PD_Op<\"" + op_type + "\", [NoSideEffect]> {\n"
SUMMARY = " let summary = \"" + op_type + " op\";\n"
CANONICALIZATION = "let hasCanonicalizer = 1;" if op_type in ops_having_canonicalization else ""
# 2.2 Description
DESCRIPTION = " let description = [{\n"
@@ -245,6 +260,7 @@ def convert_op_proto_into_mlir(op_descs):
ops_mlir_file.write(DESCRIPTION)
ops_mlir_file.write(ARGUMENTS)
ops_mlir_file.write(RESULTS)
ops_mlir_file.write(CANONICALIZATION)
ops_mlir_file.write("}\n")
print("Skipped ops num: " + str(len(skipped_op_list)))
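# Sketch of the generated output: for an op listed in ops_having_canonicalization
# (here "elementwise_add"), the emitted TableGen definition gains a
# canonicalizer hook, roughly:
#   def PD_Elementwise_addOp : PD_Op<"elementwise_add", [NoSideEffect]> {
#     ...
#     let hasCanonicalizer = 1;
#   }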
......