From a1addeef67efa9cb7c7fd0687ec5e3d970225531 Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Thu, 27 Jan 2022 13:11:40 +0800 Subject: [PATCH] INFRT/add LLVM lit to infrt (#39149) --- .gitignore | 7 +++++ paddle/infrt/CMakeLists.txt | 6 ++-- paddle/infrt/dialect/CMakeLists.txt | 17 +---------- paddle/infrt/dialect/mlir_tests/rewrite.mlir | 24 --------------- paddle/infrt/dialect/pd_extra_ops.td | 18 +++++++++++ paddle/infrt/dialect/pd_ops.cc | 11 +++++-- paddle/infrt/dialect/pd_ops.h | 2 ++ paddle/infrt/dialect/rewrite.td | 6 ++-- paddle/infrt/external_kernels/CMakeLists.txt | 2 +- paddle/infrt/host_context/CMakeLists.txt | 8 ++--- paddle/infrt/host_context/value.h | 7 ++--- paddle/infrt/kernel/CMakeLists.txt | 2 +- paddle/infrt/kernel/pten_kernels.cc | 9 ++++-- paddle/infrt/tests/CMakeLists.txt | 4 +++ .../mlir_tests => tests/dialect}/basic.mlir | 1 + .../dialect}/benchmark.mlir | 1 + .../dialect}/dense_tensor.mlir | 1 + .../dialect/disabled_rewrite_conv_bn.mlir} | 0 .../dialect/disabled_tensor_map.mlir} | 0 .../dialect/disabled_trt_ops.mlir} | 0 .../dialect}/paddle_ops.mlir | 2 ++ paddle/infrt/tests/dialect/rewrite.mlir | 28 +++++++++++++++++ .../dialect}/tensor_shape.mlir | 3 ++ .../dialect}/tensor_type.mlir | 1 + paddle/infrt/tests/lit.cfg.py.in | 30 +++++++++++++++++++ paddle/scripts/infrt_build.sh | 9 ++++-- ...rate_pd_op_dialect_from_paddle_op_maker.py | 24 ++++++++++++--- 27 files changed, 154 insertions(+), 69 deletions(-) delete mode 100644 paddle/infrt/dialect/mlir_tests/rewrite.mlir create mode 100644 paddle/infrt/dialect/pd_extra_ops.td create mode 100644 paddle/infrt/tests/CMakeLists.txt rename paddle/infrt/{dialect/mlir_tests => tests/dialect}/basic.mlir (96%) rename paddle/infrt/{dialect/mlir_tests => tests/dialect}/benchmark.mlir (95%) rename paddle/infrt/{dialect/mlir_tests => tests/dialect}/dense_tensor.mlir (96%) rename paddle/infrt/{dialect/mlir_tests/rewrite_conv_bn.mlir => tests/dialect/disabled_rewrite_conv_bn.mlir} (100%) rename paddle/infrt/{dialect/mlir_tests/tensor_map.mlir => tests/dialect/disabled_tensor_map.mlir} (100%) rename paddle/infrt/{dialect/mlir_tests/trt_ops.mlir => tests/dialect/disabled_trt_ops.mlir} (100%) rename paddle/infrt/{dialect/mlir_tests => tests/dialect}/paddle_ops.mlir (81%) create mode 100644 paddle/infrt/tests/dialect/rewrite.mlir rename paddle/infrt/{dialect/mlir_tests => tests/dialect}/tensor_shape.mlir (52%) rename paddle/infrt/{dialect/mlir_tests => tests/dialect}/tensor_type.mlir (90%) create mode 100644 paddle/infrt/tests/lit.cfg.py.in diff --git a/.gitignore b/.gitignore index e905833cae7..14b75fce515 100644 --- a/.gitignore +++ b/.gitignore @@ -39,3 +39,10 @@ model_test Testing tools/__pycache__ + +# This file is automatically generated. +# TODO(zhiqiang) Move this file to build directory. 
+paddle/infrt/dialect/pd_ops.td +.lit_test_times.txt +paddle/infrt/tests/dialect/Output +paddle/infrt/tests/lit.cfg.py diff --git a/paddle/infrt/CMakeLists.txt b/paddle/infrt/CMakeLists.txt index e371e239182..f17ec328f0c 100644 --- a/paddle/infrt/CMakeLists.txt +++ b/paddle/infrt/CMakeLists.txt @@ -76,6 +76,7 @@ add_subdirectory(tensor) add_subdirectory(support) add_subdirectory(external_kernels) add_subdirectory(paddle) +add_subdirectory(tests) # MLIR td file generations @@ -86,13 +87,14 @@ set(infrt_mlir_incs tensor_shape_inc dense_tensor_inc pd_ops_inc + pd_extra_ops_inc rewrite_inc trt_ops_inc ) message(STATUS "infrt srcs:\n${infrt_src}") -cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto pten dense_tensor) -cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto pten dense_tensor) +cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto) +cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto) add_dependencies(infrt ${infrt_mlir_incs}) add_custom_target(test_infrt_exec DEPENDS ${INFRT_TEST_TARGETS}) diff --git a/paddle/infrt/dialect/CMakeLists.txt b/paddle/infrt/dialect/CMakeLists.txt index c064b214526..a5e049ea154 100644 --- a/paddle/infrt/dialect/CMakeLists.txt +++ b/paddle/infrt/dialect/CMakeLists.txt @@ -20,6 +20,7 @@ mlir_tablegen_on(tensor_shape DIALECT ts) mlir_tablegen_on(dense_tensor DIALECT dt) mlir_tablegen_on(pd_op_base DIALECT pd) mlir_tablegen_on(pd_ops) +mlir_tablegen_on(pd_extra_ops) mlir_add_rewriter(rewrite) # TODO(Superjomn) add a cmake function cc_executable to ecapsulate the following code @@ -30,22 +31,6 @@ add_executable(print-ir print_ir.cc) target_link_libraries(print-ir infrt ${mlir_libs}) add_dependencies(print-ir pd_ops_inc) - -# MLIR opt tests -# %{ -set(infrt_opt_path ${CMAKE_CURRENT_BINARY_DIR}/infrtopt) - -add_test(test_infrt_mlir_opt_on_basic ${infrt_opt_path} ${CMAKE_CURRENT_SOURCE_DIR}/mlir_tests/basic.mlir) -add_test(test_infrt_mlir_opt_on_tensor_shape ${infrt_opt_path} ${CMAKE_CURRENT_SOURCE_DIR}/mlir_tests/tensor_shape.mlir) -add_test(test_infrt_mlir_opt_on_paddle_op ${infrt_opt_path} ${CMAKE_CURRENT_SOURCE_DIR}/mlir_tests/paddle_ops.mlir) -# %} - cc_test_tiny(test_infrt_mlir_loader SRCS mlir_loader_test.cc DEPS infrt ${MLIR_IR_LIBS}) -# execute mlir and run FileCheck -infrt_exec_check(test_infrt_tensor_type mlir_tests/tensor_type.mlir) -infrt_exec_check(test_infrt__basic mlir_tests/basic.mlir) -infrt_exec_check(test_infrt_benchmark mlir_tests/benchmark.mlir) -infrt_exec_check(test_infrt_mlir_dense_tensor mlir_tests/dense_tensor.mlir) - add_subdirectory(tensorrt) diff --git a/paddle/infrt/dialect/mlir_tests/rewrite.mlir b/paddle/infrt/dialect/mlir_tests/rewrite.mlir deleted file mode 100644 index 5e207634da8..00000000000 --- a/paddle/infrt/dialect/mlir_tests/rewrite.mlir +++ /dev/null @@ -1,24 +0,0 @@ -// CHECK-LABEL: @main -func @main() -> tensor { - %a = "pd.feed"() {name="input0"} : () -> tensor - %b = "pd.feed"() {name="input1"} : () -> tensor - %bias = "pd.feed"() {name="input2"} : () -> tensor - - %b1 = "pd.feed"() {name="input3"} : () -> tensor - %b2 = "pd.feed"() {name="input4"} : () -> tensor - %bias1 = "pd.feed"() {name="input5"} : () -> tensor - %bias2 = "pd.feed"() {name="input6"} : () -> tensor - - %c = "pd.matmul"(%a, %b) {transpose_y=false} : (tensor, tensor) -> tensor - %d = "pd.elementwise_add"(%c, %bias) {axis=1:i32} : (tensor, tensor) -> tensor - %e = "pd.relu6"(%d) {} : 
(tensor) -> tensor - - %c1 = "pd.matmul"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor, tensor) -> tensor - %d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:i32} : (tensor, tensor) -> tensor - %e1 = "pd.relu"(%d1) {} : (tensor) -> tensor - - %c2 = "pd.matmul"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor, tensor) -> tensor - %d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:i32} : (tensor, tensor) -> tensor - %e2 = "pd.relu"(%d2) {} : (tensor) -> tensor - "pd.fetch"(%e2) {name="output"} :(tensor)->() -} \ No newline at end of file diff --git a/paddle/infrt/dialect/pd_extra_ops.td b/paddle/infrt/dialect/pd_extra_ops.td new file mode 100644 index 00000000000..c6d3f530455 --- /dev/null +++ b/paddle/infrt/dialect/pd_extra_ops.td @@ -0,0 +1,18 @@ +#ifndef PD_EXTRA_OPS +#define PD_EXTRA_OPS + +include "mlir/Interfaces/InferTypeOpInterface.td" +include "mlir/Interfaces/LoopLikeInterface.td" +include "mlir/IR/OpBase.td" +include "paddle/infrt/dialect/pd_op_base.td" + +def PD_FusedFC : PD_Op<"FC", [NoSideEffect]> { + let summary = "Computes the Fully Connected result of two tensors"; + let description = [{ + }]; + + let arguments = (ins PD_Tensor:$input, PD_Tensor:$w, PD_Tensor:$bias, DefaultValuedAttr:$in_num_col_dims); + let results = (outs PD_Tensor:$out); +} + +#endif diff --git a/paddle/infrt/dialect/pd_ops.cc b/paddle/infrt/dialect/pd_ops.cc index 6e32abbe2ab..f3b85ae4b5d 100644 --- a/paddle/infrt/dialect/pd_ops.cc +++ b/paddle/infrt/dialect/pd_ops.cc @@ -20,6 +20,8 @@ #define GET_OP_CLASSES #include "paddle/infrt/dialect/pd_ops.cpp.inc" // NOLINT +#define GET_OP_CLASSES +#include "paddle/infrt/dialect/pd_extra_ops.cpp.inc" // NOLINT #include "paddle/infrt/dialect/rewrite.hpp.inc" // NOLINT @@ -31,8 +33,10 @@ PaddleDialect::PaddleDialect(MLIRContext *context) addOperations< #define GET_OP_LIST #include "paddle/infrt/dialect/pd_ops.cpp.inc" // NOLINT + , +#define GET_OP_LIST +#include "paddle/infrt/dialect/pd_extra_ops.cpp.inc" // NOLINT >(); -#undef GET_OP_LIST } mlir::Operation *PaddleDialect::materializeConstant(mlir::OpBuilder &builder, @@ -81,11 +85,14 @@ LogicalResult ElementwiseAdd::inferReturnTypes( inferredReturnTypes.push_back(operands[0].getType()); return success(); } -void ElementwiseAdd::getCanonicalizationPatterns( +*/ + +void Elementwise_addOp::getCanonicalizationPatterns( mlir::OwningRewritePatternList &results, mlir::MLIRContext *context) { results.insert(context); } +/* mlir::OpFoldResult ElementwiseAdd::fold( llvm::ArrayRef operands) { if (getElementTypeOrSelf(getType()).isa()) { diff --git a/paddle/infrt/dialect/pd_ops.h b/paddle/infrt/dialect/pd_ops.h index 7d1d1d6f584..b48c68060d4 100644 --- a/paddle/infrt/dialect/pd_ops.h +++ b/paddle/infrt/dialect/pd_ops.h @@ -57,3 +57,5 @@ class PaddleDialect : public Dialect { #define GET_OP_CLASSES #include "paddle/infrt/dialect/pd_ops.hpp.inc" +#define GET_OP_CLASSES +#include "paddle/infrt/dialect/pd_extra_ops.hpp.inc" diff --git a/paddle/infrt/dialect/rewrite.td b/paddle/infrt/dialect/rewrite.td index db75ba041ba..5e228fed4d5 100644 --- a/paddle/infrt/dialect/rewrite.td +++ b/paddle/infrt/dialect/rewrite.td @@ -4,7 +4,8 @@ include "paddle/infrt/dialect/infrt_base.td" include "mlir/Interfaces/SideEffectInterfaces.td" include "paddle/infrt/dialect/pd_ops.td" -/* +include "paddle/infrt/dialect/pd_extra_ops.td" + //===----------------------------------------------------------------------===// // This is to fuse the composition: 'Matmul o ElementwiseAdd' into 'PD_FusedFC'. 
// @@ -23,10 +24,11 @@ include "paddle/infrt/dialect/pd_ops.td" // 1. Make the constrait more completely. // 2. Consider the case of : out = bias + z //===----------------------------------------------------------------------===// -def FuseMulAdd : Pat<(PD_ElementwiseAdd (PD_MatmulOp $x, $y, ConstBoolAttrFalse:$_, ConstBoolAttrFalse:$_, $alpha), $bias, $axis), +def FuseMulAdd : Pat<(PD_Elementwise_addOp (PD_Matmul_v2Op $x, $y, ConstBoolAttrFalse:$_, ConstBoolAttrFalse:$_), $bias, $axis), (PD_FusedFC $x, $y, $bias, (INFRT_createI32Attr<"1">))>; +/* //===----------------------------------------------------------------------===// // This is to fuse the composition: 'FusedFC o Relu' into 'FusedRepeatedFCRelu'. // diff --git a/paddle/infrt/external_kernels/CMakeLists.txt b/paddle/infrt/external_kernels/CMakeLists.txt index faffc3909bc..9e90c1896c7 100644 --- a/paddle/infrt/external_kernels/CMakeLists.txt +++ b/paddle/infrt/external_kernels/CMakeLists.txt @@ -9,5 +9,5 @@ message(STATUS "basic_mlir: ${basic_mlir}") message(STATUS "external_kernels_lib: ${external_kernels_lib}") add_test( NAME run_and_check_external_kernels - COMMAND sh -c "${CMAKE_BINARY_DIR}/infrt/host_context/infrt-exec -i ${basic_mlir} --shared_libs=${external_kernels_lib} | ${LLVM_PATH}/bin/FileCheck ${basic_mlir}" + COMMAND sh -c "${CMAKE_BINARY_DIR}/infrt/host_context/infrtexec -i ${basic_mlir} --shared_libs=${external_kernels_lib} | ${LLVM_PATH}/bin/FileCheck ${basic_mlir}" ) diff --git a/paddle/infrt/host_context/CMakeLists.txt b/paddle/infrt/host_context/CMakeLists.txt index fdba9af4a59..f5b4dac3408 100644 --- a/paddle/infrt/host_context/CMakeLists.txt +++ b/paddle/infrt/host_context/CMakeLists.txt @@ -21,9 +21,5 @@ cc_test_tiny(test_infrt_op_executable SRCS op_executable_test.cc DEPS infrt ${ML cc_test_tiny(test_infrt_core_runtime SRCS core_runtime_test.cc DEPS infrt ${MLIR_IR_LIBS}) cc_test_tiny(test_infrt_mlir_to_runtime_translate SRCS mlir_to_runtime_translate_test.cc DEPS infrt ${MLIR_IR_LIBS}) -infrt_exec_check(test_infrt_mlir_exec_on_basic mlir_tests/basic.mlir) -infrt_exec_check(test_infrt_mlir_exec_on_shape mlir_tests/shape.mlir) -infrt_exec_check(test_infrt_mlir_exec_on_dense_tensor mlir_tests/dense_tensor.mlir) - -add_executable(infrt-exec mlir_exec.cc) -target_link_libraries(infrt-exec infrt ${MLIR_IR_LIBS}) +add_executable(infrtexec mlir_exec.cc) +target_link_libraries(infrtexec infrt ${MLIR_IR_LIBS}) diff --git a/paddle/infrt/host_context/value.h b/paddle/infrt/host_context/value.h index 7f68e59f8a6..5ed89e78f11 100644 --- a/paddle/infrt/host_context/value.h +++ b/paddle/infrt/host_context/value.h @@ -29,9 +29,6 @@ #include "paddle/infrt/tensor/tensor_map.h" #include "paddle/infrt/tensor/tensor_shape.h" -#include "paddle/pten/backends/cpu/cpu_context.h" -#include "paddle/pten/core/dense_tensor.h" - namespace infrt { namespace host_context { @@ -48,8 +45,8 @@ using ValueVariantType = Variant, std::vector, std::vector, diff --git a/paddle/infrt/kernel/CMakeLists.txt b/paddle/infrt/kernel/CMakeLists.txt index 7e9ed8e5572..b7ef5691e47 100644 --- a/paddle/infrt/kernel/CMakeLists.txt +++ b/paddle/infrt/kernel/CMakeLists.txt @@ -2,7 +2,7 @@ core_gather_headers() gather_srcs(infrt_src SRCS basic_kernels.cc - pten_kernels.cc + # pten_kernels.cc test_kernels.cc tensor_shape_kernels.cc tensor_kernels.cc diff --git a/paddle/infrt/kernel/pten_kernels.cc b/paddle/infrt/kernel/pten_kernels.cc index 70c44b829f7..62e2db659ad 100644 --- a/paddle/infrt/kernel/pten_kernels.cc +++ b/paddle/infrt/kernel/pten_kernels.cc @@ -12,14 
+12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include "paddle/infrt/kernel/pten_kernels.h" + #include #include #include "paddle/infrt/host_context/kernel_registry.h" #include "paddle/infrt/host_context/kernel_utils.h" -#include "paddle/infrt/kernel/pten_kernels.h" -#include "paddle/pten/backends/cpu/cpu_context.h" -#include "paddle/pten/kernels/math_kernel.h" + +// Disable temporarily. +// #include "paddle/pten/backends/cpu/cpu_context.h" +// #include "paddle/pten/kernels/math_kernel.h" using infrt::host_context::Attribute; diff --git a/paddle/infrt/tests/CMakeLists.txt b/paddle/infrt/tests/CMakeLists.txt new file mode 100644 index 00000000000..a27fb3d8f18 --- /dev/null +++ b/paddle/infrt/tests/CMakeLists.txt @@ -0,0 +1,4 @@ +configure_file(lit.cfg.py.in "${CMAKE_SOURCE_DIR}/paddle/infrt/tests/lit.cfg.py") + +add_test(NAME test_infrt_by_lit COMMAND sh -c "lit -v ${CMAKE_SOURCE_DIR}/paddle/infrt/tests --filter-out \"disabled_*\"" + DEPENDS infrtopt infrtexec) diff --git a/paddle/infrt/dialect/mlir_tests/basic.mlir b/paddle/infrt/tests/dialect/basic.mlir similarity index 96% rename from paddle/infrt/dialect/mlir_tests/basic.mlir rename to paddle/infrt/tests/dialect/basic.mlir index 84b9b0fbd71..3c76b438a0e 100644 --- a/paddle/infrt/dialect/mlir_tests/basic.mlir +++ b/paddle/infrt/tests/dialect/basic.mlir @@ -1,3 +1,4 @@ +// RUN: infrtexec -i %s | FileCheck %s // CHECK-LABEL: @basic_f32 func @basic_f32() -> f32 { %v0 = infrt.constant.f32 1.0 diff --git a/paddle/infrt/dialect/mlir_tests/benchmark.mlir b/paddle/infrt/tests/dialect/benchmark.mlir similarity index 95% rename from paddle/infrt/dialect/mlir_tests/benchmark.mlir rename to paddle/infrt/tests/dialect/benchmark.mlir index 8b4530689df..1a57b434990 100644 --- a/paddle/infrt/dialect/mlir_tests/benchmark.mlir +++ b/paddle/infrt/tests/dialect/benchmark.mlir @@ -1,3 +1,4 @@ +// RUN: infrtexec -i %s | FileCheck %s // CHECK-LABEL: @benchmark func @benchmark() { // CHECK-LABEL: BM:add.f32:Count: 3 diff --git a/paddle/infrt/dialect/mlir_tests/dense_tensor.mlir b/paddle/infrt/tests/dialect/dense_tensor.mlir similarity index 96% rename from paddle/infrt/dialect/mlir_tests/dense_tensor.mlir rename to paddle/infrt/tests/dialect/dense_tensor.mlir index f923ef3efef..f1def17aa87 100644 --- a/paddle/infrt/dialect/mlir_tests/dense_tensor.mlir +++ b/paddle/infrt/tests/dialect/dense_tensor.mlir @@ -1,3 +1,4 @@ +// RUN: infrtexec -i %s | FileCheck %s // CHECK-LABEL: dense_shape0 func @dense_shape0() { %shape = ts.build_shape [1:i64, 57:i64] diff --git a/paddle/infrt/dialect/mlir_tests/rewrite_conv_bn.mlir b/paddle/infrt/tests/dialect/disabled_rewrite_conv_bn.mlir similarity index 100% rename from paddle/infrt/dialect/mlir_tests/rewrite_conv_bn.mlir rename to paddle/infrt/tests/dialect/disabled_rewrite_conv_bn.mlir diff --git a/paddle/infrt/dialect/mlir_tests/tensor_map.mlir b/paddle/infrt/tests/dialect/disabled_tensor_map.mlir similarity index 100% rename from paddle/infrt/dialect/mlir_tests/tensor_map.mlir rename to paddle/infrt/tests/dialect/disabled_tensor_map.mlir diff --git a/paddle/infrt/dialect/mlir_tests/trt_ops.mlir b/paddle/infrt/tests/dialect/disabled_trt_ops.mlir similarity index 100% rename from paddle/infrt/dialect/mlir_tests/trt_ops.mlir rename to paddle/infrt/tests/dialect/disabled_trt_ops.mlir diff --git a/paddle/infrt/dialect/mlir_tests/paddle_ops.mlir b/paddle/infrt/tests/dialect/paddle_ops.mlir similarity index 81% rename from 
paddle/infrt/dialect/mlir_tests/paddle_ops.mlir
rename to paddle/infrt/tests/dialect/paddle_ops.mlir
index 6618fe66bda..ca61ddc0b70 100644
--- a/paddle/infrt/dialect/mlir_tests/paddle_ops.mlir
+++ b/paddle/infrt/tests/dialect/paddle_ops.mlir
@@ -1,3 +1,5 @@
+// RUN: infrtopt %s | FileCheck %s
+// CHECK-LABEL: @ops
 func @ops() {
   %a = pd.feed() {name="input0"} : tensor<?xf32>
   %b = pd.feed() {name="input1"}: tensor<?xf32>
diff --git a/paddle/infrt/tests/dialect/rewrite.mlir b/paddle/infrt/tests/dialect/rewrite.mlir
new file mode 100644
index 00000000000..9fbb09e2244
--- /dev/null
+++ b/paddle/infrt/tests/dialect/rewrite.mlir
@@ -0,0 +1,28 @@
+// RUN: infrtopt --canonicalize %s | FileCheck %s
+// CHECK-LABEL: @main
+func @main() -> tensor<?xf32> {
+  %a = "pd.feed"() {name="input0"} : () -> tensor<?xf32>
+  %b = "pd.feed"() {name="input1"} : () -> tensor<?xf32>
+  %bias = "pd.feed"() {name="input2"} : () -> tensor<?xf32>
+
+  %b1 = "pd.feed"() {name="input3"} : () -> tensor<?xf32>
+  %b2 = "pd.feed"() {name="input4"} : () -> tensor<?xf32>
+  %bias1 = "pd.feed"() {name="input5"} : () -> tensor<?xf32>
+  %bias2 = "pd.feed"() {name="input6"} : () -> tensor<?xf32>
+
+  // CHECK: %{{[0-9]+}} = "pd.FC"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %c = "pd.matmul_v2"(%a, %b) {transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d = "pd.elementwise_add"(%c, %bias) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %e = "pd.relu6"(%d) {} : (tensor<?xf32>) -> tensor<?xf32>
+
+  // CHECK: %{{[0-9]+}} = "pd.FC"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %c1 = "pd.matmul_v2"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %e1 = "pd.relu"(%d1) {} : (tensor<?xf32>) -> tensor<?xf32>
+
+  // CHECK: %{{[0-9]+}} = "pd.FC"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %c2 = "pd.matmul_v2"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %e2 = "pd.relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
+  "pd.fetch"(%e2) {name="output"} :(tensor<?xf32>)->()
+}
diff --git a/paddle/infrt/dialect/mlir_tests/tensor_shape.mlir b/paddle/infrt/tests/dialect/tensor_shape.mlir
similarity index 52%
rename from paddle/infrt/dialect/mlir_tests/tensor_shape.mlir
rename to paddle/infrt/tests/dialect/tensor_shape.mlir
index 504b5b36be0..09210078b9d 100644
--- a/paddle/infrt/dialect/mlir_tests/tensor_shape.mlir
+++ b/paddle/infrt/tests/dialect/tensor_shape.mlir
@@ -1,5 +1,8 @@
+// RUN: infrtexec -i %s | FileCheck %s
+// CHECK-LABEL: @build_tensor1
 func @build_tensor1() {
   %a = ts.build_shape [1:i64, 57:i64, 92:i64]
+  // CHECK: shape[1,57,92]
   ts.print_shape %a
   infrt.return
 }
diff --git a/paddle/infrt/dialect/mlir_tests/tensor_type.mlir b/paddle/infrt/tests/dialect/tensor_type.mlir
similarity index 90%
rename from paddle/infrt/dialect/mlir_tests/tensor_type.mlir
rename to paddle/infrt/tests/dialect/tensor_type.mlir
index c331097ab10..01a2f7df326 100644
--- a/paddle/infrt/dialect/mlir_tests/tensor_type.mlir
+++ b/paddle/infrt/tests/dialect/tensor_type.mlir
@@ -1,3 +1,4 @@
+// RUN: infrtexec -i %s | FileCheck %s
 // CHECK-LABEL: test_tensor_type
 func @test_tensor_type() {
   %a = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>
diff --git a/paddle/infrt/tests/lit.cfg.py.in b/paddle/infrt/tests/lit.cfg.py.in
new file mode 100644
index 00000000000..19ee0076b55
--- 
/dev/null +++ b/paddle/infrt/tests/lit.cfg.py.in @@ -0,0 +1,30 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import lit.formats +import os + +config.name = "MLIR tests" +config.test_format = lit.formats.ShTest(True) +build_dir = "@CMAKE_BINARY_DIR@" +config.llvm_tools_dir = os.path.join(build_dir, "third_party/install/llvm/bin") +config.llvm_tools_dir = os.path.join(build_dir, "/third_party/install/llvm/lib") +infrtopt_bin = os.path.join(build_dir, "paddle/infrt/dialect/") +infrtexec_bin = os.path.join(build_dir, "paddle/infrt/host_context/") + +llvm_bin = os.path.join(build_dir, "third_party/install/llvm/bin/") +config.environment['PATH'] = os.path.pathsep.join( + (infrtopt_bin, infrtexec_bin, llvm_bin, config.environment['PATH'])) + +config.suffixes = ['.mlir'] diff --git a/paddle/scripts/infrt_build.sh b/paddle/scripts/infrt_build.sh index e34bf7cff86..e6e9759db8e 100755 --- a/paddle/scripts/infrt_build.sh +++ b/paddle/scripts/infrt_build.sh @@ -32,8 +32,8 @@ function update_pd_ops() { # compile and install paddle rm -rf ${PADDLE_ROOT}/build && mkdir -p ${PADDLE_ROOT}/build cd ${PADDLE_ROOT}/build - cmake .. -DWITH_PYTHON=ON -DWITH_GPU=OFF -DPYTHON_EXECUTABLE=`which python3` - make -j8 + cmake .. -DWITH_PYTHON=ON -DWITH_GPU=OFF -DPYTHON_EXECUTABLE=`which python3` -DWITH_XBYAK=OFF -DWITH_NCCL=OFF -DWITH_RCCL=OFF -DWITH_CRYPTO=OFF + make -j8 paddle_python cd ${PADDLE_ROOT}/build cd python/dist/ python3 -m pip uninstall -y paddlepaddle @@ -90,7 +90,7 @@ function infrt_gen_and_build() { exit 7; fi - make -j ${parallel_number} infrt infrtopt infrt-exec test_infrt_exec trt-exec infrt_lib_dist;build_error=$? + make -j ${parallel_number} infrt infrtopt infrtexec test_infrt_exec trt-exec infrt_lib_dist;build_error=$? if [ "$build_error" != 0 ];then exit 7; fi @@ -101,6 +101,9 @@ function infrt_gen_and_build() { } function test_infrt() { + # install llvm-lit toolkit + python3 -m pip install lit + mkdir -p ${PADDLE_ROOT}/build cd ${PADDLE_ROOT}/build if [ ${WITH_TESTING:-ON} == "ON" ] ; then diff --git a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py index 2688efcf63f..ed18e8666ae 100644 --- a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py +++ b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py @@ -1,11 +1,11 @@ # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -16,6 +16,8 @@ import paddle.fluid.framework as framework from paddle.fluid import core from paddle import compat as cpt +ops_having_canonicalization = {"elementwise_add", } + # collect original ops: op which has both inference and grid defination def get_original_ops(): @@ -120,7 +122,18 @@ def convert_op_proto_into_mlir(op_descs): |* *|\n\ \*===----------------------------------------------------------------------===*/\n" - start_ = comment_ + "#ifndef PD_OPS\n#define PD_OPS\ninclude \"mlir/Interfaces/InferTypeOpInterface.td\"\ninclude \"mlir/Interfaces/LoopLikeInterface.td\"\ninclude \"mlir/IR/OpBase.td\"\ninclude \"paddle/infrt/dialect/pd_op_base.td\"\n\n" + lines = [ + "#ifndef PD_OPS", + "#define PD_OPS", + "include \"mlir/Interfaces/InferTypeOpInterface.td\"", + "include \"mlir/Interfaces/LoopLikeInterface.td\"", + "include \"mlir/IR/OpBase.td\"", + "include \"paddle/infrt/dialect/pd_op_base.td\"", + "", + ] + + start_ = comment_ + "\n".join(lines) + with open(dst_dialect_file, 'w') as ops_mlir_file: ops_mlir_file.write(start_) @@ -134,6 +147,7 @@ def convert_op_proto_into_mlir(op_descs): "trainable_statistics", "use_global_stats", "is_test", "use_mkldnn", "use_cudnn" ] + original_ops_ = get_original_ops() automatically_generated_op_dialect = [] for op_type, op_proto in op_descs.items(): @@ -144,6 +158,7 @@ def convert_op_proto_into_mlir(op_descs): HEAD = "def PD_" + op_type.capitalize( ) + "Op : PD_Op<\"" + op_type + "\", [NoSideEffect]> {\n" SUMMARY = " let summary = \"" + op_type + " op\";\n" + CANONICALIZATION = "let hasCanonicalizer = 1;" if op_type in ops_having_canonicalization else "" # 2.2 Description DESCRIPTION = " let description = [{\n" @@ -245,6 +260,7 @@ def convert_op_proto_into_mlir(op_descs): ops_mlir_file.write(DESCRIPTION) ops_mlir_file.write(ARGUMENTS) ops_mlir_file.write(RESULTS) + ops_mlir_file.write(CANONICALIZATION) ops_mlir_file.write("}\n") print("Skipped ops num: " + str(len(skipped_op_list))) -- GitLab
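
For reference, the lit wiring in this patch comes down to three pieces: lit.cfg.py.in is turned into lit.cfg.py by configure_file, lit discovers every .mlir file under paddle/infrt/tests, and the RUN: lines at the top of each test are executed through the shell with the infrt tool directories prepended to PATH. The sketch below is illustrative only, not the generated file: build_dir and the tool subdirectories are placeholders standing in for the @CMAKE_BINARY_DIR@ paths substituted by CMake, and the config object is injected by lit when it loads the file.

# Minimal lit configuration sketch (illustrative; assumes a hypothetical
# build layout, and `config` is provided by lit itself at load time).
import os

import lit.formats

config.name = "MLIR tests"                     # suite name reported by lit
config.test_format = lit.formats.ShTest(True)  # run RUN: lines via an external shell
config.suffixes = ['.mlir']                    # discover only MLIR test files

# Placeholder build directory; the generated config uses the real CMake binary dir.
build_dir = "/path/to/paddle/build"

tool_dirs = [
    os.path.join(build_dir, "paddle/infrt/dialect"),         # infrtopt
    os.path.join(build_dir, "paddle/infrt/host_context"),     # infrtexec
    os.path.join(build_dir, "third_party/install/llvm/bin"),  # FileCheck
]

# Prepend the tool directories so bare tool names in RUN: lines resolve.
config.environment['PATH'] = os.path.pathsep.join(
    tool_dirs + [config.environment['PATH']])

With the tool directories on PATH, a header such as "// RUN: infrtexec -i %s | FileCheck %s" needs no absolute paths, and the whole suite can be driven either through the test_infrt_by_lit CTest target added above or directly with lit -v paddle/infrt/tests --filter-out "disabled_*".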