BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle; in sync with the fork source)
Commit a1addeef (unverified)
Authored Jan 27, 2022 by Yan Chunwei; committed via GitHub on Jan 27, 2022
Parent: 4748486e

INFRT/add LLVM lit to infrt (#39149)
Showing 26 changed files with 154 additions and 45 deletions (+154 −45)
.gitignore  (+7 −0)
paddle/infrt/CMakeLists.txt  (+4 −2)
paddle/infrt/dialect/CMakeLists.txt  (+1 −16)
paddle/infrt/dialect/pd_extra_ops.td  (+18 −0)
paddle/infrt/dialect/pd_ops.cc  (+9 −2)
paddle/infrt/dialect/pd_ops.h  (+2 −0)
paddle/infrt/dialect/rewrite.td  (+4 −2)
paddle/infrt/external_kernels/CMakeLists.txt  (+1 −1)
paddle/infrt/host_context/CMakeLists.txt  (+2 −6)
paddle/infrt/host_context/value.h  (+2 −5)
paddle/infrt/kernel/CMakeLists.txt  (+1 −1)
paddle/infrt/kernel/pten_kernels.cc  (+6 −3)
paddle/infrt/tests/CMakeLists.txt  (+4 −0)
paddle/infrt/tests/dialect/basic.mlir  (+1 −0)
paddle/infrt/tests/dialect/benchmark.mlir  (+1 −0)
paddle/infrt/tests/dialect/dense_tensor.mlir  (+1 −0)
paddle/infrt/tests/dialect/disabled_rewrite_conv_bn.mlir  (+0 −0)
paddle/infrt/tests/dialect/disabled_tensor_map.mlir  (+0 −0)
paddle/infrt/tests/dialect/disabled_trt_ops.mlir  (+0 −0)
paddle/infrt/tests/dialect/paddle_ops.mlir  (+2 −0)
paddle/infrt/tests/dialect/rewrite.mlir  (+28 −0)
paddle/infrt/tests/dialect/tensor_shape.mlir  (+3 −0)
paddle/infrt/tests/dialect/tensor_type.mlir  (+1 −0)
paddle/infrt/tests/lit.cfg.py.in  (+30 −0)
paddle/scripts/infrt_build.sh  (+6 −3)
tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py  (+20 −4)
.gitignore

@@ -39,3 +39,10 @@ model_test
 Testing
 tools/__pycache__
+# This file is automatically generated.
+# TODO(zhiqiang) Move this file to build directory.
+paddle/infrt/dialect/pd_ops.td
+.lit_test_times.txt
+paddle/infrt/tests/dialect/Output
+paddle/infrt/tests/lit.cfg.py

paddle/infrt/CMakeLists.txt

@@ -76,6 +76,7 @@ add_subdirectory(tensor)
 add_subdirectory(support)
 add_subdirectory(external_kernels)
 add_subdirectory(paddle)
+add_subdirectory(tests)

 # MLIR td file generations
@@ -86,13 +87,14 @@ set(infrt_mlir_incs
     tensor_shape_inc
     dense_tensor_inc
     pd_ops_inc
+    pd_extra_ops_inc
     rewrite_inc
     trt_ops_inc
     )
 message(STATUS "infrt srcs:\n${infrt_src}")
-cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto pten dense_tensor)
-cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto pten dense_tensor)
+cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto)
+cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto)
 add_dependencies(infrt ${infrt_mlir_incs})
 add_custom_target(test_infrt_exec DEPENDS ${INFRT_TEST_TARGETS})

paddle/infrt/dialect/CMakeLists.txt

@@ -20,6 +20,7 @@ mlir_tablegen_on(tensor_shape DIALECT ts)
 mlir_tablegen_on(dense_tensor DIALECT dt)
 mlir_tablegen_on(pd_op_base DIALECT pd)
 mlir_tablegen_on(pd_ops)
+mlir_tablegen_on(pd_extra_ops)
 mlir_add_rewriter(rewrite)

 # TODO(Superjomn) add a cmake function cc_executable to ecapsulate the following code
@@ -30,22 +31,6 @@ add_executable(print-ir print_ir.cc)
 target_link_libraries(print-ir infrt ${mlir_libs})
 add_dependencies(print-ir pd_ops_inc)

-# MLIR opt tests
-# %{
-set(infrt_opt_path ${CMAKE_CURRENT_BINARY_DIR}/infrtopt)
-add_test(test_infrt_mlir_opt_on_basic ${infrt_opt_path} ${CMAKE_CURRENT_SOURCE_DIR}/mlir_tests/basic.mlir)
-add_test(test_infrt_mlir_opt_on_tensor_shape ${infrt_opt_path} ${CMAKE_CURRENT_SOURCE_DIR}/mlir_tests/tensor_shape.mlir)
-add_test(test_infrt_mlir_opt_on_paddle_op ${infrt_opt_path} ${CMAKE_CURRENT_SOURCE_DIR}/mlir_tests/paddle_ops.mlir)
-# %}
 cc_test_tiny(test_infrt_mlir_loader SRCS mlir_loader_test.cc DEPS infrt ${MLIR_IR_LIBS})

-# execute mlir and run FileCheck
-infrt_exec_check(test_infrt_tensor_type mlir_tests/tensor_type.mlir)
-infrt_exec_check(test_infrt__basic mlir_tests/basic.mlir)
-infrt_exec_check(test_infrt_benchmark mlir_tests/benchmark.mlir)
-infrt_exec_check(test_infrt_mlir_dense_tensor mlir_tests/dense_tensor.mlir)
 add_subdirectory(tensorrt)

paddle/infrt/dialect/pd_extra_ops.td (new file, mode 100644)

#ifndef PD_EXTRA_OPS
#define PD_EXTRA_OPS
include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/Interfaces/LoopLikeInterface.td"
include "mlir/IR/OpBase.td"
include "paddle/infrt/dialect/pd_op_base.td"
def PD_FusedFC : PD_Op<"FC", [NoSideEffect]> {
let summary = "Computes the Fully Connected result of two tensors";
let description = [{
}];
let arguments = (ins PD_Tensor:$input, PD_Tensor:$w, PD_Tensor:$bias, DefaultValuedAttr<I32Attr, "1">:$in_num_col_dims);
let results = (outs PD_Tensor:$out);
}
#endif
paddle/infrt/dialect/pd_ops.cc

@@ -20,6 +20,8 @@
 #define GET_OP_CLASSES
 #include "paddle/infrt/dialect/pd_ops.cpp.inc"  // NOLINT
+#define GET_OP_CLASSES
+#include "paddle/infrt/dialect/pd_extra_ops.cpp.inc"  // NOLINT

 #include "paddle/infrt/dialect/rewrite.hpp.inc"  // NOLINT
@@ -31,8 +33,10 @@ PaddleDialect::PaddleDialect(MLIRContext *context)
   addOperations<
 #define GET_OP_LIST
 #include "paddle/infrt/dialect/pd_ops.cpp.inc"  // NOLINT
+      ,
+#define GET_OP_LIST
+#include "paddle/infrt/dialect/pd_extra_ops.cpp.inc"  // NOLINT
       >();
 #undef GET_OP_LIST
 }

 mlir::Operation *PaddleDialect::materializeConstant(mlir::OpBuilder &builder,
@@ -81,11 +85,14 @@ LogicalResult ElementwiseAdd::inferReturnTypes(
   inferredReturnTypes.push_back(operands[0].getType());
   return success();
 }
-void ElementwiseAdd::getCanonicalizationPatterns(
 */
+void Elementwise_addOp::getCanonicalizationPatterns(
     mlir::OwningRewritePatternList &results, mlir::MLIRContext *context) {
   results.insert<FuseMulAdd>(context);
 }

 /*
 mlir::OpFoldResult ElementwiseAdd::fold(
     llvm::ArrayRef<mlir::Attribute> operands) {
   if (getElementTypeOrSelf(getType()).isa<FloatType>()) {

paddle/infrt/dialect/pd_ops.h

@@ -57,3 +57,5 @@ class PaddleDialect : public Dialect {
 #define GET_OP_CLASSES
 #include "paddle/infrt/dialect/pd_ops.hpp.inc"
+#define GET_OP_CLASSES
+#include "paddle/infrt/dialect/pd_extra_ops.hpp.inc"

paddle/infrt/dialect/rewrite.td

@@ -4,7 +4,8 @@
 include "paddle/infrt/dialect/infrt_base.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
 include "paddle/infrt/dialect/pd_ops.td"
-/*
+include "paddle/infrt/dialect/pd_extra_ops.td"

 //===----------------------------------------------------------------------===//
 // This is to fuse the composition: 'Matmul o ElementwiseAdd' into 'PD_FusedFC'.
 //
@@ -23,10 +24,11 @@ include "paddle/infrt/dialect/pd_ops.td"
 // 1. Make the constrait more completely.
 // 2. Consider the case of : out = bias + z
 //===----------------------------------------------------------------------===//
-def FuseMulAdd : Pat<(PD_ElementwiseAdd (PD_MatmulOp $x, $y, ConstBoolAttrFalse:$_, ConstBoolAttrFalse:$_, $alpha), $bias, $axis),
+def FuseMulAdd : Pat<(PD_Elementwise_addOp (PD_Matmul_v2Op $x, $y, ConstBoolAttrFalse:$_, ConstBoolAttrFalse:$_), $bias, $axis),
                      (PD_FusedFC $x, $y, $bias, (INFRT_createI32Attr<"1">))>;

 /*
 //===----------------------------------------------------------------------===//
 // This is to fuse the composition: 'FusedFC o Relu' into 'FusedRepeatedFCRelu'.
 //

paddle/infrt/external_kernels/CMakeLists.txt

@@ -9,5 +9,5 @@ message(STATUS "basic_mlir: ${basic_mlir}")
 message(STATUS "external_kernels_lib: ${external_kernels_lib}")
 add_test(
   NAME run_and_check_external_kernels
-  COMMAND sh -c "${CMAKE_BINARY_DIR}/infrt/host_context/infrt-exec -i ${basic_mlir} --shared_libs=${external_kernels_lib} | ${LLVM_PATH}/bin/FileCheck ${basic_mlir}"
+  COMMAND sh -c "${CMAKE_BINARY_DIR}/infrt/host_context/infrtexec -i ${basic_mlir} --shared_libs=${external_kernels_lib} | ${LLVM_PATH}/bin/FileCheck ${basic_mlir}"
 )

paddle/infrt/host_context/CMakeLists.txt

@@ -21,9 +21,5 @@ cc_test_tiny(test_infrt_op_executable SRCS op_executable_test.cc DEPS infrt ${ML
 cc_test_tiny(test_infrt_core_runtime SRCS core_runtime_test.cc DEPS infrt ${MLIR_IR_LIBS})
 cc_test_tiny(test_infrt_mlir_to_runtime_translate SRCS mlir_to_runtime_translate_test.cc DEPS infrt ${MLIR_IR_LIBS})

-infrt_exec_check(test_infrt_mlir_exec_on_basic mlir_tests/basic.mlir)
-infrt_exec_check(test_infrt_mlir_exec_on_shape mlir_tests/shape.mlir)
-infrt_exec_check(test_infrt_mlir_exec_on_dense_tensor mlir_tests/dense_tensor.mlir)
-add_executable(infrt-exec mlir_exec.cc)
-target_link_libraries(infrt-exec infrt ${MLIR_IR_LIBS})
+add_executable(infrtexec mlir_exec.cc)
+target_link_libraries(infrtexec infrt ${MLIR_IR_LIBS})

paddle/infrt/host_context/value.h

@@ -29,9 +29,6 @@
 #include "paddle/infrt/tensor/tensor_map.h"
 #include "paddle/infrt/tensor/tensor_shape.h"
-#include "paddle/pten/backends/cpu/cpu_context.h"
-#include "paddle/pten/core/dense_tensor.h"

 namespace infrt {
 namespace host_context {
@@ -48,8 +45,8 @@ using ValueVariantType = Variant<int16_t,
                                  tensor::DenseHostTensor,
                                  MlirFunctionExecutable *,
                                  tensor::TensorMap,
-                                 pten::CPUContext,
-                                 pten::DenseTensor,
+                                 // pten::CPUContext,
+                                 // pten::DenseTensor,
                                  std::vector<int16_t>,
                                  std::vector<int32_t>,
                                  std::vector<int64_t>,

paddle/infrt/kernel/CMakeLists.txt

@@ -2,7 +2,7 @@ core_gather_headers()
 gather_srcs(infrt_src SRCS
     basic_kernels.cc
-    pten_kernels.cc
+    # pten_kernels.cc
     test_kernels.cc
     tensor_shape_kernels.cc
     tensor_kernels.cc

paddle/infrt/kernel/pten_kernels.cc

@@ -12,14 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#include "paddle/infrt/kernel/pten_kernels.h"
+#include <iostream>
+#include <string>
 #include "paddle/infrt/host_context/kernel_registry.h"
 #include "paddle/infrt/host_context/kernel_utils.h"
-#include "paddle/infrt/kernel/pten_kernels.h"
-#include "paddle/pten/backends/cpu/cpu_context.h"
-#include "paddle/pten/kernels/math_kernel.h"
+// Disable temporarily.
+// #include "paddle/pten/backends/cpu/cpu_context.h"
+// #include "paddle/pten/kernels/math_kernel.h"

 using infrt::host_context::Attribute;

paddle/infrt/tests/CMakeLists.txt (new file, mode 100644)

configure_file(lit.cfg.py.in "${CMAKE_SOURCE_DIR}/paddle/infrt/tests/lit.cfg.py")

add_test(NAME test_infrt_by_lit COMMAND sh -c "lit -v ${CMAKE_SOURCE_DIR}/paddle/infrt/tests --filter-out \"disabled_*\""
        DEPENDS infrtopt infrtexec)
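
For context, configure_file here only expands the @CMAKE_BINARY_DIR@ placeholder in lit.cfg.py.in and writes the result into the source tree as lit.cfg.py (which is why lit.cfg.py is listed in .gitignore above). A rough Python sketch of that substitution; the build path is a made-up example, not part of the commit:

    # Rough stand-in for CMake's configure_file(): replace every @VAR@ placeholder.
    # The build directory below is a hypothetical example path.
    import re

    def configure(template_path, output_path, variables):
        with open(template_path) as f:
            text = f.read()
        text = re.sub(r"@(\w+)@", lambda m: variables.get(m.group(1), ""), text)
        with open(output_path, "w") as f:
            f.write(text)

    configure("paddle/infrt/tests/lit.cfg.py.in",
              "paddle/infrt/tests/lit.cfg.py",
              {"CMAKE_BINARY_DIR": "/home/user/Paddle/build"})
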
paddle/infrt/dialect/mlir_tests/basic.mlir → paddle/infrt/tests/dialect/basic.mlir

+// RUN: infrtexec -i %s | FileCheck %s
 // CHECK-LABEL: @basic_f32
 func @basic_f32() -> f32 {
   %v0 = infrt.constant.f32 1.0

paddle/infrt/dialect/mlir_tests/benchmark.mlir → paddle/infrt/tests/dialect/benchmark.mlir

+// RUN: infrtexec -i %s | FileCheck %s
 // CHECK-LABEL: @benchmark
 func @benchmark() {
   // CHECK-LABEL: BM:add.f32:Count: 3

paddle/infrt/dialect/mlir_tests/dense_tensor.mlir → paddle/infrt/tests/dialect/dense_tensor.mlir

+// RUN: infrtexec -i %s | FileCheck %s
 // CHECK-LABEL: dense_shape0
 func @dense_shape0() {
   %shape = ts.build_shape [1:i64, 57:i64]

paddle/infrt/dialect/mlir_tests/rewrite_conv_bn.mlir → paddle/infrt/tests/dialect/disabled_rewrite_conv_bn.mlir
File moved.

paddle/infrt/dialect/mlir_tests/tensor_map.mlir → paddle/infrt/tests/dialect/disabled_tensor_map.mlir
File moved.

paddle/infrt/dialect/mlir_tests/trt_ops.mlir → paddle/infrt/tests/dialect/disabled_trt_ops.mlir
File moved.

paddle/infrt/dialect/mlir_tests/paddle_ops.mlir → paddle/infrt/tests/dialect/paddle_ops.mlir

+// RUN: infrtopt %s | FileCheck %s
 // CHECK-LABEL: @ops
 func @ops() {
   %a = pd.feed() {name="input0"} : tensor<?xf32>
   %b = pd.feed() {name="input1"}: tensor<?xf32>

paddle/infrt/dialect/mlir_tests/rewrite.mlir → paddle/infrt/tests/dialect/rewrite.mlir

+// RUN: infrtopt --canonicalize %s | FileCheck %s
 // CHECK-LABEL: @main
 func @main() -> tensor<?xf32> {
   %a = "pd.feed"() {name="input0"} : () -> tensor<?xf32>
@@ -9,16 +10,19 @@ func @main() -> tensor<?xf32> {
   %bias1 = "pd.feed"() {name="input5"} : () -> tensor<?xf32>
   %bias2 = "pd.feed"() {name="input6"} : () -> tensor<?xf32>

-  %c = "pd.matmul"(%a, %b) {transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %d = "pd.elementwise_add"(%c, %bias) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  // CHECK: %{{[0-9]+}} = "pd.FC"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %c = "pd.matmul_v2"(%a, %b) {transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d = "pd.elementwise_add"(%c, %bias) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
   %e = "pd.relu6"(%d) {} : (tensor<?xf32>) -> tensor<?xf32>

-  %c1 = "pd.matmul"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  // CHECK: %{{[0-9]+}} = "pd.FC"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %c1 = "pd.matmul_v2"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
   %e1 = "pd.relu"(%d1) {} : (tensor<?xf32>) -> tensor<?xf32>

-  %c2 = "pd.matmul"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  // CHECK: %{{[0-9]+}} = "pd.FC"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %c2 = "pd.matmul_v2"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
   %e2 = "pd.relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
   "pd.fetch"(%e2) {name="output"} :(tensor<?xf32>)->()
 }

paddle/infrt/dialect/mlir_tests/tensor_shape.mlir → paddle/infrt/tests/dialect/tensor_shape.mlir

+// RUN: infrtexec -i %s | FileCheck %s
 // CHECK-LABEL: @build_tensor1
 func @build_tensor1() {
   %a = ts.build_shape [1:i64, 57:i64, 92:i64]
   // CHECK: shape[1,57,92]
   ts.print_shape %a
   infrt.return
 }

paddle/infrt/dialect/mlir_tests/tensor_type.mlir → paddle/infrt/tests/dialect/tensor_type.mlir

+// RUN: infrtexec -i %s | FileCheck %s
 // CHECK-LABEL: test_tensor_type
 func @test_tensor_type() {
   %a = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>

paddle/infrt/tests/lit.cfg.py.in (new file, mode 100644)

# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lit.formats
import os
config.name = "MLIR tests"
config.test_format = lit.formats.ShTest(True)
build_dir = "@CMAKE_BINARY_DIR@"
config.llvm_tools_dir = os.path.join(build_dir, "third_party/install/llvm/bin")
config.llvm_tools_dir = os.path.join(build_dir, "/third_party/install/llvm/lib")
infrtopt_bin = os.path.join(build_dir, "paddle/infrt/dialect/")
infrtexec_bin = os.path.join(build_dir, "paddle/infrt/host_context/")
llvm_bin = os.path.join(build_dir, "third_party/install/llvm/bin/")
config.environment['PATH'] = os.path.pathsep.join(
(infrtopt_bin, infrtexec_bin, llvm_bin, config.environment['PATH']))
config.suffixes = ['.mlir']
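
Roughly, this config makes lit collect every file matching config.suffixes ('.mlir') under paddle/infrt/tests, expand each '// RUN:' line (with %s replaced by the test file's path), and execute it through the shell with infrtopt, infrtexec, and FileCheck resolvable via the PATH set above. A simplified illustration of that mechanism in Python, not lit's actual implementation; the directories below are hypothetical:

    # Simplified illustration of how a lit ShTest "// RUN:" line becomes a command.
    # Not lit's real code; the tool directories are made-up examples.
    import os
    import subprocess

    def run_test(test_file, tool_dirs):
        env = dict(os.environ)
        env["PATH"] = os.pathsep.join(tool_dirs + [env.get("PATH", "")])
        with open(test_file) as f:
            for line in f:
                if "// RUN:" in line:
                    cmd = line.split("// RUN:", 1)[1].strip().replace("%s", test_file)
                    subprocess.run(cmd, shell=True, check=True, env=env)

    run_test("paddle/infrt/tests/dialect/basic.mlir",
             ["build/paddle/infrt/host_context", "build/third_party/install/llvm/bin"])
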
paddle/scripts/infrt_build.sh

@@ -32,8 +32,8 @@ function update_pd_ops() {
     # compile and install paddle
     rm -rf ${PADDLE_ROOT}/build && mkdir -p ${PADDLE_ROOT}/build
     cd ${PADDLE_ROOT}/build
-    cmake .. -DWITH_PYTHON=ON -DWITH_GPU=OFF -DPYTHON_EXECUTABLE=`which python3`
-    make -j8
+    cmake .. -DWITH_PYTHON=ON -DWITH_GPU=OFF -DPYTHON_EXECUTABLE=`which python3` -DWITH_XBYAK=OFF -DWITH_NCCL=OFF -DWITH_RCCL=OFF -DWITH_CRYPTO=OFF
+    make -j8 paddle_python
     cd ${PADDLE_ROOT}/build
     cd python/dist/
     python3 -m pip uninstall -y paddlepaddle
@@ -90,7 +90,7 @@ function infrt_gen_and_build() {
       exit 7;
     fi
-    make -j ${parallel_number} infrt infrtopt infrt-exec test_infrt_exec trt-exec infrt_lib_dist;build_error=$?
+    make -j ${parallel_number} infrt infrtopt infrtexec test_infrt_exec trt-exec infrt_lib_dist;build_error=$?
     if [ "$build_error" != 0 ];then
       exit 7;
     fi
@@ -101,6 +101,9 @@ function infrt_gen_and_build() {
 }

 function test_infrt() {
+    # install llvm-lit toolkit
+    python3 -m pip install lit
+
     mkdir -p ${PADDLE_ROOT}/build
     cd ${PADDLE_ROOT}/build
     if [ ${WITH_TESTING:-ON} == "ON" ] ; then

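Outside of CTest, the same lit suite can be launched by hand once lit has been installed via pip as above. A small sketch in Python; the relative source-tree path is an assumption, not part of the commit:

    # Hypothetical manual run of the lit suite that test_infrt_by_lit drives via CTest,
    # assuming `python3 -m pip install lit` has already been run and `lit` is on PATH.
    import subprocess

    subprocess.run(
        ["lit", "-v", "paddle/infrt/tests", "--filter-out", "disabled_*"],
        check=True)
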
tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py

@@ -16,6 +16,8 @@ import paddle.fluid.framework as framework
 from paddle.fluid import core
 from paddle import compat as cpt

+ops_having_canonicalization = {"elementwise_add", }
+

 # collect original ops: op which has both inference and grid defination
 def get_original_ops():
@@ -120,7 +122,18 @@ def convert_op_proto_into_mlir(op_descs):
          |*                                                                            *|\n\
          \*===----------------------------------------------------------------------===*/\n"

-    start_ = comment_ + "#ifndef PD_OPS\n#define PD_OPS\ninclude \"mlir/Interfaces/InferTypeOpInterface.td\"\ninclude \"mlir/Interfaces/LoopLikeInterface.td\"\ninclude \"mlir/IR/OpBase.td\"\ninclude \"paddle/infrt/dialect/pd_op_base.td\"\n\n"
+    lines = [
+        "#ifndef PD_OPS",
+        "#define PD_OPS",
+        "include \"mlir/Interfaces/InferTypeOpInterface.td\"",
+        "include \"mlir/Interfaces/LoopLikeInterface.td\"",
+        "include \"mlir/IR/OpBase.td\"",
+        "include \"paddle/infrt/dialect/pd_op_base.td\"",
+        "",
+    ]
+    start_ = comment_ + "\n".join(lines)
+
     with open(dst_dialect_file, 'w') as ops_mlir_file:
         ops_mlir_file.write(start_)
@@ -134,6 +147,7 @@ def convert_op_proto_into_mlir(op_descs):
         "trainable_statistics", "use_global_stats", "is_test", "use_mkldnn", "use_cudnn"
     ]

     original_ops_ = get_original_ops()
+    automatically_generated_op_dialect = []
     for op_type, op_proto in op_descs.items():
@@ -144,6 +158,7 @@ def convert_op_proto_into_mlir(op_descs):
         HEAD = "def PD_" + op_type.capitalize() + "Op : PD_Op<\"" + op_type + "\", [NoSideEffect]> {\n"
         SUMMARY = " let summary = \"" + op_type + " op\";\n"
+        CANONICALIZATION = "let hasCanonicalizer = 1;" if op_type in ops_having_canonicalization else ""

         # 2.2 Description
         DESCRIPTION = " let description = [{\n"
@@ -245,6 +260,7 @@ def convert_op_proto_into_mlir(op_descs):
         ops_mlir_file.write(DESCRIPTION)
         ops_mlir_file.write(ARGUMENTS)
         ops_mlir_file.write(RESULTS)
+        ops_mlir_file.write(CANONICALIZATION)
         ops_mlir_file.write("}\n")

     print("Skipped ops num: " + str(len(skipped_op_list)))
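
The net effect of these generator changes is that ops listed in ops_having_canonicalization get an extra "let hasCanonicalizer = 1;" line in their generated TableGen definition, which is what lets the generated Elementwise_addOp register the FuseMulAdd pattern shown in rewrite.td. A small self-contained sketch of that toggle; the op names are examples only, the real script derives them from Paddle's OpProtos:

    # Minimal sketch of how the generator toggles hasCanonicalizer per op.
    ops_having_canonicalization = {"elementwise_add", }

    def op_definition(op_type):
        head = 'def PD_' + op_type.capitalize() + 'Op : PD_Op<"' + op_type + '", [NoSideEffect]> {\n'
        canonicalization = ("let hasCanonicalizer = 1;\n"
                            if op_type in ops_having_canonicalization else "")
        return head + canonicalization + "}\n"

    print(op_definition("elementwise_add"))  # contains hasCanonicalizer
    print(op_definition("relu"))             # does not
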