BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit 481db5e9 (unverified)
Authored on Mar 14, 2022 by 王明冬; committed via GitHub on Mar 14, 2022
[infrt] unify the infrt dialect. test=develop (#40451)
Parent: e5c59fc9

Showing 90 changed files with 439 additions and 675 deletions (+439 -675)
Changed files:

cmake/external/llvm.cmake (+2 -1)
paddle/infrt/CMakeLists.txt (+0 -1)
paddle/infrt/api/infrt_api.cc (+2 -2)
paddle/infrt/dialect/CMakeLists.txt (+2 -8)
paddle/infrt/dialect/dense_tensor.h (+1 -1)
paddle/infrt/dialect/dense_tensor.td (+1 -1)
paddle/infrt/dialect/infrt/CMakeLists.txt (+2 -16)
paddle/infrt/dialect/infrt/common/CMakeLists.txt (+6 -0)
paddle/infrt/dialect/infrt/common/types.cc (+1 -1)
paddle/infrt/dialect/infrt/common/types.h (+0 -0)
paddle/infrt/dialect/infrt/common/utils.cc (+28 -0)
paddle/infrt/dialect/infrt/common/utils.h (+17 -1)
paddle/infrt/dialect/infrt/ir/CMakeLists.txt (+18 -0)
paddle/infrt/dialect/infrt/ir/basic_kernels.cc (+2 -61)
paddle/infrt/dialect/infrt/ir/basic_kernels.h (+1 -1)
paddle/infrt/dialect/infrt/ir/basic_kernels.td (+3 -36)
paddle/infrt/dialect/infrt/ir/infrt_base.td (+17 -0)
paddle/infrt/dialect/infrt/ir/infrt_dialect.cc (+21 -9)
paddle/infrt/dialect/infrt/ir/infrt_dialect.h (+5 -5)
paddle/infrt/dialect/infrt/ir/infrt_ops.td (+21 -1)
paddle/infrt/dialect/infrt/ir/test_kernels.cc (+3 -3)
paddle/infrt/dialect/infrt/ir/test_kernels.h (+1 -1)
paddle/infrt/dialect/infrt/ir/test_kernels.td (+3 -3)
paddle/infrt/dialect/infrt/pass/infrt_op_fuse.td (+1 -1)
paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.cc (+1 -1)
paddle/infrt/dialect/infrt_base.cc (+0 -56)
paddle/infrt/dialect/infrt_base.h (+0 -83)
paddle/infrt/dialect/infrt_base.td (+0 -33)
paddle/infrt/dialect/init_dialects.cc (+5 -6)
paddle/infrt/dialect/init_dialects.h (+0 -0)
paddle/infrt/dialect/mlir_loader.cc (+1 -1)
paddle/infrt/dialect/mlir_loader_test.cc (+6 -6)
paddle/infrt/dialect/opt.cc (+1 -1)
paddle/infrt/dialect/pd_op_base.td (+1 -1)
paddle/infrt/dialect/pd_ops.cc (+0 -1)
paddle/infrt/dialect/pd_ops.h (+1 -1)
paddle/infrt/dialect/pd_types.h (+0 -56)
paddle/infrt/dialect/phi/data_type.h (+1 -1)
paddle/infrt/dialect/phi/ir/infrt_phi_base.td (+1 -1)
paddle/infrt/dialect/phi/ir/infrt_phi_kernel.td (+1 -1)
paddle/infrt/dialect/phi/ir/infrt_phi_tensor.td (+1 -1)
paddle/infrt/dialect/phi/ir/phi_base.h (+1 -1)
paddle/infrt/dialect/phi/ir/phi_kernels.h (+1 -1)
paddle/infrt/dialect/phi/pass/kernel_op_desc.h (+1 -1)
paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.cc (+52 -28)
paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.h (+4 -34)
paddle/infrt/dialect/phi/phi_ir_exec.cc (+1 -1)
paddle/infrt/dialect/print_ir.cc (+1 -1)
paddle/infrt/dialect/rewrite.td (+1 -1)
paddle/infrt/dialect/tensor_shape.td (+1 -1)
paddle/infrt/dialect/tensorrt/pd_lower_to_trt.td (+1 -1)
paddle/infrt/dialect/tensorrt/trt_graph_fuse_pass.h (+6 -7)
paddle/infrt/dialect/tensorrt/trt_graph_split_pass.h (+3 -4)
paddle/infrt/dialect/tensorrt/trt_op_converter_pass.cc (+1 -2)
paddle/infrt/dialect/tensorrt/trt_op_converter_pass.h (+5 -5)
paddle/infrt/dialect/tensorrt/trt_op_teller_pass.cc (+2 -2)
paddle/infrt/dialect/tensorrt/trt_op_teller_pass.h (+5 -6)
paddle/infrt/dialect/tensorrt/trt_ops.h (+2 -2)
paddle/infrt/external_kernels/basic.mlir (+3 -3)
paddle/infrt/external_kernels/fc.mlir (+25 -25)
paddle/infrt/external_kernels/paddle.mlir (+32 -32)
paddle/infrt/host_context/mlir_exec.cc (+1 -1)
paddle/infrt/host_context/mlir_tests/basic.mlir (+12 -12)
paddle/infrt/host_context/mlir_tests/dense_tensor.mlir (+4 -4)
paddle/infrt/host_context/mlir_tests/shape.mlir (+2 -2)
paddle/infrt/host_context/mlir_to_runtime_translate.cc (+4 -4)
paddle/infrt/host_context/mlir_to_runtime_translate.h (+1 -1)
paddle/infrt/host_context/mlir_to_runtime_translate_test.cc (+13 -13)
paddle/infrt/host_context/paddle_mlir.cc (+0 -1)
paddle/infrt/host_context/paddle_mlir.h (+3 -3)
paddle/infrt/host_context/value.h (+1 -1)
paddle/infrt/kernel/basic_kernels.cc (+12 -12)
paddle/infrt/kernel/control_flow_kernels.cc (+1 -1)
paddle/infrt/kernel/phi/dense_tensor_kernels.h (+1 -1)
paddle/infrt/kernel/test_kernels.cc (+1 -1)
paddle/infrt/tests/dialect/basic.mlir (+14 -14)
paddle/infrt/tests/dialect/benchmark.mlir (+7 -7)
paddle/infrt/tests/dialect/dense_tensor.mlir (+4 -4)
paddle/infrt/tests/dialect/disabled_tensor_map.mlir (+14 -14)
paddle/infrt/tests/dialect/paddle_ops.mlir (+1 -1)
paddle/infrt/tests/dialect/phi/dense_tensor.mlir (+1 -1)
paddle/infrt/tests/dialect/phi/phi_test.mlir (+3 -3)
paddle/infrt/tests/dialect/tensor/dense_tensor.mlir (+4 -4)
paddle/infrt/tests/dialect/tensor/naive_kernels.mlir (+2 -2)
paddle/infrt/tests/dialect/tensor/tensor_map.mlir.in (+2 -2)
paddle/infrt/tests/dialect/tensor/tensor_shape.mlir (+1 -1)
paddle/infrt/tests/dialect/tensor/tensor_type.mlir (+1 -1)
paddle/infrt/tests/dialect/tensor_shape.mlir (+1 -1)
paddle/infrt/tests/dialect/tensor_type.mlir (+1 -1)
paddle/infrt/tests/dialect/trt_ops.mlir (+1 -1)
cmake/external/llvm.cmake
@@ -99,7 +99,8 @@ endfunction()
 function(mlir_add_rewriter td_base)
   set(LLVM_TARGET_DEFINITIONS ${td_base}.td)
-  mlir_tablegen(${td_base}.cpp.inc -gen-rewriters "-I${CMAKE_SOURCE_DIR}/infrt/dialect/pass")
+  set(LLVM_TARGET_DEPENDS ${LLVM_TARGET_DEPENDS} ${CMAKE_SOURCE_DIR}/paddle/infrt/dialect/infrt/ir/infrt_base.td)
+  mlir_tablegen(${td_base}.cpp.inc -gen-rewriters)
   add_public_tablegen_target(MLIR${td_base}IncGen)
   add_dependencies(mlir-headers MLIR${td_base}IncGen)
 endfunction()
paddle/infrt/CMakeLists.txt
@@ -90,7 +90,6 @@ add_subdirectory(tests)
 set(infrt_mlir_incs
     basic_kernels_inc
     test_kernels_inc
-    infrt_base_inc
     tensor_shape_inc
     dense_tensor_inc
     pd_ops_inc
paddle/infrt/api/infrt_api.cc
@@ -24,7 +24,7 @@
 #include "paddle/infrt/common/global.h"
 #include "paddle/infrt/dialect/dense_tensor.h"
-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
 #include "paddle/infrt/dialect/mlir_loader.h"
 #include "paddle/infrt/host_context/core_runtime.h"
 #include "paddle/infrt/host_context/kernel_registry.h"
@@ -144,7 +144,7 @@ class PredictExecutor : public MlirToRuntimeTranslator {
   // process results
   auto &last_op = predict_func.front().back();
-  if (last_op.getName().getStringRef() == "Infrt.return") {
+  if (last_op.getName().getStringRef() == "infrt.return") {
     for (size_t i = 0; i < last_op.getNumOperands(); ++i) {
       auto *value = AddValue(mlir::Value(last_op.getOperand(i)));
       results_.push_back(ValueRef(value));
paddle/infrt/dialect/CMakeLists.txt
@@ -2,26 +2,20 @@ core_gather_headers()
 gather_srcs(infrt_src SRCS
     dialect.cc
-    basic_kernels.cc
-    test_kernels.cc
-    infrt_base.cc
-    init_infrt_dialects.cc
+    init_dialects.cc
     tensor_shape.cc
     dense_tensor.cc
     mlir_loader.cc
     diagnostic_utils.cc
-    pd_types.cc
     pd_ops.cc
     )

-mlir_tablegen_on(basic_kernels)
-mlir_tablegen_on(test_kernels)
-mlir_tablegen_on(infrt_base DIALECT Infrt)
 mlir_tablegen_on(tensor_shape DIALECT ts)
 mlir_tablegen_on(dense_tensor DIALECT dt)
 mlir_tablegen_on(pd_op_base DIALECT pd)
 mlir_tablegen_on(pd_ops)
 mlir_tablegen_on(pd_extra_ops)
 mlir_add_rewriter(rewrite)

 # TODO(Superjomn) add a cmake function cc_executable to ecapsulate the following code
paddle/infrt/dialect/dense_tensor.h
@@ -19,7 +19,7 @@
 #include <string>

-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"

 #include "paddle/infrt/dialect/dense_tensor_dialect.hpp.inc"
paddle/infrt/dialect/dense_tensor.td
@@ -2,7 +2,7 @@
 #else
 #define DT_OPS

-include "paddle/infrt/dialect/infrt_base.td"
+include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
 include "paddle/infrt/dialect/tensor_shape_base.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
paddle/infrt/dialect/infrt/CMakeLists.txt
-core_gather_headers()
-
-gather_srcs(infrt_src SRCS
-    common_type.cc
-    infrt_dialect.cc
-    )
-
-add_mlir_dialect(infrt_ops infrt)
-
-set(LLVM_TARGET_DEFINITIONS infrt_ops.td)
-mlir_tablegen(infrt_opsAttributes.h.inc -gen-attrdef-decls -dialect=infrt)
-mlir_tablegen(infrt_opsAttributes.cpp.inc -gen-attrdef-defs -dialect=infrt)
-add_public_tablegen_target(MLIRinfrt_opsAttributesIncGen)
-add_dependencies(mlir-headers MLIRinfrt_opsAttributesIncGen)
-
+add_subdirectory(common)
+add_subdirectory(ir)
 add_subdirectory(pass)
paddle/infrt/dialect/infrt/common/CMakeLists.txt (new file)
core_gather_headers()

gather_srcs(infrt_src SRCS
    types.cc
    utils.cc
    )
paddle/infrt/dialect/infrt/common_type.cc → paddle/infrt/dialect/infrt/common/types.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/infrt/dialect/infrt/common_type.h"
+#include "paddle/infrt/dialect/infrt/common/types.h"

 namespace infrt {
paddle/infrt/dialect/infrt/common_type.h → paddle/infrt/dialect/infrt/common/types.h (file moved, no content changes)
paddle/infrt/dialect/infrt/common/utils.cc (new file)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/infrt/dialect/infrt/common/utils.h"

mlir::SmallVector<mlir::Value, 4> infrt::cvtValueToValueRange(
    const mlir::Value &operand) {
  return mlir::SmallVector<mlir::Value, 4>(1, operand);
}

mlir::SmallVector<mlir::Value, 4> infrt::concatTwoValueRange(
    mlir::ValueRange operand_0, mlir::ValueRange operand_1) {
  mlir::SmallVector<mlir::Value, 4> operands;
  operands.append(operand_0.begin(), operand_0.end());
  operands.append(operand_1.begin(), operand_1.end());
  return operands;
}
paddle/infrt/dialect/pd_types.cc → paddle/infrt/dialect/infrt/common/utils.h
@@ -12,4 +12,20 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/infrt/dialect/pd_types.h"
+#pragma once
+
+#include <mlir/IR/Builders.h>
+#include <mlir/IR/Dialect.h>
+#include <mlir/IR/DialectImplementation.h>
+#include <mlir/IR/MLIRContext.h>
+#include <mlir/IR/TypeUtilities.h>
+#include <mlir/IR/Types.h>
+
+namespace infrt {
+
+mlir::SmallVector<mlir::Value, 4> cvtValueToValueRange(
+    const mlir::Value &operand);
+
+mlir::SmallVector<mlir::Value, 4> concatTwoValueRange(
+    mlir::ValueRange operand_0, mlir::ValueRange operand_1);
+
+}  // namespace infrt
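These two helpers are wired into TableGen through NativeCodeCall definitions later in this change (see infrt_base.td below). As a rough illustration of their behavior, here is a minimal hypothetical sketch; the function demo and its two input values are invented for this note, and an MLIR development environment is assumed:

    #include <cassert>
    #include <mlir/IR/Value.h>
    #include "paddle/infrt/dialect/infrt/common/utils.h"

    // Invented demo: wrap a single Value, then splice two ranges together.
    void demo(mlir::Value a, mlir::Value b) {
      // cvtValueToValueRange: one Value becomes a one-element vector, usable
      // wherever generated rewriter code expects a ValueRange.
      auto range_a = infrt::cvtValueToValueRange(a);
      // concatTwoValueRange: append the elements of both ranges in order.
      auto merged = infrt::concatTwoValueRange(range_a, mlir::ValueRange{b});
      assert(merged.size() == 2);  // merged now holds {a, b}
    }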
paddle/infrt/dialect/infrt/ir/CMakeLists.txt (new file)
core_gather_headers()

gather_srcs(infrt_src SRCS
    infrt_dialect.cc
    basic_kernels.cc
    test_kernels.cc
    )

add_mlir_dialect(infrt_ops infrt)

set(LLVM_TARGET_DEFINITIONS infrt_ops.td)
mlir_tablegen(infrt_opsAttributes.h.inc -gen-attrdef-decls -dialect=infrt)
mlir_tablegen(infrt_opsAttributes.cpp.inc -gen-attrdef-defs -dialect=infrt)
add_public_tablegen_target(MLIRinfrt_opsAttributesIncGen)
add_dependencies(mlir-headers MLIRinfrt_opsAttributesIncGen)

mlir_tablegen_on(basic_kernels)
mlir_tablegen_on(test_kernels)
paddle/infrt/dialect/basic_kernels.cc → paddle/infrt/dialect/infrt/ir/basic_kernels.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/infrt/dialect/basic_kernels.h"
+#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"

 #include <llvm/ADT/STLExtras.h>
 #include <mlir/IR/Attributes.h>
@@ -30,23 +30,6 @@ namespace infrt {
 namespace dialect {
 using namespace mlir;  // NOLINT

-static ParseResult parseCallOp(OpAsmParser &parser,       // NOLINT
-                               OperationState &result) {  // NOLINT
-  SymbolRefAttr callee_attr;
-  FunctionType callee_type;
-  SmallVector<OpAsmParser::OperandType, 4> operands;
-  auto callee_loc = parser.getNameLoc();
-  if (parser.parseAttribute(callee_attr, "callee", result.attributes) ||
-      parser.parseOperandList(operands, OpAsmParser::Delimiter::Paren) ||
-      parser.parseOptionalAttrDict(result.attributes) ||
-      parser.parseColonType(callee_type) ||
-      parser.addTypesToList(callee_type.getResults(), result.types) ||
-      parser.resolveOperands(operands, callee_type.getInputs(), callee_loc,
-                             result.operands))
-    return failure();
-  return success();
-}
-
 static ParseResult parseConstantOp(Type attrType,
                                    OpAsmParser &parser,       // NOLINT
                                    OperationState &result) {  // NOLINT
@@ -79,24 +62,6 @@ static ParseResult parseConstantI64Op(OpAsmParser &parser,  // NOLINT
                          IntegerType::get(result.getContext(), 64), parser, result);
 }

-static ParseResult parseReturnOp(OpAsmParser &parser,       // NOLINT
-                                 OperationState &result) {  // NOLINT
-  SmallVector<OpAsmParser::OperandType, 2> opInfo;
-  SmallVector<Type, 2> types;
-  llvm::SMLoc loc = parser.getCurrentLocation();
-  return failure(parser.parseOperandList(opInfo) ||
-                 (!opInfo.empty() && parser.parseColonTypeList(types)) ||
-                 parser.resolveOperands(opInfo, types, loc, result.operands));
-}
-
-static void print(OpAsmPrinter &p, CallOp op) {  // NOLINT
-  p << op->getAttr("callee") << "(";
-  p.printOperands(op.getOperands());
-  p << ")";
-  p.printOptionalAttrDict(op->getAttrs(), {"callee"});
-  p << " : ";
-}
-
 static void printConstant(OpAsmPrinter &p, mlir::Operation *op) {  // NOLINT
   p << " ";
   p.printOptionalAttrDict(op->getAttrs(), /*elidedAttrs=*/{"value"});
@@ -127,37 +92,13 @@ static void print(OpAsmPrinter &p, ConstantI64Op op) {  // NOLINT
   printConstant(p, op);
 }

-static void print(OpAsmPrinter &p, ReturnOp op) {  // NOLINT
-  if (op.getNumOperands() > 0) {
-    p << ' ';
-    p.printOperands(op.getOperands());
-    p << " : ";
-    llvm::interleaveComma(op.getOperands(), p);
-  }
-}
-
-static LogicalResult verify(CallOp op) { return success(); }
-
 static LogicalResult verify(ConstantF32Op op) { return success(); }
 static LogicalResult verify(ConstantI32Op op) { return success(); }
 static LogicalResult verify(ConstantF64Op op) { return success(); }
 static LogicalResult verify(ConstantI64Op op) { return success(); }

-static LogicalResult verify(ReturnOp op) {
-  auto function = dyn_cast<FuncOp>(op->getParentOp());
-
-  if (!function) return success();
-
-  auto results = function.getType().getResults();
-  if (op.getNumOperands() != results.size())
-    return op.emitOpError("has ")
-           << op.getNumOperands()
-           << " operands, but enclosing function returns " << results.size();
-
-  return success();
-}
-
 }  // namespace dialect
 }  // namespace infrt

 #define GET_OP_CLASSES
-#include "paddle/infrt/dialect/basic_kernels.cpp.inc"
+#include "paddle/infrt/dialect/infrt/ir/basic_kernels.cpp.inc"
paddle/infrt/dialect/basic_kernels.h → paddle/infrt/dialect/infrt/ir/basic_kernels.h
@@ -18,4 +18,4 @@
 #include <mlir/Interfaces/SideEffectInterfaces.h>

 #define GET_OP_CLASSES
-#include "paddle/infrt/dialect/basic_kernels.hpp.inc"
+#include "paddle/infrt/dialect/infrt/ir/basic_kernels.hpp.inc"
paddle/infrt/dialect/basic_kernels.td → paddle/infrt/dialect/infrt/ir/basic_kernels.td
@@ -4,10 +4,10 @@
 #else
 #define BASIC_OPS

-include "paddle/infrt/dialect/infrt_base.td"
+include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"

-class INFRT_Op<string mnemonic, list<OpTrait> traits = []> : Op<INFRT_Dialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> {
+class INFRT_Op<string mnemonic, list<OpTrait> traits = []> : Op<Infrt_Dialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> {

 // Each registered op needs to provide all of a printer, parser and verifier.
 let printer = [{ return infrt::dialect::print(p, *this); }];
@@ -15,23 +15,6 @@ class INFRT_Op<string mnemonic, list<OpTrait> traits = []> : Op<INFRT_Dialect, m
 let parser = [{ return infrt::dialect::parse$cppClass(parser, result); }];
 }

-def CallOp : INFRT_Op<"call"> {
-  let summary = "call a host operation";
-  let description = [{
-      The "infrt.call" operation represents a direct call to a function. The operands and result types of the call must match the specified function type.
-
-      %2 = infrt.call @add(%0, %1) : (f32, f32) -> f32
-  }];
-
-  let arguments = (ins FlatSymbolRefAttr:$callee, Variadic<AnyType>:$operands);
-  let results = (outs Variadic<AnyType>);
-
-  let extraClassDeclaration = [{
-    mlir::StringRef getCallee() { return callee(); }
-    mlir::FunctionType getCalleeType();
-  }];
-}
-
 class ConstantOp<string suffix, Type baseType, Attr attr>
   : INFRT_Op<"constant." # suffix, [NoSideEffect]> {
   let summary = "constant value constructor in host";
@@ -45,22 +28,6 @@ def ConstantI64Op : ConstantOp<"i64", I64, I64Attr>;
 def ConstantF32Op : ConstantOp<"f32", F32, F32Attr>;
 def ConstantF64Op : ConstantOp<"f64", F64, F64Attr>;

-def ReturnOp : INFRT_Op<"return", [Terminator]> {
-  let summary = "host executor return operation";
-  let description = [{
-      The "Infrt.return" operation represents a return operation within a function.
-
-        func @foo() : (i32, f8) {
-          Infrt.return %0, %1 : i32, f8
-        }
-  }];
-
-  let arguments = (ins Variadic<AnyType>:$operands);
-
-  let builders = [OpBuilder<(ins),
-                  [{ build($_builder, $_state, llvm::None); }]>];
-}
-
 class AddOp<string suffix, Type type> : INFRT_Op<"add." # suffix, [NoSideEffect]> {
   let summary = "infrt.add operation";
   let description = [{
@@ -112,7 +79,7 @@ def PrintF32Op : PrintOp<"f32", F32>;
 def PrintF64Op : PrintOp<"f64", F64>;

 def PrintStringOp : INFRT_Op<"print_string"> {
-  let summary = "Infrt.print_string";
+  let summary = "infrt.print_string";
   let description = [{
     An operation that prints a string.
   }];
paddle/infrt/dialect/infrt/infrt_ops_base.td → paddle/infrt/dialect/infrt/ir/infrt_base.td
@@ -101,4 +101,21 @@ class Infrt_Attr<string name, list<Trait> traits = [],
     : AttrDef<Infrt_Dialect, name, traits, baseCppClass> {
   let mnemonic = ?;
 }

+// tools function. used for pattern rewriter
+class INFRT_createI32Attr<string value> : NativeCodeCall<
+    "$_builder.getI32IntegerAttr(" # value # ")">;
+
+class INFRT_createSI32Attr<string value> : NativeCodeCall<
+    "$_builder.getSI32IntegerAttr(" # value # ")">;
+
+class INFRT_createF32Attr<string value> : NativeCodeCall<
+    "$_builder.getF32FloatAttr(" # value # ")">;
+
+def INFRT_cvtValueToValueRange : NativeCodeCall<
+    "infrt::cvtValueToValueRange($0)">;
+
+def INFRT_concatTwoValueRange : NativeCodeCall<
+    "infrt::concatTwoValueRange($0, $1)">;
+
 #endif  // INFRT_OPS_BASE
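For orientation: a NativeCodeCall string is spliced verbatim into the C++ that mlir-tablegen generates for a rewrite pattern, with $_builder bound to the pattern rewriter and $0/$1 bound to captured values. A pattern using INFRT_createI32Attr<"42"> therefore ends up emitting code roughly like the hypothetical fragment below; the variable names are invented:

    // Roughly what the tablegen-generated rewriter emits (hypothetical names).
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(42);
    // INFRT_concatTwoValueRange($0, $1) likewise expands to a call such as:
    auto operands = infrt::concatTwoValueRange(captured0, captured1);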
paddle/infrt/dialect/infrt/infrt_dialect.cc → paddle/infrt/dialect/infrt/ir/infrt_dialect.cc
@@ -12,40 +12,52 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"

 #include <llvm/ADT/TypeSwitch.h>
 #include <mlir/IR/Builders.h>
 #include <mlir/IR/BuiltinOps.h>
 #include <mlir/IR/DialectImplementation.h>

 #include "paddle/infrt/dialect/dense_tensor.h"
-#include "paddle/infrt/dialect/infrt/infrt_opsDialect.cpp.inc"
+#include "paddle/infrt/dialect/infrt/ir/infrt_opsDialect.cpp.inc"
 #define GET_TYPEDEF_CLASSES
-#include "paddle/infrt/dialect/infrt/infrt_opsTypes.cpp.inc"
+#include "paddle/infrt/dialect/infrt/ir/infrt_opsTypes.cpp.inc"
 #define GET_ATTRDEF_CLASSES
-#include "paddle/infrt/dialect/infrt/infrt_opsAttributes.cpp.inc"
+#include "paddle/infrt/dialect/infrt/ir/infrt_opsAttributes.cpp.inc"
 #define GET_OP_CLASSES
-#include "paddle/infrt/dialect/infrt/infrt_ops.cpp.inc"
+#include "paddle/infrt/dialect/infrt/ir/infrt_ops.cpp.inc"
+
+#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
+#include "paddle/infrt/dialect/infrt/ir/test_kernels.h"

 namespace infrt {

 void InfrtDialect::initialize() {
   addTypes<
 #define GET_TYPEDEF_LIST
-#include "paddle/infrt/dialect/infrt/infrt_opsTypes.cpp.inc"  // NOLINT
+#include "paddle/infrt/dialect/infrt/ir/infrt_opsTypes.cpp.inc"  // NOLINT
       >();
   addAttributes<
 #define GET_ATTRDEF_LIST
-#include "paddle/infrt/dialect/infrt/infrt_opsAttributes.cpp.inc"  // NOLINT
+#include "paddle/infrt/dialect/infrt/ir/infrt_opsAttributes.cpp.inc"  // NOLINT
       >();
   addOperations<
 #define GET_OP_LIST
-#include "paddle/infrt/dialect/infrt/infrt_ops.cpp.inc"  // NOLINT
+#include "paddle/infrt/dialect/infrt/ir/infrt_ops.cpp.inc"  // NOLINT
       >();
+  addOperations<
+#define GET_OP_LIST
+#include "paddle/infrt/dialect/infrt/ir/basic_kernels.cpp.inc"
+      >();
+  addOperations<
+#define GET_OP_LIST
+#include "paddle/infrt/dialect/infrt/ir/test_kernels.cpp.inc"
+      >();
 }
@@ -128,7 +140,7 @@ mlir::Type InfrtDialect::parseType(::mlir::DialectAsmParser &parser) const {
 void InfrtDialect::printType(::mlir::Type type,
                              ::mlir::DialectAsmPrinter &os) const {
-  // print LoDTensorType, for example: !Infrt.lod_tensor<3x64x3x3xf32,5>
+  // print LoDTensorType, for example: !infrt.lod_tensor<3x64x3x3xf32,5>
   if (type.isa<infrt::LoDTensorType>()) {
     auto lod_tensor_type = type.cast<infrt::LoDTensorType>();
     os << "lod_tensor<";
paddle/infrt/dialect/infrt/infrt_dialect.h → paddle/infrt/dialect/infrt/ir/infrt_dialect.h
@@ -22,14 +22,14 @@
 #include <mlir/IR/Dialect.h>
 #include <mlir/IR/OpDefinition.h>
 #include <mlir/Interfaces/SideEffectInterfaces.h>

-#include "paddle/infrt/dialect/infrt/common_type.h"
-#include "paddle/infrt/dialect/infrt/infrt_opsDialect.h.inc"
+#include "paddle/infrt/dialect/infrt/common/types.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_opsDialect.h.inc"
 #define GET_TYPEDEF_CLASSES
-#include "paddle/infrt/dialect/infrt/infrt_opsTypes.h.inc"
+#include "paddle/infrt/dialect/infrt/ir/infrt_opsTypes.h.inc"
 #define GET_ATTRDEF_CLASSES
-#include "paddle/infrt/dialect/infrt/infrt_opsAttributes.h.inc"
+#include "paddle/infrt/dialect/infrt/ir/infrt_opsAttributes.h.inc"
 #define GET_OP_CLASSES
-#include "paddle/infrt/dialect/infrt/infrt_ops.h.inc"
+#include "paddle/infrt/dialect/infrt/ir/infrt_ops.h.inc"
paddle/infrt/dialect/infrt/infrt_ops.td → paddle/infrt/dialect/infrt/ir/infrt_ops.td
-include "paddle/infrt/dialect/infrt/infrt_ops_base.td"
+include "paddle/infrt/dialect/infrt/ir/infrt_base.td"

 // Op definition
 class Infrt_Op<string mnemonic, list<OpTrait> traits = []> : Op<Infrt_Dialect, mnemonic, traits> {
@@ -33,6 +33,26 @@ def Infrt_ReturnOp : Infrt_Op<"return", [Terminator]> {
   let assemblyFormat = "attr-dict ($operands^ `:` type($operands))?";
 }

+def Infrt_CallOp : Infrt_Op<"call"> {
+  let summary = "call a host operation";
+  let description = [{
+      The "infrt.call" operation represents a direct call to a function. The operands and result types of the call must match the specified function type.
+
+      %2 = infrt.call @add(%0, %1) : (f32, f32) -> f32
+  }];
+
+  let arguments = (ins FlatSymbolRefAttr:$callee, Variadic<AnyType>:$operands);
+  let results = (outs Variadic<AnyType>);
+
+  //let extraClassDeclaration = [{
+  //  mlir::StringRef getCallee() { return callee(); }
+  //  mlir::FunctionType getCalleeType();
+  // }];
+
+  let assemblyFormat = [{
+    $callee `(` $operands `)` attr-dict `:` functional-type($operands, results)
+  }];
+}
+
 def Infrt_CvtTensorOp : Infrt_Op<"cvt_tensor", [NoSideEffect]> {
   let summary = "convert tensor type op";
   let description = [{convert tensor type op!}];
paddle/infrt/dialect/test_kernels.cc → paddle/infrt/dialect/infrt/ir/test_kernels.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/infrt/dialect/test_kernels.h"
+#include "paddle/infrt/dialect/infrt/ir/test_kernels.h"

 #include <mlir/IR/Builders.h>
 #include <mlir/IR/OpDefinition.h>
@@ -147,7 +147,7 @@ static mlir::LogicalResult verify(BenchmarkOp op) {
   // Verify that the target benchmark region has exactly one return value.
   auto &region = op.region();
   auto &last_op = region.front().back();
-  if (last_op.getName().getStringRef() != "Infrt.return") {
+  if (last_op.getName().getStringRef() != "infrt.return") {
     return op.emitOpError("missing return statement");
   }
   if (last_op.getNumOperands() != 1) {
@@ -161,4 +161,4 @@ static mlir::LogicalResult verify(BenchmarkOp op) {
 }  // namespace infrt

 #define GET_OP_CLASSES
-#include "paddle/infrt/dialect/test_kernels.cpp.inc"
+#include "paddle/infrt/dialect/infrt/ir/test_kernels.cpp.inc"
paddle/infrt/dialect/test_kernels.h → paddle/infrt/dialect/infrt/ir/test_kernels.h
@@ -17,4 +17,4 @@
 #include <mlir/Interfaces/SideEffectInterfaces.h>

 #define GET_OP_CLASSES
-#include "paddle/infrt/dialect/test_kernels.hpp.inc"
+#include "paddle/infrt/dialect/infrt/ir/test_kernels.hpp.inc"
paddle/infrt/dialect/test_kernels.td → paddle/infrt/dialect/infrt/ir/test_kernels.td
@@ -4,12 +4,12 @@
 #else
 #define TEST_OPS

-include "paddle/infrt/dialect/infrt_base.td"
+include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"

 // Base class for Test dialect ops.
 class Test_Op<string mnemonic, list<OpTrait> traits = []> :
-    Op<INFRT_Dialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> {
+    Op<Infrt_Dialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> {

 // Each registered op in the Test namespace needs to provide all of a printer,
 // parser and verifier.
@@ -45,7 +45,7 @@ def BenchmarkOp : Test_Op<"benchmark"> {
       // The following code benchmarks the infrt.add.i32 kernel.
       %x = infrt.add.i32 %c, %c
       // The benchmarked function needs to return exactly one value.
-      Infrt.return %x : i32
+      infrt.return %x : i32
   }];
paddle/infrt/dialect/infrt/pass/infrt_op_fuse.td
@@ -2,7 +2,7 @@
 #define INFRT_OP_FUSE

 include "mlir/Interfaces/SideEffectInterfaces.td"
-include "paddle/infrt/dialect/infrt/infrt_ops.td"
+include "paddle/infrt/dialect/infrt/ir/infrt_ops.td"
 include "paddle/infrt/dialect/pd_ops.td"

 def FuseCvtTensorPattern : Pat<
paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.cc
@@ -15,7 +15,7 @@
 #include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h"

 #include <mlir/Transforms/GreedyPatternRewriteDriver.h>
-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
 #include "paddle/infrt/dialect/pd_ops.h"

 namespace {
 #include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse.cpp.inc"  // NOLINT
paddle/infrt/dialect/infrt_base.cc (deleted)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/infrt/dialect/infrt_base.h"

#include "paddle/infrt/dialect/basic_kernels.h"
#include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/test_kernels.h"

namespace infrt {
namespace dialect {

// ----INFRTDialect definition begin----
void INFRTDialect::initialize() {
  allowUnknownTypes();
  allowUnknownOperations();

  addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/basic_kernels.cpp.inc"
      >();
  addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/test_kernels.cpp.inc"
      >();
}

mlir::Type INFRTDialect::parseType(mlir::DialectAsmParser &parser) const {
  llvm::StringRef keyword;
  if (parser.parseKeyword(&keyword)) return mlir::Type();
  // parse TensorMapType, for example: !infrt.tensor_map
  parser.emitError(parser.getCurrentLocation(), "unknown infrt type: ")
      << keyword;
  return mlir::Type();
}

void INFRTDialect::printType(mlir::Type type,
                             mlir::DialectAsmPrinter &printer) const {
  // print TensorMapType, for example: !infrt.tensor_map
  llvm_unreachable("unknown infrt type.");
}
// ----INFRTDialect definition end----

}  // namespace dialect
}  // namespace infrt
paddle/infrt/dialect/infrt_base.h (deleted)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <mlir/IR/Builders.h>
#include <mlir/IR/Dialect.h>
#include <mlir/IR/DialectImplementation.h>
#include <mlir/IR/MLIRContext.h>
#include <mlir/IR/TypeUtilities.h>
#include <mlir/IR/Types.h>

#include "paddle/infrt/dialect/infrt_base.hpp.inc"

namespace infrt {
namespace dialect {

class INFRTDialect : public mlir::Dialect {
  explicit INFRTDialect(mlir::MLIRContext *context)
      : mlir::Dialect(getDialectNamespace(), context,
                      mlir::TypeID::get<INFRTDialect>()) {
    initialize();
  }

  // parse types registered to the dialect.
  mlir::Type parseType(mlir::DialectAsmParser &parser) const override;
  // print types registered to the dialect.
  void printType(mlir::Type type,
                 mlir::DialectAsmPrinter &printer) const override;

  void initialize();
  friend class mlir::MLIRContext;

 public:
  static ::llvm::StringRef getDialectNamespace() { return "Infrt"; }
};

}  // namespace dialect

template <typename T>
static mlir::IntegerAttr createI32Attr(mlir::OpBuilder &b,  // NOLINT
                                       mlir::Location loc, T constant) {
  return b.getIntegerAttr(b.getI32Type(), constant);
}

template <typename T>
static mlir::IntegerAttr createSI32Attr(mlir::OpBuilder &b,  // NOLINT
                                        mlir::Location loc, T constant) {
  return b.getSI32IntegerAttr(constant);
}

template <typename T>
static mlir::FloatAttr createF32Attr(mlir::OpBuilder &b,  // NOLINT
                                     mlir::Location loc, T constant) {
  return b.getF32FloatAttr(constant);
}

static mlir::SmallVector<mlir::Value, 4> cvtValueToValueRange(
    const mlir::Value &operand) {
  return mlir::SmallVector<mlir::Value, 4>(1, operand);
}

static mlir::SmallVector<mlir::Value, 4> concatTwoValueRange(
    mlir::ValueRange operand_0, mlir::ValueRange operand_1) {
  mlir::SmallVector<mlir::Value, 4> operands;
  operands.append(operand_0.begin(), operand_0.end());
  operands.append(operand_1.begin(), operand_1.end());
  return operands;
}

}  // namespace infrt
paddle/infrt/dialect/infrt_base.td (deleted)
#ifndef INFRT_BASE
#define INFRT_BASE

include "mlir/IR/OpBase.td"
include "paddle/infrt/dialect/infrt/infrt_ops_base.td"

def INFRT_Dialect : Dialect {
  let name = "Infrt";

  let description = [{
    The INFRT host dialect.
  }];

  let cppNamespace = "::infrt::dialect";
}

def BufferType : OpaqueType<"b", "buffer", "buffer">;

class INFRT_createI32Attr<string value> : NativeCodeCall<
    "infrt::createI32Attr($_builder, $_loc, " # value # ")">;

class INFRT_createSI32Attr<string value> : NativeCodeCall<
    "infrt::createSI32Attr($_builder, $_loc, " # value # ")">;

class INFRT_createF32Attr<string value> : NativeCodeCall<
    "infrt::createF32Attr($_builder, $_loc, " # value # ")">;

def INFRT_cvtValueToValueRange : NativeCodeCall<
    "infrt::cvtValueToValueRange($0)">;

def INFRT_concatTwoValueRange : NativeCodeCall<
    "infrt::concatTwoValueRange($0, $1)">;

#endif  // INFRT_BASE
paddle/infrt/dialect/init_infrt_dialects.cc → paddle/infrt/dialect/init_dialects.cc
@@ -12,14 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/infrt/dialect/init_infrt_dialects.h"
+#include "paddle/infrt/dialect/init_dialects.h"

 #include <glog/logging.h>

-#include "paddle/infrt/dialect/basic_kernels.h"
 #include "paddle/infrt/dialect/dense_tensor.h"
-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
-#include "paddle/infrt/dialect/infrt_base.h"
+#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
 #include "paddle/infrt/dialect/pd_ops.h"
 #include "paddle/infrt/dialect/phi/ir/infrt_phi_tensor.h"
 #include "paddle/infrt/dialect/phi/ir/phi_base.h"
@@ -30,8 +30,7 @@
 namespace infrt {
 void registerCinnDialects(mlir::DialectRegistry &registry) {  // NOLINT
   registry.insert<ts::TensorShapeDialect,
-                  dialect::INFRTDialect,
-                  infrt::InfrtDialect,
+                  InfrtDialect,
                   dt::DTDialect,
                   mlir::pd::PaddleDialect,
 #ifdef INFRT_WITH_PHI
paddle/infrt/dialect/init_infrt_dialects.h → paddle/infrt/dialect/init_dialects.h (file moved, no content changes)
paddle/infrt/dialect/mlir_loader.cc
@@ -28,7 +28,7 @@
 #include <vector>

 #include "paddle/infrt/dialect/diagnostic_utils.h"
-#include "paddle/infrt/dialect/init_infrt_dialects.h"
+#include "paddle/infrt/dialect/init_dialects.h"

 namespace infrt {
 namespace dialect {
paddle/infrt/dialect/mlir_loader_test.cc
@@ -22,7 +22,7 @@
 #include <string>

-#include "paddle/infrt/dialect/init_infrt_dialects.h"
+#include "paddle/infrt/dialect/init_dialects.h"

 namespace infrt {
 namespace dialect {
@@ -32,13 +32,13 @@ TEST(MlirLoader, basic) {
   auto source = R"ROC(
 func @main() -> f32 {
-  %v0 = Infrt.constant.f32 1.0
-  %v1 = Infrt.constant.f32 2.0
-  %value = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
+  %v0 = infrt.constant.f32 1.0
+  %v1 = infrt.constant.f32 2.0
+  %value = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32

-  "Infrt.print.f32"(%v0) : (f32) -> ()
+  "infrt.print.f32"(%v0) : (f32) -> ()

-  Infrt.return %value : f32
+  infrt.return %value : f32
 }
 )ROC";
paddle/infrt/dialect/opt.cc
@@ -14,7 +14,7 @@
 #include <mlir/Support/MlirOptMain.h>
 #include <mlir/Transforms/Passes.h>

-#include "paddle/infrt/dialect/init_infrt_dialects.h"
+#include "paddle/infrt/dialect/init_dialects.h"

 int main(int argc, char **argv) {
   mlir::DialectRegistry registry;
paddle/infrt/dialect/pd_op_base.td
@@ -6,7 +6,7 @@
 include "mlir/IR/OpBase.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
-include "paddle/infrt/dialect/infrt/infrt_ops_base.td"
+include "paddle/infrt/dialect/infrt/ir/infrt_base.td"

 def PD_Dialect : Dialect {
   let name = "pd";
paddle/infrt/dialect/pd_ops.cc
@@ -16,7 +16,6 @@
 #include <mlir/IR/Matchers.h>
 #include <mlir/IR/PatternMatch.h>

-#include "paddle/infrt/dialect/infrt_base.h"

 #define GET_OP_CLASSES
 #include "paddle/infrt/dialect/pd_ops.cpp.inc"  // NOLINT
paddle/infrt/dialect/pd_ops.h
@@ -28,7 +28,7 @@
 #include <mlir/Interfaces/InferTypeOpInterface.h>
 #include <mlir/Interfaces/LoopLikeInterface.h>
 #include <mlir/Interfaces/SideEffectInterfaces.h>

-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"

 namespace mlir {
 namespace pd {
paddle/infrt/dialect/pd_types.h (deleted)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file defines the types used in PaddlePaddle MLIR dialect.
// We borrowed much ideas from tensorflow mlir dialect (tf_types.h in
// tensorflow).

#pragma once

#include <mlir/IR/Diagnostics.h>
#include <mlir/IR/Location.h>
#include <mlir/IR/Operation.h>
#include <mlir/IR/TypeUtilities.h>
#include <mlir/IR/Types.h>

namespace mlir {
namespace PD {

class PaddleType : public Type {
 public:
  using Type::Type;

  static bool classof(Type type);
};

namespace detail {

template <typename Derived>
class PaddleTypeImpl : public Type::TypeBase<Derived, PaddleType, TypeStorage> {
 public:
  using Base = typename Type::TypeBase<Derived, PaddleType, TypeStorage>;
  using PDBase = PaddleTypeImpl<Derived>;
  using Base::Base;
};

}  // namespace detail

#define HANDLE_PD_TYPE(pdtype, enumerant, name)                       \
  class pdtype##Type : public detail::PaddleTypeImpl<pdtype##Type> {  \
   public:                                                            \
    using PDBase::PDBase;                                             \
  };

}  // namespace PD
}  // namespace mlir
paddle/infrt/dialect/phi/data_type.h
@@ -14,7 +14,7 @@
 #pragma once

-#include "paddle/infrt/dialect/infrt/common_type.h"
+#include "paddle/infrt/dialect/infrt/common/types.h"
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
 #include "paddle/phi/common/layout.h"
paddle/infrt/dialect/phi/ir/infrt_phi_base.td
@@ -2,7 +2,7 @@
 #define PHI_BASE

 include "mlir/IR/OpBase.td"
-include "paddle/infrt/dialect/infrt_base.td"
+include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
 include "mlir/Interfaces/InferTypeOpInterface.td"

 def PHI_Dialect : Dialect {
paddle/infrt/dialect/phi/ir/infrt_phi_kernel.td
@@ -3,7 +3,7 @@
 include "mlir/Interfaces/SideEffectInterfaces.td"
 include "mlir/IR/OpBase.td"
-include "paddle/infrt/dialect/infrt_base.td"
+include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
 include "paddle/infrt/dialect/phi/ir/infrt_phi_base.td"

 def PHI_CPUKernelDialect : Dialect {
paddle/infrt/dialect/phi/ir/infrt_phi_tensor.td
@@ -5,7 +5,7 @@
 include "paddle/infrt/dialect/phi/ir/infrt_phi_base.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
 include "mlir/IR/OpBase.td"
-include "paddle/infrt/dialect/infrt_base.td"
+include "paddle/infrt/dialect/infrt/ir/infrt_base.td"

 def PHI_DenseTensorDialect : Dialect {
   let name = "phi_dt";
paddle/infrt/dialect/phi/ir/phi_base.h
@@ -18,7 +18,7 @@
 #include <mlir/Interfaces/SideEffectInterfaces.h>
 #include <string>

-#include "paddle/infrt/dialect/infrt/common_type.h"
+#include "paddle/infrt/dialect/infrt/common/types.h"
 #include "paddle/infrt/dialect/phi/ir/infrt_phi_baseDialect.h.inc"
paddle/infrt/dialect/phi/ir/phi_kernels.h
@@ -30,7 +30,7 @@
 #include <mlir/Interfaces/SideEffectInterfaces.h>

 #include "paddle/infrt/dialect/dense_tensor.h"
-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
 #include "paddle/infrt/dialect/phi/ir/phi_base.h"
 #include "paddle/infrt/dialect/phi/ir/phi_cpu_kernelsDialect.h.inc"
paddle/infrt/dialect/phi/pass/kernel_op_desc.h
@@ -16,7 +16,7 @@
 #include <string>
 #include <vector>

-#include "paddle/infrt/dialect/infrt/common_type.h"
+#include "paddle/infrt/dialect/infrt/common/types.h"

 namespace infrt {
paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.cc
@@ -24,13 +24,29 @@
 #include <unordered_set>
 #include <vector>

-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
 #include "paddle/infrt/dialect/phi/ir/infrt_phi_tensor.h"
 #include "paddle/infrt/dialect/phi/pass/kernel_op_desc.h"
 #include "paddle/infrt/dialect/phi/pass/proto_arg_map_context.h"
 #include "paddle/phi/core/compat/op_utils.h"
 #include "paddle/phi/ops/compat/signatures.h"

-namespace infrt {
+namespace {
+class phiOpCvtPass
+    : public mlir::PassWrapper<phiOpCvtPass, mlir::FunctionPass> {
+ public:
+  ::llvm::StringRef getName() const override { return "phiOpCvtPass"; }
+  void runOnFunction() override;
+  explicit phiOpCvtPass(
+      std::vector<infrt::Place> valid_places = std::vector<infrt::Place>())
+      : valid_places_(valid_places) {}
+
+ private:
+  void convertStage();
+  void diapatchStage();
+  std::vector<infrt::Place> valid_places_;
+};
+
+// Implementation of the phiOpCvtPass.
 void phiOpCvtPass::runOnFunction() {
   convertStage();
@@ -63,7 +79,7 @@ void phiOpCvtPass::convertStage() {
     ::phi::KernelSignature kernel_sign =
         ::phi::OpUtilsMap::Instance().GetArgumentMappingFn(op_name)(
-            ProtoArgumentMappingContext(op));
+            infrt::ProtoArgumentMappingContext(op));
     // resort input&output according to kernel_sign
     ::llvm::SmallVector<mlir::Value, 4> inputs, ori_output;
     ::llvm::SmallVector<mlir::Type, 4> output_types;
@@ -109,10 +125,10 @@ void phiOpCvtPass::diapatchStage() {
   }

   mlir::OpBuilder builder(&block, block.begin());
-  std::map<TargetType, mlir::Value> phi_context;
+  std::map<infrt::TargetType, mlir::Value> phi_context;
   for (infrt::KernelOp kernel_op : worklist) {
     std::string kernel_name = kernel_op.name().str();
-    std::vector<PhiKernelDesc> candidates =
+    std::vector<infrt::PhiKernelDesc> candidates =
         getCandidateKernels(kernel_name, valid_places_);
     if (candidates.empty()) {
       LOG(FATAL) << "No candidate kernels for op:" << kernel_name;
@@ -121,12 +137,13 @@ void phiOpCvtPass::diapatchStage() {
     builder.setInsertionPoint(kernel_op);
     // Todo: Implimentation the concrete pass pick strategy
-    const PhiKernelDesc &phi_kernel_desc = candidates.front();
+    const infrt::PhiKernelDesc &phi_kernel_desc = candidates.front();

-    kernel_name = getPhiTargetPrefix(phi_kernel_desc.kernelType.target) +
-                  kernel_name +
-                  getPhiPrecisionSuffix(phi_kernel_desc.kernelType.precision) +
-                  getPhiLayoutSuffix(phi_kernel_desc.kernelType.layout);
+    kernel_name =
+        infrt::getPhiTargetPrefix(phi_kernel_desc.kernelType.target) +
+        kernel_name +
+        infrt::getPhiPrecisionSuffix(phi_kernel_desc.kernelType.precision) +
+        infrt::getPhiLayoutSuffix(phi_kernel_desc.kernelType.layout);

     mlir::OperationName operation_name(kernel_name, kernel_op.getContext());
     mlir::OperationState operation_state(kernel_op.getLoc(), operation_name);
@@ -134,18 +151,18 @@ void phiOpCvtPass::diapatchStage() {
     if (phi_context.find(phi_kernel_desc.kernelType.target) ==
         phi_context.end()) {
       switch (phi_kernel_desc.kernelType.target) {
-        case TargetType::CPU: {
+        case infrt::TargetType::CPU: {
           auto context_value =
               builder
                   .create<infrt::phi::CreateCPUContextOp>(
                       kernel_op.getLoc(),
-                      phi::ContextType::get(kernel_op.getContext(),
-                                            TargetType::CPU))
+                      infrt::phi::ContextType::get(kernel_op.getContext(),
+                                                   infrt::TargetType::CPU))
                   .output();
-          phi_context[TargetType::CPU] = context_value;
+          phi_context[infrt::TargetType::CPU] = context_value;
         } break;
-        case TargetType::GPU:
-        case TargetType::UNK:
+        case infrt::TargetType::GPU:
+        case infrt::TargetType::UNK:
         default:
           LOG(FATAL) << "Unsupported TargetType";
           break;
@@ -155,29 +172,30 @@ void phiOpCvtPass::diapatchStage() {
         phi_context.at(phi_kernel_desc.kernelType.target));
     for (size_t index = 0; index < phi_kernel_desc.inputsType.size(); ++index) {
       mlir::Value input = kernel_op.getOperand(index);
-      auto cvt_tensor_type_op = builder.create<CvtTensorOp>(
+      auto cvt_tensor_type_op = builder.create<infrt::CvtTensorOp>(
           kernel_op.getLoc(),
-          DenseTensorType::get(kernel_op.getContext(),
-                               phi_kernel_desc.inputsType[index].target,
-                               phi_kernel_desc.inputsType[index].precision,
-                               phi_kernel_desc.inputsType[index].layout),
+          infrt::DenseTensorType::get(
+              kernel_op.getContext(),
+              phi_kernel_desc.inputsType[index].target,
+              phi_kernel_desc.inputsType[index].precision,
+              phi_kernel_desc.inputsType[index].layout),
           input);
       operation_state.addOperands(cvt_tensor_type_op.output());
     }

     for (size_t index = 0; index < phi_kernel_desc.outputsType.size(); ++index) {
-      operation_state.addTypes(
-          DenseTensorType::get(kernel_op.getContext(),
-                               phi_kernel_desc.outputsType[index].target,
-                               phi_kernel_desc.outputsType[index].precision,
-                               phi_kernel_desc.outputsType[index].layout));
+      operation_state.addTypes(infrt::DenseTensorType::get(
+          kernel_op.getContext(),
+          phi_kernel_desc.outputsType[index].target,
+          phi_kernel_desc.outputsType[index].precision,
+          phi_kernel_desc.outputsType[index].layout));
     }

     operation_state.addAttributes(kernel_op.attrsAttr().getValue());
     mlir::Operation *phi_operation = builder.createOperation(operation_state);
     for (size_t index = 0; index < phi_kernel_desc.outputsType.size(); ++index) {
       mlir::Value input = phi_operation->getResult(index);
-      auto cvt_tensor_type_op = builder.create<CvtTensorOp>(
+      auto cvt_tensor_type_op = builder.create<infrt::CvtTensorOp>(
           kernel_op.getLoc(), kernel_op.getResultTypes()[index], input);
       kernel_op.getResult(index).replaceAllUsesWith(
           cvt_tensor_type_op.output());
@@ -185,4 +203,10 @@ void phiOpCvtPass::diapatchStage() {
     kernel_op.erase();
   }
 }
-}  // namespace infrt
+}  // namespace
+
+std::unique_ptr<mlir::Pass> infrt::createPhiOpCvtPass(
+    std::vector<Place> valid_places) {
+  return std::make_unique<phiOpCvtPass>(valid_places);
+}
paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.h
@@ -14,44 +14,14 @@
 #pragma once
 #include <mlir/Pass/Pass.h>
-#include "paddle/infrt/dialect/infrt/common_type.h"
+#include "paddle/infrt/dialect/infrt/common/types.h"

 namespace infrt {
 /*
  * phiOpCvtPass.
- *
- * Convert the general operators in pd Dialect to a infrt.kernelOp.
- *
- * source func:
- *
- * func @main() -> tensor<?xf32> {
- *  %a = "pd.feed"()...
- *  %c = "pd.conv2d"(%a) ...
- *  %d = "pd.conv3d"(%c) ...
- *  %f = "pd.conv2d"(%a) ...
- *  "pd.fetch" (%d, %f)
- * }
- *
- * destination func:
- * func @main() -> tensor<?xf32> {
- *  %a = "pd.feed"()...
- *  %c = "infrt.kernel"(%a){name = "conv2d"} ...
- *  %d = "infrt.kernel"(%c){name = "conv3d"}...
- *  %f = "infrt.kernel"(%a){name = "conv2d"}...
- *  "pd.fetch" (%d, %f)
- * }
+ * Convert the general operators from pd Dialect to phi dialect.
  */
-class phiOpCvtPass
-    : public mlir::PassWrapper<phiOpCvtPass, mlir::FunctionPass> {
- public:
-  ::llvm::StringRef getName() const override { return "phiOpCvtPass"; }
-  void runOnFunction() override;
-  explicit phiOpCvtPass(std::vector<Place> valid_places = std::vector<Place>())
-      : valid_places_(valid_places) {}
-
- private:
-  void convertStage();
-  void diapatchStage();
-  std::vector<Place> valid_places_;
-};
+std::unique_ptr<mlir::Pass> createPhiOpCvtPass(
+    std::vector<Place> valid_places = std::vector<Place>());

 }  // namespace infrt
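The header now exposes only a factory function, while the pass class itself lives in an anonymous namespace in the .cc file (as reconstructed above). A minimal sketch of that idiom follows; the names MyPass and createMyPass are invented, and the MLIR API of this era (PassWrapper/FunctionPass) is assumed:

    // my_pass.h - the public surface is just the factory.
    #include <memory>
    #include <mlir/Pass/Pass.h>
    std::unique_ptr<mlir::Pass> createMyPass();

    // my_pass.cc - the pass class stays private to this translation unit.
    namespace {
    class MyPass : public mlir::PassWrapper<MyPass, mlir::FunctionPass> {
     public:
      llvm::StringRef getName() const override { return "MyPass"; }
      void runOnFunction() override { /* rewrite ops in the current function */ }
    };
    }  // namespace

    std::unique_ptr<mlir::Pass> createMyPass() {
      return std::make_unique<MyPass>();
    }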
paddle/infrt/dialect/phi/phi_ir_exec.cc
@@ -38,7 +38,7 @@ int main(int argc, char** argv) {
     std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
                                                infrt::PrecisionType::FLOAT32,
                                                infrt::LayoutType::NCHW}};
-    phi_pass_manager.addPass(std::make_unique<infrt::phiOpCvtPass>(valid_places));
+    phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places));
     phi_pass_manager.addPass(infrt::createInfrtOpFusePass());
     if (mlir::failed(pm.run(*module))) {
       std::cout << "\npass failed!\n" << std::endl;
paddle/infrt/dialect/print_ir.cc
@@ -31,7 +31,7 @@
 #include <iostream>

 #include "paddle/infrt/common/global.h"
-#include "paddle/infrt/dialect/init_infrt_dialects.h"
+#include "paddle/infrt/dialect/init_dialects.h"

 namespace cl = llvm::cl;
paddle/infrt/dialect/rewrite.td
 #ifndef INFRT_REWRITE
 #define INFRT_REWRITE

-include "paddle/infrt/dialect/infrt_base.td"
+include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
 include "paddle/infrt/dialect/pd_ops.td"
 include "paddle/infrt/dialect/pd_extra_ops.td"
paddle/infrt/dialect/tensor_shape.td
@@ -2,7 +2,7 @@
 #else
 #define INFRT_OPS

-include "paddle/infrt/dialect/infrt_base.td"
+include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
 include "paddle/infrt/dialect/tensor_shape_base.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
paddle/infrt/dialect/tensorrt/pd_lower_to_trt.td
@@ -2,7 +2,7 @@
 #define PD_LOWER_TO_TRT

 include "mlir/Interfaces/SideEffectInterfaces.td"
-include "paddle/infrt/dialect/infrt_base.td"
+include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
 include "paddle/infrt/dialect/pd_ops.td"
 include "paddle/infrt/dialect/tensorrt/trt_ops.td"
paddle/infrt/dialect/tensorrt/trt_graph_fuse_pass.h
@@ -14,7 +14,6 @@
 #pragma once
 #include <mlir/Pass/Pass.h>
-#include "paddle/infrt/dialect/infrt_base.h"

 namespace infrt {
 namespace trt {
@@ -28,17 +27,17 @@ namespace trt {
 * func @main(%a : tensor<?xf32>) -> tensor<?xf32> {
 *  %c = "pd.graph"(%a) {
 *     %m = "pd.conv2d"(%a)...
-*     "infrt.return" (%m)
+*     infrt.return %m...
 *  } ...
 *  %d = "pd.graph"(%c) {
 *     %m = "pd.conv3d"(%c)...
-*     "infrt.return" (%m)
+*     infrt.return %m...
 *  } ...
 *  %f = "pd.graph"(%a) {
 *     %m = "pd.conv2d"(%a)...
-*     "infrt.return" (%m)
+*     infrt.return %m...
 *  } ...
-*  "infrt.return" (%d, %f)..
+*  infrt.return %d, %f :...
 * }
 *
 * destination func:
@@ -47,9 +46,9 @@ namespace trt {
 *     %m = "pd.conv2d"(%a)...
 *     %n = "pd.conv3d"(%m)...
 *     %s = "pd.conv2d"(%a)...
-*     "infrt.return" (%n, %s)
+*     infrt.return %n, %s:...
 *  } ...
-*  "infrt.return" (%d, %f)
+*  infrt.return %d, %f:...
 * }
 */
 class TRTGraphFusePass
paddle/infrt/dialect/tensorrt/trt_graph_split_pass.h
@@ -14,7 +14,6 @@
 #pragma once
 #include <mlir/Pass/Pass.h>
-#include "paddle/infrt/dialect/infrt_base.h"

 namespace infrt {
 namespace trt {
@@ -31,9 +30,9 @@ namespace trt {
 *     %m = "pd.conv2d"(%a)...
 *     %n = "pd.conv3d"(%m)...
 *     %s = "pd.conv2d"(%a)...
-*     "infrt.return" (%n, %s)...
+*     infrt.return %n, %s :...
 *  } ...
-*  "infrt.return" (%d, %f)...
+*  infrt.return %d, %f :...
 * }
 *
 * destination func:
@@ -41,7 +40,7 @@ namespace trt {
 *  %c = "pd.conv2d"(%a) ...
 *  %d = "pd.conv3d"(%c) ...
 *  %f = "pd.conv2d"(%a) ...
-*  "infrt.return" (%d, %f)...
+*  infrt.return %d, %f:...
 * }
 */
 class TRTGraphSplitPass
paddle/infrt/dialect/tensorrt/trt_op_converter_pass.cc
@@ -14,7 +14,6 @@
 #include "paddle/infrt/dialect/tensorrt/trt_op_converter_pass.h"
 #include <mlir/IR/Builders.h>
 #include <mlir/Transforms/DialectConversion.h>
-#include "paddle/infrt/dialect/infrt_base.h"
 #include "paddle/infrt/dialect/pd_ops.h"
 #include "paddle/infrt/dialect/tensorrt/trt_dialect_types.h"
@@ -24,7 +23,7 @@ namespace trt {
 #include "paddle/infrt/dialect/tensorrt/pd_lower_to_trt.cpp.inc"  // NOLINT

 struct PD2TRT_GraphLower : public ::mlir::RewritePattern {
-  PD2TRT_GraphLower(::mlir::MLIRContext *context)
+  explicit PD2TRT_GraphLower(::mlir::MLIRContext *context)
       : ::mlir::RewritePattern("pd.graph", 1, context, {"trt.create_engine"}) {}
   ::mlir::LogicalResult matchAndRewrite(
       ::mlir::Operation *op, ::mlir::PatternRewriter &rewriter) const override {
paddle/infrt/dialect/tensorrt/trt_op_converter_pass.h
浏览文件 @
481db5e9
...
...
@@ -15,7 +15,7 @@
 #pragma once
 #include "mlir/IR/Dialect.h"
 #include "mlir/Pass/Pass.h"
-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
 #include "paddle/infrt/dialect/tensorrt/trt_ops.h"
 namespace infrt {
...
...
@@ -29,9 +29,9 @@ namespace trt {
 *      %m = "pd.conv2d"(%a)...
 *      %n = "pd.conv3d"(%m)...
 *      %s = "pd.conv2d"(%a)...
-*      "infrt.return" (%n, %s)...
+*      infrt.return %n, %s:...
 *    } ...
-*    "infrt.return" (%d, %f)...
+*    infrt.return %d, %f:...
 *  }
 *
 * destination ir:
@@ -40,10 +40,10 @@ namespace trt {
 *      %m = "trt.Convolution"(%a)...
 *      %n = "trt.Convolution"(%m)...
 *      %s = "trt.Convolution"(%a)...
-*      "infrt.return" (%n, %s)...
+*      infrt.return %n, %s :...
 *    }){run_once = true} ...
 *    %d, %f = "trt.execute"(%engine, %a)...
-*    "infrt.return" (%d, %f)...
+*    infrt.return %d, %f :...
 *  }
 */
struct TRTOpConverterPass
...
...
paddle/infrt/dialect/tensorrt/trt_op_teller_pass.cc
...
...
@@ -15,8 +15,8 @@
 #include "paddle/infrt/dialect/tensorrt/trt_op_teller_pass.h"
 #include <mlir/IR/Builders.h>
-#include "paddle/infrt/dialect/basic_kernels.h"
-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
 #include "paddle/infrt/dialect/pd_ops.h"
 namespace infrt {
...
...
paddle/infrt/dialect/tensorrt/trt_op_teller_pass.h
...
...
@@ -14,7 +14,6 @@
 #pragma once
 #include <mlir/Pass/Pass.h>
-#include "paddle/infrt/dialect/infrt_base.h"
 namespace infrt {
 namespace trt {
...
...
@@ -29,24 +28,24 @@ namespace trt {
 *    %c = "pd.conv2d"(%a) ...
 *    %d = "pd.conv3d"(%c) ...
 *    %f = "pd.conv2d"(%a) ...
-*    "infrt.return"(%d, %f)...
+*    infrt.return %d, %f:...
 *  }
 *
 * destination func:
 *  func @main(%a : tensor<?xf32>) -> tensor<?xf32> {
 *    %c = "pd.graph"(%a) {
 *      %m = "pd.conv2d"(%a)...
-*      "infrt.return" (%m)
+*      infrt.return %m:...
 *    } ...
 *    %d = "pd.graph"(%c) {
 *      %m = "pd.conv3d"(%c)...
-*      "infrt.return" (%m)
+*      infrt.return %m:...
 *    } ...
 *    %f = "pd.graph"(%a) {
 *      %m = "pd.conv2d"(%a)...
-*      "infrt.return" (%m)
+*      infrt.return %m:...
 *    } ...
-*    "infrt.return" (%d, %f)
+*    infrt.return %d, %f:...
 *  }
 * TODO(winter-wang): Supplementary how to judge the operators can be supported
 * by tensorrt.
...
...
paddle/infrt/dialect/tensorrt/trt_ops.h
...
...
@@ -28,8 +28,8 @@
 #include <mlir/Interfaces/InferTypeOpInterface.h>
 #include <mlir/Interfaces/LoopLikeInterface.h>
 #include <mlir/Interfaces/SideEffectInterfaces.h>
-#include "paddle/infrt/dialect/basic_kernels.h"
-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
 #include "paddle/infrt/dialect/pd_ops.h"
 namespace infrt {
...
...
paddle/infrt/external_kernels/basic.mlir
// CHECK: basic
func @basic() -> f32 {
-  %v0 = Infrt.constant.f32 1.0
-  %v1 = Infrt.constant.f32 2.0
+  %v0 = infrt.constant.f32 1.0
+  %v1 = infrt.constant.f32 2.0
   %v2 = "external.add.f32"(%v0, %v1) : (f32, f32) -> f32
   // CHECK: 1
...
...
@@ -17,5 +17,5 @@ func @basic() -> f32 {
   // CHECK: 6
   "external.print.f32"(%v3) : (f32) -> ()
-  Infrt.return %v3 : f32
+  infrt.return %v3 : f32
 }
paddle/infrt/external_kernels/fc.mlir
// CHECK-LABEL: @fc
-func @fc(%input : !Infrt.tensor<X86, NCHW, F32>,
-         %w : !Infrt.tensor<X86, NCHW, F32>,
-         %bias : !Infrt.tensor<X86, NCHW, F32>) -> !Infrt.tensor<X86, NCHW, F32>
+func @fc(%input : !infrt.dense_tensor<CPU, FP32, NCHW>,
+         %w : !infrt.dense_tensor<CPU, FP32, NCHW>,
+         %bias : !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
 {
-  %out = dt.create_uninit_tensor.f32 [30, 50] -> !Infrt.tensor<X86, NCHW, F32>
-  // dt.fill_tensor_with_constant.f32 (%out : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
+  %out = dt.create_uninit_tensor.f32 [30, 50] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  // dt.fill_tensor_with_constant.f32 (%out : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=0.0:f32}
   // fc1
-  "external.matmul"(%input, %w, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
-  "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
-  "external.sigmoid"(%out, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  "external.matmul"(%input, %w, %out) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+  "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+  "external.sigmoid"(%out, %out) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
   // fc2
-  "external.matmul"(%out, %w, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
-  "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
-  "external.sigmoid"(%out, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  "external.matmul"(%out, %w, %out) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+  "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+  "external.sigmoid"(%out, %out) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-  Infrt.return %out : !Infrt.tensor<X86, NCHW, F32>
+  infrt.return %out : !infrt.dense_tensor<CPU, FP32, NCHW>
 }
// CHECK-LABEL: @benchmark
func @benchmark() {
-  %input = dt.create_uninit_tensor.f32 [30, 50] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
+  %input = dt.create_uninit_tensor.f32 [30, 50] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%input : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=1.0:f32}
-  %w = dt.create_uninit_tensor.f32 [50, 50] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%w : !Infrt.tensor<X86, NCHW, F32>) {value=2.0:f32}
+  %w = dt.create_uninit_tensor.f32 [50, 50] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%w : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=2.0:f32}
-  %bias = dt.create_uninit_tensor.f32 [30, 50] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%bias : !Infrt.tensor<X86, NCHW, F32>) {value=3.0:f32}
+  %bias = dt.create_uninit_tensor.f32 [30, 50] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%bias : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=3.0:f32}
-  Infrt.benchmark "add.f32"(
-          %input:!Infrt.tensor<X86, NCHW, F32>,
-          %w:!Infrt.tensor<X86, NCHW, F32>,
-          %bias:!Infrt.tensor<X86, NCHW, F32>)
+  infrt.benchmark "add.f32"(
+          %input:!infrt.dense_tensor<CPU, FP32, NCHW>,
+          %w:!infrt.dense_tensor<CPU, FP32, NCHW>,
+          %bias:!infrt.dense_tensor<CPU, FP32, NCHW>)
           duration_secs = 100, max_count = 300000, num_warmup_runs = 3
   {
-    %res = Infrt.call @fc(%input, %w, %bias) : (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> (!Infrt.tensor<X86, NCHW, F32>)
-    Infrt.return %res : !Infrt.tensor<X86, NCHW, F32>
+    %res = infrt.call @fc(%input, %w, %bias) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+    infrt.return %res : !infrt.dense_tensor<CPU, FP32, NCHW>
   }
-  Infrt.return
+  infrt.return
 }
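The rename that dominates this file, `!Infrt.tensor<X86, NCHW, F32>` to `!infrt.dense_tensor<CPU, FP32, NCHW>`, also reorders the type parameters to (target, precision, layout). A rough sketch of that triple as plain C++ enums, mirroring the `infrt::TargetType`/`PrecisionType`/`LayoutType` names used in the mlir_exec.cc hunk further down (the real definitions appear to live in paddle/infrt/dialect/infrt/common/types.h; the member lists here are partly hypothetical):

#include <iostream>

// Sketch only; members other than CPU/FLOAT32/NCHW are hypothetical.
enum class TargetType { CPU, GPU };
enum class PrecisionType { FLOAT32, FLOAT16 };
enum class LayoutType { NCHW, NHWC };

// A "place" bundles the three parameters of !infrt.dense_tensor<...>.
struct Place {
  TargetType target;
  PrecisionType precision;
  LayoutType layout;
};

int main() {
  Place p{TargetType::CPU, PrecisionType::FLOAT32, LayoutType::NCHW};
  std::cout << (p.layout == LayoutType::NCHW) << "\n";  // prints 1
  return 0;
}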
paddle/infrt/external_kernels/paddle.mlir
// CHECK: paddle_func
func @paddle_func() -> () {
-  %input = dt.create_uninit_tensor.f32 [3, 5] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
+  %input = dt.create_uninit_tensor.f32 [3, 5] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%input : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=1.0:f32}
-  %w = dt.create_uninit_tensor.f32 [5, 4] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%w : !Infrt.tensor<X86, NCHW, F32>) {value=2.0:f32}
+  %w = dt.create_uninit_tensor.f32 [5, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%w : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=2.0:f32}
-  %bias = dt.create_uninit_tensor.f32 [4] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%bias : !Infrt.tensor<X86, NCHW, F32>) {value=3.0:f32}
+  %bias = dt.create_uninit_tensor.f32 [4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%bias : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=3.0:f32}
-  %out = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%out : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
+  %out = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%out : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=0.0:f32}
-  "external.fc2"(%input, %w, %bias, %out) {in_num_col_dims=3:i32, test_attr=5:i32}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  "external.fc2"(%input, %w, %bias, %out) {in_num_col_dims=3:i32, test_attr=5:i32}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
   // CHECK-LABEL: tensor: shape=shape[3,5], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-  dt.print_tensor (%input : !Infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%input : !infrt.dense_tensor<CPU, FP32, NCHW>)
   // CHECK-LABEL: tensor: shape=shape[5,4], values=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
-  dt.print_tensor (%w : !Infrt.tensor<X86, NCHW, F32>)
-  dt.print_tensor (%bias : !Infrt.tensor<X86, NCHW, F32>)
-  dt.print_tensor (%out : !Infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%w : !infrt.dense_tensor<CPU, FP32, NCHW>)
+  dt.print_tensor (%bias : !infrt.dense_tensor<CPU, FP32, NCHW>)
+  dt.print_tensor (%out : !infrt.dense_tensor<CPU, FP32, NCHW>)
   // test external.matmul
-  %out1 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%out1 : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
-  "external.matmul"(%input, %w, %out1) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
-  dt.print_tensor (%out1 : !Infrt.tensor<X86, NCHW, F32>)
+  %out1 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%out1 : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=0.0:f32}
+  "external.matmul"(%input, %w, %out1) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+  dt.print_tensor (%out1 : !infrt.dense_tensor<CPU, FP32, NCHW>)
   // test external.elementwise_add
-  %out2 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%out2 : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
-  %bias1 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%bias1 : !Infrt.tensor<X86, NCHW, F32>) {value=3.0:f32}
-  "external.elementwise_add"(%out1, %bias1, %out2) {axis=-1}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
-  dt.print_tensor (%out2 : !Infrt.tensor<X86, NCHW, F32>)
+  %out2 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%out2 : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=0.0:f32}
+  %bias1 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%bias1 : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=3.0:f32}
+  "external.elementwise_add"(%out1, %bias1, %out2) {axis=-1}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+  dt.print_tensor (%out2 : !infrt.dense_tensor<CPU, FP32, NCHW>)
   // test external.relu
-  %out3 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%out3 : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
-  "external.relu"(%out1, %out3) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
-  dt.print_tensor (%out3 : !Infrt.tensor<X86, NCHW, F32>)
+  %out3 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%out3 : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=0.0:f32}
+  "external.relu"(%out1, %out3) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+  dt.print_tensor (%out3 : !infrt.dense_tensor<CPU, FP32, NCHW>)
   // test external.sigmoid
-  %out4 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%out4 : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
-  "external.sigmoid"(%out1, %out4) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
-  dt.print_tensor (%out4 : !Infrt.tensor<X86, NCHW, F32>)
+  %out4 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%out4 : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=0.0:f32}
+  "external.sigmoid"(%out1, %out4) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+  dt.print_tensor (%out4 : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  Infrt.return
+  infrt.return
 }
paddle/infrt/host_context/mlir_exec.cc
...
...
@@ -92,7 +92,7 @@ int main(int argc, char** argv) {
   std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
                                              infrt::PrecisionType::FLOAT32,
                                              infrt::LayoutType::NCHW}};
-  phi_pass_manager.addPass(std::make_unique<infrt::phiOpCvtPass>(valid_places));
+  phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places));
   phi_pass_manager.addPass(infrt::createInfrtOpFusePass());
 #endif
...
...
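The hunk above swaps direct construction (`std::make_unique<infrt::phiOpCvtPass>`) for a factory call (`infrt::createPhiOpCvtPass`), matching the `createInfrtOpFusePass()` line below it. A self-contained sketch of that factory pattern under stand-in types (the real classes are MLIR passes and are not reproduced here):

#include <iostream>
#include <memory>
#include <utility>
#include <vector>

// Stand-ins for illustration; the real Pass and Place types come from
// MLIR and infrt.
struct Pass { virtual ~Pass() = default; };
struct Place { int target = 0; };

namespace detail {
// The concrete pass class can stay hidden in a single .cc file...
struct PhiOpCvtPass : Pass {
  explicit PhiOpCvtPass(std::vector<Place> places) : places_(std::move(places)) {}
  std::vector<Place> places_;
};
}  // namespace detail

// ...because callers only ever see the factory, as in
// phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places));
std::unique_ptr<Pass> CreatePhiOpCvtPass(std::vector<Place> valid_places) {
  return std::make_unique<detail::PhiOpCvtPass>(std::move(valid_places));
}

int main() {
  auto pass = CreatePhiOpCvtPass({Place{0}});
  std::cout << (pass != nullptr) << "\n";  // prints 1
  return 0;
}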
paddle/infrt/host_context/mlir_tests/basic.mlir
// CHECK-LABEL: basic
func @basic() -> f32 {
-  %v0 = Infrt.constant.f32 1.0
-  %v1 = Infrt.constant.f32 2.0
-  %v2 = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
+  %v0 = infrt.constant.f32 1.0
+  %v1 = infrt.constant.f32 2.0
+  %v2 = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
   // CHECK: 1
-  "Infrt.print.f32"(%v0) : (f32) -> ()
+  "infrt.print.f32"(%v0) : (f32) -> ()
   // CHECK: 2
-  "Infrt.print.f32"(%v1) : (f32) -> ()
+  "infrt.print.f32"(%v1) : (f32) -> ()
   // CHECK: 3
-  "Infrt.print.f32"(%v2) : (f32) -> ()
+  "infrt.print.f32"(%v2) : (f32) -> ()
-  %v3 = "Infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
+  %v3 = "infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
   // CHECK: 6
-  "Infrt.print.f32"(%v3) : (f32) -> ()
+  "infrt.print.f32"(%v3) : (f32) -> ()
-  Infrt.return %v3 : f32
+  infrt.return %v3 : f32
 }
// CHECK-LABEL: basic1
// Check the mlir executor can work with more than one function in a file.
func @basic1() -> () {
-  %v0 = Infrt.constant.f32 1.0
-  "Infrt.print.f32"(%v0) : (f32) -> ()
+  %v0 = infrt.constant.f32 1.0
+  "infrt.print.f32"(%v0) : (f32) -> ()
   // CHECK: 1
-  Infrt.return
+  infrt.return
 }
\ No newline at end of file
paddle/infrt/host_context/mlir_tests/dense_tensor.mlir
// CHECK-LABEL: build_tensor1
func @build_tensor1() {
-  %a = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%a : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
+  %a = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%a : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=1.0:f32}
   // CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-  dt.print_tensor (%a : !Infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  Infrt.return
+  infrt.return
 }
paddle/infrt/host_context/mlir_tests/shape.mlir
...
...
@@ -3,5 +3,5 @@ func @build_tensor1() {
   %a = ts.build_shape [1:i64, 57:i64, 92:i64]
   // CHECK: shape[1,57,92]
   ts.print_shape %a
-  Infrt.return
-}
\ No newline at end of file
+  infrt.return
+}
paddle/infrt/host_context/mlir_to_runtime_translate.cc
...
...
@@ -75,7 +75,7 @@ struct MlirToRuntimeTranslator::Impl {
 };
 bool MlirToRuntimeTranslator::EmitConstantOp(mlir::Operation* op) {
-  if (!infrt::Startswith(op->getName().getStringRef().str(), "Infrt.constant"))
+  if (!infrt::Startswith(op->getName().getStringRef().str(), "infrt.constant"))
     return false;
   VLOG(3) << "Emitting constant op [" << op->getName().getStringRef().str()
           << "]";
...
...
@@ -267,7 +267,7 @@ boost::optional<std::vector<double>> MlirToRuntimeTranslator::EmitAttribute(
 }
 static bool IsReturn(mlir::Operation* op) {
-  return op->getName().getStringRef() == "Infrt.return";
+  return op->getName().getStringRef() == "infrt.return";
 }
 bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) {
...
...
@@ -405,7 +405,7 @@ bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) {
 bool MlirToRuntimeTranslator::EmitReturnOp(
     mlir::Operation* op, llvm::SmallVectorImpl<mlir::Value>* results) {
   CHECK(results);
-  if (op->getName().getStringRef() == "Infrt.return") {
+  if (op->getName().getStringRef() == "infrt.return") {
     for (size_t i = 0; i < op->getNumOperands(); i++) {
       results->push_back(op->getOperand(i));
     }
...
...
@@ -478,7 +478,7 @@ bool MlirToRuntimeTranslator::EmitCallOp(mlir::Operation* op,
                                          function_defs_t* function_table) {
   CHECK(op);
   CHECK(function_table);
-  if (op->getName().getStringRef() != "Infrt.call") return false;
+  if (op->getName().getStringRef() != "infrt.call") return false;
   impl_->cur_op =
       impl_->runtime->NewOpExecutable(op->getName().getStringRef().str());
...
...
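The translator dispatches on op-name strings, so the lowercase rename has to land at every comparison site at once: a prefix test for `infrt.constant`, exact equality for `infrt.return` and `infrt.call`. A minimal sketch of that matching (the prefix semantics of `infrt::Startswith` are an assumption here, not the Paddle implementation):

#include <iostream>
#include <string>

// Assumed behavior of infrt::Startswith: a plain prefix test.
static bool Startswith(const std::string &text, const std::string &prefix) {
  return text.rfind(prefix, 0) == 0;
}

int main() {
  std::cout << Startswith("infrt.constant.f32", "infrt.constant") << "\n";  // 1
  // Case matters: the pre-rename spelling no longer matches anything.
  std::cout << Startswith("Infrt.constant.f32", "infrt.constant") << "\n";  // 0
  std::cout << (std::string("infrt.return") == "infrt.return") << "\n";     // 1
  return 0;
}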
paddle/infrt/host_context/mlir_to_runtime_translate.h
...
...
@@ -57,7 +57,7 @@ class MlirToRuntimeTranslator {
  protected:
   //! Emit a "infrt.constant.*" operation, return true if succeed.
   bool EmitConstantOp(mlir::Operation* op);
-  //! Emit a "Infrt.return" operation.
+  //! Emit a "infrt.return" operation.
   bool EmitReturnOp(mlir::Operation* op,
                     llvm::SmallVectorImpl<mlir::Value>* results);
   //! Emit a "ts.build_shape" operation.
...
...
paddle/infrt/host_context/mlir_to_runtime_translate_test.cc
...
...
@@ -37,14 +37,14 @@ TEST(MlirToRuntimeTranslate, basic) {
   auto source = R"ROC(
 func @main() -> () {
-  %v0 = Infrt.constant.f32 1.0
-  %v1 = Infrt.constant.f32 2.0
-  %v2 = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
-  %v3 = "Infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
+  %v0 = infrt.constant.f32 1.0
+  %v1 = infrt.constant.f32 2.0
+  %v2 = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
+  %v3 = "infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
-  "Infrt.print.f32"(%v1) : (f32) -> ()
+  "infrt.print.f32"(%v1) : (f32) -> ()
-  Infrt.return
+  infrt.return
 }
 )ROC";
...
...
@@ -63,14 +63,14 @@ TEST(TestMlir, basic) {
   auto source = R"ROC(
 func @main() -> () {
-  %v0 = Infrt.constant.f32 1.0
-  %v1 = Infrt.constant.f32 2.0
-  %v2 = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
-  %v3 = "Infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
+  %v0 = infrt.constant.f32 1.0
+  %v1 = infrt.constant.f32 2.0
+  %v2 = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
+  %v3 = "infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
-  "Infrt.print.f32"(%v1) : (f32) -> ()
+  "infrt.print.f32"(%v1) : (f32) -> ()
-  Infrt.return
+  infrt.return
 }
 )ROC";
...
...
@@ -101,7 +101,7 @@ func @predict(%a: !infrt.dense_tensor<CPU, FP32, NCHW>, %b: !infrt.dense_tensor<
       "!infrt.dense_tensor<CPU, FP32, NCHW>";
   auto end = R"ROC(
-Infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>
+infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>
 }
 )ROC";
...
...
paddle/infrt/host_context/paddle_mlir.cc
...
...
@@ -19,7 +19,6 @@ MLIRModelGenImpl::MLIRModelGenImpl()
     : context_(infrt::Global::getMLIRContext()), builder_(context_) {
   context_->allowUnregisteredDialects();
   context_->getOrLoadDialect<mlir::StandardOpsDialect>();
-  context_->getOrLoadDialect<infrt::dialect::INFRTDialect>();
   context_->getOrLoadDialect<infrt::ts::TensorShapeDialect>();
   context_->getOrLoadDialect<infrt::dt::DTDialect>();
   context_->getOrLoadDialect<mlir::pd::PaddleDialect>();
...
...
paddle/infrt/host_context/paddle_mlir.h
...
...
@@ -25,10 +25,10 @@
 #include "mlir/IR/MLIRContext.h"
 #include "paddle/infrt/common/global.h"
 #include "paddle/infrt/common/string.h"
-#include "paddle/infrt/dialect/basic_kernels.h"
 #include "paddle/infrt/dialect/dense_tensor.h"
-#include "paddle/infrt/dialect/infrt_base.h"
-#include "paddle/infrt/dialect/init_infrt_dialects.h"
+#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
+#include "paddle/infrt/dialect/init_dialects.h"
 #include "paddle/infrt/dialect/pd_ops.h"
 #include "paddle/infrt/dialect/tensor_shape.h"
 #include "paddle/infrt/paddle/model_parser.h"
...
...
paddle/infrt/host_context/value.h
...
...
@@ -22,7 +22,7 @@
 #include "paddle/infrt/common/object.h"
 #include "paddle/infrt/common/shared.h"
-#include "paddle/infrt/dialect/infrt/common_type.h"
+#include "paddle/infrt/dialect/infrt/common/types.h"
 #include "paddle/infrt/host_context/function.h"
 #include "paddle/infrt/support/variant.h"
 #include "paddle/infrt/tensor/dense_host_tensor.h"
...
...
paddle/infrt/kernel/basic_kernels.cc
...
...
@@ -63,24 +63,24 @@ static void PrintString(const std::string &str) {
 void RegisterBasicKernels(host_context::KernelRegistry *registry) {
   RegisterIntBasicKernels(registry);
   RegisterFloatBasicKernels(registry);
-  registry->AddKernel("Infrt.get_string", INFRT_KERNEL(GetString));
-  registry->AddKernel("Infrt.print_string", INFRT_KERNEL(PrintString));
+  registry->AddKernel("infrt.get_string", INFRT_KERNEL(GetString));
+  registry->AddKernel("infrt.print_string", INFRT_KERNEL(PrintString));
 }
 void RegisterIntBasicKernels(host_context::KernelRegistry *registry) {
-  registry->AddKernel("Infrt.add.i32", INFRT_KERNEL(add<int32_t>));
-  registry->AddKernel("Infrt.sub.i32", INFRT_KERNEL(sub<int32_t>));
-  registry->AddKernel("Infrt.mul.i32", INFRT_KERNEL(mul<int32_t>));
-  registry->AddKernel("Infrt.div.i32", INFRT_KERNEL(div<int32_t>));
-  registry->AddKernel("Infrt.print.i32", INFRT_KERNEL(print<int32_t>));
+  registry->AddKernel("infrt.add.i32", INFRT_KERNEL(add<int32_t>));
+  registry->AddKernel("infrt.sub.i32", INFRT_KERNEL(sub<int32_t>));
+  registry->AddKernel("infrt.mul.i32", INFRT_KERNEL(mul<int32_t>));
+  registry->AddKernel("infrt.div.i32", INFRT_KERNEL(div<int32_t>));
+  registry->AddKernel("infrt.print.i32", INFRT_KERNEL(print<int32_t>));
 }
 void RegisterFloatBasicKernels(host_context::KernelRegistry *registry) {
-  registry->AddKernel("Infrt.add.f32", INFRT_KERNEL(add<float>));
-  registry->AddKernel("Infrt.sub.f32", INFRT_KERNEL(sub<float>));
-  registry->AddKernel("Infrt.mul.f32", INFRT_KERNEL(mul<float>));
-  registry->AddKernel("Infrt.div.f32", INFRT_KERNEL(div<float>));
-  registry->AddKernel("Infrt.print.f32", INFRT_KERNEL(print<float>));
+  registry->AddKernel("infrt.add.f32", INFRT_KERNEL(add<float>));
+  registry->AddKernel("infrt.sub.f32", INFRT_KERNEL(sub<float>));
+  registry->AddKernel("infrt.mul.f32", INFRT_KERNEL(mul<float>));
+  registry->AddKernel("infrt.div.f32", INFRT_KERNEL(div<float>));
+  registry->AddKernel("infrt.print.f32", INFRT_KERNEL(print<float>));
 }
 }  // namespace kernel
...
...
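Kernel lookup is by the exact registered name string, which is why these registrations and the `.mlir` tests later in the diff change in lockstep. A toy registry sketch (hypothetical types; the real `host_context::KernelRegistry` stores type-erased kernel launchers rather than `std::function<float(float, float)>`):

#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <utility>

// Toy stand-in: kernels are keyed by the exact op-name string, so
// "Infrt.add.f32" and "infrt.add.f32" are different keys.
class KernelRegistry {
 public:
  using Kernel = std::function<float(float, float)>;
  void AddKernel(const std::string &name, Kernel k) { kernels_[name] = std::move(k); }
  const Kernel *Find(const std::string &name) const {
    auto it = kernels_.find(name);
    return it == kernels_.end() ? nullptr : &it->second;
  }

 private:
  std::map<std::string, Kernel> kernels_;
};

int main() {
  KernelRegistry registry;
  registry.AddKernel("infrt.add.f32", [](float a, float b) { return a + b; });
  std::cout << (registry.Find("Infrt.add.f32") != nullptr) << "\n";  // 0: old name gone
  std::cout << (*registry.Find("infrt.add.f32"))(1.f, 2.f) << "\n";  // 3
  return 0;
}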
paddle/infrt/kernel/control_flow_kernels.cc
...
...
@@ -37,7 +37,7 @@ static void INFRTCall(
 }
 void RegisterControlFlowKernels(host_context::KernelRegistry *registry) {
-  registry->AddKernel("Infrt.call", INFRT_KERNEL(INFRTCall));
+  registry->AddKernel("infrt.call", INFRT_KERNEL(INFRTCall));
 }
 }  // namespace kernel
...
...
paddle/infrt/kernel/phi/dense_tensor_kernels.h
...
...
@@ -15,7 +15,7 @@
 #pragma once
 #include "paddle/infrt/backends/host/phi_allocator.h"
-#include "paddle/infrt/dialect/infrt/common_type.h"
+#include "paddle/infrt/dialect/infrt/common/types.h"
 #include "paddle/infrt/host_context/kernel_utils.h"
 #include "paddle/phi/core/dense_tensor.h"
...
...
paddle/infrt/kernel/test_kernels.cc
...
...
@@ -193,7 +193,7 @@ tensor::DenseHostTensor ShadowCopyTensor(tensor::DenseHostTensor src) {
 }
 void RegisterTestKernels(host_context::KernelRegistry *registry) {
-  registry->AddKernel("Infrt.benchmark", INFRT_KERNEL(benchmark));
+  registry->AddKernel("infrt.benchmark", INFRT_KERNEL(benchmark));
   registry->AddKernel("Infrt.test.shadow_copy_tensor",
                       INFRT_KERNEL(ShadowCopyTensor));
 }
...
...
paddle/infrt/tests/dialect/basic.mlir
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: @basic_f32
func @basic_f32() -> f32 {
-  %v0 = Infrt.constant.f32 1.0
-  %v1 = Infrt.constant.f32 2.0
-  %value = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
+  %v0 = infrt.constant.f32 1.0
+  %v1 = infrt.constant.f32 2.0
+  %value = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
   // CHECK-NEXT: 3
-  "Infrt.print.f32"(%value) : (f32) -> ()
+  "infrt.print.f32"(%value) : (f32) -> ()
-  Infrt.return %value : f32
+  infrt.return %value : f32
 }
/// ================================================================
/// @caller call the other function @callee
func @callee.add.f32(%x : f32, %y : f32, %y1 : f32) -> f32 {
-  %z = "Infrt.add.f32"(%x, %y) : (f32, f32) -> f32
-  %z1 = "Infrt.add.f32"(%z, %y1) : (f32, f32) -> f32
-  Infrt.return %z1 : f32
+  %z = "infrt.add.f32"(%x, %y) : (f32, f32) -> f32
+  %z1 = "infrt.add.f32"(%z, %y1) : (f32, f32) -> f32
+  infrt.return %z1 : f32
 }
// CHECK-LABEL: @caller.add.f32
func @caller.add.f32() -> f32 {
-  %x = Infrt.constant.f32 1.0
-  %y = Infrt.constant.f32 2.0
-  %y1 = Infrt.constant.f32 3.0
-  %z = Infrt.call @callee.add.f32(%x, %y, %y1) : (f32, f32, f32) -> f32
+  %x = infrt.constant.f32 1.0
+  %y = infrt.constant.f32 2.0
+  %y1 = infrt.constant.f32 3.0
+  %z = infrt.call @callee.add.f32(%x, %y, %y1) : (f32, f32, f32) -> f32
   // CHECK-NEXT: 6
-  "Infrt.print.f32"(%z) : (f32) -> ()
-  Infrt.return %z : f32
+  "infrt.print.f32"(%z) : (f32) -> ()
+  infrt.return %z : f32
 }
/// <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
paddle/infrt/tests/dialect/benchmark.mlir
...
...
@@ -12,13 +12,13 @@ func @benchmark() {
 // CHECK-LABEL: BM:add.f32:CPU 95%(ns)
 // CHECK-LABEL: BM:add.f32:CPU 99%(ns)
 // CHECK-LABEL: BM:add.f32:CPU utilization(percent)
-Infrt.benchmark "add.f32"() duration_secs = 1, max_count = 3, num_warmup_runs = 3
+infrt.benchmark "add.f32"() duration_secs = 1, max_count = 3, num_warmup_runs = 3
 {
-  %0 = Infrt.constant.f32 1.0
-  %1 = Infrt.constant.f32 2.0
-  %res = "Infrt.add.f32"(%0, %1) : (f32, f32) -> f32
-  "Infrt.print.f32"(%res) : (f32) -> ()
-  Infrt.return %res : f32
+  %0 = infrt.constant.f32 1.0
+  %1 = infrt.constant.f32 2.0
+  %res = "infrt.add.f32"(%0, %1) : (f32, f32) -> f32
+  "infrt.print.f32"(%res) : (f32) -> ()
+  infrt.return %res : f32
 }
-Infrt.return
+infrt.return
 }
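For context on the three attributes carried by `infrt.benchmark`: a plausible reading, sketched below, is that `num_warmup_runs` iterations run untimed, after which the body repeats until either `max_count` iterations or `duration_secs` of wall clock is exhausted. The real logic lives in paddle/infrt/kernel/test_kernels.cc; this loop is an assumption, not that code:

#include <chrono>
#include <iostream>

// Hypothetical benchmark driver matching the attribute names above.
template <typename Fn>
void Benchmark(Fn fn, int num_warmup_runs, int max_count, double duration_secs) {
  for (int i = 0; i < num_warmup_runs; ++i) fn();  // warmup, not timed
  const auto start = std::chrono::steady_clock::now();
  int count = 0;
  while (count < max_count &&
         std::chrono::duration<double>(std::chrono::steady_clock::now() - start)
                 .count() < duration_secs) {
    fn();
    ++count;
  }
  std::cout << "ran " << count << " timed iterations\n";
}

int main() {
  Benchmark([] {}, /*num_warmup_runs=*/3, /*max_count=*/3, /*duration_secs=*/1.0);
  return 0;
}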
paddle/infrt/tests/dialect/dense_tensor.mlir
...
...
@@ -4,14 +4,14 @@ func @dense_shape0() {
   %shape = ts.build_shape [1:i64, 57:i64]
   %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
-  Infrt.return
+  infrt.return
 }
 func @predict(%a: !infrt.dense_tensor<CPU, FP32, NCHW>, %b: !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) {
   %a0 = dt.shallow_copy_tensor %a : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW>
   %b0 = dt.shallow_copy_tensor %b : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW>
-  Infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>
+  infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>
 }
...
...
@@ -19,6 +19,6 @@ func @main() {
   %shape = ts.build_shape [1:i64, 57:i64]
   %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
-  %b, %c = Infrt.call @predict(%a, %a) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
-  Infrt.return
+  %b, %c = infrt.call @predict(%a, %a) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
+  infrt.return
 }
paddle/infrt/tests/dialect/disabled_tensor_map.mlir
// CHECK-LABEL: @predict
-func @predict(%input:!Infrt.tensor<X86, NCHW, F32>, %map: !Infrt.tensor_map) -> (!Infrt.tensor<X86, NCHW, F32>) {
-  %w = dt.get_param(%map, "create_parameter_0.w_0") -> !Infrt.tensor<X86, NCHW, F32>
-  %bias = dt.get_param(%map, "create_parameter_1.w_0") -> !Infrt.tensor<X86, NCHW, F32>
+func @predict(%input:!infrt.dense_tensor<CPU, FP32, NCHW>, %map: !infrt.dense_tensor_map) -> (!infrt.dense_tensor<CPU, FP32, NCHW>) {
+  %w = dt.get_param(%map, "create_parameter_0.w_0") -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  %bias = dt.get_param(%map, "create_parameter_1.w_0") -> !infrt.dense_tensor<CPU, FP32, NCHW>
-  %out = dt.create_uninit_tensor.f32 [3, 3] -> !Infrt.tensor<X86, NCHW, F32>
+  %out = dt.create_uninit_tensor.f32 [3, 3] -> !infrt.dense_tensor<CPU, FP32, NCHW>
   // fc
-  "external.matmul"(%input, %w, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
-  "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
-  "external.sigmoid"(%out, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
-  //dt.print_tensor (%out : !Infrt.tensor<X86, NCHW, F32>)
+  "external.matmul"(%input, %w, %out) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+  "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+  "external.sigmoid"(%out, %out) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+  //dt.print_tensor (%out : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  Infrt.return %out : !Infrt.tensor<X86, NCHW, F32>
+  infrt.return %out : !infrt.dense_tensor<CPU, FP32, NCHW>
 }
// CHECK-LABEL: @main
func @main() {
-  %input = dt.create_uninit_tensor.f32 [3, 3] -> !Infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
+  %input = dt.create_uninit_tensor.f32 [3, 3] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%input : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=1.0:f32}
   // CHECK-LABEL: loading params
   %map = dt.load_params() {path="/Infrt/build/paddle/paddle_1.8_fc_model"}
-  %out = Infrt.call @predict(%input, %map): (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor_map) -> (!Infrt.tensor<X86, NCHW, F32>)
-  dt.print_tensor (%out : !Infrt.tensor<X86, NCHW, F32>)
+  %out = infrt.call @predict(%input, %map): (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor_map) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+  dt.print_tensor (%out : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  Infrt.return
+  infrt.return
 }
paddle/infrt/tests/dialect/paddle_ops.mlir
...
...
@@ -5,5 +5,5 @@ func @ops() {
   %b = pd.feed() {name="input1"}: tensor<?xf32>
   %d = pd.feed() {name="input3"}: !infrt.lod_tensor<3x4x9xf32, 0>
   %c = "pd.matmul"(%a, %b) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  Infrt.return
+  infrt.return
 }
paddle/infrt/tests/dialect/phi/dense_tensor.mlir
...
...
@@ -11,6 +11,6 @@ func @sign_any_float32_execute() {
   // CHECK: dense_tensor: shape=shape[1], values=[1]
   "phi_dt.print_tensor" (%e) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-  Infrt.return
+  infrt.return
 }
paddle/infrt/tests/dialect/phi/phi_test.mlir
...
...
@@ -2,14 +2,14 @@
 module {
   func @predict(%arg0: !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW> {
     %2 = "pd.abs"(%arg0) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
-    Infrt.return %2 : !infrt.dense_tensor<CPU, FP32, NCHW>
+    infrt.return %2 : !infrt.dense_tensor<CPU, FP32, NCHW>
   }
   func @main() {
     %ctx = "phi_dt.create_context.cpu" (): () -> !phi.context<CPU>
     %t = "phi_dt.create_dense_tensor" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[1:i64]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
     "phi_dt.fill_dense_tensor.f32"(%t) {value=[3.8:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-    %2 = Infrt.call@predict(%t) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
+    %2 = infrt.call@predict(%t) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
     phi_dt.print_tensor(%2 : !infrt.dense_tensor<CPU, FP32, NCHW>)
-    Infrt.return
+    infrt.return
   }
 }
paddle/infrt/tests/dialect/tensor/dense_tensor.mlir
...
...
@@ -3,14 +3,14 @@
 func @dense_shape0() {
   %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
-  Infrt.return
+  infrt.return
 }
 func @predict(%a: !infrt.dense_tensor<CPU, FP32, NCHW>, %b: !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) {
   %a0 = dt.shallow_copy_tensor %a : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW>
   %b0 = dt.shallow_copy_tensor %b : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW>
-  Infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>
+  infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>
 }
...
...
@@ -18,6 +18,6 @@ func @main() {
   %shape = ts.build_shape [1:i64, 57:i64]
   %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
-  %b, %c = Infrt.call @predict(%a, %a) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
-  Infrt.return
+  %b, %c = infrt.call @predict(%a, %a) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
+  infrt.return
 }
paddle/infrt/tests/dialect/tensor/naive_kernels.mlir
...
...
@@ -13,7 +13,7 @@ func @naive_elementwise_add() {
   // CHECK: tensor: shape=shape[2,8], values=[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
   dt.print_tensor (%c : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  Infrt.return
+  infrt.return
 }
 // RUN: infrtexec -i %s | FileCheck %s
...
...
@@ -31,5 +31,5 @@ func @naive_matmul() {
   // CHECK: tensor: shape=shape[2,4], values=[16, 16, 16, 16, 16, 16, 16, 16]
   dt.print_tensor (%c : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  Infrt.return
+  infrt.return
 }
paddle/infrt/tests/dialect/tensor/tensor_map.mlir.in
...
...
@@ -3,12 +3,12 @@
 func @load_tensor_map() {
   %map = dt.load_params(){path="@CMAKE_BINARY_DIR@/multi_fc_model"}
   %size = dt.tensor_map_get_size(%map) -> i32
-  Infrt.print.i32 %size
+  infrt.print.i32 %size
   %a = dt.tensor_map_get_tensor(%map) {name="fc_bias"} -> !infrt.dense_tensor<CPU, FP32, NCHW>
   // CHECK: tensor: shape=shape[2], values=[0, 0]
   dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  Infrt.return
+  infrt.return
 }
paddle/infrt/tests/dialect/tensor/tensor_shape.mlir
...
...
@@ -4,5 +4,5 @@ func @build_tensor1() {
   %a = ts.build_shape [1:i64, 57:i64, 92:i64]
   // CHECK: shape[1,57,92]
   ts.print_shape %a
-  Infrt.return
+  infrt.return
 }
paddle/infrt/tests/dialect/tensor/tensor_type.mlir
...
...
@@ -6,5 +6,5 @@ func @test_tensor_type() {
   // CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
   dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  Infrt.return
+  infrt.return
 }
paddle/infrt/tests/dialect/tensor_shape.mlir
...
...
@@ -4,5 +4,5 @@ func @build_tensor1() {
   %a = ts.build_shape [1:i64, 57:i64, 92:i64]
   // CHECK: shape[1,57,92]
   ts.print_shape %a
-  Infrt.return
+  infrt.return
 }
paddle/infrt/tests/dialect/tensor_type.mlir
...
...
@@ -6,5 +6,5 @@ func @test_tensor_type() {
   // CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
   dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  Infrt.return
+  infrt.return
 }
paddle/infrt/tests/dialect/trt_ops.mlir
...
...
@@ -12,5 +12,5 @@ func @main(%bias:tensor<?xf32>, %c:tensor<?xf32>, %b1:tensor<?xf32>, %b2:tensor<
%d2 = "pd.elementwise_add"(%c2, %bias2) {axis=-1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e2 = "pd.relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
"infrt.return"(%e2) : (tensor<?xf32>)->()
infrt.return %e2 : tensor<?xf32>
}