机器未来 / Paddle (forked from PaddlePaddle / Paddle)

Commit a6abb6e7 (unverified), authored Feb 22, 2022 by 王明冬, committed via GitHub on Feb 22, 2022.
Parent: a710738e

add pten convert pass.test=develop (#39664)

Showing 93 changed files with 1272 additions and 699 deletions.
Changed files (93):

paddle/infrt/CMakeLists.txt  +9 -9
paddle/infrt/api/infrt_api.cc  +1 -2
paddle/infrt/backends/host/phi_allocator.h  +1 -1
paddle/infrt/backends/host/phi_context.h  +1 -1
paddle/infrt/dialect/CMakeLists.txt  +3 -3
paddle/infrt/dialect/basic_kernels.cc  +2 -3
paddle/infrt/dialect/basic_kernels.td  +4 -4
paddle/infrt/dialect/dense_tensor.cc  +1 -89
paddle/infrt/dialect/dense_tensor.h  +2 -60
paddle/infrt/dialect/dense_tensor.td  +12 -12
paddle/infrt/dialect/infrt/CMakeLists.txt  +9 -1
paddle/infrt/dialect/infrt/common_type.cc  +88 -0
paddle/infrt/dialect/infrt/common_type.h  +47 -0
paddle/infrt/dialect/infrt/infrt_dialect.cc  +90 -13
paddle/infrt/dialect/infrt/infrt_dialect.h  +6 -0
paddle/infrt/dialect/infrt/infrt_ops.td  +9 -42
paddle/infrt/dialect/infrt/infrt_ops_base.td  +49 -0
paddle/infrt/dialect/infrt_base.cc  +0 -53
paddle/infrt/dialect/infrt_base.h  +1 -1
paddle/infrt/dialect/infrt_base.td  +2 -4
paddle/infrt/dialect/init_infrt_dialects.cc  +5 -5
paddle/infrt/dialect/mlir_loader_test.cc  +5 -5
paddle/infrt/dialect/pd_op_base.td  +1 -1
paddle/infrt/dialect/phi/CMakeLists.txt  +18 -0
paddle/infrt/dialect/phi/infrt_phi_base.td  +10 -10
paddle/infrt/dialect/phi/infrt_phi_kernel.td  +31 -0
paddle/infrt/dialect/phi/infrt_phi_tensor.cc  +8 -8
paddle/infrt/dialect/phi/infrt_phi_tensor.h  +4 -4
paddle/infrt/dialect/phi/infrt_phi_tensor.td  +11 -11
paddle/infrt/dialect/phi/pass/CMakeLists.txt  +7 -0
paddle/infrt/dialect/phi/pass/kernel_op_desc.cc  +133 -0
paddle/infrt/dialect/phi/pass/kernel_op_desc.h  +32 -0
paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.cc  +116 -0
paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.h  +57 -0
paddle/infrt/dialect/phi/pass/proto_arg_map_context.cc  +73 -0
paddle/infrt/dialect/phi/pass/proto_arg_map_context.h  +55 -0
paddle/infrt/dialect/phi/phi_base.cc  +12 -12
paddle/infrt/dialect/phi/phi_base.h  +4 -4
paddle/infrt/dialect/phi/phi_exec.cc  +47 -0
paddle/infrt/dialect/pten/CMakeLists.txt  +0 -13
paddle/infrt/dialect/pten/infrt_pten_kernel.td  +0 -26
paddle/infrt/dialect/test_kernels.cc  +1 -1
paddle/infrt/dialect/test_kernels.td  +1 -1
paddle/infrt/external_kernels/basic.mlir  +3 -3
paddle/infrt/external_kernels/fc.mlir  +25 -25
paddle/infrt/external_kernels/paddle.mlir  +32 -32
paddle/infrt/host_context/mlir_exec.cc  +4 -4
paddle/infrt/host_context/mlir_tests/basic.mlir  +12 -12
paddle/infrt/host_context/mlir_tests/dense_tensor.mlir  +4 -4
paddle/infrt/host_context/mlir_tests/shape.mlir  +1 -1
paddle/infrt/host_context/mlir_to_runtime_translate.cc  +4 -4
paddle/infrt/host_context/mlir_to_runtime_translate.h  +1 -1
paddle/infrt/host_context/mlir_to_runtime_translate_test.cc  +20 -18
paddle/infrt/host_context/value.cc  +1 -1
paddle/infrt/host_context/value.h  +10 -10
paddle/infrt/kernel/CMakeLists.txt  +2 -2
paddle/infrt/kernel/basic_kernels.cc  +12 -12
paddle/infrt/kernel/control_flow_kernels.cc  +1 -1
paddle/infrt/kernel/phi/CMakeLists.txt  +6 -6
paddle/infrt/kernel/phi/allocator_kernels.cc  +4 -4
paddle/infrt/kernel/phi/allocator_kernels.h  +4 -4
paddle/infrt/kernel/phi/context_kernels.cc  +4 -4
paddle/infrt/kernel/phi/context_kernels.h  +4 -4
paddle/infrt/kernel/phi/dense_tensor_kernels.cc  +4 -4
paddle/infrt/kernel/phi/dense_tensor_kernels.h  +4 -4
paddle/infrt/kernel/phi/infershaped/infershape_launchers_test.cc  +3 -3
paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launcher.cc  +1 -1
paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launcher.h  +0 -0
paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.h  +0 -0
paddle/infrt/kernel/phi/infershaped/infershaped_utils.h  +0 -0
paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h  +2 -2
paddle/infrt/kernel/phi/registry.cc  +15 -15
paddle/infrt/kernel/phi/registry.h  +2 -2
paddle/infrt/kernel/test_kernels.cc  +2 -2
paddle/infrt/pass/CMakeLists.txt  +1 -0
paddle/infrt/tests/dialect/basic.mlir  +14 -22
paddle/infrt/tests/dialect/benchmark.mlir  +7 -7
paddle/infrt/tests/dialect/dense_tensor.mlir  +9 -9
paddle/infrt/tests/dialect/disabled_tensor_map.mlir  +15 -15
paddle/infrt/tests/dialect/disabled_trt_ops.mlir  +3 -3
paddle/infrt/tests/dialect/paddle_ops.mlir  +2 -3
paddle/infrt/tests/dialect/pten/dense_tensor.mlir  +5 -5
paddle/infrt/tests/dialect/pten/pten_pass.mlir  +10 -0
paddle/infrt/tests/dialect/tensor/dense_tensor.mlir  +9 -9
paddle/infrt/tests/dialect/tensor/naive_kernels.mlir  +14 -14
paddle/infrt/tests/dialect/tensor/tensor_map.mlir.in  +5 -5
paddle/infrt/tests/dialect/tensor/tensor_shape.mlir  +1 -1
paddle/infrt/tests/dialect/tensor/tensor_type.mlir  +4 -4
paddle/infrt/tests/dialect/tensor_shape.mlir  +1 -1
paddle/infrt/tests/dialect/tensor_type.mlir  +4 -4
paddle/scripts/infrt_build.sh  +1 -1
tools/infrt/get_phi_kernel_function.sh  +3 -3
tools/infrt/get_phi_kernel_info.py  +9 -9
paddle/infrt/CMakeLists.txt

@@ -2,13 +2,13 @@ if (NOT WITH_INFRT)
   return()
 endif()

-option(INFRT_WITH_PTEN "Compile INFRT with PTEN" ON)
+option(INFRT_WITH_PHI "Compile INFRT with PHI" ON)

 #TODO(xiaowei) remove fluid
 include_directories(${PADDLE_SOURCE_DIR}/paddle/fluid/platform)

-if (INFRT_WITH_PTEN)
-  add_definitions("-DINFRT_WITH_PTEN")
+if (INFRT_WITH_PHI)
+  add_definitions("-DINFRT_WITH_PHI")
 endif()

 # compile flags
@@ -97,16 +97,16 @@ set(infrt_mlir_incs
     rewrite_inc
     trt_ops_inc
     )
-if (INFRT_WITH_PTEN)
-  set(pten_libs pten)
+if (INFRT_WITH_PHI)
+  set(phi_libs pten)
   set(infrt_mlir_incs ${infrt_mlir_incs}
-      MLIRinfrt_pten_tensorIncGen
-      MLIRinfrt_pten_baseIncGen
+      MLIRinfrt_phi_tensorIncGen
+      MLIRinfrt_phi_baseIncGen
       )
 endif()

-cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} ${pten_libs} paddle_framework_proto infrt_naive)
-cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} ${pten_libs} paddle_framework_proto)
+cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} ${phi_libs} paddle_framework_proto infrt_naive)
+cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} ${phi_libs} paddle_framework_proto)
 add_dependencies(infrt ${infrt_mlir_incs} mlir-headers)
 add_custom_target(test_infrt_exec DEPENDS ${INFRT_TEST_TARGETS})
paddle/infrt/api/infrt_api.cc

@@ -42,7 +42,6 @@ using namespace infrt::host_context;  // NOLINT
 using namespace infrt::tensor;        // NOLINT
-using namespace infrt::tensor;        // NOLINT
 using infrt::dt::TensorMapType;       // NOLINT
 using infrt::dt::TensorType;          // NOLINT

 namespace infrt {
@@ -145,7 +144,7 @@ class PredictExecutor : public MlirToRuntimeTranslator {
   // process results
   auto &last_op = predict_func.front().back();
-  if (last_op.getName().getStringRef() == "infrt.return") {
+  if (last_op.getName().getStringRef() == "Infrt.return") {
     for (size_t i = 0; i < last_op.getNumOperands(); ++i) {
       auto *value = AddValue(mlir::Value(last_op.getOperand(i)));
       results_.push_back(ValueRef(value));
paddle/infrt/backends/host/pten_allocator.h → paddle/infrt/backends/host/phi_allocator.h

@@ -16,7 +16,7 @@ limitations under the License. */
 namespace infrt {
 namespace backends {

-class CpuPtenAllocator : public phi::Allocator {
+class CpuPhiAllocator : public phi::Allocator {
  public:
   static void deleter(phi::Allocation* ptr) { ::operator delete(ptr); }
paddle/infrt/backends/host/pten_context.h → paddle/infrt/backends/host/phi_context.h

@@ -16,7 +16,7 @@ limitations under the License. */
 namespace infrt {
 namespace backends {

-class CpuPtenContext : public phi::CPUContext {
+class CpuPhiContext : public phi::CPUContext {
  public:
   using Base = phi::CPUContext;
   using phi::CPUContext::SetEigenDevice;
paddle/infrt/dialect/CMakeLists.txt

@@ -16,7 +16,7 @@ gather_srcs(infrt_src SRCS
 mlir_tablegen_on(basic_kernels)
 mlir_tablegen_on(test_kernels)
-mlir_tablegen_on(infrt_base DIALECT infrt)
+mlir_tablegen_on(infrt_base DIALECT Infrt)
 mlir_tablegen_on(tensor_shape DIALECT ts)
 mlir_tablegen_on(dense_tensor DIALECT dt)
 mlir_tablegen_on(pd_op_base DIALECT pd)
@@ -36,6 +36,6 @@ cc_test_tiny(test_infrt_mlir_loader SRCS mlir_loader_test.cc DEPS infrt ${MLIR_I
 add_subdirectory(infrt)
 add_subdirectory(tensorrt)
-if (INFRT_WITH_PTEN)
-  add_subdirectory(pten)
+if (INFRT_WITH_PHI)
+  add_subdirectory(phi)
 endif()
paddle/infrt/dialect/basic_kernels.cc

@@ -90,7 +90,7 @@ static ParseResult parseReturnOp(OpAsmParser &parser,  // NOLINT
 }

 static void print(OpAsmPrinter &p, CallOp op) {  // NOLINT
-  p << "infrt.call " << op->getAttr("callee") << "(";
+  p << op->getAttr("callee") << "(";
   p.printOperands(op.getOperands());
   p << ")";
   p.printOptionalAttrDict(op->getAttrs(), {"callee"});
@@ -98,7 +98,7 @@ static void print(OpAsmPrinter &p, CallOp op) {  // NOLINT
 }

 static void printConstant(OpAsmPrinter &p, mlir::Operation *op) {  // NOLINT
-  p << op->getName() << " ";
+  p << " ";
   p.printOptionalAttrDict(op->getAttrs(),
                           /*elidedAttrs=*/{"value"});
   if (op->getAttrs().size() > 1) p << ' ';
@@ -128,7 +128,6 @@ static void print(OpAsmPrinter &p, ConstantI64Op op) {  // NOLINT
 }

 static void print(OpAsmPrinter &p, ReturnOp op) {  // NOLINT
-  p << "infrt.return";
   if (op.getNumOperands() > 0) {
     p << ' ';
     p.printOperands(op.getOperands());
paddle/infrt/dialect/basic_kernels.td

@@ -48,10 +48,10 @@ def ConstantF64Op : ConstantOp<"f64", F64, F64Attr>;
 def ReturnOp : INFRT_Op<"return", [Terminator]> {
   let summary = "host executor return operation";
   let description = [{
-      The "infrt.return" operation represents a return operation within a function.
+      The "Infrt.return" operation represents a return operation within a function.

         func @foo() : (i32, f8) {
-          infrt.return %0, %1 : i32, f8
+          Infrt.return %0, %1 : i32, f8
         }
   }];
@@ -112,7 +112,7 @@ def PrintF32Op : PrintOp<"f32", F32>;
 def PrintF64Op : PrintOp<"f64", F64>;

 def GetStringOp : INFRT_Op<"get_string"> {
-  let summary = "infrt.get_string";
+  let summary = "Infrt.get_string";
   let description = [{
     Get a !infrt.string value from the given string attribute.
   }];
@@ -124,7 +124,7 @@ def GetStringOp : INFRT_Op<"get_string"> {
 }

 def PrintStringOp : INFRT_Op<"print_string"> {
-  let summary = "infrt.print_string";
+  let summary = "Infrt.print_string";
   let description = [{
     An operation that prints a string.
   }];
paddle/infrt/dialect/dense_tensor.cc

@@ -39,52 +39,6 @@ void DTDialect::initialize() {
       >();
 }

-llvm::Optional<TargetType> GetTargetType(mlir::StringRef key) {
-  if (key.equals_insensitive("x86"))
-    return TargetType::X86;
-  else if (key.equals_insensitive("cuda"))
-    return TargetType::CUDA;
-  else
-    return llvm::None;
-}
-
-llvm::Optional<LayoutType> GetLayoutType(mlir::StringRef key) {
-  if (key.equals_insensitive("nchw"))
-    return LayoutType::NCHW;
-  else if (key.equals_insensitive("nhwc"))
-    return LayoutType::NHWC;
-  else
-    return llvm::None;
-}
-
-llvm::Optional<PrecisionType> GetPrecisionType(mlir::StringRef key) {
-  if (key.equals_insensitive("i32"))
-    return PrecisionType::I32;
-  else if (key.equals_insensitive("f32"))
-    return PrecisionType::F32;
-  else
-    return llvm::None;
-}
-
-TensorType TensorType::get(mlir::MLIRContext *ctx,
-                           TargetType target,
-                           LayoutType layout,
-                           PrecisionType precision) {
-  return Base::get(ctx, target, layout, precision);
-}
-
-TargetType TensorType::target() { return getImpl()->target_; }
-
-LayoutType TensorType::layout() { return getImpl()->layout_; }
-
-PrecisionType TensorType::precision() { return getImpl()->precision_; }
-
-mlir::raw_ostream &operator<<(mlir::raw_ostream &os, TensorType tensorType) {
-  os << "TensorType<" << tensorType.target() << ", " << tensorType.layout()
-     << ", " << tensorType.precision() << ">";
-  return os;
-}
-
 TensorMapType TensorMapType::get() {
   return Base::get(::infrt::Global::getMLIRContext());
 }
@@ -101,48 +55,6 @@ StringType StringType::get(mlir::MLIRContext *context) {
   return Base::get(context);
 }

-mlir::raw_ostream &operator<<(mlir::raw_ostream &os, TargetType type) {
-  switch (type) {
-    case (TargetType::X86):
-      os << "X86";
-      break;
-    case (TargetType::CUDA):
-      os << "CUDA";
-      break;
-    default:
-      os << "Unsupported";
-  }
-  return os;
-}
-
-mlir::raw_ostream &operator<<(mlir::raw_ostream &os, LayoutType type) {
-  switch (type) {
-    case (LayoutType::NCHW):
-      os << "NCHW";
-      break;
-    case (LayoutType::NHWC):
-      os << "NHWC";
-      break;
-    default:
-      os << "Unsupported";
-  }
-  return os;
-}
-
-mlir::raw_ostream &operator<<(mlir::raw_ostream &os, PrecisionType type) {
-  switch (type) {
-    case (PrecisionType::I32):
-      os << "I32";
-      break;
-    case (PrecisionType::F32):
-      os << "F32";
-      break;
-    default:
-      os << "Unsupported";
-  }
-  return os;
-}
-
 static mlir::Type getTensorType(mlir::MLIRContext *context) {
   auto t_dialect = mlir::Identifier::get("t", context);
   return mlir::OpaqueType::get(t_dialect, "tensor");
@@ -165,7 +77,7 @@ static mlir::ParseResult parseCreateUninitTensorOp(
   if (parser.parseArrow()) return mlir::failure();
   if (parser.parseType(outputRawTypes[0])) return mlir::failure();
-  if (!outputRawTypes[0].isa<TensorType>())
+  if (!outputRawTypes[0].isa<DenseTensorType>())
     return parser.emitError(loc, "invalid kind of type specified");
   result.addTypes(outputTypes);
   return mlir::success();
paddle/infrt/dialect/dense_tensor.h

@@ -19,68 +19,10 @@
 #include <string>

+#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+
 namespace infrt {
 namespace dt {
-enum class TargetType : uint8_t { X86, CUDA };
-enum class LayoutType : uint8_t { NCHW, NHWC };
-enum class PrecisionType : uint8_t { I32, F32 };
-
-llvm::Optional<TargetType> GetTargetType(mlir::StringRef key);
-llvm::Optional<LayoutType> GetLayoutType(mlir::StringRef key);
-llvm::Optional<PrecisionType> GetPrecisionType(mlir::StringRef key);
-
-mlir::raw_ostream &operator<<(mlir::raw_ostream &os, TargetType type);
-mlir::raw_ostream &operator<<(mlir::raw_ostream &os, LayoutType type);
-mlir::raw_ostream &operator<<(mlir::raw_ostream &os, PrecisionType type);
-
-namespace detail {
-struct TensorTypeStorage : public mlir::TypeStorage {
-  TensorTypeStorage(TargetType target,
-                    LayoutType layout,
-                    PrecisionType precision)
-      : target_(target), layout_(layout), precision_(precision) {}
-
-  using KeyTy = std::tuple<TargetType, LayoutType, PrecisionType>;
-
-  bool operator==(const KeyTy &key) const {
-    return key == KeyTy(target_, layout_, precision_);
-  }
-
-  static llvm::hash_code hashKey(const KeyTy &key) {
-    return llvm::hash_value(key);
-  }
-
-  static TensorTypeStorage *construct(
-      mlir::TypeStorageAllocator &allocator,  // NOLINT
-      const KeyTy &key) {
-    return new (allocator.allocate<TensorTypeStorage>())
-        TensorTypeStorage(
-            std::get<0>(key), std::get<1>(key), std::get<2>(key));
-  }
-
-  TargetType target_;
-  LayoutType layout_;
-  PrecisionType precision_;
-};
-}  // namespace detail
-
-class TensorType : public mlir::Type::TypeBase<TensorType,
-                                               mlir::Type,
-                                               detail::TensorTypeStorage> {
- public:
-  using Base::Base;
-
-  static TensorType get(mlir::MLIRContext *ctx,
-                        TargetType target,
-                        LayoutType layout,
-                        PrecisionType precision);
-
-  TargetType target();
-  LayoutType layout();
-  PrecisionType precision();
-};
-
-mlir::raw_ostream &operator<<(mlir::raw_ostream &os, TensorType tensorType);

 class TensorMapType : public mlir::Type::TypeBase<TensorMapType,
                                                   mlir::Type,
                                                   mlir::TypeStorage> {
paddle/infrt/dialect/dense_tensor.td

@@ -28,7 +28,7 @@ class CreateUninitTensorOp<string dtype>
   }];

   let arguments = (ins I64ArrayAttr:$shape);
-  let results = (outs TensorType:$output);
+  let results = (outs DenseTensor:$output);

   let parser = [{ return infrt::dt::parseCreateUninitTensorOp(parser, result); }];
   let printer = [{ return infrt::dt::printCreateUninitTensorOp(p, *this); }];
@@ -43,8 +43,8 @@ def ShallowCopyTensorOp
     An operation that copy a tensor shallowly.
   }];

-  let arguments = (ins TensorType:$input);
-  let results = (outs TensorType:$output);
+  let arguments = (ins DenseTensor:$input);
+  let results = (outs DenseTensor:$output);

   let assemblyFormat = "$input attr-dict `:` type($input) `->` type($output)";
 }
@@ -59,7 +59,7 @@ class FillTensorWithConstantOp<string dtype> :
   }];

   let arguments = (ins
-    TensorType:$input,
+    DenseTensor:$input,
     AnyAttr:$value
   );
   let results = (outs);
@@ -77,7 +77,7 @@ def PrintTensorOp : DT_Op<"print_tensor"> {
     An operation that prints a tensor.
   }];

-  let arguments = (ins TensorType:$input);
+  let arguments = (ins DenseTensor:$input);
   let results = (outs);
   let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict";
 }
@@ -90,7 +90,7 @@ class SetTensorOp<string dtype> :
     An operation that sets an input tensor with given values.
   }];

-  let arguments = (ins TensorType);
+  let arguments = (ins DenseTensor);
   let results = (outs);

   let parser = [{ return infrt::dt::parseSetTensorOp(parser, result); }];
@@ -125,7 +125,7 @@ def TensorMapGetTensorOp : DT_Op<"tensor_map_get_tensor", [NoSideEffect]> {
     TensorMapType:$map,
     StrAttr:$name
   );
-  let results = (outs TensorType:$output);
+  let results = (outs DenseTensor:$output);
   let assemblyFormat = "`(` operands `)` attr-dict `->` type($output)";
   let verifier = ?;
 }
@@ -149,7 +149,7 @@ def GetTensorShapeOp : DT_Op<"get_tensor_shape", [NoSideEffect]> {
     An operation that returns the shape of the input tensor.
   }];

-  let arguments = (ins TensorType:$input);
+  let arguments = (ins DenseTensor:$input);
   let results = (outs TS_Shape:$output);
   let assemblyFormat = "$input attr-dict `:` type($input) `->` type($output)";
 }
@@ -162,8 +162,8 @@ class NaiveElementwiseAddOp<string dtype> :
     Naive elementwise_add operation.
     Just for testing.
   }];
-  let arguments = (ins TensorType:$a, TensorType:$b);
-  let results = (outs TensorType:$output);
+  let arguments = (ins DenseTensor:$a, DenseTensor:$b);
+  let results = (outs DenseTensor:$output);
   let assemblyFormat = "`(` $a `,` $b `)` attr-dict `:` `(` type($a) `,` type($b) `)` `->` type($output)";
 }
@@ -175,8 +175,8 @@ class NaiveMatmulOp<string dtype> :
     Naive matmul operation.
     Just for testing.
   }];
-  let arguments = (ins TensorType:$x, TensorType:$w);
-  let results = (outs TensorType:$output);
+  let arguments = (ins DenseTensor:$x, DenseTensor:$w);
+  let results = (outs DenseTensor:$output);
   let assemblyFormat = "`(` $x `,` $w `)` attr-dict `:` `(` type($x) `,` type($w) `)` `->` type($output)";
 }
paddle/infrt/dialect/infrt/CMakeLists.txt
浏览文件 @
a6abb6e7
core_gather_headers
()
gather_srcs
(
infrt_src SRCS
common_type.cc
infrt_dialect.cc
)
add_mlir_dialect
(
infrt_ops Infrt
)
add_mlir_dialect
(
infrt_ops infrt
)
set
(
LLVM_TARGET_DEFINITIONS infrt_ops.td
)
mlir_tablegen
(
infrt_opsAttributes.h.inc -gen-attrdef-decls -dialect=infrt
)
mlir_tablegen
(
infrt_opsAttributes.cpp.inc -gen-attrdef-defs -dialect=infrt
)
add_public_tablegen_target
(
MLIRinfrt_opsAttributesIncGen
)
add_dependencies
(
mlir-headers MLIRinfrt_opsAttributesIncGen
)
paddle/infrt/dialect/infrt/common_type.cc (new file, mode 100644)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/infrt/dialect/infrt/common_type.h"

namespace infrt {

llvm::Optional<TargetType> GetTargetType(llvm::StringRef key) {
  if (key.equals_insensitive("CPU"))
    return TargetType::CPU;
  else if (key.equals_insensitive("GPU"))
    return TargetType::GPU;
  else
    return llvm::None;
}

llvm::Optional<LayoutType> GetLayoutType(llvm::StringRef key) {
  if (key.equals_insensitive("NCHW"))
    return LayoutType::NCHW;
  else if (key.equals_insensitive("NHWC"))
    return LayoutType::NHWC;
  else
    return llvm::None;
}

llvm::Optional<PrecisionType> GetPrecisionType(llvm::StringRef key) {
  if (key.equals_insensitive("FP32"))
    return PrecisionType::FLOAT32;
  else if (key.equals_insensitive("FP16"))
    return PrecisionType::FLOAT16;
  else
    return llvm::None;
}

llvm::raw_ostream &operator<<(llvm::raw_ostream &os, TargetType type) {
  switch (type) {
    case (TargetType::CPU):
      os << "CPU";
      break;
    case (TargetType::GPU):
      os << "GPU";
      break;
    default:
      os << "Unsupported";
  }
  return os;
}

llvm::raw_ostream &operator<<(llvm::raw_ostream &os, LayoutType type) {
  switch (type) {
    case (LayoutType::NCHW):
      os << "NCHW";
      break;
    case (LayoutType::NHWC):
      os << "NHWC";
      break;
    default:
      os << "Unsupported";
  }
  return os;
}

llvm::raw_ostream &operator<<(llvm::raw_ostream &os, PrecisionType type) {
  switch (type) {
    case (PrecisionType::FLOAT32):
      os << "FP32";
      break;
    case (PrecisionType::FLOAT16):
      os << "FP16";
      break;
    default:
      os << "Unsupported";
  }
  return os;
}

}  // namespace infrt
paddle/infrt/dialect/infrt/common_type.h (new file, mode 100644)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <llvm/ADT/Optional.h>
#include <llvm/ADT/StringRef.h>
#include <llvm/Support/raw_ostream.h>

namespace infrt {

enum class TargetType : uint8_t { CPU, GPU, UNK };
enum class PrecisionType : uint8_t { FLOAT32, FLOAT16, UNK };
enum class LayoutType : uint8_t { NCHW, NHWC, UNK };

struct Place {
  TargetType target;
  PrecisionType precision;
  LayoutType layout;
  Place(TargetType tar, PrecisionType pre, LayoutType lay)
      : target(tar), precision(pre), layout(lay) {}
  Place()
      : target(TargetType::UNK),
        precision(PrecisionType::UNK),
        layout(LayoutType::UNK) {}
};

llvm::Optional<TargetType> GetTargetType(llvm::StringRef key);
llvm::Optional<LayoutType> GetLayoutType(llvm::StringRef key);
llvm::Optional<PrecisionType> GetPrecisionType(llvm::StringRef key);

llvm::raw_ostream &operator<<(llvm::raw_ostream &os, TargetType type);
llvm::raw_ostream &operator<<(llvm::raw_ostream &os, LayoutType type);
llvm::raw_ostream &operator<<(llvm::raw_ostream &os, PrecisionType type);

}  // end namespace infrt
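
The Place struct above bundles target, precision, and layout into the placement key the new pass machinery dispatches on. A minimal sketch of how these helpers compose; this snippet is not part of the commit and only assumes the declarations reconstructed above:

#include <llvm/Support/raw_ostream.h>
#include "paddle/infrt/dialect/infrt/common_type.h"

int main() {
  // Build a fully specified place: CPU target, FP32 precision, NCHW layout.
  infrt::Place place(infrt::TargetType::CPU,
                     infrt::PrecisionType::FLOAT32,
                     infrt::LayoutType::NCHW);

  // String -> enum lookups are case-insensitive and return llvm::None for
  // unknown keys, so callers can validate user-provided keywords.
  if (auto target = infrt::GetTargetType("cpu"))
    llvm::outs() << *target << "\n";  // prints "CPU"
  if (!infrt::GetPrecisionType("FP64"))
    llvm::outs() << "FP64 unsupported\n";

  // The operator<< overloads print the same keywords the textual IR uses.
  llvm::outs() << place.target << ", " << place.precision << ", "
               << place.layout << "\n";
  return 0;
}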
paddle/infrt/dialect/infrt/infrt_dialect.cc

@@ -23,6 +23,9 @@
 #define GET_TYPEDEF_CLASSES
 #include "paddle/infrt/dialect/infrt/infrt_opsTypes.cpp.inc"

+#define GET_ATTRDEF_CLASSES
+#include "paddle/infrt/dialect/infrt/infrt_opsAttributes.cpp.inc"
+
 #define GET_OP_CLASSES
 #include "paddle/infrt/dialect/infrt/infrt_ops.cpp.inc"
@@ -33,6 +36,12 @@ void InfrtDialect::initialize() {
 #define GET_TYPEDEF_LIST
 #include "paddle/infrt/dialect/infrt/infrt_opsTypes.cpp.inc"  // NOLINT
       >();
+  addAttributes<
+#define GET_ATTRDEF_LIST
+#include "paddle/infrt/dialect/infrt/infrt_opsAttributes.cpp.inc"  // NOLINT
+      >();
+
   addOperations<
 #define GET_OP_LIST
 #include "paddle/infrt/dialect/infrt/infrt_ops.cpp.inc"  // NOLINT
@@ -57,36 +66,104 @@ mlir::Type InfrtDialect::parseType(::mlir::DialectAsmParser &parser) const {
     // Parse the element type.
     if (parser.parseType(elementType)) return nullptr;
-    // parse ","
-    if (parser.parseComma()) return nullptr;
-
-    // llvm::APInt lod_level;
-    if (parser.parseInteger(lod_level)) return nullptr;
-
+    // parse optional lod_level
+    if (parser.parseOptionalComma().succeeded()) {
+      // llvm::APInt lod_level;
+      if (parser.parseInteger(lod_level)) return nullptr;
+    }
     // parse ">"
     if (parser.parseGreater()) return nullptr;

     return LoDTensorType::get(
         parser.getContext(), shape, elementType, lod_level);
   }
+  if (keyword == "dense_tensor") {
+    // parse DenseTensor, for example: !i=Infrt.tensor<X86, CUDA, F32>
+    llvm::StringRef target;
+    llvm::StringRef layout;
+    llvm::StringRef precision;
+
+    // parse "<"
+    if (parser.parseLess()) return mlir::Type();
+    // parse target
+    if (parser.parseKeyword(&target)) return mlir::Type();
+    auto targetType = GetTargetType(target);
+    if (!targetType) {
+      parser.emitError(parser.getCurrentLocation(), "unknown target type: ")
+          << target;
+      return mlir::Type();
+    }
+
+    // parse ","
+    if (parser.parseComma()) return mlir::Type();
+    // parse precision
+    if (parser.parseKeyword(&precision)) return mlir::Type();
+    auto precisionType = GetPrecisionType(precision);
+    if (!precisionType) {
+      parser.emitError(parser.getCurrentLocation(), "unknown precision type: ")
+          << precision;
+      return mlir::Type();
+    }
+
+    // parse ","
+    if (parser.parseComma()) return mlir::Type();
+
+    // parse layout
+    if (parser.parseKeyword(&layout)) return mlir::Type();
+    auto layoutType = GetLayoutType(layout);
+    if (!layoutType) {
+      parser.emitError(parser.getCurrentLocation(), "unknown layout type: ")
+          << layout;
+      return mlir::Type();
+    }
+
+    // parse ">"
+    if (parser.parseGreater()) return mlir::Type();
+    return DenseTensorType::get(
+        parser.getContext(), *targetType, *precisionType, *layoutType);
+  }
   // Todo: parse other type
   return mlir::Type();
 }

 void InfrtDialect::printType(::mlir::Type type,
                              ::mlir::DialectAsmPrinter &os) const {
-  // print TensorType, for example: !infrt.tensor<X86, CUDA, F32>
+  // print LoDTensorType, for example: !Infrt.lod_tensor<3x64x3x3xf32,5>
   if (type.isa<infrt::LoDTensorType>()) {
-    auto lodTensorType = type.cast<infrt::LoDTensorType>();
+    auto lod_tensor_type = type.cast<infrt::LoDTensorType>();
     os << "lod_tensor<";
-    auto shape = lodTensorType.getShape();
-    for (auto dim = shape.begin(), e = shape.end() - 1; dim != e; ++dim)
-      os << *dim << 'x';
-    os << shape.back() << 'x' << lodTensorType.getElementType() << ", "
-       << lodTensorType.getLod_level() << ">";
+    auto shape = lod_tensor_type.getShape();
+    for (auto dim = shape.begin(), e = shape.end() - 1; dim != e; ++dim) {
+      *dim < 0 ? os << '?' : os << *dim;
+      os << 'x';
+    }
+    shape.back() < 0 ? os << '?' : os << shape.back();
+    os << 'x' << lod_tensor_type.getElementType() << ", "
+       << lod_tensor_type.getLod_level() << ">";
     return;
   }
+
+  // print DenseTensorType, for example: !infrt.dense_tensor<CPU, FP32, NCHW>
+  if (type.isa<infrt::DenseTensorType>()) {
+    auto dense_tensor_type = type.cast<infrt::DenseTensorType>();
+    os << "dense_tensor<" << dense_tensor_type.getTarget() << ", "
+       << dense_tensor_type.getPrecision() << ", "
+       << dense_tensor_type.getLayout() << ">";
+    return;
+  }
   llvm_unreachable("unknown infrt type.");
 }

+// /// Parse an attribute registered to this dialect.
+// ::mlir::Attribute InfrtDialect::parseAttribute(::mlir::DialectAsmParser
+// &parser,
+//                                                ::mlir::Type type) const {
+//   return mlir::Attribute();
+// }
+// /// Print an attribute registered to this dialect.
+// void InfrtDialect::printAttribute(::mlir::Attribute attr,
+//                                   ::mlir::DialectAsmPrinter &os) const {
+// }

 }  // namespace infrt
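
For reference, a hedged round-trip sketch of the two textual forms the reconstructed parseType/printType handle. This snippet is not from the commit; it assumes mlir::parseType from the mlir/Parser.h of this era and the tablegen-generated type classes:

#include <cassert>

#include <mlir/IR/MLIRContext.h>
#include <mlir/Parser.h>  // assumed home of mlir::parseType in MLIR ~13/14

#include "paddle/infrt/dialect/infrt/infrt_dialect.h"

void CheckInfrtTypes() {
  mlir::MLIRContext context;
  context.getOrLoadDialect<infrt::InfrtDialect>();

  // DenseTensorType spells target, precision, layout keywords in that order.
  mlir::Type dense =
      mlir::parseType("!infrt.dense_tensor<CPU, FP32, NCHW>", &context);
  assert(dense && dense.isa<infrt::DenseTensorType>());

  // LoDTensorType: after this patch the trailing lod_level is optional.
  mlir::Type lod = mlir::parseType("!infrt.lod_tensor<3x64xf32>", &context);
  assert(lod && lod.isa<infrt::LoDTensorType>());
}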
paddle/infrt/dialect/infrt/infrt_dialect.h

@@ -17,13 +17,19 @@
 //===----------------------------------------------------------------------===//
 // Dialect
 //===----------------------------------------------------------------------===//
+#include <llvm/ADT/StringMap.h>
 #include <mlir/IR/BuiltinTypes.h>
 #include <mlir/IR/Dialect.h>
 #include <mlir/IR/OpDefinition.h>
 #include <mlir/Interfaces/SideEffectInterfaces.h>

+#include "paddle/infrt/dialect/infrt/common_type.h"
 #include "paddle/infrt/dialect/infrt/infrt_opsDialect.h.inc"
 #define GET_TYPEDEF_CLASSES
 #include "paddle/infrt/dialect/infrt/infrt_opsTypes.h.inc"
+#define GET_ATTRDEF_CLASSES
+#include "paddle/infrt/dialect/infrt/infrt_opsAttributes.h.inc"
 #define GET_OP_CLASSES
 #include "paddle/infrt/dialect/infrt/infrt_ops.h.inc"
paddle/infrt/dialect/infrt/infrt_ops.td

 #ifndef Infrt_OpS
 #define Infrt_OpS

-include "mlir/IR/OpBase.td"
-include "mlir/Interfaces/SideEffectInterfaces.td"
-
-def Infrt_Dialect : Dialect {
-  let summary =
-    "A dialect containing the Infrt Attributes, Operations, and Types";
-
-  let name = "Infrt";
-  let cppNamespace = "::infrt";
-}
-
-// Type definitions
-
-// Base class for Infrt dialect types.
-class Infrt_Type<string name, list<Trait> traits = [],
-                 string baseCppClass = "::mlir::Type">
-    : TypeDef<Infrt_Dialect, name, traits, baseCppClass> {
-}
-
-def LoDTensor : Infrt_Type<"LoDTensor"> {
-  let summary = "infrt lod tensor";
-  let description = [{lod_tensor<3x64x3x3xf32, 3>}];
-  let parameters = (ins
-    ArrayRefParameter<"int64_t">:$shape,
-    "mlir::Type":$elementType,
-    "int32_t":$lod_level
-  );
-}
+include "paddle/infrt/dialect/infrt/infrt_ops_base.td"

 // Op definition
 class Infrt_Op<string mnemonic, list<OpTrait> traits = []> : Op<Infrt_Dialect, mnemonic, traits> {
@@ -39,14 +9,11 @@ class Infrt_Op<string mnemonic, list<OpTrait> traits = []> : Op<Infrt_Dialect, m
   // let parser = [{ return infrt::parse$cppClass(parser, result); }];
 }

-// def InfRT_KernelOp : Infrt_Op<"kernel", [NoSideEffect]> {
-//   let summary = "kernel op";
-//   let description = [{
-//       kernel op!
-//   }];
-//   let arguments = (ins StrAttr:$name, PD_Tensor:$X, PD_Tensor:$Y, DefaultValuedAttr<F32Attr, "1.0">:$Alpha, DefaultValuedAttr<F32Attr, "1.0">:$Beta);
-//
-//   let results = (outs PD_Tensor:$Out);
-// }
+def Infrt_KernelOp : Infrt_Op<"kernel", [NoSideEffect]> {
+  let summary = "kernel op";
+  let description = [{kernel op!}];
+  let arguments = (ins Variadic<AnyType>:$operands,
+                       StrAttr:$name,
+                       OptionalAttr<DictionaryAttr>:$attrs);
+  let results = (outs Variadic<AnyType>);
+}

 #endif // Infrt_OpS
paddle/infrt/dialect/infrt/infrt_ops_base.td (new file, mode 100644)

#ifndef INFRT_OPS_BASE
#define INFRT_OPS_BASE

include "mlir/IR/OpBase.td"
include "mlir/Interfaces/SideEffectInterfaces.td"

def Infrt_Dialect : Dialect {
  let summary =
    "A dialect containing the Infrt Attributes, Operations, and Types";

  let name = "infrt";
  let cppNamespace = "::infrt";
}

// Type definitions

// Base class for Infrt dialect types.
class Infrt_Type<string name, list<Trait> traits = [],
                 string baseCppClass = "::mlir::Type">
    : TypeDef<Infrt_Dialect, name, traits, baseCppClass> {
}

def LoDTensor : Infrt_Type<"LoDTensor"> {
  let summary = "infrt lod tensor";
  let description = [{lod_tensor<3x64x3x3xf32, 3>}];
  let parameters = (ins
    ArrayRefParameter<"int64_t">:$shape,
    "mlir::Type":$elementType,
    "int32_t":$lod_level
  );
}

def DenseTensor : Infrt_Type<"DenseTensor"> {
  let summary = "infrt dense tensor";
  let description = [{dense_tensor<, 3>}];
  let parameters = (ins
    "TargetType":$target,
    "PrecisionType":$precision,
    "LayoutType":$layout
  );
}

// Base class for infrt dialect attributes.
class Infrt_Attr<string name, list<Trait> traits = [],
                 string baseCppClass = "::mlir::Attribute">
    : AttrDef<Infrt_Dialect, name, traits, baseCppClass> {
  let mnemonic = ?;
}

#endif // INFRT_OPS_BASE
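
TableGen turns the DenseTensor declaration above into a generated infrt::DenseTensorType C++ class; the diff's infrt_dialect.cc calls its get(...) with the same parameter order declared here (target, precision, layout). A small illustrative helper, not part of the commit:

#include <mlir/IR/MLIRContext.h>

#include "paddle/infrt/dialect/infrt/infrt_dialect.h"

// Build the generated type in C++ rather than through the textual parser.
infrt::DenseTensorType MakeCpuFp32Tensor(mlir::MLIRContext* context) {
  // Parameter order matches the TableGen declaration:
  // target, precision, layout.
  return infrt::DenseTensorType::get(context,
                                     infrt::TargetType::CPU,
                                     infrt::PrecisionType::FLOAT32,
                                     infrt::LayoutType::NCHW);
}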
paddle/infrt/dialect/infrt_base.cc

@@ -27,7 +27,6 @@ void INFRTDialect::initialize() {
   allowUnknownOperations();

   addTypes<infrt::dt::StringType>();
-  addTypes<infrt::dt::TensorType>();
   addTypes<infrt::dt::TensorMapType>();

   addOperations<
@@ -43,51 +42,6 @@ void INFRTDialect::initialize() {
 mlir::Type INFRTDialect::parseType(mlir::DialectAsmParser &parser) const {
   llvm::StringRef keyword;
   if (parser.parseKeyword(&keyword)) return mlir::Type();
-  // parse TensorType, for example: !infrt.tensor<X86, CUDA, F32>
-  if (keyword == "tensor") {
-    llvm::StringRef target;
-    llvm::StringRef layout;
-    llvm::StringRef precision;
-
-    // parse "<"
-    if (parser.parseLess()) return mlir::Type();
-    // parse target
-    if (parser.parseKeyword(&target)) return mlir::Type();
-    auto targetType = infrt::dt::GetTargetType(target);
-    if (!targetType) {
-      parser.emitError(parser.getCurrentLocation(), "unknown target type: ")
-          << target;
-      return mlir::Type();
-    }
-
-    // parse ","
-    if (parser.parseComma()) return mlir::Type();
-    // parse layout
-    if (parser.parseKeyword(&layout)) return mlir::Type();
-    auto layoutType = infrt::dt::GetLayoutType(layout);
-    if (!layoutType) {
-      parser.emitError(parser.getCurrentLocation(), "unknown layout type: ")
-          << layout;
-      return mlir::Type();
-    }
-
-    // parse ","
-    if (parser.parseComma()) return mlir::Type();
-    // parse precision
-    if (parser.parseKeyword(&precision)) return mlir::Type();
-    auto precisionType = infrt::dt::GetPrecisionType(precision);
-    if (!precisionType) {
-      parser.emitError(parser.getCurrentLocation(), "unknown precision type: ")
-          << precision;
-      return mlir::Type();
-    }
-
-    // parse ">"
-    if (parser.parseGreater()) return mlir::Type();
-
-    return infrt::dt::TensorType::get(
-        parser.getContext(), *targetType, *layoutType, *precisionType);
-  }
   // parse TensorMapType, for example: !infrt.tensor_map
   if (keyword == "tensor_map") {
     return infrt::dt::TensorMapType::get();
@@ -104,13 +58,6 @@ mlir::Type INFRTDialect::parseType(mlir::DialectAsmParser &parser) const {
 void INFRTDialect::printType(mlir::Type type,
                              mlir::DialectAsmPrinter &printer) const {
-  // print TensorType, for example: !infrt.tensor<X86, CUDA, F32>
-  if (type.isa<infrt::dt::TensorType>()) {
-    auto tensorType = type.cast<infrt::dt::TensorType>();
-    printer << "tensor<" << tensorType.target() << ", " << tensorType.layout()
-            << ", " << tensorType.precision() << ">";
-    return;
-  }
   // print TensorMapType, for example: !infrt.tensor_map
   if (type.isa<infrt::dt::TensorMapType>()) {
     printer << "tensor_map";
paddle/infrt/dialect/infrt_base.h

@@ -43,7 +43,7 @@ class INFRTDialect : public mlir::Dialect {
   friend class mlir::MLIRContext;

  public:
-  static ::llvm::StringRef getDialectNamespace() { return "infrt"; }
+  static ::llvm::StringRef getDialectNamespace() { return "Infrt"; }
 };
 }  // namespace dialect
paddle/infrt/dialect/infrt_base.td

@@ -2,9 +2,10 @@
 #define INFRT_BASE

 include "mlir/IR/OpBase.td"
+include "paddle/infrt/dialect/infrt/infrt_ops_base.td"

 def INFRT_Dialect : Dialect {
-  let name = "infrt";
+  let name = "Infrt";

   let description = [{
     The INFRT host dialect.
@@ -18,9 +19,6 @@ def StringType :
     Type<CPred<"$_self.isa<::infrt::dt::StringType>()">, "!infrt.string type">,
     BuildableType<"$_builder.getType<::infrt::dt::StringType>()">;

-def TensorType :
-    Type<CPred<"$_self.isa<::infrt::dt::TensorType>()">, "!infrt.tensor type">;
-
 def TensorMapType :
     Type<CPred<"$_self.isa<::infrt::dt::TensorMapType>()">, "!infrt.tensor_map type">,
     BuildableType<"$_builder.getType<::infrt::dt::TensorMapType>()">;
paddle/infrt/dialect/init_infrt_dialects.cc

@@ -21,8 +21,8 @@
 #include "paddle/infrt/dialect/infrt/infrt_dialect.h"
 #include "paddle/infrt/dialect/infrt_base.h"
 #include "paddle/infrt/dialect/pd_ops.h"
-#include "paddle/infrt/dialect/pten/infrt_pten_tensor.h"
-#include "paddle/infrt/dialect/pten/pten_base.h"
+#include "paddle/infrt/dialect/phi/infrt_phi_tensor.h"
+#include "paddle/infrt/dialect/phi/phi_base.h"
 #include "paddle/infrt/dialect/tensor_shape.h"

 namespace infrt {
@@ -32,9 +32,9 @@ void registerCinnDialects(mlir::DialectRegistry &registry) {  // NOLINT
                   infrt::InfrtDialect,
                   dt::DTDialect,
                   mlir::pd::PaddleDialect,
-#ifdef INFRT_WITH_PTEN
-                  pten::PTENDenseTensorDialect,
-                  pten::PTENDialect
+#ifdef INFRT_WITH_PHI
+                  phi::PHIDenseTensorDialect,
+                  phi::PHIDialect
 #endif
                   >();
 }
paddle/infrt/dialect/mlir_loader_test.cc

@@ -32,13 +32,13 @@ TEST(MlirLoader, basic) {
   auto source = R"ROC(
 func @main() -> f32 {
-  %v0 = infrt.constant.f32 1.0
-  %v1 = infrt.constant.f32 2.0
-  %value = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
+  %v0 = Infrt.constant.f32 1.0
+  %v1 = Infrt.constant.f32 2.0
+  %value = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32

-  "infrt.print.f32"(%v0) : (f32) -> ()
+  "Infrt.print.f32"(%v0) : (f32) -> ()

-  infrt.return %value : f32
+  Infrt.return %value : f32
 }
 )ROC";
paddle/infrt/dialect/pd_op_base.td

@@ -6,7 +6,7 @@
 include "mlir/IR/OpBase.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
-include "paddle/infrt/dialect/infrt/infrt_ops.td"
+include "paddle/infrt/dialect/infrt/infrt_ops_base.td"

 def PD_Dialect : Dialect {
   let name = "pd";
paddle/infrt/dialect/phi/CMakeLists.txt (new file, mode 100644)

if (NOT INFRT_WITH_PHI)
  return()
endif()

#mlir_tablegen_on(infrt_phi_base DIALECT phi)
add_mlir_dialect(infrt_phi_base phi)
add_mlir_dialect(infrt_phi_tensor phi_dt)
add_mlir_dialect(infrt_phi_kernel phi_kernel)
#mlir_tablegen_on(infrt_phi_tensor)

gather_srcs(infrt_src SRCS
    phi_base.cc
    infrt_phi_tensor.cc)

add_subdirectory(pass)

add_executable(phi-exec phi_exec.cc)
target_link_libraries(phi-exec infrt)
paddle/infrt/dialect/pten/infrt_pten_base.td → paddle/infrt/dialect/phi/infrt_phi_base.td

-#ifndef PTEN_BASE
-#define PTEN_BASE
+#ifndef PHI_BASE
+#define PHI_BASE

 include "mlir/IR/OpBase.td"

-def PTEN_Dialect : Dialect {
-  let name = "pten";
+def PHI_Dialect : Dialect {
+  let name = "phi";

   let description = [{
-    The PTEN host dialect.
+    The PHI host dialect.
   }];

-  let cppNamespace = "::infrt::pten";
+  let cppNamespace = "::infrt::phi";
 }

 class AllocatorTypeOf<string place, list<Trait> traits=[]>:
-    TypeDef<PTEN_Dialect, place # "Allocator", traits> {
-  let summary = !strconcat("!pten.allocator_", place, " type");
+    TypeDef<PHI_Dialect, place # "Allocator", traits> {
+  let summary = !strconcat("!phi.allocator_", place, " type");
 }

 class ContextTypeOf<string place, list<Trait> traits=[]>:
-    TypeDef<PTEN_Dialect, place # "Context", traits> {
-  let summary = !strconcat("!pten.context_", place, " type");
+    TypeDef<PHI_Dialect, place # "Context", traits> {
+  let summary = !strconcat("!phi.context_", place, " type");
 }

 def CPU_Allocator : AllocatorTypeOf<"CPU">;
paddle/infrt/dialect/phi/infrt_phi_kernel.td (new file, mode 100644)

#ifndef PHI_KERNEL
#define PHI_KERNEL

include "paddle/infrt/dialect/phi/infrt_phi_tensor.td"

def PHI_KernelDialect : Dialect {
  let name = "phi_kernel";

  let description = [{
    The PHI Kernel dialect.
  }];

  let cppNamespace = "::infrt::phi";
}

// PHI Kernel related ops.
class PDT_Kernel<string mnemonic, list<OpTrait> traits = []> : Op<PHI_KernelDialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> {
}

def FakeKernelOp : PDT_Kernel<"phi.matmul.host.fp32"> {
  let arguments = (ins CPU_Context:$dev_ctx, DenseTensor:$x, DenseTensor:$y, BoolAttr:$transpose_x, BoolAttr:$transpose_y);
  let results = (outs DenseTensor:$output);
}

def PDCK_AbsOp : PDT_Kernel<"phi.abs.host.fp32"> {
  let arguments = (ins CPU_Context:$dev_ctx, DenseTensor:$x);
  let results = (outs DenseTensor:$output);
}

#endif
paddle/infrt/dialect/pten/infrt_pten_tensor.cc → paddle/infrt/dialect/phi/infrt_phi_tensor.cc

@@ -12,25 +12,25 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/infrt/dialect/pten/infrt_pten_tensor.h"
+#include "paddle/infrt/dialect/phi/infrt_phi_tensor.h"

 #include <mlir/IR/BuiltinTypes.h>
-#include "paddle/infrt/dialect/pten/infrt_pten_tensorDialect.cpp.inc"
-#include "paddle/infrt/dialect/pten/infrt_pten_tensorTypes.cpp.inc"
+#include "paddle/infrt/dialect/phi/infrt_phi_tensorDialect.cpp.inc"
+#include "paddle/infrt/dialect/phi/infrt_phi_tensorTypes.cpp.inc"

 namespace infrt {
-namespace pten {
+namespace phi {

-void PTENDenseTensorDialect::initialize() {
+void PHIDenseTensorDialect::initialize() {
 #define GET_OP_LIST
   addOperations<
-#include "paddle/infrt/dialect/pten/infrt_pten_tensor.cpp.inc"
+#include "paddle/infrt/dialect/phi/infrt_phi_tensor.cpp.inc"
       >();
 }

-}  // namespace pten
+}  // namespace phi
 }  // namespace infrt

 #define GET_OP_CLASSES
-#include "paddle/infrt/dialect/pten/infrt_pten_tensor.cpp.inc"  // NOLINT
+#include "paddle/infrt/dialect/phi/infrt_phi_tensor.cpp.inc"  // NOLINT
paddle/infrt/dialect/pten/infrt_pten_tensor.h → paddle/infrt/dialect/phi/infrt_phi_tensor.h

@@ -29,11 +29,11 @@
 #include <mlir/Interfaces/LoopLikeInterface.h>
 #include <mlir/Interfaces/SideEffectInterfaces.h>

-#include "paddle/infrt/dialect/pten/infrt_pten_tensorDialect.h.inc"
-#include "paddle/infrt/dialect/pten/infrt_pten_tensorTypes.h.inc"
+#include "paddle/infrt/dialect/phi/infrt_phi_tensorDialect.h.inc"
+#include "paddle/infrt/dialect/phi/infrt_phi_tensorTypes.h.inc"

 #include "paddle/infrt/dialect/dense_tensor.h"
-#include "paddle/infrt/dialect/pten/pten_base.h"
+#include "paddle/infrt/dialect/phi/phi_base.h"

 // NOLINT
 #define GET_OP_CLASSES
-#include "paddle/infrt/dialect/pten/infrt_pten_tensor.h.inc"
+#include "paddle/infrt/dialect/phi/infrt_phi_tensor.h.inc"
paddle/infrt/dialect/pten/infrt_pten_tensor.td → paddle/infrt/dialect/phi/infrt_phi_tensor.td

-#ifdef PTEN_TENSOR
+#ifdef PHI_TENSOR
 #else
-#define PTEN_TENSOR
+#define PHI_TENSOR

-include "paddle/infrt/dialect/pten/infrt_pten_base.td"
+include "paddle/infrt/dialect/phi/infrt_phi_base.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
 include "mlir/IR/OpBase.td"
 include "paddle/infrt/dialect/infrt_base.td"

-def PTEN_DenseTensorDialect : Dialect {
-  let name = "pten_dt";
+def PHI_DenseTensorDialect : Dialect {
+  let name = "phi_dt";

   let description = [{
-    The PTEN DenseTensor dialect.
+    The PHI DenseTensor dialect.
   }];

-  let cppNamespace = "::infrt::pten";
+  let cppNamespace = "::infrt::phi";
 }

-// PTEN DenseTensor related Op.
-class PDT_Op<string mnemonic, list<OpTrait> traits = []> : Op<PTEN_DenseTensorDialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> {
+// PHI DenseTensor related Op.
+class PDT_Op<string mnemonic, list<OpTrait> traits = []> : Op<PHI_DenseTensorDialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> {
 }

 class CreateDenseTensorOp<string place, string dtype, string layout>
   : PDT_Op<"create_dense_tensor." # place # "." # dtype # "." # layout, [NoSideEffect]> {
   let arguments = (ins CPU_Allocator:$allocator, I64ArrayAttr:$dims, I64ArrayAttr:$lod);
-  let results = (outs TensorType:$output);
+  let results = (outs DenseTensor:$output);
 }

 class FillDenseTensorOp<Attr attr_type, string dtype> :
   PDT_Op<"fill_dense_tensor." # dtype> {
   let arguments = (ins
-    TensorType:$input,
+    DenseTensor:$input,
     attr_type:$value
   );
   let results = (outs);
paddle/infrt/dialect/phi/pass/CMakeLists.txt (new file, mode 100644)

core_gather_headers()

gather_srcs(infrt_src SRCS
    proto_arg_map_context.cc
    phi_op_cvt_pass.cc
    kernel_op_desc.cc
    )
paddle/infrt/dialect/phi/pass/kernel_op_desc.cc (new file, mode 100644)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/infrt/dialect/phi/pass/kernel_op_desc.h"
#include <glog/logging.h>
#include "paddle/phi/core/kernel_factory.h"
#include "paddle/phi/core/kernel_registry.h"

namespace infrt {

phi::Backend cvtTarget2Phi(TargetType target) {
  switch (target) {
    case TargetType::CPU:
      return phi::Backend::CPU;
    case TargetType::GPU:
      return phi::Backend::GPU;
    default:
      return phi::Backend::UNDEFINED;
  }
}

TargetType cvtTargetFromPhi(phi::Backend backend) {
  switch (backend) {
    case phi::Backend::CPU:
      return TargetType::CPU;
    case phi::Backend::GPU:
      return TargetType::GPU;
    default:
      return TargetType::UNK;
  }
}

phi::DataType cvtPrecision2Phi(PrecisionType precision) {
  switch (precision) {
    case PrecisionType::FLOAT32:
      return phi::DataType::FLOAT32;
      break;
    case PrecisionType::FLOAT16:
      return phi::DataType::FLOAT16;
    default:
      return phi::DataType::UNDEFINED;
  }
}

PrecisionType cvtPrecisionFromPhi(phi::DataType datatype) {
  switch (datatype) {
    case phi::DataType::FLOAT32:
      return PrecisionType::FLOAT32;
    case phi::DataType::FLOAT16:
      return PrecisionType::FLOAT16;
    default:
      return PrecisionType::UNK;
  }
}

phi::DataLayout cvtLayout2Phi(LayoutType layout) {
  switch (layout) {
    case LayoutType::NCHW:
      return phi::DataLayout::NCHW;
    case LayoutType::NHWC:
      return phi::DataLayout::NHWC;
    default:
      return phi::DataLayout::UNDEFINED;
  }
}

LayoutType cvtLayoutFromPhi(phi::DataLayout layout) {
  switch (layout) {
    case phi::DataLayout::NCHW:
      return LayoutType::NCHW;
    case phi::DataLayout::NHWC:
      return LayoutType::NHWC;
    default:
      return LayoutType::UNK;
  }
}

phi::KernelKey cvtPlace2Phi(const Place& place) {
  return phi::KernelKey(cvtTarget2Phi(place.target),
                        cvtLayout2Phi(place.layout),
                        cvtPrecision2Phi(place.precision));
}

Place cvtPlaceFromPhi(phi::TensorArgDef tensor_arg) {
  return Place(cvtTargetFromPhi(tensor_arg.backend),
               cvtPrecisionFromPhi(tensor_arg.dtype),
               cvtLayoutFromPhi(tensor_arg.layout));
}

std::vector<PhiKernelDesc> getCandidateKernels(
    std::string name, const std::vector<Place>& valid_palces) {
  std::vector<PhiKernelDesc> candidate_kernels;
  PhiKernelDesc phi_kernel_desc;
  phi::KernelKeyMap kernel_key_map =
      phi::KernelFactory::Instance().SelectKernelMap(name);
  for (const Place& place : valid_palces) {
    phi::KernelKey kernel_key = cvtPlace2Phi(place);
    if (kernel_key_map.find(kernel_key) == kernel_key_map.end()) {
      kernel_key = phi::KernelKey(kernel_key.backend(),
                                  phi::DataLayout::ALL_LAYOUT,
                                  kernel_key.dtype());
      if (kernel_key_map.find(kernel_key) == kernel_key_map.end()) continue;
    }
    phi_kernel_desc.kernelType = place;
    phi_kernel_desc.inputsType.clear();
    phi_kernel_desc.outputsType.clear();
    phi::KernelArgsDef args_def = kernel_key_map.at(kernel_key).args_def();
    const paddle::SmallVector<phi::TensorArgDef>& input_arg =
        args_def.input_defs();
    const paddle::SmallVector<phi::TensorArgDef>& output_arg =
        args_def.output_defs();
    for (auto tensor_arg : input_arg) {
      phi_kernel_desc.inputsType.emplace_back(cvtPlaceFromPhi(tensor_arg));
    }
    for (auto tensor_arg : output_arg) {
      phi_kernel_desc.outputsType.emplace_back(cvtPlaceFromPhi(tensor_arg));
    }
    candidate_kernels.emplace_back(phi_kernel_desc);
  }
  return candidate_kernels;
}

}  // namespace infrt
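
A hypothetical driver showing how getCandidateKernels is meant to be queried; the kernel name "abs" and the single CPU place are illustrative, and the sketch assumes phi kernels have already been registered in the process:

#include <vector>

#include <llvm/Support/raw_ostream.h>

#include "paddle/infrt/dialect/phi/pass/kernel_op_desc.h"

// Ask the phi KernelFactory which registered "abs" kernels can run on the
// places we are willing to dispatch to.
void DumpAbsKernels() {
  std::vector<infrt::Place> valid_places = {
      infrt::Place(infrt::TargetType::CPU,
                   infrt::PrecisionType::FLOAT32,
                   infrt::LayoutType::NCHW)};

  // getCandidateKernels falls back to ALL_LAYOUT when no exact layout match
  // is registered, so a NCHW query can still match layout-agnostic kernels.
  for (const infrt::PhiKernelDesc& desc :
       infrt::getCandidateKernels("abs", valid_places)) {
    llvm::outs() << "kernel place: " << desc.kernelType.target << ", "
                 << desc.kernelType.precision << ", "
                 << desc.kernelType.layout << "\n"
                 << "  inputs: " << desc.inputsType.size()
                 << ", outputs: " << desc.outputsType.size() << "\n";
  }
}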
paddle/infrt/dialect/phi/pass/kernel_op_desc.h (new file, mode 100644)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <string>
#include <vector>
#include "paddle/infrt/dialect/infrt/common_type.h"

namespace infrt {

struct PhiKernelDesc {
  std::vector<Place> inputsType;   // kernel input place
  std::vector<Place> outputsType;  // kernel output place
  Place kernelType;                // kernel place
};

std::vector<PhiKernelDesc> getCandidateKernels(
    std::string name, const std::vector<Place>& valid_palces);

}  // namespace infrt
paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.cc (new file, mode 100644)

// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.h"

#include <glog/logging.h>
#include <llvm/ADT/SetVector.h>
#include <mlir/Analysis/SliceAnalysis.h>
#include <mlir/IR/Builders.h>
#include <list>
#include <unordered_set>
#include <vector>

#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
#include "paddle/infrt/dialect/phi/pass/kernel_op_desc.h"
#include "paddle/infrt/dialect/phi/pass/proto_arg_map_context.h"
#include "paddle/phi/core/compat/op_utils.h"
#include "paddle/phi/ops/compat/signatures.h"

namespace infrt {
// Implementation of the phiOpCvtPass.
void phiOpCvtPass::runOnFunction() {
  convertStage();
  diapatchStage();
}

void phiOpCvtPass::convertStage() {
  mlir::Block &body = getFunction().front();
  std::vector<mlir::Operation *> worklist;
  for (auto &op : body.without_terminator()) {
    worklist.push_back(&op);
  }
  mlir::OpBuilder builder(&body, body.begin());
  while (!worklist.empty()) {
    auto *op = worklist.back();
    worklist.pop_back();
    if (op == nullptr) continue;

    std::string op_name = op->getName().getIdentifier().str();

    // only convert op in pd dialect.
    if (op_name.substr(0, 3) != "pd.") continue;
    op_name = op_name.substr(3);
    if (pd_dialect_inputs_info_map_.find(op_name) ==
            pd_dialect_inputs_info_map_.end() ||
        pd_dialect_outputs_info_map_.find(op_name) ==
            pd_dialect_outputs_info_map_.end()) {
      // Todo: print log
      continue;
    }

    phi::KernelSignature kernel_sign =
        phi::OpUtilsMap::Instance().GetArgumentMappingFn(op_name)(
            ProtoArgumentMappingContext(op));
    // resort input&output according to kernel_sign
    ::llvm::SmallVector<mlir::Value, 4> inputs, ori_output;
    ::llvm::SmallVector<mlir::Type, 4> output_types;
    for (const std::string &str : std::get<0>(kernel_sign.args)) {
      if (pd_dialect_inputs_info_map_.at(op_name).count(str) == 0) {
        // Todo: print error log
        return;
      }
      uint8_t index = pd_dialect_inputs_info_map_.at(op_name).at(str);
      inputs.push_back(op->getOperands()[index]);
    }

    for (const std::string &str : std::get<2>(kernel_sign.args)) {
      if (pd_dialect_outputs_info_map_.at(op_name).count(str) == 0) {
        // Todo: print error log
        return;
      }
      uint8_t index = pd_dialect_outputs_info_map_.at(op_name).at(str);
      output_types.push_back(op->getResultTypes()[index]);
      ori_output.push_back(op->getResult(index));
    }

    auto loc = getFunction().getLoc();
    builder.setInsertionPoint(op);
    auto kernel_op = builder.create<infrt::KernelOp>(
        loc, output_types, inputs, kernel_sign.name, op->getAttrDictionary());
    for (size_t index = 0; index < ori_output.size(); ++index) {
      ori_output[index].replaceAllUsesWith(kernel_op.getResult(index));
    }
    if (!op->use_empty()) {
      // Todo: print error log
      return;
    }
    op->erase();
  }
}

void phiOpCvtPass::diapatchStage() {
  std::vector<infrt::KernelOp> worklist;
  mlir::Block &block = getFunction().front();
  for (auto &op : block) {
    infrt::KernelOp kernel_op = ::llvm::dyn_cast_or_null<infrt::KernelOp>(&op);
    if (nullptr != kernel_op) worklist.push_back(kernel_op);
  }
  // ToDo: implementation in the next PR
  while (!worklist.empty()) {
    // infrt::KernelOp kernel_op = worklist.back();
    worklist.pop_back();
    // std::string kernel_name = kernel_op.name().str();
    // std::vector<PhiKernelDesc> candidates =
    //     getCandidateKernels(kernel_name, valid_places_);
  }
}
}  // namespace infrt
paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.h
0 → 100644
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <mlir/Pass/Pass.h>
#include "paddle/infrt/dialect/infrt/common_type.h"
namespace infrt {
/*
* phiOpCvtPass.
*
 * Convert the general operators in the pd dialect to an infrt.kernelOp.
*
* source func:
*
* func @main() -> tensor<?xf32> {
* %a = "pd.feed"()...
* %c = "pd.conv2d"(%a) ...
* %d = "pd.conv3d"(%c) ...
* %f = "pd.conv2d"(%a) ...
* "pd.fetch" (%d, %f)
* }
*
* destination func:
* func @main() -> tensor<?xf32> {
* %a = "pd.feed"()...
* %c = "infrt.kernel"(%a){name = "conv2d"} ...
* %d = "infrt.kernel"(%c){name = "conv3d"}...
* %f = "infrt.kernel"(%a){name = "conv2d"}...
* "pd.fetch" (%d, %f)
* }
*/
class phiOpCvtPass
    : public mlir::PassWrapper<phiOpCvtPass, mlir::FunctionPass> {
 public:
  ::llvm::StringRef getName() const override { return "phiOpCvtPass"; }
  void runOnFunction() override;
  explicit phiOpCvtPass(std::vector<Place> valid_places = std::vector<Place>())
      : valid_places_(valid_places) {}

 private:
  void convertStage();
  void dispatchStage();
  std::vector<Place> valid_places_;
};

}  // namespace infrt
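For reference, the pass is meant to be nested on functions in an mlir::PassManager; the sketch below mirrors the phi_exec.cc driver added later in this commit (context and module are assumed to be set up as they are there):

// Sketch (mirrors phi_exec.cc): run phiOpCvtPass over every function.
mlir::PassManager pm(context);
mlir::OpPassManager& phi_pass_manager = pm.nest<mlir::FuncOp>();
std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
                                           infrt::PrecisionType::FLOAT32,
                                           infrt::LayoutType::NCHW}};
phi_pass_manager.addPass(std::make_unique<infrt::phiOpCvtPass>(valid_places));
if (mlir::failed(pm.run(*module))) {
  // conversion failed; see the full driver in phi_exec.cc below
}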
paddle/infrt/dialect/phi/pass/proto_arg_map_context.cc
0 → 100644
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/dialect/phi/pass/proto_arg_map_context.h"
namespace infrt {

bool ProtoArgumentMappingContext::HasInput(const std::string& name) const {
  if (input_map_.find(name) == input_map_.end()) {
    return false;
  }
  uint8_t index = input_map_.at(name);
  return static_cast<bool>(op_->getOperand(index));
}

bool ProtoArgumentMappingContext::HasOutput(const std::string& name) const {
  if (output_map_.find(name) == output_map_.end()) {
    return false;
  }
  return true;
}

bool ProtoArgumentMappingContext::HasAttr(const std::string& name) const {
  return op_->hasAttr(name);
}

paddle::any ProtoArgumentMappingContext::Attr(const std::string& name) const {
  mlir::Attribute attrs = op_->getAttr(name);
  if (mlir::StringAttr str_attr = attrs.dyn_cast_or_null<mlir::StringAttr>()) {
    return paddle::any(str_attr.str());
  } else {
    // TODO: implementation in the next PR.
    return paddle::any(0);
  }
}

size_t ProtoArgumentMappingContext::InputSize(const std::string& name) const {
  return op_->getNumOperands();
}

size_t ProtoArgumentMappingContext::OutputSize(const std::string& name) const {
  return op_->getNumResults();
}

bool ProtoArgumentMappingContext::IsDenseTensorInput(
    const std::string& name) const {
  return true;
}

bool ProtoArgumentMappingContext::IsSelectedRowsInput(
    const std::string& name) const {
  return false;
}

bool ProtoArgumentMappingContext::IsDenseTensorOutput(
    const std::string& name) const {
  return true;
}

bool ProtoArgumentMappingContext::IsSelectedRowsOutput(
    const std::string& name) const {
  return false;
}

}  // namespace infrt
paddle/infrt/dialect/phi/pass/proto_arg_map_context.h
0 → 100644
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <mlir/IR/Operation.h>
#include <unordered_map>
#include "paddle/infrt/dialect/pd_ops_info.h"
#include "paddle/phi/core/compat/arg_map_context.h"
namespace infrt {

class ProtoArgumentMappingContext : public phi::ArgumentMappingContext {
 public:
  // Only supports ops in the pd dialect.
  explicit ProtoArgumentMappingContext(mlir::Operation* op)
      : op_(op),
        input_map_(pd_dialect_inputs_info_map_.at(
            op->getName().getIdentifier().str().substr(3))),
        output_map_(pd_dialect_outputs_info_map_.at(
            op->getName().getIdentifier().str().substr(3))) {}

  bool HasInput(const std::string& name) const override;
  bool HasOutput(const std::string& name) const override;
  bool HasAttr(const std::string& name) const override;

  // We can't use mlir::Attribute here; it would make phi rely on
  // boost::variant and BlockDesc.
  paddle::any Attr(const std::string& name) const override;

  size_t InputSize(const std::string& name) const override;
  size_t OutputSize(const std::string& name) const override;

  bool IsDenseTensorInput(const std::string& name) const override;
  bool IsSelectedRowsInput(const std::string& name) const override;

  bool IsDenseTensorOutput(const std::string& name) const override;
  bool IsSelectedRowsOutput(const std::string& name) const override;

 private:
  mlir::Operation* op_;
  const std::unordered_map<std::string, uint8_t>& input_map_;
  const std::unordered_map<std::string, uint8_t>& output_map_;
};

}  // namespace infrt
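The point of this adapter is to let phi's argument-mapping functions run directly on an mlir::Operation; convertStage in phi_op_cvt_pass.cc consumes it exactly like this (restated from the code above):

// Restated from convertStage: map a pd op to its phi kernel signature.
phi::KernelSignature kernel_sign =
    phi::OpUtilsMap::Instance().GetArgumentMappingFn(op_name)(
        infrt::ProtoArgumentMappingContext(op));
// std::get<0>(kernel_sign.args) / std::get<2>(kernel_sign.args) then drive
// the operand and result re-ordering for the new infrt.kernel op.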
paddle/infrt/dialect/pten/pten_base.cc → paddle/infrt/dialect/phi/phi_base.cc
... ...
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "paddle/infrt/dialect/pten/pten_base.h"
+#include "paddle/infrt/dialect/phi/phi_base.h"
 #include <mlir/IR/Builders.h>
 #include <mlir/IR/Dialect.h>
... ...
@@ -21,14 +21,14 @@
 #include <mlir/IR/TypeUtilities.h>
 #include <mlir/IR/Types.h>
 #include "paddle/infrt/common/global.h"
-#include "paddle/infrt/dialect/pten/infrt_pten_base.cpp.inc"
-#include "paddle/infrt/dialect/pten/infrt_pten_baseDialect.cpp.inc"
+#include "paddle/infrt/dialect/phi/infrt_phi_base.cpp.inc"
+#include "paddle/infrt/dialect/phi/infrt_phi_baseDialect.cpp.inc"
 namespace infrt {
-namespace pten {
+namespace phi {
-void PTENDialect::printType(::mlir::Type type,
-                            mlir::DialectAsmPrinter& os) const {
+void PHIDialect::printType(::mlir::Type type,
+                           mlir::DialectAsmPrinter& os) const {
   if (type.isa<CPUAllocatorType>()) {
     os << "CPU_Allocator";
     return;
... ...
@@ -48,18 +48,18 @@ void PTENDialect::printType(::mlir::Type type,
   llvm_unreachable("unexpected 'allocator/context' type kind");
 }
-void PTENDialect::initialize() {
+void PHIDialect::initialize() {
   addOperations<
 #define GET_OP_LIST
-#include "paddle/infrt/dialect/pten/infrt_pten_base.cpp.inc"  // NOLINT
+#include "paddle/infrt/dialect/phi/infrt_phi_base.cpp.inc"  // NOLINT
       >();
   addTypes<
 #define GET_TYPEDEF_LIST
-#include "paddle/infrt/dialect/pten/infrt_pten_baseTypes.cpp.inc"  // NOLINT
+#include "paddle/infrt/dialect/phi/infrt_phi_baseTypes.cpp.inc"  // NOLINT
       >();
 }
-mlir::Type PTENDialect::parseType(mlir::DialectAsmParser& parser) const {
+mlir::Type PHIDialect::parseType(mlir::DialectAsmParser& parser) const {
   llvm::StringRef keyword;
   if (parser.parseKeyword(&keyword)) return mlir::Type();
   if (keyword == "CPU_allocator") {
... ...
@@ -77,8 +77,8 @@ mlir::Type PTENDialect::parseType(mlir::DialectAsmParser& parser) const {
     return mlir::Type();
   }
 }
-}  // namespace pten
+}  // namespace phi
 }  // namespace infrt
 #define GET_TYPEDEF_CLASSES
-#include "paddle/infrt/dialect/pten/infrt_pten_baseTypes.cpp.inc"  // NOLINT
+#include "paddle/infrt/dialect/phi/infrt_phi_baseTypes.cpp.inc"  // NOLINT
paddle/infrt/dialect/pten/pten_base.h → paddle/infrt/dialect/phi/phi_base.h
... ...
@@ -19,12 +19,12 @@
 #include <string>
-#include "paddle/infrt/dialect/pten/infrt_pten_base.h.inc"
-#include "paddle/infrt/dialect/pten/infrt_pten_baseDialect.h.inc"
+#include "paddle/infrt/dialect/phi/infrt_phi_base.h.inc"
+#include "paddle/infrt/dialect/phi/infrt_phi_baseDialect.h.inc"
 #define GET_TYPEDEF_CLASSES
-#include "paddle/infrt/dialect/pten/infrt_pten_baseTypes.h.inc"
+#include "paddle/infrt/dialect/phi/infrt_phi_baseTypes.h.inc"
 namespace infrt {
-namespace pten {}  // namespace pten
+namespace phi {}  // namespace phi
 }  // namespace infrt
paddle/infrt/dialect/phi/phi_exec.cc
0 → 100644
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <llvm/Support/CommandLine.h>
#include <mlir/Pass/PassManager.h>
#include <iostream>
#include <string>
#include "paddle/infrt/common/global.h"
#include "paddle/infrt/dialect/mlir_loader.h"
#include "paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.h"
int main(int argc, char** argv) {
  static llvm::cl::opt<std::string> input_file(
      llvm::cl::Positional,
      llvm::cl::desc("Specify input filename"),
      llvm::cl::init("-"));

  llvm::cl::ParseCommandLineOptions(argc, argv);

  mlir::MLIRContext* context = infrt::Global::getMLIRContext();
  auto module = infrt::dialect::LoadMlirFile(input_file.c_str(), context);
  module->dump();
  mlir::PassManager pm(context);

  mlir::OpPassManager& phi_pass_manager = pm.nest<mlir::FuncOp>();
  std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
                                             infrt::PrecisionType::FLOAT32,
                                             infrt::LayoutType::NCHW}};
  phi_pass_manager.addPass(std::make_unique<infrt::phiOpCvtPass>(valid_places));
  if (mlir::failed(pm.run(*module))) {
    std::cout << "\npass failed!\n" << std::endl;
    return 4;
  }
  module->dump();
  return 0;
}
paddle/infrt/dialect/pten/CMakeLists.txt
deleted
100644 → 0
if (NOT INFRT_WITH_PTEN)
    return()
endif()

#mlir_tablegen_on(infrt_pten_base DIALECT pten)
add_mlir_dialect(infrt_pten_base pten)
add_mlir_dialect(infrt_pten_tensor pten_dt)
add_mlir_dialect(infrt_pten_kernel pten_kernel)
#mlir_tablegen_on(infrt_pten_tensor)

gather_srcs(infrt_src SRCS
    pten_base.cc infrt_pten_tensor.cc
    infrt_pten_tensor.cc
    )
paddle/infrt/dialect/pten/infrt_pten_kernel.td
deleted
100644 → 0
#ifndef PTEN_KERNEL
#define PTEN_KERNEL
include "paddle/infrt/dialect/pten/infrt_pten_tensor.td"
def PTEN_KernelDialect : Dialect {
let name = "pten_kernel";
let description = [{
The PTEN Kernel dialect.
}];
let cppNamespace = "::infrt::pten";
}
// PTEN Kernel related ops.
class PDT_Kernel<string mnemonic, list<OpTrait> traits = []> : Op<PTEN_KernelDialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> {
}
def FakeKernelOp : PDT_Kernel<"pten.matmul.host.fp32"> {
let arguments = (ins CPU_Context:$dev_ctx, TensorType:$x, TensorType:$y, BoolAttr:$transpose_x, BoolAttr:$transpose_y);
let results = (outs TensorType:$output);
}
#endif
paddle/infrt/dialect/test_kernels.cc
... ...
@@ -147,7 +147,7 @@ static mlir::LogicalResult verify(BenchmarkOp op) {
   // Verify that the target benchmark region has exactly one return value.
   auto& region = op.region();
   auto& last_op = region.front().back();
-  if (last_op.getName().getStringRef() != "infrt.return") {
+  if (last_op.getName().getStringRef() != "Infrt.return") {
     return op.emitOpError("missing return statement");
   }
   if (last_op.getNumOperands() != 1) {
... ...
paddle/infrt/dialect/test_kernels.td
... ...
@@ -45,7 +45,7 @@ def BenchmarkOp : Test_Op<"benchmark"> {
       // The following code benchmarks the infrt.add.i32 kernel.
       %x = infrt.add.i32 %c, %c
       // The benchmarked function needs to return exactly one value.
-      infrt.return %x : i32
+      Infrt.return %x : i32
     }
   }];
... ...
paddle/infrt/external_kernels/basic.mlir
 // CHECK: basic
 func @basic() -> f32 {
-  %v0 = infrt.constant.f32 1.0
-  %v1 = infrt.constant.f32 2.0
+  %v0 = Infrt.constant.f32 1.0
+  %v1 = Infrt.constant.f32 2.0
   %v2 = "external.add.f32"(%v0, %v1) : (f32, f32) -> f32

   // CHECK: 1
... ...
@@ -17,5 +17,5 @@ func @basic() -> f32 {
   // CHECK: 6
   "external.print.f32"(%v3) : (f32) -> ()

-  infrt.return %v3 : f32
+  Infrt.return %v3 : f32
 }
paddle/infrt/external_kernels/fc.mlir
 // CHECK-LABEL: @fc
-func @fc(%input : !infrt.tensor<X86, NCHW, F32>,
-         %w : !infrt.tensor<X86, NCHW, F32>,
-         %bias : !infrt.tensor<X86, NCHW, F32>) -> !infrt.tensor<X86, NCHW, F32>
+func @fc(%input : !Infrt.tensor<X86, NCHW, F32>,
+         %w : !Infrt.tensor<X86, NCHW, F32>,
+         %bias : !Infrt.tensor<X86, NCHW, F32>) -> !Infrt.tensor<X86, NCHW, F32>
 {
-  %out = dt.create_uninit_tensor.f32 [30, 50] -> !infrt.tensor<X86, NCHW, F32>
-  // dt.fill_tensor_with_constant.f32 (%out : !infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
+  %out = dt.create_uninit_tensor.f32 [30, 50] -> !Infrt.tensor<X86, NCHW, F32>
+  // dt.fill_tensor_with_constant.f32 (%out : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}

   // fc1
-  "external.matmul"(%input, %w, %out) {}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
-  "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
-  "external.sigmoid"(%out, %out) {}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
+  "external.matmul"(%input, %w, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  "external.sigmoid"(%out, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()

   // fc2
-  "external.matmul"(%out, %w, %out) {}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
-  "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
-  "external.sigmoid"(%out, %out) {}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
+  "external.matmul"(%out, %w, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  "external.sigmoid"(%out, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()

-  infrt.return %out : !infrt.tensor<X86, NCHW, F32>
+  Infrt.return %out : !Infrt.tensor<X86, NCHW, F32>
 }

 // CHECK-LABEL: @benchmark
 func @benchmark() {
-  %input = dt.create_uninit_tensor.f32 [30, 50] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%input : !infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
+  %input = dt.create_uninit_tensor.f32 [30, 50] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}

-  %w = dt.create_uninit_tensor.f32 [50, 50] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%w : !infrt.tensor<X86, NCHW, F32>) {value=2.0:f32}
+  %w = dt.create_uninit_tensor.f32 [50, 50] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%w : !Infrt.tensor<X86, NCHW, F32>) {value=2.0:f32}

-  %bias = dt.create_uninit_tensor.f32 [30, 50] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%bias : !infrt.tensor<X86, NCHW, F32>) {value=3.0:f32}
+  %bias = dt.create_uninit_tensor.f32 [30, 50] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%bias : !Infrt.tensor<X86, NCHW, F32>) {value=3.0:f32}

-  infrt.benchmark "add.f32"(
-      %input:!infrt.tensor<X86, NCHW, F32>,
-      %w:!infrt.tensor<X86, NCHW, F32>,
-      %bias:!infrt.tensor<X86, NCHW, F32>)
+  Infrt.benchmark "add.f32"(
+      %input:!Infrt.tensor<X86, NCHW, F32>,
+      %w:!Infrt.tensor<X86, NCHW, F32>,
+      %bias:!Infrt.tensor<X86, NCHW, F32>)
   duration_secs = 100, max_count = 300000, num_warmup_runs = 3
   {
-    %res = infrt.call @fc(%input, %w, %bias) : (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> (!infrt.tensor<X86, NCHW, F32>)
-    infrt.return %res : !infrt.tensor<X86, NCHW, F32>
+    %res = Infrt.call @fc(%input, %w, %bias) : (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> (!Infrt.tensor<X86, NCHW, F32>)
+    Infrt.return %res : !Infrt.tensor<X86, NCHW, F32>
   }

-  infrt.return
+  Infrt.return
 }
paddle/infrt/external_kernels/paddle.mlir
 // CHECK: paddle_func
 func @paddle_func() -> () {
-  %input = dt.create_uninit_tensor.f32 [3, 5] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%input : !infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
+  %input = dt.create_uninit_tensor.f32 [3, 5] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}

-  %w = dt.create_uninit_tensor.f32 [5, 4] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%w : !infrt.tensor<X86, NCHW, F32>) {value=2.0:f32}
+  %w = dt.create_uninit_tensor.f32 [5, 4] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%w : !Infrt.tensor<X86, NCHW, F32>) {value=2.0:f32}

-  %bias = dt.create_uninit_tensor.f32 [4] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%bias : !infrt.tensor<X86, NCHW, F32>) {value=3.0:f32}
+  %bias = dt.create_uninit_tensor.f32 [4] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%bias : !Infrt.tensor<X86, NCHW, F32>) {value=3.0:f32}

-  %out = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%out : !infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
+  %out = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%out : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}

-  "external.fc2"(%input, %w, %bias, %out) {in_num_col_dims=3:i32, test_attr=5:i32}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
+  "external.fc2"(%input, %w, %bias, %out) {in_num_col_dims=3:i32, test_attr=5:i32}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()

   // CHECK-LABEL: tensor: shape=shape[3,5], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-  dt.print_tensor (%input : !infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%input : !Infrt.tensor<X86, NCHW, F32>)

   // CHECK-LABEL: tensor: shape=shape[5,4], values=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
-  dt.print_tensor (%w : !infrt.tensor<X86, NCHW, F32>)
-  dt.print_tensor (%bias : !infrt.tensor<X86, NCHW, F32>)
-  dt.print_tensor (%out : !infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%w : !Infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%bias : !Infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%out : !Infrt.tensor<X86, NCHW, F32>)

   // test external.matmul
-  %out1 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%out1 : !infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
-  "external.matmul"(%input, %w, %out1) {}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
-  dt.print_tensor (%out1 : !infrt.tensor<X86, NCHW, F32>)
+  %out1 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%out1 : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
+  "external.matmul"(%input, %w, %out1) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  dt.print_tensor (%out1 : !Infrt.tensor<X86, NCHW, F32>)

   // test external.elementwise_add
-  %out2 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%out2 : !infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
-  %bias1 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%bias1 : !infrt.tensor<X86, NCHW, F32>) {value=3.0:f32}
-  "external.elementwise_add"(%out1, %bias1, %out2) {axis=-1}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
-  dt.print_tensor (%out2 : !infrt.tensor<X86, NCHW, F32>)
+  %out2 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%out2 : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
+  %bias1 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%bias1 : !Infrt.tensor<X86, NCHW, F32>) {value=3.0:f32}
+  "external.elementwise_add"(%out1, %bias1, %out2) {axis=-1}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  dt.print_tensor (%out2 : !Infrt.tensor<X86, NCHW, F32>)

   // test external.relu
-  %out3 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%out3 : !infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
-  "external.relu"(%out1, %out3) {}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
-  dt.print_tensor (%out3 : !infrt.tensor<X86, NCHW, F32>)
+  %out3 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%out3 : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
+  "external.relu"(%out1, %out3) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  dt.print_tensor (%out3 : !Infrt.tensor<X86, NCHW, F32>)

   // test external.sigmoid
-  %out4 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%out4 : !infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
-  "external.sigmoid"(%out1, %out4) {}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
-  dt.print_tensor (%out4 : !infrt.tensor<X86, NCHW, F32>)
+  %out4 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%out4 : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32}
+  "external.sigmoid"(%out1, %out4) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  dt.print_tensor (%out4 : !Infrt.tensor<X86, NCHW, F32>)

-  infrt.return
+  Infrt.return
 }
paddle/infrt/host_context/mlir_exec.cc
... ...
@@ -28,8 +28,8 @@
 #include "paddle/infrt/kernel/tensor_kernels.h"
 #include "paddle/infrt/kernel/tensor_shape_kernels.h"
 #include "paddle/infrt/kernel/test_kernels.h"
-#ifdef INFRT_WITH_PTEN
-#include "paddle/infrt/kernel/pten/registry.h"
+#ifdef INFRT_WITH_PHI
+#include "paddle/infrt/kernel/phi/registry.h"
 #endif

 static llvm::cl::list<std::string> cl_shared_libs(  // NOLINT
... ...
@@ -56,8 +56,8 @@ int main(int argc, char** argv) {
   kernel::RegisterTensorShapeKernels(&registry);
   kernel::RegisterTensorKernels(&registry);
   kernel::RegisterControlFlowKernels(&registry);
-#ifdef INFRT_WITH_PTEN
-  kernel::RegisterPtenKernels(&registry);
+#ifdef INFRT_WITH_PHI
+  kernel::RegisterPhiKernels(&registry);
 #endif

   // load extra shared library
... ...
paddle/infrt/host_context/mlir_tests/basic.mlir
 // CHECK-LABEL: basic
 func @basic() -> f32 {
-  %v0 = infrt.constant.f32 1.0
-  %v1 = infrt.constant.f32 2.0
-  %v2 = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
+  %v0 = Infrt.constant.f32 1.0
+  %v1 = Infrt.constant.f32 2.0
+  %v2 = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32

   // CHECK: 1
-  "infrt.print.f32"(%v0) : (f32) -> ()
+  "Infrt.print.f32"(%v0) : (f32) -> ()
   // CHECK: 2
-  "infrt.print.f32"(%v1) : (f32) -> ()
+  "Infrt.print.f32"(%v1) : (f32) -> ()
   // CHECK: 3
-  "infrt.print.f32"(%v2) : (f32) -> ()
+  "Infrt.print.f32"(%v2) : (f32) -> ()

-  %v3 = "infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
+  %v3 = "Infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
   // CHECK: 6
-  "infrt.print.f32"(%v3) : (f32) -> ()
+  "Infrt.print.f32"(%v3) : (f32) -> ()

-  infrt.return %v3 : f32
+  Infrt.return %v3 : f32
 }

 // CHECK-LABEL: basic1
 // Check the mlir executor can work with more than one function in a file.
 func @basic1() -> () {
-  %v0 = infrt.constant.f32 1.0
-  "infrt.print.f32"(%v0) : (f32) -> ()
+  %v0 = Infrt.constant.f32 1.0
+  "Infrt.print.f32"(%v0) : (f32) -> ()
   // CHECK: 1

-  infrt.return
+  Infrt.return
 }
\ No newline at end of file
paddle/infrt/host_context/mlir_tests/dense_tensor.mlir
 // CHECK-LABEL: build_tensor1
 func @build_tensor1() {
-  %a = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%a : !infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
+  %a = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%a : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
   // CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-  dt.print_tensor (%a : !infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%a : !Infrt.tensor<X86, NCHW, F32>)

-  infrt.return
+  Infrt.return
 }
paddle/infrt/host_context/mlir_tests/shape.mlir
... ...
@@ -3,5 +3,5 @@ func @build_tensor1() {
   %a = ts.build_shape [1:i64, 57:i64, 92:i64]
   // CHECK: shape[1,57,92]
   ts.print_shape %a
-  infrt.return
+  Infrt.return
 }
\ No newline at end of file
paddle/infrt/host_context/mlir_to_runtime_translate.cc
... ...
@@ -74,7 +74,7 @@ struct MlirToRuntimeTranslator::Impl {
 };

 bool MlirToRuntimeTranslator::EmitConstantOp(mlir::Operation* op) {
-  if (!infrt::Startswith(op->getName().getStringRef().str(), "infrt.constant"))
+  if (!infrt::Startswith(op->getName().getStringRef().str(), "Infrt.constant"))
     return false;

   VLOG(3) << "Emitting constant op [" << op->getName().getStringRef().str()
           << "]";
... ...
@@ -224,7 +224,7 @@ boost::optional<std::vector<double>> MlirToRuntimeTranslator::EmitAttribute(
 }

 static bool IsReturn(mlir::Operation* op) {
-  return op->getName().getStringRef() == "infrt.return";
+  return op->getName().getStringRef() == "Infrt.return";
 }

 bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) {
... ...
@@ -345,7 +345,7 @@ bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) {
 bool MlirToRuntimeTranslator::EmitReturnOp(
     mlir::Operation* op, llvm::SmallVectorImpl<mlir::Value>* results) {
   CHECK(results);
-  if (op->getName().getStringRef() == "infrt.return") {
+  if (op->getName().getStringRef() == "Infrt.return") {
     for (size_t i = 0; i < op->getNumOperands(); i++) {
       results->push_back(op->getOperand(i));
     }
... ...
@@ -418,7 +418,7 @@ bool MlirToRuntimeTranslator::EmitCallOp(mlir::Operation* op,
                                          function_defs_t* function_table) {
   CHECK(op);
   CHECK(function_table);
-  if (op->getName().getStringRef() != "infrt.call") return false;
+  if (op->getName().getStringRef() != "Infrt.call") return false;

   impl_->cur_op =
       impl_->runtime->NewOpExecutable(op->getName().getStringRef().str());
... ...
paddle/infrt/host_context/mlir_to_runtime_translate.h
... ...
@@ -57,7 +57,7 @@ class MlirToRuntimeTranslator {
  protected:
   //! Emit a "infrt.constant.*" operation, return true if succeed.
   bool EmitConstantOp(mlir::Operation* op);
-  //! Emit a "infrt.return" operation.
+  //! Emit a "Infrt.return" operation.
   bool EmitReturnOp(mlir::Operation* op,
                     llvm::SmallVectorImpl<mlir::Value>* results);
   //! Emit a "ts.build_shape" operation.
... ...
paddle/infrt/host_context/mlir_to_runtime_translate_test.cc
... ...
@@ -37,14 +37,14 @@ TEST(MlirToRuntimeTranslate, basic) {
   auto source = R"ROC(
 func @main() -> () {
-  %v0 = infrt.constant.f32 1.0
-  %v1 = infrt.constant.f32 2.0
-  %v2 = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
-  %v3 = "infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
+  %v0 = Infrt.constant.f32 1.0
+  %v1 = Infrt.constant.f32 2.0
+  %v2 = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
+  %v3 = "Infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32

-  "infrt.print.f32"(%v1) : (f32) -> ()
+  "Infrt.print.f32"(%v1) : (f32) -> ()

-  infrt.return
+  Infrt.return
 }
 )ROC";
... ...
@@ -63,14 +63,14 @@ TEST(TestMlir, basic) {
   auto source = R"ROC(
 func @main() -> () {
-  %v0 = infrt.constant.f32 1.0
-  %v1 = infrt.constant.f32 2.0
-  %v2 = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
-  %v3 = "infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
+  %v0 = Infrt.constant.f32 1.0
+  %v1 = Infrt.constant.f32 2.0
+  %v2 = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
+  %v3 = "Infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32

-  "infrt.print.f32"(%v1) : (f32) -> ()
+  "Infrt.print.f32"(%v1) : (f32) -> ()

-  infrt.return
+  Infrt.return
 }
 )ROC";
... ...
@@ -88,18 +88,20 @@ TEST(TestMlir, shadow_copy_tensor_profile) {
   mlir::MLIRContext* context = infrt::Global::getMLIRContext();

   auto head = R"ROC(
-func @predict(%a: !infrt.tensor<X86, NCHW, F32>, %b: !infrt.tensor<X86, NCHW, F32>) -> (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) {
+func @predict(%a: !infrt.dense_tensor<CPU, FP32, NCHW>, %b: !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) {
 )ROC";

   auto tpl0 =
-      "%a{0} = dt.shallow_copy_tensor %a : !infrt.tensor<X86, NCHW, F32> -> "
-      "!infrt.tensor<X86, NCHW, F32>";
+      "%a{0} = dt.shallow_copy_tensor %a : !infrt.dense_tensor<CPU, FP32, "
+      "NCHW> -> "
+      "!infrt.dense_tensor<CPU, FP32, NCHW>";
   auto tpl1 =
-      "%b{0} = dt.shallow_copy_tensor %b : !infrt.tensor<X86, NCHW, F32> -> "
-      "!infrt.tensor<X86, NCHW, F32>";
+      "%b{0} = dt.shallow_copy_tensor %b : !infrt.dense_tensor<CPU, FP32, "
+      "NCHW> -> "
+      "!infrt.dense_tensor<CPU, FP32, NCHW>";

   auto end = R"ROC(
-infrt.return %a0, %b0: !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>
+Infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>
 }
 )ROC";
... ...
paddle/infrt/host_context/value.cc
... ...
@@ -24,7 +24,7 @@ ValueRef::ValueRef(int64_t val) : Shared<Value>(new Value(val)) {}
 ValueRef::ValueRef(float val) : Shared<Value>(new Value(val)) {}
 ValueRef::ValueRef(double val) : Shared<Value>(new Value(val)) {}
 ValueRef::ValueRef(bool val) : Shared<Value>(new Value(val)) {}
-ValueRef::ValueRef(backends::CpuPtenContext&& val)
+ValueRef::ValueRef(backends::CpuPhiContext&& val)
     : Shared<Value>(new Value(std::move(val))) {}
 ValueRef::ValueRef(::phi::CPUContext&& val)
     : Shared<Value>(new Value(std::move(val))) {}
... ...
paddle/infrt/host_context/value.h
... ...
@@ -29,9 +29,9 @@
 #include "paddle/infrt/tensor/tensor_map.h"
 #include "paddle/infrt/tensor/tensor_shape.h"

-#ifdef INFRT_WITH_PTEN
-#include "paddle/infrt/backends/host/pten_allocator.h"
-#include "paddle/infrt/backends/host/pten_context.h"
+#ifdef INFRT_WITH_PHI
+#include "paddle/infrt/backends/host/phi_allocator.h"
+#include "paddle/infrt/backends/host/phi_context.h"
 #include "paddle/phi/backends/all_context.h"
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
... ...
@@ -61,11 +61,11 @@ using ValueVariantType =
                      tensor::DenseHostTensor,
                      MlirFunctionExecutable*,
                      tensor::TensorMap,
-#ifdef INFRT_WITH_PTEN
+#ifdef INFRT_WITH_PHI
                      ::phi::MetaTensor,
                      ::phi::DenseTensor,
-                     backends::CpuPtenAllocator,
-                     backends::CpuPtenContext,
+                     backends::CpuPhiAllocator,
+                     backends::CpuPhiContext,
                      ::phi::CPUContext,
                      std::vector<phi::DenseTensor>,
                      paddle::experimental::ScalarBase<phi::DenseTensor>,
... ...
@@ -108,12 +108,12 @@ class Value : public common::Object {
   explicit Value(tensor::TensorShape&& x) : data(std::move(x)) {}
   explicit Value(tensor::DenseHostTensor&& x) : data(std::move(x)) {}
   explicit Value(MlirFunctionExecutable* x) : data(x) {}
-#ifdef INFRT_WITH_PTEN
-  explicit Value(backends::CpuPtenContext&& x) : data(std::move(x)) {}
+#ifdef INFRT_WITH_PHI
+  explicit Value(backends::CpuPhiContext&& x) : data(std::move(x)) {}
   explicit Value(::phi::CPUContext&& x) : data(std::move(x)) {}
   explicit Value(::phi::DenseTensor&& x) : data(std::move(x)) {}
   explicit Value(::phi::MetaTensor&& x) : data(std::move(x)) {}
-  explicit Value(backends::CpuPtenAllocator&& x) : data(std::move(x)) {}
+  explicit Value(backends::CpuPhiAllocator&& x) : data(std::move(x)) {}
 #endif

   template <typename T>
... ...
@@ -173,7 +173,7 @@ class ValueRef : common::Shared<Value> {
   explicit ValueRef(double val);
   explicit ValueRef(bool val);
   explicit ValueRef(::phi::MetaTensor&& val);
-  explicit ValueRef(backends::CpuPtenContext&& x);
+  explicit ValueRef(backends::CpuPhiContext&& x);
   explicit ValueRef(::phi::CPUContext&& x);
   explicit ValueRef(::phi::DenseTensor&& x);
... ...
paddle/infrt/kernel/CMakeLists.txt
-add_subdirectory(pten)
+add_subdirectory(phi)

 core_gather_headers()

 gather_srcs(infrt_src SRCS
     basic_kernels.cc
-    # pten_kernels.cc
+    # phi_kernels.cc
     test_kernels.cc
     tensor_shape_kernels.cc
     tensor_kernels.cc
... ...
paddle/infrt/kernel/basic_kernels.cc
... ...
@@ -63,24 +63,24 @@ static void PrintString(const std::string &str) {
 void RegisterBasicKernels(host_context::KernelRegistry *registry) {
   RegisterIntBasicKernels(registry);
   RegisterFloatBasicKernels(registry);
-  registry->AddKernel("infrt.get_string", INFRT_KERNEL(GetString));
-  registry->AddKernel("infrt.print_string", INFRT_KERNEL(PrintString));
+  registry->AddKernel("Infrt.get_string", INFRT_KERNEL(GetString));
+  registry->AddKernel("Infrt.print_string", INFRT_KERNEL(PrintString));
 }

 void RegisterIntBasicKernels(host_context::KernelRegistry *registry) {
-  registry->AddKernel("infrt.add.i32", INFRT_KERNEL(add<int32_t>));
-  registry->AddKernel("infrt.sub.i32", INFRT_KERNEL(sub<int32_t>));
-  registry->AddKernel("infrt.mul.i32", INFRT_KERNEL(mul<int32_t>));
-  registry->AddKernel("infrt.div.i32", INFRT_KERNEL(div<int32_t>));
-  registry->AddKernel("infrt.print.i32", INFRT_KERNEL(print<int32_t>));
+  registry->AddKernel("Infrt.add.i32", INFRT_KERNEL(add<int32_t>));
+  registry->AddKernel("Infrt.sub.i32", INFRT_KERNEL(sub<int32_t>));
+  registry->AddKernel("Infrt.mul.i32", INFRT_KERNEL(mul<int32_t>));
+  registry->AddKernel("Infrt.div.i32", INFRT_KERNEL(div<int32_t>));
+  registry->AddKernel("Infrt.print.i32", INFRT_KERNEL(print<int32_t>));
 }

 void RegisterFloatBasicKernels(host_context::KernelRegistry *registry) {
-  registry->AddKernel("infrt.add.f32", INFRT_KERNEL(add<float>));
-  registry->AddKernel("infrt.sub.f32", INFRT_KERNEL(sub<float>));
-  registry->AddKernel("infrt.mul.f32", INFRT_KERNEL(mul<float>));
-  registry->AddKernel("infrt.div.f32", INFRT_KERNEL(div<float>));
-  registry->AddKernel("infrt.print.f32", INFRT_KERNEL(print<float>));
+  registry->AddKernel("Infrt.add.f32", INFRT_KERNEL(add<float>));
+  registry->AddKernel("Infrt.sub.f32", INFRT_KERNEL(sub<float>));
+  registry->AddKernel("Infrt.mul.f32", INFRT_KERNEL(mul<float>));
+  registry->AddKernel("Infrt.div.f32", INFRT_KERNEL(div<float>));
+  registry->AddKernel("Infrt.print.f32", INFRT_KERNEL(print<float>));
 }

 }  // namespace kernel
... ...
paddle/infrt/kernel/control_flow_kernels.cc
... ...
@@ -37,7 +37,7 @@ static void INFRTCall(
 }

 void RegisterControlFlowKernels(host_context::KernelRegistry *registry) {
-  registry->AddKernel("infrt.call", INFRT_KERNEL(INFRTCall));
+  registry->AddKernel("Infrt.call", INFRT_KERNEL(INFRTCall));
 }

 }  // namespace kernel
... ...
paddle/infrt/kernel/pten/CMakeLists.txt → paddle/infrt/kernel/phi/CMakeLists.txt
-if (NOT INFRT_WITH_PTEN)
+if (NOT INFRT_WITH_PHI)
     return()
 endif()
... ...
@@ -11,16 +11,16 @@ gather_srcs(infrt_src SRCS
     allocator_kernels.cc
 )

-set(infrt_register_pten_kernels_gen_source_file ${CMAKE_SOURCE_DIR}/paddle/infrt/kernel/pten/infershaped/infershaped_kernel_launchers.cc)
-set(infrt_register_pten_kernels_gen_file ${CMAKE_SOURCE_DIR}/tools/infrt/get_pten_kernel_function.sh)
+set(infrt_register_phi_kernels_gen_source_file ${CMAKE_SOURCE_DIR}/paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.cc)
+set(infrt_register_phi_kernels_gen_file ${CMAKE_SOURCE_DIR}/tools/infrt/get_phi_kernel_function.sh)
 set(wrapped_infermeta_header_file ${CMAKE_SOURCE_DIR}/paddle/phi/infermeta/generated.h)
 set(wrapped_infermeta_source_file ${CMAKE_SOURCE_DIR}/paddle/phi/infermeta/generated.cc)

 add_custom_command(
-    OUTPUT ${infrt_register_pten_kernels_gen_source_file}
-    COMMAND sh ${infrt_register_pten_kernels_gen_file}
+    OUTPUT ${infrt_register_phi_kernels_gen_source_file}
+    COMMAND sh ${infrt_register_phi_kernels_gen_file}
     DEPENDS ${wrapped_infermeta_header_file} ${wrapped_infermeta_source_file}
-    COMMENT "infrt generate ${infrt_register_pten_kernels_gen_source_file}"
+    COMMENT "infrt generate ${infrt_register_phi_kernels_gen_source_file}"
     VERBATIM)

 cc_library(infrt_naive SRCS infershaped/infershaped_kernel_launcher.cc
... ...
paddle/infrt/kernel/pten/allocator_kernels.cc → paddle/infrt/kernel/phi/allocator_kernels.cc
... ...
@@ -12,14 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/infrt/kernel/pten/allocator_kernels.h"
+#include "paddle/infrt/kernel/phi/allocator_kernels.h"

 namespace infrt {
 namespace kernel {
-namespace pten {
+namespace phi {

-backends::CpuPtenAllocator CreateCpuAllocator() { return {}; }
+backends::CpuPhiAllocator CreateCpuAllocator() { return {}; }

-}  // namespace pten
+}  // namespace phi
 }  // namespace kernel
 }  // namespace infrt
paddle/infrt/kernel/pten/allocator_kernels.h → paddle/infrt/kernel/phi/allocator_kernels.h
... ...
@@ -14,15 +14,15 @@

 #pragma once

-#include "paddle/infrt/backends/host/pten_allocator.h"
+#include "paddle/infrt/backends/host/phi_allocator.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace infrt {
 namespace kernel {
-namespace pten {
+namespace phi {

-backends::CpuPtenAllocator CreateCpuAllocator();
+backends::CpuPhiAllocator CreateCpuAllocator();

-}  // namespace pten
+}  // namespace phi
 }  // namespace kernel
 }  // namespace infrt
paddle/infrt/kernel/pten/context_kernels.cc → paddle/infrt/kernel/phi/context_kernels.cc
... ...
@@ -12,14 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/infrt/kernel/pten/context_kernels.h"
+#include "paddle/infrt/kernel/phi/context_kernels.h"

 namespace infrt {
 namespace kernel {
-namespace pten {
+namespace phi {

-backends::CpuPtenContext CreateCpuContext() { return {}; }
+backends::CpuPhiContext CreateCpuContext() { return {}; }

-}  // namespace pten
+}  // namespace phi
 }  // namespace kernel
 }  // namespace infrt
paddle/infrt/kernel/pten/context_kernels.h → paddle/infrt/kernel/phi/context_kernels.h
... ...
@@ -14,15 +14,15 @@

 #pragma once

-#include "paddle/infrt/backends/host/pten_context.h"
+#include "paddle/infrt/backends/host/phi_context.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace infrt {
 namespace kernel {
-namespace pten {
+namespace phi {

-backends::CpuPtenContext CreateCpuContext();
+backends::CpuPhiContext CreateCpuContext();

-}  // namespace pten
+}  // namespace phi
 }  // namespace kernel
 }  // namespace infrt
paddle/infrt/kernel/pten/dense_tensor_kernels.cc → paddle/infrt/kernel/phi/dense_tensor_kernels.cc
... ...
@@ -12,14 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/infrt/kernel/pten/dense_tensor_kernels.h"
+#include "paddle/infrt/kernel/phi/dense_tensor_kernels.h"

 namespace infrt {
 namespace kernel {
-namespace pten {
+namespace phi {

 ::phi::DenseTensor CreateDenseTensorCpuF32Nchw(
-    backends::CpuPtenAllocator* allocator,
+    backends::CpuPhiAllocator* allocator,
     host_context::Attribute<std::vector<int64_t>> dims,
     host_context::Attribute<std::vector<int64_t>> lod) {
   return ::phi::DenseTensor(allocator,
... ...
@@ -32,6 +32,6 @@ namespace pten {
 void FillDenseTensorF32(::phi::DenseTensor* dense_tensor,
                         host_context::Attribute<std::vector<int64_t>> values) {}

-}  // namespace pten
+}  // namespace phi
 }  // namespace kernel
 }  // namespace infrt
paddle/infrt/kernel/pten/dense_tensor_kernels.h → paddle/infrt/kernel/phi/dense_tensor_kernels.h
... ...
@@ -14,22 +14,22 @@

 #pragma once

-#include "paddle/infrt/backends/host/pten_allocator.h"
+#include "paddle/infrt/backends/host/phi_allocator.h"
 #include "paddle/infrt/host_context/kernel_utils.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace infrt {
 namespace kernel {
-namespace pten {
+namespace phi {

 ::phi::DenseTensor CreateDenseTensorCpuF32Nchw(
-    backends::CpuPtenAllocator* allocator,
+    backends::CpuPhiAllocator* allocator,
     host_context::Attribute<std::vector<int64_t>> dims,
     host_context::Attribute<std::vector<int64_t>> lod);

 void FillDenseTensorF32(::phi::DenseTensor* dense_tensor,
                         host_context::Attribute<std::vector<int64_t>> values);
-}  // namespace pten
+}  // namespace phi
 }  // namespace kernel
 }  // namespace infrt
paddle/infrt/kernel/pten/infershaped/infershape_launchers_test.cc → paddle/infrt/kernel/phi/infershaped/infershape_launchers_test.cc
... ...
@@ -14,9 +14,9 @@

 #include <gtest/gtest.h>

-#include "paddle/infrt/kernel/pten/infershaped/infershaped_kernel_launcher.h"
-#include "paddle/infrt/kernel/pten/infershaped/infershaped_kernel_launchers.h"
-#include "paddle/infrt/kernel/pten/infershaped/infershaped_utils.h"
+#include "paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launcher.h"
+#include "paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.h"
+#include "paddle/infrt/kernel/phi/infershaped/infershaped_utils.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/core/dense_tensor.h"
... ...
paddle/infrt/kernel/pten/infershaped/infershaped_kernel_launcher.cc → paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launcher.cc
... ...
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/infrt/kernel/pten/infershaped/infershaped_kernel_launcher.h"
+#include "paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launcher.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace infrt {
... ...
paddle/infrt/kernel/pten/infershaped/infershaped_kernel_launcher.h → paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launcher.h
File moved
paddle/infrt/kernel/pten/infershaped/infershaped_kernel_launchers.h → paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.h
File moved
paddle/infrt/kernel/pten/infershaped/infershaped_utils.h → paddle/infrt/kernel/phi/infershaped/infershaped_utils.h
File moved
paddle/infrt/kernel/pten/infershaped/pten_kernel_launcher.h → paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h
... ...
@@ -16,8 +16,8 @@
 #include <llvm/ADT/SmallVector.h>
 #include "paddle/infrt/host_context/kernel_utils.h"
-#include "paddle/infrt/kernel/pten/infershaped/infershaped_kernel_launcher.h"
-#include "paddle/infrt/kernel/pten/infershaped/infershaped_utils.h"
+#include "paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launcher.h"
+#include "paddle/infrt/kernel/phi/infershaped/infershaped_utils.h"

 namespace infrt {
 namespace kernel {
... ...
paddle/infrt/kernel/pten/registry.cc → paddle/infrt/kernel/phi/registry.cc
... ...
@@ -12,17 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/infrt/kernel/pten/registry.h"
+#include "paddle/infrt/kernel/phi/registry.h"

 #include <iostream>
 #include <string>

 #include "paddle/infrt/host_context/kernel_registry.h"
 #include "paddle/infrt/host_context/kernel_utils.h"
-#include "paddle/infrt/kernel/pten/allocator_kernels.h"
-#include "paddle/infrt/kernel/pten/context_kernels.h"
-#include "paddle/infrt/kernel/pten/dense_tensor_kernels.h"
-#include "paddle/infrt/kernel/pten/infershaped/pten_kernel_launcher.h"
+#include "paddle/infrt/kernel/phi/allocator_kernels.h"
+#include "paddle/infrt/kernel/phi/context_kernels.h"
+#include "paddle/infrt/kernel/phi/dense_tensor_kernels.h"
+#include "paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h"
 #include "paddle/phi/include/infermeta.h"
 #include "paddle/phi/include/kernels.h"
 #include "paddle/phi/kernels/matmul_kernel.h"
... ...
@@ -32,18 +32,18 @@ using infrt::host_context::Attribute;
 namespace infrt {
 namespace kernel {

-void RegisterPtenKernels(host_context::KernelRegistry* registry) {
-  registry->AddKernel("pten_dt.create_allocator.cpu",
-                      INFRT_KERNEL(infrt::kernel::pten::CreateCpuAllocator));
-  registry->AddKernel("pten_dt.create_context.cpu",
-                      INFRT_KERNEL(infrt::kernel::pten::CreateCpuContext));
+void RegisterPhiKernels(host_context::KernelRegistry* registry) {
+  registry->AddKernel("phi_dt.create_allocator.cpu",
+                      INFRT_KERNEL(infrt::kernel::phi::CreateCpuAllocator));
+  registry->AddKernel("phi_dt.create_context.cpu",
+                      INFRT_KERNEL(infrt::kernel::phi::CreateCpuContext));
   registry->AddKernel(
-      "pten_dt.create_dense_tensor.cpu.f32.nchw",
-      INFRT_KERNEL(infrt::kernel::pten::CreateDenseTensorCpuF32Nchw));
-  registry->AddKernel("pten_dt.fill_dense_tensor.f32",
-                      INFRT_KERNEL(infrt::kernel::pten::FillDenseTensorF32));
+      "phi_dt.create_dense_tensor.cpu.f32.nchw",
+      INFRT_KERNEL(infrt::kernel::phi::CreateDenseTensorCpuF32Nchw));
+  registry->AddKernel("phi_dt.fill_dense_tensor.f32",
+                      INFRT_KERNEL(infrt::kernel::phi::FillDenseTensorF32));
   registry->AddKernel(
-      "pten.matmul.host.fp32",
+      "phi.matmul.host.fp32",
       std::bind(&kernel::KernelLauncherFunc<
                     decltype(&::phi::MatmulKernel<float, ::phi::CPUContext>),
                     &::phi::MatmulKernel<float, ::phi::CPUContext>,
... ...
paddle/infrt/kernel/pten/registry.h → paddle/infrt/kernel/phi/registry.h
... ...
@@ -27,9 +27,9 @@ namespace infrt {
 namespace kernel {

 /**
- * Register all the pten kernels to registry.
+ * Register all the phi kernels to registry.
 */
-void RegisterPtenKernels(host_context::KernelRegistry* registry);
+void RegisterPhiKernels(host_context::KernelRegistry* registry);

 }  // namespace kernel
 }  // namespace infrt
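A short consumption sketch for this hook, matching what mlir_exec.cc above does when built with INFRT_WITH_PHI:

// Sketch (mirrors mlir_exec.cc): add the phi kernels to a kernel registry.
infrt::host_context::KernelRegistry registry;
#ifdef INFRT_WITH_PHI
infrt::kernel::RegisterPhiKernels(&registry);
#endif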
paddle/infrt/kernel/test_kernels.cc
... ...
@@ -193,8 +193,8 @@ tensor::DenseHostTensor ShadowCopyTensor(tensor::DenseHostTensor src) {
 }

 void RegisterTestKernels(host_context::KernelRegistry *registry) {
-  registry->AddKernel("infrt.benchmark", INFRT_KERNEL(benchmark));
-  registry->AddKernel("infrt.test.shadow_copy_tensor",
+  registry->AddKernel("Infrt.benchmark", INFRT_KERNEL(benchmark));
+  registry->AddKernel("Infrt.test.shadow_copy_tensor",
                       INFRT_KERNEL(ShadowCopyTensor));
 }
... ...
paddle/infrt/pass/CMakeLists.txt
0 → 100755
add_subdirectory(phi)
paddle/infrt/tests/dialect/basic.mlir
 // RUN: infrtexec -i %s | FileCheck %s
 // CHECK-LABEL: @basic_f32
 func @basic_f32() -> f32 {
-  %v0 = infrt.constant.f32 1.0
-  %v1 = infrt.constant.f32 2.0
-  %value = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
+  %v0 = Infrt.constant.f32 1.0
+  %v1 = Infrt.constant.f32 2.0
+  %value = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32

   // CHECK-NEXT: 3
-  "infrt.print.f32"(%value) : (f32) -> ()
+  "Infrt.print.f32"(%value) : (f32) -> ()

-  infrt.return %value : f32
+  Infrt.return %value : f32
 }

 /// ================================================================
 /// @caller call the other function @callee
 func @callee.add.f32(%x : f32, %y : f32, %y1 : f32) -> f32 {
-  %z = "infrt.add.f32"(%x, %y) : (f32, f32) -> f32
-  %z1 = "infrt.add.f32"(%z, %y1) : (f32, f32) -> f32
-  infrt.return %z1 : f32
+  %z = "Infrt.add.f32"(%x, %y) : (f32, f32) -> f32
+  %z1 = "Infrt.add.f32"(%z, %y1) : (f32, f32) -> f32
+  Infrt.return %z1 : f32
 }

 // CHECK-LABEL: @caller.add.f32
 func @caller.add.f32() -> f32 {
-  %x = infrt.constant.f32 1.0
-  %y = infrt.constant.f32 2.0
-  %y1 = infrt.constant.f32 3.0
-  %z = infrt.call @callee.add.f32(%x, %y, %y1) : (f32, f32, f32) -> f32
+  %x = Infrt.constant.f32 1.0
+  %y = Infrt.constant.f32 2.0
+  %y1 = Infrt.constant.f32 3.0
+  %z = Infrt.call @callee.add.f32(%x, %y, %y1) : (f32, f32, f32) -> f32

   // CHECK-NEXT: 6
-  "infrt.print.f32"(%z) : (f32) -> ()
-  infrt.return %z : f32
+  "Infrt.print.f32"(%z) : (f32) -> ()
+  Infrt.return %z : f32
 }
 /// <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

 // CHECK-LABEL: @string_test
 func @string_test() {
   %path = infrt.get_string("this is get_string op.")
   // CHECK-LABEL: string = this is get_string op.
   infrt.print_string(%path)
   infrt.return
 }
paddle/infrt/tests/dialect/benchmark.mlir
... ...
@@ -12,13 +12,13 @@ func @benchmark() {
   // CHECK-LABEL: BM:add.f32:CPU 95%(ns)
   // CHECK-LABEL: BM:add.f32:CPU 99%(ns)
   // CHECK-LABEL: BM:add.f32:CPU utilization(percent)
-  infrt.benchmark "add.f32"() duration_secs = 1, max_count = 3, num_warmup_runs = 3
+  Infrt.benchmark "add.f32"() duration_secs = 1, max_count = 3, num_warmup_runs = 3
   {
-    %0 = infrt.constant.f32 1.0
-    %1 = infrt.constant.f32 2.0
-    %res = "infrt.add.f32"(%0, %1) : (f32, f32) -> f32
-    "infrt.print.f32"(%res) : (f32) -> ()
-    infrt.return %res : f32
+    %0 = Infrt.constant.f32 1.0
+    %1 = Infrt.constant.f32 2.0
+    %res = "Infrt.add.f32"(%0, %1) : (f32, f32) -> f32
+    "Infrt.print.f32"(%res) : (f32) -> ()
+    Infrt.return %res : f32
   }
-  infrt.return
+  Infrt.return
 }
paddle/infrt/tests/dialect/dense_tensor.mlir
... ...
@@ -2,23 +2,23 @@
 // CHECK-LABEL: dense_shape0
 func @dense_shape0() {
   %shape = ts.build_shape [1:i64, 57:i64]
-  %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.tensor<X86, NCHW, F32>
+  %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>

-  infrt.return
+  Infrt.return
 }

-func @predict(%a: !infrt.tensor<X86, NCHW, F32>, %b: !infrt.tensor<X86, NCHW, F32>) -> (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) {
-  %a0 = dt.shallow_copy_tensor %a : !infrt.tensor<X86, NCHW, F32> -> !infrt.tensor<X86, NCHW, F32>
-  %b0 = dt.shallow_copy_tensor %b : !infrt.tensor<X86, NCHW, F32> -> !infrt.tensor<X86, NCHW, F32>
+func @predict(%a: !infrt.dense_tensor<CPU, FP32, NCHW>, %b: !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) {
+  %a0 = dt.shallow_copy_tensor %a : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  %b0 = dt.shallow_copy_tensor %b : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW>

-  infrt.return %a0, %b0: !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>
+  Infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>
 }

 func @main() {
   %shape = ts.build_shape [1:i64, 57:i64]
-  %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.tensor<X86, NCHW, F32>
+  %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>

-  %b, %c = infrt.call @predict(%a, %a) : (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>)
-  infrt.return
+  %b, %c = Infrt.call @predict(%a, %a) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
+  Infrt.return
 }
paddle/infrt/tests/dialect/disabled_tensor_map.mlir
 // CHECK-LABEL: @predict
-func @predict(%input:!infrt.tensor<X86, NCHW, F32>, %map: !infrt.tensor_map) -> (!infrt.tensor<X86, NCHW, F32>) {
-  %w = dt.get_param(%map, "create_parameter_0.w_0") -> !infrt.tensor<X86, NCHW, F32>
-  %bias = dt.get_param(%map, "create_parameter_1.w_0") -> !infrt.tensor<X86, NCHW, F32>
+func @predict(%input:!Infrt.tensor<X86, NCHW, F32>, %map: !Infrt.tensor_map) -> (!Infrt.tensor<X86, NCHW, F32>) {
+  %w = dt.get_param(%map, "create_parameter_0.w_0") -> !Infrt.tensor<X86, NCHW, F32>
+  %bias = dt.get_param(%map, "create_parameter_1.w_0") -> !Infrt.tensor<X86, NCHW, F32>

-  %out = dt.create_uninit_tensor.f32 [3, 3] -> !infrt.tensor<X86, NCHW, F32>
+  %out = dt.create_uninit_tensor.f32 [3, 3] -> !Infrt.tensor<X86, NCHW, F32>

   // fc
-  "external.matmul"(%input, %w, %out) {}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
-  "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
-  "external.sigmoid"(%out, %out) {}: (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> ()
-  //dt.print_tensor (%out : !infrt.tensor<X86, NCHW, F32>)
+  "external.matmul"(%input, %w, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  "external.sigmoid"(%out, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> ()
+  //dt.print_tensor (%out : !Infrt.tensor<X86, NCHW, F32>)

-  infrt.return %out : !infrt.tensor<X86, NCHW, F32>
+  Infrt.return %out : !Infrt.tensor<X86, NCHW, F32>
 }

 // CHECK-LABEL: @main
 func @main() {
-  %input = dt.create_uninit_tensor.f32 [3, 3] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%input : !infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
+  %input = dt.create_uninit_tensor.f32 [3, 3] -> !Infrt.tensor<X86, NCHW, F32>
+  dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}

-  %path = infrt.get_string("/infrt/build/paddle/paddle_1.8_fc_model")
+  %path = Infrt.get_string("/Infrt/build/paddle/paddle_1.8_fc_model")

   // CHECK-LABEL: loading params
   %map = dt.load_params(%path)

-  %out = infrt.call @predict(%input, %map): (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor_map) -> (!infrt.tensor<X86, NCHW, F32>)
-  dt.print_tensor (%out : !infrt.tensor<X86, NCHW, F32>)
+  %out = Infrt.call @predict(%input, %map): (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor_map) -> (!Infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%out : !Infrt.tensor<X86, NCHW, F32>)

-  infrt.return
+  Infrt.return
 }
paddle/infrt/tests/dialect/disabled_trt_ops.mlir
@@ -7,15 +7,15 @@ func @main() -> tensor<?xf32> {
%bias1 = "pd.feed"() {name="input4"} : () -> tensor<?xf32>
%bias2 = "pd.feed"() {name="input5"} : () -> tensor<?xf32>
%d = "pd.elementwise_add"(%c, %bias) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d = "pd.elementwise_add"(%c, %bias) {axis=1:
s
i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e = "pd.relu6"(%d) {} : (tensor<?xf32>) -> tensor<?xf32>
%c1 = "pd.matmul"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:
s
i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e1 = "pd.relu"(%d1) {} : (tensor<?xf32>) -> tensor<?xf32>
%c2 = "pd.matmul"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:
s
i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e2 = "pd.relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
"pd.fetch"(%e2) {name="output"} :(tensor<?xf32>)->()
paddle/infrt/tests/dialect/paddle_ops.mlir
@@ -3,8 +3,7 @@
func @ops() {
%a = pd.feed() {name="input0"} : tensor<?xf32>
%b = pd.feed() {name="input1"}: tensor<?xf32>
%d = pd.feed() {name="input3"}: !
I
nfrt.lod_tensor<3x4x9xf32, 0>
%d = pd.feed() {name="input3"}: !
i
nfrt.lod_tensor<3x4x9xf32, 0>
%c = "pd.matmul"(%a, %b) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
infrt.return
Infrt.return
}
paddle/infrt/tests/dialect/pten/dense_tensor.mlir
@@ -2,10 +2,10 @@
// CHECK-LABEL: @basic_tensor
func @basic_tensor() {
%a = "p
ten_dt.create_allocator.cpu" (): () -> !pten
.CPU_allocator
%b = "p
ten_dt.create_context.cpu" (): () -> !pten
.CPU_context
%c = "p
ten_dt.create_dense_tensor.cpu.f32.nchw" (%a) {dims=[1:i64], lod=[1:i64]}: (!pten.CPU_allocator) -> (!infrt.tensor<X86, NCHW, F32
>)
// "p
ten_dt.fill_dense_tensor.f32" (%c) {value=[1.0:f32]} : (!infrt.tensor<X86, NCHW, F32
>) -> ()
%a = "p
hi_dt.create_allocator.cpu" (): () -> !phi
.CPU_allocator
%b = "p
hi_dt.create_context.cpu" (): () -> !phi
.CPU_context
%c = "p
hi_dt.create_dense_tensor.cpu.f32.nchw" (%a) {dims=[1:i64], lod=[1:i64]}: (!phi.CPU_allocator) -> (!infrt.dense_tensor<CPU, FP32, NCHW
>)
// "p
hi_dt.fill_dense_tensor.f32" (%c) {value=[1.0:f32]} : (!Infrt.tensor<CPU, FP32, NCHW
>) -> ()
i
nfrt.return
I
nfrt.return
}
paddle/infrt/tests/dialect/pten/pten_pass.mlir
0 → 100644
// RUN: infrtopt %s | FileCheck %s
// CHECK-LABEL: @ops
func @ops() {
%a = pd.feed() {name="input0"} : !infrt.lod_tensor<?xf32,0>
%b = pd.feed() {name="input1"} : !infrt.lod_tensor<?xf32,0>
%d = pd.feed() {name="input3"} : !infrt.lod_tensor<3x4x9xf32, 0>
%g = "pd.elementwise_add"(%a, %b) {axis=1:si32} : (!infrt.lod_tensor<?xf32,0>, !infrt.lod_tensor<?xf32>) -> tensor<?xf32>
%h = "pd.abs"(%g):(tensor<?xf32>) -> tensor<?xf32>
"pd.fetch"(%h) {name="output"} :(tensor<?xf32>)->()
}
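This new test drives the conversion through the opt tool: the lit RUN line executes infrtopt on the file and pipes the output to FileCheck, which matches it against the CHECK-LABEL pattern above.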
paddle/infrt/tests/dialect/tensor/dense_tensor.mlir
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: dense_shape0
func @dense_shape0() {
-  %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.tensor<X86, NCHW, F32>
+  %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
-  infrt.return
+  Infrt.return
 }
-func @predict(%a: !infrt.tensor<X86, NCHW, F32>, %b: !infrt.tensor<X86, NCHW, F32>) -> (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) {
-  %a0 = dt.shallow_copy_tensor %a : !infrt.tensor<X86, NCHW, F32> -> !infrt.tensor<X86, NCHW, F32>
-  %b0 = dt.shallow_copy_tensor %b : !infrt.tensor<X86, NCHW, F32> -> !infrt.tensor<X86, NCHW, F32>
+func @predict(%a: !infrt.dense_tensor<CPU, FP32, NCHW>, %b: !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) {
+  %a0 = dt.shallow_copy_tensor %a : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  %b0 = dt.shallow_copy_tensor %b : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW>
-  infrt.return %a0, %b0: !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>
+  Infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>
 }
 func @main() {
   %shape = ts.build_shape [1:i64, 57:i64]
-  %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.tensor<X86, NCHW, F32>
+  %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
-  %b, %c = infrt.call @predict(%a, %a) : (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>)
-  infrt.return
+  %b, %c = Infrt.call @predict(%a, %a) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
+  Infrt.return
 }
paddle/infrt/tests/dialect/tensor/naive_kernels.mlir
@@ -2,34 +2,34 @@
// CHECK-LABEL: naive_elementwise_add
func @naive_elementwise_add() {
// create a
-  %a = dt.create_uninit_tensor.f32 [2:i64, 8:i64] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%a : !infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
+  %a = dt.create_uninit_tensor.f32 [2:i64, 8:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%a : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=1.0:f32}
   // create b
-  %b = dt.create_uninit_tensor.f32 [2:i64, 8:i64] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%b : !infrt.tensor<X86, NCHW, F32>) {value=2.0:f32}
+  %b = dt.create_uninit_tensor.f32 [2:i64, 8:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%b : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=2.0:f32}
   // get c
-  %c = dt.naive_elementwise_add.f32(%a, %b) {} : (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> !infrt.tensor<X86, NCHW, F32>
+  %c = dt.naive_elementwise_add.f32(%a, %b) {} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
   // CHECK: tensor: shape=shape[2,8], values=[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
-  dt.print_tensor (%c : !infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%c : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  infrt.return
+  Infrt.return
}
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: naive_matmul
func @naive_matmul() {
// create a
-  %a = dt.create_uninit_tensor.f32 [2:i64, 8:i64] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%a : !infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
+  %a = dt.create_uninit_tensor.f32 [2:i64, 8:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%a : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=1.0:f32}
   // create b
-  %b = dt.create_uninit_tensor.f32 [8:i64, 4:i64] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%b : !infrt.tensor<X86, NCHW, F32>) {value=2.0:f32}
+  %b = dt.create_uninit_tensor.f32 [8:i64, 4:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%b : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=2.0:f32}
   // get c
-  %c = dt.naive_matmul.f32(%a, %b) {} : (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> !infrt.tensor<X86, NCHW, F32>
+  %c = dt.naive_matmul.f32(%a, %b) {} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
   // CHECK: tensor: shape=shape[2,4], values=[16, 16, 16, 16, 16, 16, 16, 16]
-  dt.print_tensor (%c : !infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%c : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  infrt.return
+  Infrt.return
}
paddle/infrt/tests/dialect/tensor/tensor_map.mlir.in
// RUN: infrtexec -i %s | FileCheck %s
func @load_tensor_map() {
-  %path = infrt.get_string("@CMAKE_BINARY_DIR@/multi_fc_model")
+  %path = Infrt.get_string("@CMAKE_BINARY_DIR@/multi_fc_model")
   %map = dt.load_params(%path)
   %size = dt.tensor_map_get_size(%map) -> i32
-  infrt.print.i32 %size
+  Infrt.print.i32 %size
-  %a = dt.tensor_map_get_tensor(%map) {name="fc_bias"} -> !infrt.tensor<X86, NCHW, F32>
+  %a = dt.tensor_map_get_tensor(%map) {name="fc_bias"} -> !infrt.dense_tensor<CPU, FP32, NCHW>
   // CHECK: tensor: shape=shape[2], values=[0, 0]
-  dt.print_tensor (%a : !infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  infrt.return
+  Infrt.return
}
paddle/infrt/tests/dialect/tensor/tensor_shape.mlir
@@ -4,5 +4,5 @@ func @build_tensor1() {
%a = ts.build_shape [1:i64, 57:i64, 92:i64]
// CHECK: shape[1,57,92]
ts.print_shape %a
-  infrt.return
+  Infrt.return
}
paddle/infrt/tests/dialect/tensor/tensor_type.mlir
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: test_tensor_type
func @test_tensor_type() {
-  %a = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%a : !infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
+  %a = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%a : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=1.0:f32}
   // CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-  dt.print_tensor (%a : !infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  infrt.return
+  Infrt.return
}
paddle/infrt/tests/dialect/tensor_shape.mlir
@@ -4,5 +4,5 @@ func @build_tensor1() {
%a = ts.build_shape [1:i64, 57:i64, 92:i64]
// CHECK: shape[1,57,92]
ts.print_shape %a
-  infrt.return
+  Infrt.return
}
paddle/infrt/tests/dialect/tensor_type.mlir
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: test_tensor_type
func @test_tensor_type() {
-  %a = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>
-  dt.fill_tensor_with_constant.f32 (%a : !infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
+  %a = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
+  dt.fill_tensor_with_constant.f32 (%a : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=1.0:f32}
   // CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-  dt.print_tensor (%a : !infrt.tensor<X86, NCHW, F32>)
+  dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>)
-  infrt.return
+  Infrt.return
}
paddle/scripts/infrt_build.sh
@@ -90,7 +90,7 @@ function infrt_gen_and_build() {
     exit 7;
   fi
-  make -j ${parallel_number} infrt infrtopt infrtexec test_infrt_exec trt-exec infrt_lib_dist paddle-mlir-convert;build_error=$?
+  make -j ${parallel_number} infrt infrtopt infrtexec test_infrt_exec trt-exec phi-exec infrt_lib_dist paddle-mlir-convert;build_error=$?
   if [ "$build_error" != 0 ];then
     exit 7;
   fi
tools/infrt/get_pten_kernel_function.sh → tools/infrt/get_phi_kernel_function.sh
@@ -42,12 +42,12 @@ grep PD_REGISTER_INFER_META_FN ${temp_path}/generate.cc \
   | awk -F "\(|,|::|\)" '{print $2, $4}' > ${temp_path}/wrap_info.txt

 #step 3: merge all infos
-# @input1 => pten kernel infomation : kernel_name kernel_key(GPU/CPU, precision, layout)
+# @input1 => phi kernel infomation : kernel_name kernel_key(GPU/CPU, precision, layout)
 # @input2 => information from api.yaml : kernel_name kernel_function_name inferMeta_function_name
 # @input3 => information from wrapped_infermeta_gen : ensure the inferMeta function has
 #            same signature with kernel function
-python3 ${PADDLE_ROOT}/tools/infrt/get_pten_kernel_info.py \
+python3 ${PADDLE_ROOT}/tools/infrt/get_phi_kernel_info.py \
     --paddle_root_path ${PADDLE_ROOT} \
     --kernel_info_file $kernel_register_info_file \
     --infermeta_wrap_file ${temp_path}/wrap_info.txt \
-    --generate_file ${PADDLE_ROOT}/paddle/infrt/kernel/pten/infershaped/infershaped_kernel_launchers.cc
+    --generate_file ${PADDLE_ROOT}/paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.cc
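For context on the awk stage above: splitting a registration line on "(", commas, "::" and ")" leaves the macro name in field 1, so '{print $2, $4}' emits the kernel name and the bare infer-meta function name. Given a hypothetical line PD_REGISTER_INFER_META_FN(add, phi::AddInferMeta), the pair written to wrap_info.txt would be "add AddInferMeta"; the renamed get_phi_kernel_info.py then merges these pairs with the kernel registry and api.yaml data.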
tools/infrt/get_pten_kernel_info.py → tools/infrt/get_phi_kernel_info.py
@@ -21,7 +21,7 @@ from typing import List, Dict, Any
 def parse_args():
-    parser = argparse.ArgumentParser("gather pten kernel and infermate info")
+    parser = argparse.ArgumentParser("gather phi kernel and infermate info")
     parser.add_argument(
         "--paddle_root_path",
         type=str,
@@ -31,7 +31,7 @@ def parse_args():
"--kernel_info_file"
,
type
=
str
,
required
=
True
,
help
=
"kernel info file generated by get_p
ten
_kernel_function.sh."
)
help
=
"kernel info file generated by get_p
hi
_kernel_function.sh."
)
parser
.
add_argument
(
"--infermeta_wrap_file"
,
type
=
str
,
@@ -41,7 +41,7 @@ def parse_args():
"--generate_file"
,
type
=
str
,
required
=
True
,
default
=
"../paddle/infrt/kernel/p
ten
/infershaped/infershaped_kernel_launchers.cc"
,
default
=
"../paddle/infrt/kernel/p
hi
/infershaped/infershaped_kernel_launchers.cc"
,
help
=
"generated file."
)
args
=
parser
.
parse_args
()
return
args
@@ -84,15 +84,15 @@ def merge(infer_meta_data, kernel_data, wrap_data):
 def gen_warn_info():
-    return """// Generated by tools/infrt/gen_pten_kernel_register.py for infrt.
+    return """// Generated by tools/infrt/gen_phi_kernel_register.py for infrt.
 // DO NOT edit or include it within paddle.
 """

 def gen_include_headers():
     return """
-#include "paddle/infrt/kernel/pten/infershaped/infershaped_kernel_launchers.h"
-#include "paddle/infrt/kernel/pten/infershaped/pten_kernel_launcher.h"
+#include "paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.h"
+#include "paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h"
 #include "paddle/phi/backends/all_context.h"
 #include "paddle/phi/include/kernels.h"
 #include "paddle/phi/include/infermeta.h"
@@ -240,8 +240,8 @@ def gen_register_info(resources: List[List[str]]):
     return res

-def gen_pten_kernel_register_code(resources: List[List[str]], src_file_path: str):
+def gen_phi_kernel_register_code(resources: List[List[str]], src_file_path: str):
     source_file = open(src_file_path, 'w')
     source_file.write(gen_warn_info())
     source_file.write(gen_include_headers())
@@ -258,4 +258,4 @@ if __name__ == "__main__":
     kernel_data = get_kernel_info(args.kernel_info_file)
     info_meta_wrap_data = get_kernel_info(args.infermeta_wrap_file)
     out = merge(infer_meta_data, kernel_data, info_meta_wrap_data)
-    gen_pten_kernel_register_code(out, args.generate_file)
+    gen_phi_kernel_register_code(out, args.generate_file)