Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Crayon鑫
Paddle
提交
9fc89b34
P
Paddle
项目概览
Crayon鑫
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
9fc89b34
编写于
3月 16, 2022
作者:
H
huzhiqiang
提交者:
GitHub
3月 16, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Add model check (#40398)
上级
ac5cc136
变更
6
隐藏空白更改
内联
并排
Showing
6 changed file
with
186 addition
and
5 deletion
+186
-5
paddle/infrt/dialect/infrt/ir/infrt_dialect.cc
paddle/infrt/dialect/infrt/ir/infrt_dialect.cc
+7
-0
paddle/infrt/host_context/paddle_mlir.cc
paddle/infrt/host_context/paddle_mlir.cc
+11
-5
paddle/infrt/tests/CMakeLists.txt
paddle/infrt/tests/CMakeLists.txt
+2
-0
paddle/infrt/tests/model/abs_model.py
paddle/infrt/tests/model/abs_model.py
+38
-0
paddle/infrt/tests/model/test_abs.cc
paddle/infrt/tests/model/test_abs.cc
+126
-0
paddle/scripts/infrt_build.sh
paddle/scripts/infrt_build.sh
+2
-0
未找到文件。
paddle/infrt/dialect/infrt/ir/infrt_dialect.cc
浏览文件 @
9fc89b34
...
...
@@ -90,6 +90,9 @@ mlir::Type InfrtDialect::parseType(::mlir::DialectAsmParser &parser) const {
return
LoDTensorType
::
get
(
parser
.
getContext
(),
shape
,
elementType
,
lod_level
);
}
if
(
keyword
==
"dense_tensor_map"
)
{
return
DenseTensorMapType
::
get
(
parser
.
getContext
());
}
if
(
keyword
==
"dense_tensor"
)
{
// parse DenseTensor, for example: !i=Infrt.tensor<X86, CUDA, F32>
llvm
::
StringRef
target
;
...
...
@@ -158,6 +161,10 @@ void InfrtDialect::printType(::mlir::Type type,
<<
lod_tensor_type
.
getLod_level
()
<<
">"
;
return
;
}
if
(
type
.
isa
<
infrt
::
DenseTensorMapType
>
())
{
os
<<
"dense_tensor_map"
;
return
;
}
// print DenseTensorType, for example: !infrt.dense_tensor<CPU, FP32, NCHW>
if
(
type
.
isa
<
DenseTensorType
>
())
{
...
...
paddle/infrt/host_context/paddle_mlir.cc
浏览文件 @
9fc89b34
...
...
@@ -13,15 +13,17 @@
// limitations under the License.
#include "paddle/infrt/host_context/paddle_mlir.h"
#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/pd_ops_info.h"
// Constructor: binds to the process-wide MLIRContext and pre-loads every
// dialect the importer may emit ops from, then creates an empty top-level
// ModuleOp that ImportPaddleModel() fills in.
// NOTE(review): reconstructed from a diff view — the exact set of loaded
// dialects should be confirmed against the repository.
MLIRModelGenImpl::MLIRModelGenImpl()
    : context_(infrt::Global::getMLIRContext()), builder_(context_) {
  // Allow ops from dialects that were not explicitly registered.
  context_->allowUnregisteredDialects();
  context_->getOrLoadDialect<mlir::StandardOpsDialect>();
  context_->getOrLoadDialect<infrt::ts::TensorShapeDialect>();
  context_->getOrLoadDialect<infrt::dt::DTDialect>();
  context_->getOrLoadDialect<mlir::pd::PaddleDialect>();
  context_->getOrLoadDialect<::infrt::InfrtDialect>();
  // Fresh, location-less module to hold the converted program.
  module_ = mlir::ModuleOp::create(mlir::UnknownLoc::get(context_));
}
...
...
@@ -55,7 +57,6 @@ mlir::ModuleOp MLIRModelGenImpl::ImportPaddleModel(
UpdateModelParams
(
program
,
&
mainFunc
);
UpdateModelOps
(
program
);
UpdateModelOutputs
(
program
);
return
module_
;
}
...
...
@@ -171,7 +172,11 @@ void MLIRModelGenImpl::UpdateModelParams(
ConvertDataType
(
var_desc
.
type
().
lod_tensor
().
tensor
().
data_type
(),
builder_
,
&
precision_
);
mlir
::
Type
type_
=
mlir
::
RankedTensorType
::
get
(
dims
,
precision_
);
mlir
::
Type
type_
=
infrt
::
DenseTensorType
::
get
(
context_
,
infrt
::
TargetType
::
CPU
,
infrt
::
PrecisionType
::
FLOAT32
,
infrt
::
LayoutType
::
NCHW
);
auto
op
=
builder_
.
create
<
infrt
::
dt
::
TensorMapGetTensorOp
>
(
mlir
::
UnknownLoc
::
get
(
context_
),
type_
,
map
,
name
);
params_map_
.
insert
(
std
::
pair
<
std
::
string
,
mlir
::
Value
>
(
...
...
@@ -197,8 +202,9 @@ void MLIRModelGenImpl::UpdateModelOutputs(
llvm
::
SmallVector
<
mlir
::
Type
,
4
>
resultTypes
;
llvm
::
SmallVector
<
mlir
::
NamedAttribute
,
4
>
attrs
;
mlir
::
OperationState
state
(
loc
,
mlir
::
ReturnOp
::
getOperationName
(),
::
infrt
::
ReturnOp
::
getOperationName
(),
operands
,
resultTypes
,
attrs
);
...
...
@@ -321,7 +327,7 @@ llvm::SmallVector<mlir::NamedAttribute, 4> MLIRModelGenImpl::GetOpAttributes(
switch
(
type
)
{
ATTR_IMPL_CASE
(
FLOAT
,
f
,
getF32FloatAttr
);
ATTR_IMPL_CASE
(
BOOLEAN
,
b
,
getBoolAttr
);
ATTR_IMPL_CASE
(
INT
,
i
,
getI32IntegerAttr
);
ATTR_IMPL_CASE
(
INT
,
i
,
get
S
I32IntegerAttr
);
ATTR_IMPL_CASE
(
LONG
,
l
,
getI64IntegerAttr
);
ATTR_IMPL_CASE
(
STRING
,
s
,
getStringAttr
);
...
...
paddle/infrt/tests/CMakeLists.txt
浏览文件 @
9fc89b34
cc_test_tiny
(
test_abs_model SRCS model/test_abs.cc DEPS infrt
${
MLIR_IR_LIBS
}
)
configure_file
(
lit.cfg.py.in
"
${
CMAKE_SOURCE_DIR
}
/paddle/infrt/tests/lit.cfg.py"
)
add_test
(
NAME test_infrt_by_lit COMMAND sh -c
"lit -v
${
CMAKE_SOURCE_DIR
}
/paddle/infrt/tests --filter-out
\"
disabled_*
\"
"
...
...
paddle/infrt/tests/model/abs_model.py
0 → 100644
浏览文件 @
9fc89b34
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
paddle
from
paddle.nn
import
Layer
from
paddle.static
import
InputSpec
from
paddle.jit
import
to_static
import
sys
class AbsNet(paddle.nn.Layer):
    """Minimal network that applies an elementwise absolute value to its input.

    Used only to produce a tiny inference model for infrt's model-loading
    tests; the layer has no parameters.
    """

    def __init__(self):
        super(AbsNet, self).__init__()

    def forward(self, x):
        # Elementwise |x|; the input's shape is preserved.
        x = paddle.abs(x)
        return x
if __name__ == '__main__':
    # build network
    model = AbsNet()
    # save inferencing format model
    # Trace the dynamic-graph layer into a static graph with a single input
    # named 'x' of shape [batch, 1, 28, 28] (batch dimension left dynamic).
    net = to_static(
        model,
        input_spec=[InputSpec(
            shape=[None, 1, 28, 28], name='x')])
    # sys.argv[1] is the output path prefix; paddle.jit.save writes
    # <prefix>.pdmodel / <prefix>.pdiparams next to it.
    paddle.jit.save(net, sys.argv[1])
paddle/infrt/tests/model/test_abs.cc
0 → 100644
浏览文件 @
9fc89b34
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <llvm/Support/CommandLine.h>
#include <mlir/Pass/PassManager.h>
#include <iostream>
#include <string>
#include "llvm/Support/DynamicLibrary.h"
#include "paddle/infrt/common/global.h"
#include "paddle/infrt/dialect/mlir_loader.h"
#include "paddle/infrt/host_context/core_runtime.h"
#include "paddle/infrt/host_context/kernel_registry.h"
#include "paddle/infrt/host_context/mlir_to_runtime_translate.h"
#include "paddle/infrt/kernel/basic_kernels.h"
#include "paddle/infrt/kernel/control_flow_kernels.h"
#include "paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.h"
#include "paddle/infrt/kernel/phi/registry.h"
#include "paddle/infrt/kernel/tensor_kernels.h"
#include "paddle/infrt/kernel/tensor_shape_kernels.h"
#include "paddle/infrt/kernel/test_kernels.h"
#include "paddle/infrt/kernel/phi/infershaped/infershaped_utils.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h"
#include "paddle/infrt/dialect/phi/pass/phi_op_convert_pass.h"
#include "paddle/infrt/host_context/paddle_mlir.h"
#include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/phi/ir/infrt_phi_tensor.h"
#include "paddle/infrt/dialect/phi/ir/phi_base.h"
#include "paddle/infrt/dialect/phi/ir/phi_kernels.h"
// Command-line flag: comma-separated list of shared libraries to load at
// runtime; each may export a RegisterKernels() hook (see the TEST below).
static llvm::cl::list<std::string> cl_shared_libs(  // NOLINT
    "shared_libs",
    llvm::cl::desc("Specify shared library with kernels."),
    llvm::cl::ZeroOrMore,
    llvm::cl::MiscFlags::CommaSeparated);
// End-to-end check for the abs model: convert the saved Paddle program to an
// MLIR module, lower it through the phi conversion/fusion passes, then run it
// with the registered host-context kernels.
// Expects ./abs.pdmodel and ./abs.pdiparams in the working directory (they
// are generated by paddle/infrt/tests/model/abs_model.py).
TEST(ABS_MODEL, convert_and_execute) {
  std::string model_file_name = "./abs.pdmodel";
  std::string params_file_name = "./abs.pdiparams";
  // convert model
  MLIRModelGenImpl myGen;
  auto module_ = myGen.ImportPaddleModel(model_file_name, params_file_name);
  module_.dump();
  // pick kernel
  mlir::MLIRContext* context = infrt::Global::getMLIRContext();
  context->allowUnregisteredDialects();
  context->getOrLoadDialect<mlir::StandardOpsDialect>();
  // Fix: the original loaded infrt::InfrtDialect twice; the duplicate
  // getOrLoadDialect call was redundant (idempotent) and is removed.
  context->getOrLoadDialect<infrt::InfrtDialect>();
  context->getOrLoadDialect<infrt::ts::TensorShapeDialect>();
  context->getOrLoadDialect<infrt::dt::DTDialect>();
  context->getOrLoadDialect<mlir::pd::PaddleDialect>();
  context->getOrLoadDialect<infrt::phi::PHIDenseTensorDialect>();
  context->getOrLoadDialect<infrt::phi::PHICPUKernelDialect>();
  context->getOrLoadDialect<infrt::phi::PHIGPUKernelDialect>();
  context->getOrLoadDialect<infrt::phi::PHIDialect>();
  context->loadAllAvailableDialects();

  // Lower pd ops to phi kernels on CPU/FP32/NCHW, then fuse infrt ops.
  mlir::PassManager pm(context);
  mlir::OpPassManager& phi_pass_manager = pm.nest<mlir::FuncOp>();
  std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
                                             infrt::PrecisionType::FLOAT32,
                                             infrt::LayoutType::NCHW}};
  phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places));
  phi_pass_manager.addPass(infrt::createInfrtOpFusePass());
  if (mlir::failed(pm.run(module_))) {
    std::cout << "\npass failed!\n" << std::endl;
  }
  module_.dump();

  // execute
  infrt::host_context::KernelRegistry registry;
  infrt::kernel::RegisterBasicKernels(&registry);
  infrt::kernel::RegisterTestKernels(&registry);
  infrt::kernel::RegisterTensorShapeKernels(&registry);
  infrt::kernel::RegisterTensorKernels(&registry);
  infrt::kernel::RegisterControlFlowKernels(&registry);
  infrt::kernel::RegisterPhiKernels(&registry);
  infrt::kernel::RegisterInferShapeLaunchers(&registry);

  // load extra shared library
  // Each library listed via --shared_libs may export a C symbol
  // "RegisterKernels" taking a KernelRegistry*; call it if present.
  for (const auto& lib_path : cl_shared_libs) {
    std::string err;
    llvm::sys::DynamicLibrary dynLib =
        llvm::sys::DynamicLibrary::getPermanentLibrary(lib_path.c_str(), &err);
    if (!dynLib.isValid()) {
      llvm::errs() << "Load shared library failed. Error: " << err << "\n";
      break;
    }
    if (auto reg_sym = dynLib.SearchForAddressOfSymbol("RegisterKernels")) {
      auto reg_func =
          reinterpret_cast<void (*)(infrt::host_context::KernelRegistry*)>(
              reg_sym);
      reg_func(&registry);
    } else {
      llvm::outs() << "Symbol \"RegisterKernels\" not found in \"" << lib_path
                   << "\". Skip.\n";
    }
  }

  // Translate the lowered module to the runtime and execute it.
  infrt::host_context::TestMlir(module_, &registry);
}
paddle/scripts/infrt_build.sh
浏览文件 @
9fc89b34
...
...
@@ -44,6 +44,8 @@ function update_pd_ops() {
cd
${
PADDLE_ROOT
}
/tools/infrt/
python3 generate_pd_op_dialect_from_paddle_op_maker.py
python3 generate_phi_kernel_dialect.py
# generate test model
python3 paddle/infrt/tests/model/abs_model.py
${
PADDLE_ROOT
}
/build/paddle/infrt/tests/abs
}
function
init
()
{
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录