Commit fcf87582
[infrt] Add linear cpu demo (#40715)

Authored Mar 22, 2022 by huzhiqiang; committed by GitHub on Mar 22, 2022.
Parent: c29f85b6

Showing 7 changed files with 112 additions and 4 deletions (+112 -4).
paddle/infrt/dialect/phi/CMakeLists.txt            +4   -0
paddle/infrt/dialect/phi/ir/infrt_phi_base.td      +2   -2
paddle/infrt/host_context/paddle_mlir.cc           +5   -2
paddle/infrt/tests/CMakeLists.txt                  +1   -0
paddle/infrt/tests/dialect/phi/linear_cpu.mlir.in  +19  -0
paddle/infrt/tests/model/linear.py                 +80  -0
paddle/scripts/infrt_build.sh                      +1   -0
paddle/infrt/dialect/phi/CMakeLists.txt

@@ -5,6 +5,10 @@ endif()
 add_subdirectory(ir)
 add_subdirectory(pass)

 add_executable(phi-ir-exec phi_ir_exec.cc)
 target_link_libraries(phi-ir-exec infrt)

+add_executable(phi-exec phi_exec.cc)
+target_link_libraries(phi-exec infrt)
paddle/infrt/dialect/phi/ir/infrt_phi_base.td

@@ -18,8 +18,8 @@ def PHI_Dialect : Dialect {
 def PhiOpTrait : NativeOpTrait<"PhiOpTrait">;

-class PHI_Type<string type, list<Trait> traits = []>
-    : TypeDef<PHI_Dialect, type, !listconcat(traits, [PhiOpTrait, IsolatedFromAbove])> {}
+class PHI_Type<string type, list<Trait> traits = [], string baseCppClass = "::mlir::Type">
+    : TypeDef<PHI_Dialect, type, !listconcat(traits, [PhiOpTrait, IsolatedFromAbove]), baseCppClass> {}

 def Allocator : PHI_Type<"Allocator"> {
   let mnemonic = "allocator";
paddle/infrt/host_context/paddle_mlir.cc

@@ -16,6 +16,7 @@
 #include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
 #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
 #include "paddle/infrt/dialect/pd/common/pd_ops_info.h"
+#include "paddle/infrt/dialect/phi/ir/infrt_phi_tensor.h"

 MLIRModelGenImpl::MLIRModelGenImpl()
     : context_(infrt::Global::getMLIRContext()), builder_(context_) {
@@ -24,6 +25,8 @@ MLIRModelGenImpl::MLIRModelGenImpl()
   context_->getOrLoadDialect<infrt::dt::DTDialect>();
   context_->getOrLoadDialect<infrt::pd::PaddleDialect>();
   context_->getOrLoadDialect<::infrt::InfrtDialect>();
+  context_->getOrLoadDialect<::infrt::phi::PHIDialect>();
+  context_->getOrLoadDialect<::infrt::phi::PHIDenseTensorDialect>();
   module_ = mlir::ModuleOp::create(mlir::UnknownLoc::get(context_));
 }
@@ -79,7 +82,7 @@ mlir::FuncOp MLIRModelGenImpl::UpdateModelModule(
 llvm::SmallVector<mlir::Type, 4> MLIRModelGenImpl::GetModelInputsType(
     const infrt::paddle::framework_proto::ProgramDesc &program) {
   llvm::SmallVector<mlir::Type, 4> operandTypes;
-  operandTypes.push_back(infrt::DenseHostTensorMapType::get(context_));
+  operandTypes.push_back(infrt::phi::DenseTensorMapType::get(context_));
   for (auto &op_desc : main_block_.ops()) {
     if (op_desc.type() != "feed") continue;
     for (int var_idx = 0; var_idx < op_desc.outputs_size(); ++var_idx) {
@@ -180,7 +183,7 @@ void MLIRModelGenImpl::UpdateModelParams(
                               &precision_);
     mlir::Type type_ = infrt::DenseTensorType::get(
         context_, infrt::TargetType::CPU, precision_, infrt::LayoutType::ANY);
-    auto op = builder_.create<infrt::dt::TensorMapGetTensorOp>(
+    auto op = builder_.create<::infrt::phi::TensorMapGetTensorOp>(
         mlir::UnknownLoc::get(context_), type_, map, name);
     params_map_.insert(std::pair<std::string, mlir::Value>(
         var_desc.name(), op.getOperation()->getResult(0)));
paddle/infrt/tests/CMakeLists.txt

@@ -6,3 +6,4 @@ add_test(NAME test_infrt_by_lit COMMAND sh -c "lit -v ${CMAKE_SOURCE_DIR}/paddle
          DEPENDS infrtopt infrtexec)

 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/dialect/tensor/tensor_map.mlir.in ${CMAKE_CURRENT_SOURCE_DIR}/dialect/tensor/tensor_map.mlir)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/dialect/phi/linear_cpu.mlir.in ${CMAKE_CURRENT_SOURCE_DIR}/dialect/phi/linear_cpu.mlir)
paddle/infrt/tests/dialect/phi/linear_cpu.mlir.in (new file, mode 100644)

// RUN: infrtexec -i %s
module {
func @main_graph(%arg0: !phi.dense_tensor_map, %arg1: !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW> {
%0 = phi_dt.tensor_map_get_tensor(%arg0) {name = "linear_0.w_0"} -> !infrt.dense_tensor<CPU, FP32, NCHW>
%1 = phi_dt.tensor_map_get_tensor(%arg0) {name = "linear_0.b_0"} -> !infrt.dense_tensor<CPU, FP32, NCHW>
%2 = "phi_dt.create_context.cpu"() : () -> !phi.context<CPU>
%5 = "phi_cpu.matmul.float32.any"(%2, %arg1, %0) {trans_x = false, trans_y = false} : (!phi.context<CPU>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
%7 = "phi_cpu.add.float32.any"(%2, %5, %1): (!phi.context<CPU>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
infrt.return %7 : !infrt.dense_tensor<CPU, FP32, NCHW>
}
func @main() {
%ctx = "phi_dt.create_context.cpu" (): () -> !phi.context<CPU>
%1 = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[16:i64, 784:i64]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
%map = phi_dt.load_combined_params(){model_path="@CMAKE_BINARY_DIR@/linear/linear.pdmodel",params_path="@CMAKE_BINARY_DIR@/linear/linear.pdiparams"}
%2 = infrt.call@main_graph(%map, %1) : (!phi.dense_tensor_map, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
phi_dt.print_tensor (%2 : !infrt.dense_tensor<CPU, FP32, NCHW>)
infrt.return
}
}
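
For orientation, the two phi_cpu kernels in @main_graph compute an ordinary affine map, y = x·W + b, over the 16x784 input created in @main and the "linear_0.w_0" / "linear_0.b_0" parameters loaded from the tensor map. A minimal NumPy sketch of the same computation follows; the shapes are taken from IMAGE_SIZE=784 and CLASS_NUM=10 in the training script below, and the random arrays are only placeholders for the saved parameters, not part of the commit:

import numpy as np

x = np.random.random((16, 784)).astype("float32")  # input, dims=[16, 784] as in @main
w = np.random.random((784, 10)).astype("float32")  # placeholder for "linear_0.w_0"
b = np.random.random((10,)).astype("float32")      # placeholder for "linear_0.b_0"

# phi_cpu.matmul.float32.any followed by phi_cpu.add.float32.any
y = x @ w + b
print(y.shape)  # (16, 10)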
paddle/infrt/tests/model/linear.py (new file, mode 100644)

# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# example 1: save layer
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt

BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4

IMAGE_SIZE = 784
CLASS_NUM = 10


# define a random dataset
class RandomDataset(paddle.io.Dataset):
    def __init__(self, num_samples):
        self.num_samples = num_samples

    def __getitem__(self, idx):
        image = np.random.random([IMAGE_SIZE]).astype('float32')
        label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
        return image, label

    def __len__(self):
        return self.num_samples


class LinearNet(nn.Layer):
    def __init__(self):
        super(LinearNet, self).__init__()
        self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

    @paddle.jit.to_static
    def forward(self, x):
        return self._linear(x)


def train(layer, loader, loss_fn, opt):
    for epoch_id in range(EPOCH_NUM):
        for batch_id, (image, label) in enumerate(loader()):
            out = layer(image)
            loss = loss_fn(out, label)
            loss.backward()
            opt.step()
            opt.clear_grad()


# 1. train & save model.

# create network
layer = LinearNet()
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())

# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(
    dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,
    drop_last=True,
    num_workers=2)

# train
train(layer, loader, loss_fn, adam)

# save
path = "linear/linear"
paddle.jit.save(layer, path)
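
As a quick sanity check (not part of the commit), the artifacts written by paddle.jit.save above can be reloaded and executed in Python before they are handed to infrtexec; paddle.jit.load restores the layer from the same "linear/linear" prefix, i.e. the linear.pdmodel and linear.pdiparams files that linear_cpu.mlir consumes. A minimal sketch, assuming the script has already been run from the same working directory:

import numpy as np
import paddle

# Reload the saved layer from the "linear/linear" prefix written by paddle.jit.save.
loaded = paddle.jit.load("linear/linear")
loaded.eval()

# Feed a batch with the same shape used by the MLIR test: [16, 784].
x = paddle.to_tensor(np.random.random((16, 784)).astype("float32"))
out = loaded(x)
print(out.shape)  # expected: [16, 10]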
paddle/scripts/infrt_build.sh

@@ -114,6 +114,7 @@ function create_fake_models() {
     python3 -m pip install *whl
     cd ${PADDLE_ROOT}/build
     python3 ${PADDLE_ROOT}/tools/infrt/fake_models/multi_fc.py
+    python3 ${PADDLE_ROOT}/paddle/infrt/tests/model/linear.py
 }

 function test_infrt() {