BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit ec510bfd (unverified)
Authored on Mar 31, 2022 by 王明冬; committed via GitHub on Mar 31, 2022
[Infrt] add result check for some infrt op. (#41167)
Parent: d006c7ff
Showing 10 changed files with 55 additions and 65 deletions (+55 −65)
paddle/infrt/api/infrt_api.cc                               +2  −2
paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.cc       +1  −1
paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h        +1  −1
paddle/infrt/dialect/phi/CMakeLists.txt                     +0  −4
paddle/infrt/dialect/phi/pass/phi_op_convert_pass.cc        +2  −2
paddle/infrt/dialect/phi/pass/phi_op_convert_pass.h         +2  −2
paddle/infrt/dialect/phi/phi_ir_exec.cc                     +0  −49
paddle/infrt/host_context/mlir_exec.cc                      +2  −2
paddle/infrt/tests/dialect/phi/kernels/resnet50_ops.mlir    +43 −0
paddle/infrt/tests/models/test_abs.cc                       +2  −2
paddle/infrt/api/infrt_api.cc
@@ -238,8 +238,8 @@ int InfRtPredictor::Init(const InfRtConfig& config) {
   std::vector<::infrt::Place> valid_places = {{::infrt::TargetType::CPU,
                                                ::infrt::PrecisionType::FLOAT32,
                                                ::infrt::LayoutType::NCHW}};
-  phi_pass_manager.addPass(::infrt::createPhiOpCvtPass(valid_places));
-  phi_pass_manager.addPass(::infrt::createInfrtOpFusePass());
+  phi_pass_manager.addPass(CreatePhiOpCvtPass(valid_places));
+  phi_pass_manager.addPass(CreateInfrtOpFusePass());
   if (mlir::failed(pm.run(module_op))) {
     std::cout << "\npass failed!\n" << std::endl;
     return 4;
paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.cc
@@ -53,7 +53,7 @@ void InfrtOpFusePass::runOnFunction() {
 }  // namespace

-std::unique_ptr<mlir::Pass> infrt::createInfrtOpFusePass() {
+std::unique_ptr<mlir::Pass> infrt::CreateInfrtOpFusePass() {
   return std::make_unique<InfrtOpFusePass>();
 }
paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h
@@ -19,6 +19,6 @@ namespace infrt {
 /*
  * infrtOpFusePass.
  */
-std::unique_ptr<mlir::Pass> createInfrtOpFusePass();
+std::unique_ptr<mlir::Pass> CreateInfrtOpFusePass();
 }  // namespace infrt
paddle/infrt/dialect/phi/CMakeLists.txt
@@ -5,10 +5,6 @@ endif()
 add_subdirectory(ir)
 add_subdirectory(pass)

-add_executable(phi-ir-exec phi_ir_exec.cc)
-target_link_libraries(phi-ir-exec infrt)
-
 add_executable(phi-exec phi_exec.cc)
 target_link_libraries(phi-exec infrt)
paddle/infrt/dialect/phi/pass/phi_op_convert_pass.cc
@@ -261,11 +261,11 @@ void PhiOpConvertPass::getDependentDialects(
 mlir::PassRegistration<PhiOpConvertPass> phi_op_convert;

-std::unique_ptr<mlir::Pass> infrt::createPhiOpCvtPass(
+std::unique_ptr<mlir::Pass> infrt::CreatePhiOpCvtPass(
     std::vector<Place> valid_places) {
   return std::make_unique<PhiOpConvertPass>(valid_places);
 }

-std::unique_ptr<mlir::Pass> infrt::createPhiOpCvtPass() {
+std::unique_ptr<mlir::Pass> infrt::CreatePhiOpCvtPass() {
   return std::make_unique<PhiOpConvertPass>();
 }
paddle/infrt/dialect/phi/pass/phi_op_convert_pass.h
@@ -21,8 +21,8 @@ namespace infrt {
  * phiOpCvtPass.
  * Convert the general operators from pd Dialect to phi dialect.
  */
-std::unique_ptr<mlir::Pass> createPhiOpCvtPass(std::vector<Place> valid_places);
+std::unique_ptr<mlir::Pass> CreatePhiOpCvtPass(std::vector<Place> valid_places);

-std::unique_ptr<mlir::Pass> createPhiOpCvtPass();
+std::unique_ptr<mlir::Pass> CreatePhiOpCvtPass();

 }  // namespace infrt
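For reference, the call sites updated elsewhere in this commit (infrt_api.cc, mlir_exec.cc, test_abs.cc) wire the renamed creators into an MLIR pass pipeline along the following lines. This is only a condensed illustration of the renamed API drawn from those call sites, not new code introduced by the commit; `context` and `module` come from the surrounding tool.

// Condensed from the updated call sites; shown only to illustrate the renamed API.
mlir::PassManager pm(context);
mlir::OpPassManager& phi_pass_manager = pm.nest<mlir::FuncOp>();
std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
                                           infrt::PrecisionType::FLOAT32,
                                           infrt::LayoutType::NCHW}};
phi_pass_manager.addPass(infrt::CreatePhiOpCvtPass(valid_places));
phi_pass_manager.addPass(infrt::CreateInfrtOpFusePass());
if (mlir::failed(pm.run(*module))) {
  std::cout << "\npass failed!\n" << std::endl;
}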
paddle/infrt/dialect/phi/phi_ir_exec.cc
deleted (file mode 100644 → 0)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <llvm/Support/CommandLine.h>
#include <mlir/Pass/PassManager.h>
#include <iostream>
#include <string>
#include "paddle/infrt/common/global.h"
#include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h"
#include "paddle/infrt/dialect/mlir_loader.h"
#include "paddle/infrt/dialect/phi/pass/phi_op_convert_pass.h"

int main(int argc, char** argv) {
  static llvm::cl::opt<std::string> input_file(
      llvm::cl::Positional,
      llvm::cl::desc("Specify input filename"),
      llvm::cl::init("-"));
  llvm::cl::ParseCommandLineOptions(argc, argv);

  mlir::MLIRContext* context = infrt::Global::getMLIRContext();
  auto module = infrt::dialect::LoadMlirFile(input_file.c_str(), context);
  context->loadAllAvailableDialects();
  module->dump();

  mlir::PassManager pm(context);
  mlir::OpPassManager& phi_pass_manager = pm.nest<mlir::FuncOp>();
  std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
                                             infrt::PrecisionType::FLOAT32,
                                             infrt::LayoutType::NCHW}};
  phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places));
  phi_pass_manager.addPass(infrt::createInfrtOpFusePass());
  if (mlir::failed(pm.run(*module))) {
    std::cout << "\npass failed!\n" << std::endl;
    return 4;
  }
  module->dump();
  return 0;
}
paddle/infrt/host_context/mlir_exec.cc
@@ -98,8 +98,8 @@ int main(int argc, char** argv) {
   std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
                                              infrt::PrecisionType::FLOAT32,
                                              infrt::LayoutType::NCHW}};
-  phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places));
-  phi_pass_manager.addPass(infrt::createInfrtOpFusePass());
+  phi_pass_manager.addPass(infrt::CreatePhiOpCvtPass(valid_places));
+  phi_pass_manager.addPass(infrt::CreateInfrtOpFusePass());
 #endif
   if (mlir::failed(pm.run(*module))) {
paddle/infrt/tests/dialect/phi/kernels/resnet50_ops.mlir
new file (0 → 100644)
// RUN: infrtexec -i %s | FileCheck %s
module {
func @main() {
%ctx = "phi_dt.create_context.cpu" (): () -> !phi.context<CPU>
%0 = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value = 2.0 : f32, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[1, 3, 6, 6]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
%1 = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value = 2.0 : f32, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[1, 3, 3, 3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
%2 = "pd.conv2d"(%0, %1) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [3 : i32, 3 : i32], strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
// CHECK: dense_tensor: shape=shape[1, 1, 5, 5], value=[0,0,0,0,0,0,48,72,72,24,0,72,108,108,36,0,72,108,108,36,0,24,36,36,12]
phi_dt.print_tensor (%2 : !infrt.dense_tensor<CPU, FP32, NCHW>)
%3 = "pd.relu"(%2) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
// dense_tensor: shape=shape[1, 1, 5, 5], value=[0,0,0,0,0,0,48,72,72,24,0,72,108,108,36,0,72,108,108,36,0,24,36,36,12]
phi_dt.print_tensor (%3 : !infrt.dense_tensor<CPU, FP32, NCHW>)
%4 = "pd.pool2d"(%2) {adaptive = false, ceil_mode = false, data_format = "NCHW", exclusive = true, global_pooling = false, ksize = [2 : i32, 2 : i32], padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], pooling_type = "avg", strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
// CHECK: dense_tensor: shape=shape[1, 1, 3, 3], value=[0,0,0,0,75,60,0,60,48]
phi_dt.print_tensor (%4 : !infrt.dense_tensor<CPU, FP32, NCHW>)
%5 = "pd.flatten_contiguous_range"(%4) {start_axis = 1 : si32, stop_axis = 3 : si32} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
// CHECK: dense_tensor: shape=shape[1, 9], value=[0,0,0,0,75,60,0,60,48]
phi_dt.print_tensor (%5 : !infrt.dense_tensor<CPU, FP32, NCHW>)
%6 = "pd.elementwise_add"(%5, %5) {axis = 1 : si32} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
// CHECK: dense_tensor: shape=shape[1, 9], value=[0,0,0,0,150,120,0,120,96]
phi_dt.print_tensor (%6 : !infrt.dense_tensor<CPU, FP32, NCHW>)
%7 = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value = 4.0 : f32, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[9, 3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
%8 = "pd.matmul_v2"(%5, %7) {trans_x = false, trans_y = false} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
// CHECK: dense_tensor: shape=shape[1, 3], value=[972,972,972]
phi_dt.print_tensor (%8 : !infrt.dense_tensor<CPU, FP32, NCHW>)
%scale = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=1.0:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
%bias = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=1.8:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
%mean = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=2.0:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
%var = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=0.0:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
%Y, %MeanOut, %VarianceOut = "pd.batch_norm"(%1, %scale, %bias, %mean, %var) {data_layout = "NCHW", epsilon = 0.01 : f32, momentum = 0.5 : f32} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
// CHECK: dense_tensor: shape=shape[1, 3, 3, 3], value=[1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8]
phi_dt.print_tensor (%Y : !infrt.dense_tensor<CPU, FP32, NCHW>)
infrt.return
}
}
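The CHECK lines above pin down concrete numeric results. The first one (the pd.conv2d output) can be reproduced by hand: with an all-2.0 input of shape 1x3x6x6, an all-2.0 filter of shape 1x3x3x3, paddings of 3 and strides of 2, each output element is 12 times the number of kernel taps that land inside the unpadded input. The standalone sketch below is not part of this commit and assumes pd.conv2d is a standard zero-padded cross-correlation; it prints exactly the values in the first CHECK line. The later pool2d, flatten_contiguous_range, elementwise_add and matmul_v2 expectations follow from that tensor in the same way.

// Standalone verification sketch, not part of the commit.
#include <cstdio>

int main() {
  const int in_h = 6, in_w = 6, channels = 3;  // input is 1x3x6x6, all 2.0
  const int k = 3, pad = 3, stride = 2;        // filter is 1x3x3x3, all 2.0
  const float in_val = 2.0f, w_val = 2.0f;
  const int out_h = (in_h + 2 * pad - k) / stride + 1;  // = 5
  const int out_w = (in_w + 2 * pad - k) / stride + 1;  // = 5

  for (int oy = 0; oy < out_h; ++oy) {
    for (int ox = 0; ox < out_w; ++ox) {
      float acc = 0.0f;
      for (int c = 0; c < channels; ++c) {
        for (int ky = 0; ky < k; ++ky) {
          for (int kx = 0; kx < k; ++kx) {
            const int iy = oy * stride - pad + ky;
            const int ix = ox * stride - pad + kx;
            const bool inside = iy >= 0 && iy < in_h && ix >= 0 && ix < in_w;
            acc += (inside ? in_val : 0.0f) * w_val;  // zero padding outside
          }
        }
      }
      std::printf("%g%c", acc, ox + 1 == out_w ? '\n' : ',');
    }
  }
  // Prints 0,0,0,0,0 / 0,48,72,72,24 / 0,72,108,108,36 / 0,72,108,108,36 /
  // 0,24,36,36,12 -- the flattened form of the first CHECK line above.
  return 0;
}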
paddle/infrt/tests/models/test_abs.cc
@@ -86,8 +86,8 @@ TEST(ABS_MODEL, convert_and_execute) {
   std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
                                              infrt::PrecisionType::FLOAT32,
                                              infrt::LayoutType::NCHW}};
-  phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places));
-  phi_pass_manager.addPass(infrt::createInfrtOpFusePass());
+  phi_pass_manager.addPass(infrt::CreatePhiOpCvtPass(valid_places));
+  phi_pass_manager.addPass(infrt::CreateInfrtOpFusePass());
   if (mlir::failed(pm.run(module_))) {
     std::cout << "\npass failed!\n" << std::endl;