Commit 1b747de7 (unverified)
Authored by Peihan on Aug 18, 2021; committed via GitHub on Aug 18, 2021.

add paddle detection model in pr-ci-inference (#34986)

Parent: 1b71a718
Showing 6 changed files with 571 additions and 48 deletions (+571 −48)
paddle/fluid/inference/api/analysis_config.cc                  +1    −1
paddle/fluid/inference/tests/infer_ut/run.sh                   +75   −47
paddle/fluid/inference/tests/infer_ut/test_ppyolo_mbv3.cc      +156  −0
paddle/fluid/inference/tests/infer_ut/test_ppyolov2_r50vd.cc   +155  −0
paddle/fluid/inference/tests/infer_ut/test_resnet50.cc         +29   −0
paddle/fluid/inference/tests/infer_ut/test_yolov3.cc           +155  −0
paddle/fluid/inference/api/analysis_config.cc
...
@@ -740,7 +740,7 @@ std::string AnalysisConfig::Summary() {
   // cpu info
   os.InsertRow(
       {"cpu_math_thread", std::to_string(cpu_math_library_num_threads_)});
-  os.InsertRow({"enable_mkdlnn", use_mkldnn_ ? "true" : "false"});
+  os.InsertRow({"enable_mkldnn", use_mkldnn_ ? "true" : "false"});
   os.InsertRow(
       {"mkldnn_cache_capacity", std::to_string(mkldnn_cache_capacity_)});
   os.InsetDivider();
...
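This hunk fixes a label typo in the table that AnalysisConfig::Summary() prints: the row "enable_mkdlnn" becomes "enable_mkldnn". For context, a minimal sketch of where that row surfaces; the model paths are hypothetical and the header name assumes the standard inference install layout:

```cpp
#include <iostream>

#include "paddle_inference_api.h"

int main() {
  paddle_infer::Config config;
  config.SetModel("./model/model.pdmodel",  // hypothetical model files
                  "./model/model.pdiparams");
  config.DisableGpu();
  config.EnableMKLDNN();                   // reported as "enable_mkldnn"
  config.SetMkldnnCacheCapacity(10);       // reported as "mkldnn_cache_capacity"
  config.SetCpuMathLibraryNumThreads(10);  // reported as "cpu_math_thread"
  std::cout << config.Summary() << std::endl;
  return 0;
}
```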
paddle/fluid/inference/tests/infer_ut/run.sh
...
@@ -24,8 +24,14 @@ MSVC_STATIC_CRT=$6
 inference_install_dir=${PADDLE_ROOT}/build/paddle_inference_install_dir
 EXIT_CODE=0 # init default exit code
+export RED='\033[0;31m' # red color
+export NC='\033[0m' # no color
+export YELLOW='\033[33m' # yellow color
 cd `dirname $0`
 current_dir=`pwd`
+build_dir=${current_dir}/build
+log_dir=${current_dir}/log
 if [ $2 == ON ]; then
     # You can export yourself if move the install path
     MKL_LIB=${inference_install_dir}/third_party/install/mklml/lib
...
@@ -83,24 +89,42 @@ for model_name in $nlp_download_list; do
     download $url_prefix $model_name
 done
+det_download_list='yolov3 ppyolo_mbv3 ppyolov2_r50vd'
+for model_name in $det_download_list; do
+    url_prefix="https://paddle-qa.bj.bcebos.com/inference_model/2.1.1/detection"
+    download $url_prefix $model_name
+done
+
+function compile_test() {
+    mkdir -p ${build_dir}
+    cd ${build_dir}
+    TEST_NAME=$1
+    cmake .. -DPADDLE_LIB=${inference_install_dir} \
+        -DWITH_MKL=$TURN_ON_MKL \
+        -DDEMO_NAME=${TEST_NAME} \
+        -DWITH_GPU=$TEST_GPU_CPU \
+        -DWITH_STATIC_LIB=OFF \
+        -DUSE_TENSORRT=$USE_TENSORRT \
+        -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
+        -DWITH_GTEST=ON
+    make -j$(nproc)
+    cd -
+}
 # compile and run test
 cd $current_dir
-mkdir -p build
-cd build
+mkdir -p ${build_dir}
+mkdir -p ${log_dir}
+cd ${build_dir}
 rm -rf *
-# ---------tensorrt resnet50 on linux---------
+# ---------tensorrt gpu tests on linux---------
 if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
-    rm -rf *
-    cmake .. -DPADDLE_LIB=${inference_install_dir} \
-        -DWITH_MKL=$TURN_ON_MKL \
-        -DDEMO_NAME=test_resnet50 \
-        -DWITH_GPU=$TEST_GPU_CPU \
-        -DWITH_STATIC_LIB=OFF \
-        -DUSE_TENSORRT=$USE_TENSORRT \
-        -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
-        -DWITH_GTEST=ON
-    make -j$(nproc)
+    printf "${YELLOW} start test_resnet50 ${NC} \n";
+    compile_test "test_resnet50"
     ./test_resnet50 \
         --modeldir=$DATA_DIR/resnet50/resnet50 \
         --gtest_output=xml:test_resnet50.xml
...
@@ -108,18 +132,9 @@ if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
         echo "test_resnet50 runs failed" >> ${current_dir}/build/test_summary.txt
         EXIT_CODE=1
     fi
 fi

 # ---------tensorrt det_mv3_db on linux---------
 if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
-    cmake .. -DPADDLE_LIB=${inference_install_dir} \
-        -DWITH_MKL=$TURN_ON_MKL \
-        -DDEMO_NAME=test_det_mv3_db \
-        -DWITH_GPU=$TEST_GPU_CPU \
-        -DWITH_STATIC_LIB=OFF \
-        -DUSE_TENSORRT=$USE_TENSORRT \
-        -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
-        -DWITH_GTEST=ON
-    make -j$(nproc)
+    printf "${YELLOW} start test_det_mv3_db ${NC} \n";
+    compile_test "test_det_mv3_db"
     ./test_det_mv3_db \
         --modeldir=$DATA_DIR/ocr_det_mv3_db/ocr_det_mv3_db \
...
@@ -128,19 +143,9 @@ if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
         echo "test_det_mv3_db runs failed" >> ${current_dir}/build/test_summary.txt
         EXIT_CODE=1
     fi
 fi

 # ---------tensorrt LeViT on linux---------
 if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
-    cmake .. -DPADDLE_LIB=${inference_install_dir} \
-        -DWITH_MKL=$TURN_ON_MKL \
-        -DDEMO_NAME=test_LeViT \
-        -DWITH_GPU=$TEST_GPU_CPU \
-        -DWITH_STATIC_LIB=OFF \
-        -DUSE_TENSORRT=$USE_TENSORRT \
-        -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
-        -DWITH_GTEST=ON
-    make -j$(nproc)
+    printf "${YELLOW} start test_LeViT ${NC} \n";
+    compile_test "test_LeViT"
     ./test_LeViT \
         --modeldir=$DATA_DIR/LeViT/LeViT \
         --gtest_output=xml:test_LeViT.xml
...
@@ -148,19 +153,9 @@ if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
         echo "test_LeViT runs failed" >> ${current_dir}/build/test_summary.txt
         EXIT_CODE=1
     fi
 fi

 # ---------gpu ernie_text_cls on linux---------
 if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
-    cmake .. -DPADDLE_LIB=${inference_install_dir} \
-        -DWITH_MKL=$TURN_ON_MKL \
-        -DDEMO_NAME=test_ernie_text_cls \
-        -DWITH_GPU=$TEST_GPU_CPU \
-        -DWITH_STATIC_LIB=OFF \
-        -DUSE_TENSORRT=$USE_TENSORRT \
-        -DTENSORRT_ROOT=$TENSORRT_ROOT_DIR \
-        -DWITH_GTEST=ON
-    make -j$(nproc)
+    printf "${YELLOW} start test_ernie_text_cls ${NC} \n";
+    compile_test "test_ernie_text_cls"
     ./test_ernie_text_cls \
         --modeldir=$DATA_DIR/ernie_text_cls/ernie_text_cls \
         --gtest_output=xml:test_ernie_text_cls.xml
...
@@ -168,8 +163,41 @@ if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
         echo "test_ernie_text_cls runs failed" >> ${current_dir}/build/test_summary.txt
         EXIT_CODE=1
     fi
+
+    printf "${YELLOW} start test_yolov3 ${NC} \n";
+    compile_test "test_yolov3"
+    ./test_yolov3 \
+        --modeldir=$DATA_DIR/yolov3/yolov3 \
+        --gtest_output=xml:test_yolov3.xml
+    if [ $? -ne 0 ]; then
+        echo "test_yolov3 runs failed" >> ${current_dir}/build/test_summary.txt
+        EXIT_CODE=1
+    fi
+
+    printf "${YELLOW} start test_ppyolo_mbv3 ${NC} \n";
+    compile_test "test_ppyolo_mbv3"
+    ./test_ppyolo_mbv3 \
+        --modeldir=$DATA_DIR/ppyolo_mbv3/ppyolo_mbv3 \
+        --gtest_output=xml:test_ppyolo_mbv3.xml
+    if [ $? -ne 0 ]; then
+        echo "test_ppyolo_mbv3 runs failed" >> ${current_dir}/build/test_summary.txt
+        EXIT_CODE=1
+    fi
+
+    printf "${YELLOW} start test_ppyolov2_r50vd ${NC} \n";
+    compile_test "test_ppyolov2_r50vd"
+    ./test_ppyolov2_r50vd \
+        --modeldir=$DATA_DIR/ppyolov2_r50vd/ppyolov2_r50vd \
+        --gtest_output=xml:test_ppyolov2_r50vd.xml
+    if [ $? -ne 0 ]; then
+        echo "test_ppyolov2_r50vd runs failed" >> ${current_dir}/build/test_summary.txt
+        EXIT_CODE=1
+    fi
+
+    cp ./*.xml ${log_dir};
 fi

 if [[ -f ${current_dir}/build/test_summary.txt ]]; then
     echo "=====================test summary======================"
     cat ${current_dir}/build/test_summary.txt
...
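The run.sh changes download the three detection models from paddle-qa.bj.bcebos.com, funnel every demo build through the new compile_test helper instead of six repeated cmake/make blocks, and copy each test's gtest XML report into ${log_dir}. As a usage sketch: the script takes positional arguments, but only $2 (the MKL switch) and $6 (MSVC_STATIC_CRT) are visible in this diff, so the full argument order below is an assumption inferred from the variables the script references:

```bash
# Assumed argument order (only $2 and $6 are confirmed by this diff):
#   $1 PADDLE_ROOT   $2 TURN_ON_MKL        $3 TEST_GPU_CPU
#   $4 DATA_DIR      $5 TENSORRT_ROOT_DIR  $6 MSVC_STATIC_CRT
bash run.sh /workspace/Paddle ON ON /workspace/infer_data /usr/local/TensorRT OFF
# Per-test JUnit XML (test_*.xml) is copied to ${current_dir}/log; failures are
# appended to build/test_summary.txt with EXIT_CODE set to 1.
```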
paddle/fluid/inference/tests/infer_ut/test_ppyolo_mbv3.cc (new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "test_suite.h" // NOLINT

DEFINE_string(modeldir, "", "Directory of the inference model.");

namespace paddle_infer {

std::map<std::string, paddle::test::Record> PrepareInput(int batch_size) {
  // init input data
  int channel = 3;
  int width = 320;
  int height = 320;
  paddle::test::Record image, im_shape, scale_factor;
  int input_num = batch_size * channel * width * height;
  int shape_num = batch_size * 2;
  std::vector<float> image_data(input_num, 1);
  for (int i = 1; i < input_num + 1; ++i) {
    image_data[i] = i % 10 * 0.5;
  }
  std::vector<float> im_shape_data(shape_num, 1);
  std::vector<float> scale_factor_data(shape_num, 1);

  image.data = std::vector<float>(image_data.begin(), image_data.end());
  image.shape = std::vector<int>{batch_size, channel, width, height};
  image.type = paddle::PaddleDType::FLOAT32;
  im_shape.data =
      std::vector<float>(im_shape_data.begin(), im_shape_data.end());
  im_shape.shape = std::vector<int>{batch_size, 2};
  im_shape.type = paddle::PaddleDType::FLOAT32;
  scale_factor.data =
      std::vector<float>(scale_factor_data.begin(), scale_factor_data.end());
  scale_factor.shape = std::vector<int>{batch_size, 2};
  scale_factor.type = paddle::PaddleDType::FLOAT32;

  std::map<std::string, paddle::test::Record> input_data_map;
  input_data_map.insert({"image", image});
  input_data_map.insert({"im_shape", im_shape});
  input_data_map.insert({"scale_factor", scale_factor});
  return input_data_map;
}

TEST(test_ppyolo_mbv3, multi_thread4_trt_fp32_bz2) {
  int thread_num = 4;
  // init input data
  auto input_data_map = PrepareInput(2);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare groudtruth config
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/model.pdmodel",
                        FLAGS_modeldir + "/model.pdiparams");
  config_no_ir.EnableUseGpu(100, 0);
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/model.pdmodel",
                  FLAGS_modeldir + "/model.pdiparams");
  config.EnableUseGpu(100, 0);
  config.EnableTensorRtEngine(1 << 20, 2, 3,
                              paddle_infer::PrecisionType::kFloat32, false,
                              false);
  LOG(INFO) << config.Summary();
  // get groudtruth by disbale ir
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &input_data_map,
                         &truth_output_data, 1);
  // get infer results from multi threads
  std::vector<std::thread> threads;
  services::PredictorPool pred_pool(config, thread_num);
  for (int i = 0; i < thread_num; ++i) {
    threads.emplace_back(paddle::test::SingleThreadPrediction,
                         pred_pool.Retrive(i), &input_data_map,
                         &infer_output_data, 2);
  }
  // thread join & check outputs
  for (int i = 0; i < thread_num; ++i) {
    LOG(INFO) << "join tid : " << i;
    threads[i].join();
    CompareRecord(&truth_output_data, &infer_output_data, 1e-2);
    // TODO(OliverLPH): precision set to 1e-2 since input is fake, change to
    // real input later
  }
  std::cout << "finish multi-thread test" << std::endl;
}

TEST(DISABLED_test_ppyolo_mbv3, multi_thread4_mkl_bz2) {
  // TODO(OliverLPH): mkldnn multi thread will fail
  int thread_num = 4;
  // init input data
  auto input_data_map = PrepareInput(2);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare groudtruth config
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/model.pdmodel",
                        FLAGS_modeldir + "/model.pdiparams");
  config_no_ir.DisableGpu();
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/model.pdmodel",
                  FLAGS_modeldir + "/model.pdiparams");
  config.DisableGpu();
  config.EnableMKLDNN();
  config.SetMkldnnCacheCapacity(10);
  config.SetCpuMathLibraryNumThreads(10);
  LOG(INFO) << config.Summary();
  // get groudtruth by disbale ir
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &input_data_map,
                         &truth_output_data, 1);
  // get infer results from multi threads
  std::vector<std::thread> threads;
  services::PredictorPool pred_pool(config, thread_num);
  for (int i = 0; i < thread_num; ++i) {
    threads.emplace_back(paddle::test::SingleThreadPrediction,
                         pred_pool.Retrive(i), &input_data_map,
                         &infer_output_data, 2);
  }
  // thread join & check outputs
  for (int i = 0; i < thread_num; ++i) {
    LOG(INFO) << "join tid : " << i;
    threads[i].join();
    CompareRecord(&truth_output_data, &infer_output_data, 1e-4);
  }
  std::cout << "finish multi-thread test" << std::endl;
}

}  // namespace paddle_infer

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::google::ParseCommandLineFlags(&argc, &argv, true);
  return RUN_ALL_TESTS();
}
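The test drives the model through the paddle::test harness in test_suite.h (Record, SingleThreadPrediction, CompareRecord, PredictorPool). For orientation, a minimal sketch of feeding the same three inputs ("image", "im_shape", "scale_factor") that PrepareInput builds to a predictor via the plain paddle_infer C++ API; the model path is hypothetical, and the [N, 6] output layout noted below is the usual PaddleDetection export convention rather than something this diff establishes:

```cpp
#include <functional>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>

#include "paddle_inference_api.h"

int main() {
  paddle_infer::Config config;
  config.SetModel("./ppyolo_mbv3/model.pdmodel",  // hypothetical location
                  "./ppyolo_mbv3/model.pdiparams");
  config.EnableUseGpu(100, 0);
  auto predictor = paddle_infer::CreatePredictor(config);

  const int batch = 1, channel = 3, width = 320, height = 320;
  std::vector<float> image(batch * channel * width * height, 1.0f);
  std::vector<float> im_shape = {320.0f, 320.0f};  // input h, w
  std::vector<float> scale_factor = {1.0f, 1.0f};  // resize ratio h, w

  auto feed = [&](const std::string& name, const std::vector<int>& shape,
                  std::vector<float>& data) {
    auto handle = predictor->GetInputHandle(name);
    handle->Reshape(shape);
    handle->CopyFromCpu(data.data());
  };
  feed("image", {batch, channel, width, height}, image);
  feed("im_shape", {batch, 2}, im_shape);
  feed("scale_factor", {batch, 2}, scale_factor);

  predictor->Run();

  // PaddleDetection exported models typically emit an [N, 6] tensor of
  // [class_id, score, x1, y1, x2, y2] as their first output.
  auto out = predictor->GetOutputHandle(predictor->GetOutputNames()[0]);
  auto shape = out->shape();
  int numel = std::accumulate(shape.begin(), shape.end(), 1,
                              std::multiplies<int>());
  std::vector<float> boxes(numel);
  out->CopyToCpu(boxes.data());
  std::cout << "detected " << shape[0] << " boxes" << std::endl;
  return 0;
}
```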
paddle/fluid/inference/tests/infer_ut/test_ppyolov2_r50vd.cc (new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "test_suite.h" // NOLINT

DEFINE_string(modeldir, "", "Directory of the inference model.");

namespace paddle_infer {

std::map<std::string, paddle::test::Record> PrepareInput(int batch_size) {
  // init input data
  int channel = 3;
  int width = 640;
  int height = 640;
  paddle::test::Record image, im_shape, scale_factor;
  int input_num = batch_size * channel * width * height;
  int shape_num = batch_size * 2;
  std::vector<float> image_data(input_num, 1);
  for (int i = 1; i < input_num + 1; ++i) {
    image_data[i] = i % 10 * 0.5;
  }
  std::vector<float> im_shape_data(shape_num, 1);
  std::vector<float> scale_factor_data(shape_num, 1);

  image.data = std::vector<float>(image_data.begin(), image_data.end());
  image.shape = std::vector<int>{batch_size, channel, width, height};
  image.type = paddle::PaddleDType::FLOAT32;
  im_shape.data =
      std::vector<float>(im_shape_data.begin(), im_shape_data.end());
  im_shape.shape = std::vector<int>{batch_size, 2};
  im_shape.type = paddle::PaddleDType::FLOAT32;
  scale_factor.data =
      std::vector<float>(scale_factor_data.begin(), scale_factor_data.end());
  scale_factor.shape = std::vector<int>{batch_size, 2};
  scale_factor.type = paddle::PaddleDType::FLOAT32;

  std::map<std::string, paddle::test::Record> input_data_map;
  input_data_map.insert({"image", image});
  input_data_map.insert({"im_shape", im_shape});
  input_data_map.insert({"scale_factor", scale_factor});
  return input_data_map;
}

TEST(test_ppyolov2_r50vd, multi_thread2_trt_fp32_bz1) {
  int thread_num = 2;  // thread > 2 may OOM
  // init input data
  auto input_data_map = PrepareInput(1);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare groudtruth config
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/model.pdmodel",
                        FLAGS_modeldir + "/model.pdiparams");
  config_no_ir.EnableUseGpu(100, 0);
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/model.pdmodel",
                  FLAGS_modeldir + "/model.pdiparams");
  config.EnableUseGpu(100, 0);
  config.EnableTensorRtEngine(1 << 20, 2, 10,
                              paddle_infer::PrecisionType::kFloat32, false,
                              false);
  LOG(INFO) << config.Summary();
  // get groudtruth by disbale ir
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &input_data_map,
                         &truth_output_data, 1);
  // get infer results from multi threads
  std::vector<std::thread> threads;
  services::PredictorPool pred_pool(config, thread_num);
  for (int i = 0; i < thread_num; ++i) {
    threads.emplace_back(paddle::test::SingleThreadPrediction,
                         pred_pool.Retrive(i), &input_data_map,
                         &infer_output_data, 2);
  }
  // thread join & check outputs
  for (int i = 0; i < thread_num; ++i) {
    LOG(INFO) << "join tid : " << i;
    threads[i].join();
    // CompareRecord(&truth_output_data, &infer_output_data, 1e-2);
    // TODO(OliverLPH): disable comparison since precsion is low
  }
  std::cout << "finish multi-thread test" << std::endl;
}

TEST(test_ppyolov2_r50vd, multi_thread2_mkl_bz2) {
  int thread_num = 2;
  // init input data
  auto input_data_map = PrepareInput(2);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare groudtruth config
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/model.pdmodel",
                        FLAGS_modeldir + "/model.pdiparams");
  config_no_ir.DisableGpu();
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/model.pdmodel",
                  FLAGS_modeldir + "/model.pdiparams");
  config.DisableGpu();
  config.EnableMKLDNN();
  config.SetMkldnnCacheCapacity(10);
  config.SetCpuMathLibraryNumThreads(10);
  LOG(INFO) << config.Summary();
  // get groudtruth by disbale ir
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &input_data_map,
                         &truth_output_data, 1);
  // get infer results from multi threads
  std::vector<std::thread> threads;
  services::PredictorPool pred_pool(config, thread_num);
  for (int i = 0; i < thread_num; ++i) {
    threads.emplace_back(paddle::test::SingleThreadPrediction,
                         pred_pool.Retrive(i), &input_data_map,
                         &infer_output_data, 2);
  }
  // thread join & check outputs
  for (int i = 0; i < thread_num; ++i) {
    LOG(INFO) << "join tid : " << i;
    threads[i].join();
    // CompareRecord(&truth_output_data, &infer_output_data, 1e-4);
    // TODO(OliverLPH): disable comparison since precsion is low
  }
  std::cout << "finish multi-thread test" << std::endl;
}

}  // namespace paddle_infer

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::google::ParseCommandLineFlags(&argc, &argv, true);
  return RUN_ALL_TESTS();
}
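Both tests fan prediction out over a services::PredictorPool sized to thread_num, handing each thread its own predictor instead of sharing one. A minimal standalone sketch of that pattern (model path hypothetical; the per-thread body is elided):

```cpp
#include <thread>
#include <vector>

#include "paddle_inference_api.h"

int main() {
  paddle_infer::Config config;
  config.SetModel("./ppyolov2_r50vd/model.pdmodel",  // hypothetical location
                  "./ppyolov2_r50vd/model.pdiparams");
  config.DisableGpu();

  const int thread_num = 2;  // the TRT test above also caps this at 2 ("thread > 2 may OOM")
  // The pool owns thread_num predictors; each worker retrieves its own slot
  // rather than sharing a single Predictor across threads.
  paddle_infer::services::PredictorPool pred_pool(config, thread_num);

  std::vector<std::thread> threads;
  for (int i = 0; i < thread_num; ++i) {
    threads.emplace_back([&pred_pool, i] {
      paddle_infer::Predictor* predictor = pred_pool.Retrive(i);  // API spelling is "Retrive"
      (void)predictor;
      // ... feed inputs and call predictor->Run() as in the tests above ...
    });
  }
  for (auto& t : threads) t.join();
  return 0;
}
```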
paddle/fluid/inference/tests/infer_ut/test_resnet50.cc
...
@@ -170,6 +170,35 @@ TEST(test_resnet50, multi_thread4_trt_fp32_bz2) {
   std::cout << "finish multi-thread test" << std::endl;
 }

+TEST(test_resnet50, trt_int8_bz2) {
+  // init input data
+  std::map<std::string, paddle::test::Record> my_input_data_map;
+  my_input_data_map["inputs"] = PrepareInput(2);
+  // init output data
+  std::map<std::string, paddle::test::Record> infer_output_data,
+      truth_output_data;
+  // prepare inference config
+  paddle_infer::Config config;
+  config.SetModel(FLAGS_modeldir + "/inference.pdmodel",
+                  FLAGS_modeldir + "/inference.pdiparams");
+  config.EnableUseGpu(100, 0);
+  config.EnableTensorRtEngine(1 << 20, 2, 3,
+                              paddle_infer::PrecisionType::kInt8, true, true);
+  // get first time prediction int8 results
+  paddle_infer::services::PredictorPool pred_pool(config, 1);
+  SingleThreadPrediction(pred_pool.Retrive(0), &my_input_data_map,
+                         &truth_output_data, 1);
+  // get repeat 5 times prediction int8 results
+  SingleThreadPrediction(pred_pool.Retrive(0), &my_input_data_map,
+                         &infer_output_data, 5);
+  // check outputs
+  CompareRecord(&truth_output_data, &infer_output_data);
+  std::cout << "finish test" << std::endl;
+}
+
 }  // namespace paddle_infer

 int main(int argc, char** argv) {
...
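Unlike the fp32 tests, trt_int8_bz2 passes kInt8 with the last two EnableTensorRtEngine arguments set to true, so early prediction passes can collect an INT8 calibration table and the built engine can be serialized for reuse; the test then compares a single-run result against a repeat-5 run. A sketch of just that configuration (model paths hypothetical):

```cpp
#include "paddle_inference_api.h"

int main() {
  paddle_infer::Config config;
  config.SetModel("./resnet50/inference.pdmodel",  // hypothetical location
                  "./resnet50/inference.pdiparams");
  config.EnableUseGpu(100, 0);
  config.EnableTensorRtEngine(
      1 << 20,                             // workspace_size for TensorRT, in bytes
      2,                                   // max_batch_size
      3,                                   // min_subgraph_size
      paddle_infer::PrecisionType::kInt8,  // quantized inference
      true,                                // use_static: serialize the built engine for reuse
      true);                               // use_calib_mode: derive an INT8 calibration table
  auto predictor = paddle_infer::CreatePredictor(config);
  return 0;
}
```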
paddle/fluid/inference/tests/infer_ut/test_yolov3.cc (new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "test_suite.h" // NOLINT

DEFINE_string(modeldir, "", "Directory of the inference model.");

namespace paddle_infer {

std::map<std::string, paddle::test::Record> PrepareInput(int batch_size) {
  // init input data
  int channel = 3;
  int width = 608;
  int height = 608;
  paddle::test::Record image, im_shape, scale_factor;
  int input_num = batch_size * channel * width * height;
  int shape_num = batch_size * 2;
  std::vector<float> image_data(input_num, 1);
  for (int i = 1; i < input_num + 1; ++i) {
    image_data[i] = i % 10 * 0.5;
  }
  std::vector<float> im_shape_data(shape_num, 1);
  std::vector<float> scale_factor_data(shape_num, 1);

  image.data = std::vector<float>(image_data.begin(), image_data.end());
  image.shape = std::vector<int>{batch_size, channel, width, height};
  image.type = paddle::PaddleDType::FLOAT32;
  im_shape.data =
      std::vector<float>(im_shape_data.begin(), im_shape_data.end());
  im_shape.shape = std::vector<int>{batch_size, 2};
  im_shape.type = paddle::PaddleDType::FLOAT32;
  scale_factor.data =
      std::vector<float>(scale_factor_data.begin(), scale_factor_data.end());
  scale_factor.shape = std::vector<int>{batch_size, 2};
  scale_factor.type = paddle::PaddleDType::FLOAT32;

  std::map<std::string, paddle::test::Record> input_data_map;
  input_data_map.insert({"image", image});
  input_data_map.insert({"im_shape", im_shape});
  input_data_map.insert({"scale_factor", scale_factor});
  return input_data_map;
}

TEST(test_yolov3, multi_thread3_trt_fp32_bz2) {
  int thread_num = 3;
  // init input data
  auto input_data_map = PrepareInput(2);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare groudtruth config
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/model.pdmodel",
                        FLAGS_modeldir + "/model.pdiparams");
  config_no_ir.EnableUseGpu(100, 0);
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/model.pdmodel",
                  FLAGS_modeldir + "/model.pdiparams");
  config.EnableUseGpu(100, 0);
  config.EnableTensorRtEngine(1 << 20, 2, 3,
                              paddle_infer::PrecisionType::kFloat32, false,
                              false);
  LOG(INFO) << config.Summary();
  // get groudtruth by disbale ir
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &input_data_map,
                         &truth_output_data, 1);
  // get infer results from multi threads
  std::vector<std::thread> threads;
  services::PredictorPool pred_pool(config, thread_num);
  for (int i = 0; i < thread_num; ++i) {
    threads.emplace_back(paddle::test::SingleThreadPrediction,
                         pred_pool.Retrive(i), &input_data_map,
                         &infer_output_data, 2);
  }
  // thread join & check outputs
  for (int i = 0; i < thread_num; ++i) {
    LOG(INFO) << "join tid : " << i;
    threads[i].join();
    CompareRecord(&truth_output_data, &infer_output_data, 1e-2);
    // TODO(OliverLPH): precision set to 1e-2 since input is fake, change to
    // real input later
  }
  std::cout << "finish multi-thread test" << std::endl;
}

TEST(test_yolov3, multi_thread4_mkl_bz2) {
  int thread_num = 4;
  // init input data
  auto input_data_map = PrepareInput(2);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare groudtruth config
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/model.pdmodel",
                        FLAGS_modeldir + "/model.pdiparams");
  config_no_ir.DisableGpu();
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/model.pdmodel",
                  FLAGS_modeldir + "/model.pdiparams");
  config.DisableGpu();
  config.EnableMKLDNN();
  config.SetMkldnnCacheCapacity(10);
  config.SetCpuMathLibraryNumThreads(10);
  LOG(INFO) << config.Summary();
  // get groudtruth by disbale ir
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &input_data_map,
                         &truth_output_data, 1);
  // get infer results from multi threads
  std::vector<std::thread> threads;
  services::PredictorPool pred_pool(config, thread_num);
  for (int i = 0; i < thread_num; ++i) {
    threads.emplace_back(paddle::test::SingleThreadPrediction,
                         pred_pool.Retrive(i), &input_data_map,
                         &infer_output_data, 2);
  }
  // thread join & check outputs
  for (int i = 0; i < thread_num; ++i) {
    LOG(INFO) << "join tid : " << i;
    threads[i].join();
    CompareRecord(&truth_output_data, &infer_output_data, 1e-4);
  }
  std::cout << "finish multi-thread test" << std::endl;
}

}  // namespace paddle_infer

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::google::ParseCommandLineFlags(&argc, &argv, true);
  return RUN_ALL_TESTS();
}