s920243400 / PaddleDetection
Forked from PaddlePaddle / PaddleDetection (in sync with the fork source)
Commit 7c5e6d2c (unverified)
Authored Jul 16, 2018 by Tao Luo; committed via GitHub on Jul 16, 2018

Merge pull request #12160 from luotao1/vis_demo

add independent vis_demo for inference

Parents: 0b9abcbe, 6313bdc3
Showing 8 changed files with 104 additions and 129 deletions (+104 −129).
- paddle/contrib/inference/CMakeLists.txt (+0 −4)
- paddle/contrib/inference/demo/CMakeLists.txt (+0 −59)
- paddle/contrib/inference/demo/README.md (+0 −36)
- paddle/contrib/inference/demo_ci/.gitignore (+1 −0)
- paddle/contrib/inference/demo_ci/README.md (+26 −0)
- paddle/contrib/inference/demo_ci/run.sh (+42 −1)
- paddle/contrib/inference/demo_ci/utils.h (+1 −1)
- paddle/contrib/inference/demo_ci/vis_demo.cc (+34 −28)
paddle/contrib/inference/CMakeLists.txt (+0 −4)

```diff
@@ -104,7 +104,3 @@ if (WITH_ANAKIN) # only needed in CI
     target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
   endif(WITH_TESTING)
 endif()
-
-if (WITH_TESTING)
-  add_subdirectory(demo)
-endif()
```
paddle/contrib/inference/demo/CMakeLists.txt — deleted (100644 → 0, +0 −59)

```cmake
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

option(WITH_INFERENCE_DEMO "Compile with Inference demo" OFF)
if(NOT WITH_INFERENCE_DEMO)
  return()
endif()

set(DEMO_INSTALL_DIR "${PADDLE_BINARY_DIR}/inference_demo")
set(URL_ROOT http://paddlemodels.bj.bcebos.com/inference-vis-demos%2F)

function(inference_download_test_demo TARGET)
  if(NOT WITH_TESTING)
    return()
  endif()
  set(options "")
  set(oneValueArgs URL)
  set(multiValueArgs SRCS)
  cmake_parse_arguments(tests "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
  set(test_dir "${DEMO_INSTALL_DIR}/${TARGET}")
  message(STATUS "inference demo ${test_dir}")
  if(NOT EXISTS "${test_dir}")
    message(STATUS "Download ${TARGET} model from ${tests_URL}")
    execute_process(COMMAND bash -c "mkdir -p ${test_dir}")
    execute_process(COMMAND bash -c "cd ${test_dir}; wget -q ${tests_URL}")
    execute_process(COMMAND bash -c "cd ${test_dir}; tar xzf *.tar.gz")
  endif()
  cc_test(${TARGET} SRCS "${tests_SRCS}"
          DEPS paddle_inference_api paddle_fluid
          ARGS --data=${test_dir}/data.txt
               --modeldir=${test_dir}/model
               --refer=${test_dir}/result.txt)
endfunction()

# disable mobilenet test
#inference_download_test_demo(mobilenet_inference_demo
#  SRCS vis_demo.cc
#  URL ${URL_ROOT}mobilenet.tar.gz)
inference_download_test_demo(se_resnext50_inference_demo
  SRCS vis_demo.cc
  URL ${URL_ROOT}se_resnext50.tar.gz)
inference_download_test_demo(ocr_inference_demo
  SRCS vis_demo.cc
  URL ${URL_ROOT}ocr.tar.gz)
```
paddle/contrib/inference/demo/README.md — deleted (100644 → 0, +0 −36)

````markdown
# Infernce Demos

Input data format:

- Each line contains a single record
- Each record's format is
```
<space splitted floats as data>\t<space splitted ints as shape>
```

Follow the C++ codes in `vis_demo.cc`.

## MobileNet

To execute the demo, simply run

```sh
./mobilenet_inference_demo --modeldir <model> --data <datafile>
```

## SE-ResNeXt-50

To execute the demo, simply run

```sh
./se_resnext50_inference_demo --modeldir <model> --data <datafile>
```

## OCR

To execute the demo, simply run

```sh
./ocr_inference_demo --modeldir <model> --data <datafile>
```
````
paddle/contrib/inference/demo_ci/.gitignore — new file (0 → 100644, +1 −0)

```
data
```
paddle/contrib/inference/demo_ci/README.md — new file (0 → 100644, +26 −0)

````markdown
# Inference Demos

There are several demos:

- simple_on_word2vec:
  - Follow the C++ codes is in `simple_on_word2vec.cc`.
  - It is suitable for word2vec model.
- vis_demo:
  - Follow the C++ codes is in `vis_demo.cc`.
  - It is suitable for mobilenet, se_resnext50 and ocr three models.
  - Input data format:
    - Each line contains a single record
    - Each record's format is
    ```
    <space splitted floats as data>\t<space splitted ints as shape>
    ```

To build and execute the demos, simply run
```
./run.sh $PADDLE_ROOT $TURN_ON_MKL $TEST_GPU_CPU
```

- It will build and execute the demos in both static and shared library.
- `$PADDLE_ROOT`: paddle library path
- `$TURN_ON_MKL`: use MKL or Openblas
- `$TEST_GPU_CPU`: test both GPU/CPU mode or only CPU mode
- NOTE: for simple_on_word2vec, must run `ctest -R test_word2vec -R` to obtain word2vec model at first.
````
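For illustration only, a minimal C++ sketch (not part of this commit) of one way to produce a record line in the format documented above: space-separated floats, a tab, then space-separated shape integers. The 1x3x2x2 shape, the values, and the `data.txt` file name are all assumptions.

```cpp
// Minimal sketch (not from the commit): write one record in the
// "<space splitted floats as data>\t<space splitted ints as shape>" format.
#include <cstddef>
#include <fstream>
#include <vector>

int main() {
  // Hypothetical example: a 1x3x2x2 tensor flattened to 12 floats.
  std::vector<int> shape = {1, 3, 2, 2};
  std::vector<float> data(12, 0.5f);

  std::ofstream out("data.txt");  // assumed file name
  for (std::size_t i = 0; i < data.size(); ++i) {
    out << (i ? " " : "") << data[i];  // space-separated flattened data
  }
  out << '\t';  // a single tab separates data from shape
  for (std::size_t i = 0; i < shape.size(); ++i) {
    out << (i ? " " : "") << shape[i];  // space-separated dims
  }
  out << '\n';  // one record per line
  return 0;
}
```

Feeding such a line via `--data` would exercise the `ProcessALine` path shown in the `vis_demo.cc` diff below.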
paddle/contrib/inference/demo_ci/run.sh (+42 −1)

The script now downloads the three vis_demo model archives into data/ before building, and builds and runs vis_demo alongside simple_on_word2vec in the same static/shared-library loop.

```diff
@@ -13,10 +13,30 @@ else
   use_gpu_list='false'
 fi
 
+# download vis_demo data
+function download() {
+  dir_name=$1
+  mkdir -p $dir_name
+  cd $dir_name
+  wget -q ${URL_ROOT}$dir_name.tar.gz
+  tar xzf *.tar.gz
+  cd ..
+}
+URL_ROOT=http://paddlemodels.bj.bcebos.com/inference-vis-demos%2F
+mkdir -p data
+cd data
+vis_demo_list='se_resnext50 ocr mobilenet'
+for vis_demo_name in $vis_demo_list; do
+  download $vis_demo_name
+done
+cd ..
+
+# compile and test the demo
 mkdir -p build
 cd build
 
 for WITH_STATIC_LIB in ON OFF; do
+  # -----simple_on_word2vec-----
   rm -rf *
   cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
     -DWITH_MKL=$TURN_ON_MKL \
@@ -29,9 +49,30 @@ for WITH_STATIC_LIB in ON OFF; do
       --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
       --use_gpu=$use_gpu
     if [ $? -ne 0 ]; then
-      echo "inference demo runs fail."
+      echo "simple_on_word2vec demo runs fail."
       exit 1
     fi
   done
+  # ---------vis_demo---------
+  rm -rf *
+  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+    -DWITH_MKL=$TURN_ON_MKL \
+    -DDEMO_NAME=vis_demo \
+    -DWITH_GPU=$TEST_GPU_CPU \
+    -DWITH_STATIC_LIB=$WITH_STATIC_LIB
+  make -j
+  for use_gpu in false; do
+    for vis_demo_name in $vis_demo_list; do
+      ./vis_demo \
+        --modeldir=../data/$vis_demo_name/model \
+        --data=../data/$vis_demo_name/data.txt \
+        --refer=../data/$vis_demo_name/result.txt \
+        --use_gpu=$use_gpu
+      if [ $? -ne 0 ]; then
+        echo "vis demo $vis_demo_name runs fail."
+        exit 1
+      fi
+    done
+  done
 done
 set +x
```
paddle/contrib/inference/demo/utils.h → paddle/contrib/inference/demo_ci/utils.h (+1 −1)

```diff
@@ -16,7 +16,7 @@
 #include <string>
 #include <vector>
-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "contrib/inference/paddle_inference_api.h"
 
 namespace paddle {
 namespace demo {
```

The `paddle/` prefix is dropped from the include path, presumably because demo_ci is built as a standalone project against the installed `fluid_install_dir` tree (see run.sh above) rather than from inside the Paddle source root.
paddle/contrib/inference/demo/vis_demo.cc → paddle/contrib/inference/demo_ci/vis_demo.cc (+34 −28)

The demo moves out of the gtest harness: the `TEST(...)` cases become a plain `main()`, `EXPECT_*` checks become `PADDLE_ENFORCE_*`, `LOG(INFO)` becomes `VLOG(3)`, and a new `--use_gpu` flag replaces the compile-time CUDA guard around the GPU memory setting.

```diff
@@ -18,19 +18,14 @@ limitations under the License. */
 #include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of PADDLE_ENFORCE to avoid importing other paddle header files.
-#include <gtest/gtest.h>
 #include <fstream>
 #include <iostream>
-#include "paddle/contrib/inference/demo/utils.h"
-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/platform/enforce.h"
+#include "utils.h"
 
 #ifdef PADDLE_WITH_CUDA
 DECLARE_double(fraction_of_gpu_memory_to_use);
 #endif
 
-namespace paddle {
-namespace demo {
-
 DEFINE_string(modeldir, "", "Directory of the inference model.");
 DEFINE_string(refer, "", "path to reference result for comparison.");
 DEFINE_string(
@@ -38,6 +33,10 @@ DEFINE_string(
     "",
     "path of data; each line is a record, format is "
     "'<space splitted floats as data>\t<space splitted ints as shape'");
+DEFINE_bool(use_gpu, false, "Whether use gpu.");
+
+namespace paddle {
+namespace demo {
 
 struct Record {
   std::vector<float> data;
@@ -47,7 +46,7 @@ struct Record {
 void split(const std::string& str, char sep, std::vector<std::string>* pieces);
 
 Record ProcessALine(const std::string& line) {
-  LOG(INFO) << "process a line";
+  VLOG(3) << "process a line";
   std::vector<std::string> columns;
   split(line, '\t', &columns);
   CHECK_EQ(columns.size(), 2UL)
@@ -65,8 +64,8 @@ Record ProcessALine(const std::string& line) {
   for (auto& s : shape_strs) {
     record.shape.push_back(std::stoi(s));
   }
-  LOG(INFO) << "data size " << record.data.size();
-  LOG(INFO) << "data shape size " << record.shape.size();
+  VLOG(3) << "data size " << record.data.size();
+  VLOG(3) << "data shape size " << record.shape.size();
   return record;
 }
@@ -78,20 +77,22 @@ void CheckOutput(const std::string& referfile, const PaddleTensor& output) {
   file.close();
 
   size_t numel = output.data.length() / PaddleDtypeSize(output.dtype);
-  LOG(INFO) << "predictor output numel " << numel;
-  LOG(INFO) << "reference output numel " << refer.data.size();
-  EXPECT_EQ(numel, refer.data.size());
+  VLOG(3) << "predictor output numel " << numel;
+  VLOG(3) << "reference output numel " << refer.data.size();
+  PADDLE_ENFORCE_EQ(numel, refer.data.size());
   switch (output.dtype) {
     case PaddleDType::INT64: {
       for (size_t i = 0; i < numel; ++i) {
-        EXPECT_EQ(static_cast<int64_t*>(output.data.data())[i], refer.data[i]);
+        PADDLE_ENFORCE_EQ(static_cast<int64_t*>(output.data.data())[i],
+                          refer.data[i]);
       }
       break;
     }
     case PaddleDType::FLOAT32:
      for (size_t i = 0; i < numel; ++i) {
-        EXPECT_NEAR(
-            static_cast<float*>(output.data.data())[i], refer.data[i], 1e-5);
+        PADDLE_ENFORCE_LT(
+            fabs(static_cast<float*>(output.data.data())[i] - refer.data[i]),
+            1e-5);
       }
       break;
   }
@@ -106,15 +107,15 @@ void Main(bool use_gpu) {
   config.prog_file = FLAGS_modeldir + "/__model__";
   config.use_gpu = use_gpu;
   config.device = 0;
-#ifdef PADDLE_WITH_CUDA
-  config.fraction_of_gpu_memory = FLAGS_fraction_of_gpu_memory_to_use;
-#endif
+  if (FLAGS_use_gpu) {
+    config.fraction_of_gpu_memory = 0.1;  // set by yourself
+  }
 
-  LOG(INFO) << "init predictor";
+  VLOG(3) << "init predictor";
   auto predictor =
       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
 
-  LOG(INFO) << "begin to process data";
+  VLOG(3) << "begin to process data";
   // Just a single batch of data.
   std::string line;
   std::ifstream file(FLAGS_data);
@@ -129,21 +130,26 @@ void Main(bool use_gpu) {
       .data = PaddleBuf(record.data.data(), record.data.size() * sizeof(float)),
       .dtype = PaddleDType::FLOAT32};
 
-  LOG(INFO) << "run executor";
+  VLOG(3) << "run executor";
   std::vector<PaddleTensor> output;
   predictor->Run({input}, &output);
 
-  LOG(INFO) << "output.size " << output.size();
+  VLOG(3) << "output.size " << output.size();
   auto& tensor = output.front();
-  LOG(INFO) << "output: " << SummaryTensor(tensor);
+  VLOG(3) << "output: " << SummaryTensor(tensor);
 
   // compare with reference result
   CheckOutput(FLAGS_refer, tensor);
 }
 
-TEST(demo, vis_demo_cpu) { Main(false /*use_gpu*/); }
-#ifdef PADDLE_WITH_CUDA
-TEST(demo, vis_demo_gpu) { Main(true /*use_gpu*/); }
-#endif
 }  // namespace demo
 }  // namespace paddle
+
+int main(int argc, char** argv) {
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  paddle::demo::Main(false /* use_gpu*/);
+  if (FLAGS_use_gpu) {
+    paddle::demo::Main(true /*use_gpu*/);
+  }
+  return 0;
+}
```
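A note on the pattern above: dropping `<gtest/gtest.h>` and the `TEST(...)` macros in favor of a plain `main()` with `PADDLE_ENFORCE_*` checks lets the demo build and run outside Paddle's test harness, and `EXPECT_NEAR(a, b, 1e-5)` is replaced by enforcing `fabs(a - b) < 1e-5` directly. Below is a minimal, self-contained sketch of the same record-parsing and tolerance-checking logic with no Paddle, gflags, glog, or gtest dependencies; the names `ParseRecord` and `NearlyEqual` are illustrative, not from the commit.

```cpp
// Illustrative sketch only: mirrors the ProcessALine/CheckOutput logic of
// vis_demo.cc without any Paddle, gflags, glog, or gtest dependencies.
#include <cassert>
#include <cmath>
#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

struct Record {
  std::vector<float> data;
  std::vector<int> shape;
};

// Parse "<space splitted floats as data>\t<space splitted ints as shape>".
Record ParseRecord(const std::string& line) {
  size_t tab = line.find('\t');
  assert(tab != std::string::npos && "record must contain a data/shape tab");
  Record record;
  std::istringstream data_ss(line.substr(0, tab));
  for (float f; data_ss >> f;) record.data.push_back(f);   // floats before tab
  std::istringstream shape_ss(line.substr(tab + 1));
  for (int d; shape_ss >> d;) record.shape.push_back(d);   // dims after tab
  return record;
}

// Tolerance check in the style the commit switches to:
// enforce |a - b| < 1e-5 rather than gtest's EXPECT_NEAR(a, b, 1e-5).
bool NearlyEqual(float a, float b) { return std::fabs(a - b) < 1e-5; }

int main() {
  Record r = ParseRecord("0.1 0.2 0.3 0.4\t2 2");
  std::printf("data size %zu, shape size %zu\n", r.data.size(), r.shape.size());
  assert(NearlyEqual(r.data[0], 0.1f));
  return 0;
}
```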