Commit 97b75027 (unverified) — Paddle, fork of PaddlePaddle/Paddle

inference API little fix (#11069)

Authored May 31, 2018 by Yan Chunwei; committed via GitHub on May 31, 2018.
Parent: f437c46f

Showing 6 changed files with 75 additions and 113 deletions (+75 -113):
  paddle/contrib/inference/CMakeLists.txt                     +2   -6
  paddle/contrib/inference/paddle_inference_api.h             +24  -20
  paddle/contrib/inference/paddle_inference_api_impl.cc       +29  -65
  paddle/contrib/inference/paddle_inference_api_impl.h        +5   -13
  paddle/contrib/inference/test_paddle_inference_api_impl.cc  +7   -6
  paddle/fluid/inference/CMakeLists.txt                       +8   -3
paddle/contrib/inference/CMakeLists.txt

@@ -36,7 +36,7 @@ function(inference_api_test TARGET_NAME TEST_SRC)
   string(REGEX REPLACE "^_$" "" arg "${arg}")
   cc_test(${TARGET_NAME}
           SRCS ${TEST_SRC}
-          DEPS paddle_fluid_api paddle_inference_api paddle_inference_api_impl
+          DEPS paddle_fluid_api paddle_inference_api
           ARGS --dirname=${PYTHON_TESTS_DIR}/book/)
   # TODO(panyx0178): Figure out how to add word2vec and image_classification
   # as deps.
@@ -47,13 +47,9 @@ endfunction(inference_api_test)
 cc_library(paddle_inference_api
-           SRCS paddle_inference_api.cc
+           SRCS paddle_inference_api.cc paddle_inference_api_impl.cc
            DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB})
 
-cc_library(paddle_inference_api_impl
-           SRCS paddle_inference_api_impl.cc
-           DEPS paddle_inference_api paddle_fluid_api)
-
 cc_test(test_paddle_inference_api
         SRCS test_paddle_inference_api.cc
         DEPS paddle_inference_api)
paddle/contrib/inference/paddle_inference_api.h

@@ -45,10 +45,10 @@ struct PaddleTensor {
 };
 
 /*
- * A simple Inference API for Paddle. Currently this API might just be used by
+ * A simple Inference API for Paddle. Currently this API can be used by
  * non-sequence scenerios.
- * TODO(Superjomn) Prepare another API for NLP-related usages.
+ * TODO(Superjomn) Support another API for NLP-related usages.
  */
 class PaddlePredictor {
  public:
  struct Config;
@@ -66,34 +66,38 @@ class PaddlePredictor {
   // be thread-safe.
   virtual std::unique_ptr<PaddlePredictor> Clone() = 0;
 
-  virtual bool InitShared() { return false; }
   // Destroy the Predictor.
   virtual ~PaddlePredictor() {}
 
-  friend std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(
-      const PaddlePredictor::Config& config);
+  enum class EngineKind {
+    kNative = -1,  // Use the native Fluid facility.
+    // TODO(Superjomn) support latter.
+    // kAnakin,             // Use Anakin for inference.
+    // kTensorRT,           // Use TensorRT for inference.
+    // kAutoMixedAnakin,    // Automatically mix Fluid with Anakin.
+    // kAutoMixedTensorRT,  // Automatically mix Fluid with TensorRT.
+  };
 
   // The common configs for all the predictors.
   struct Config {
-    enum class EngineKind;
-
     std::string model_dir;      // path to the model directory.
     bool enable_engine{false};  // Enable to execute (part of) the model on
                                 // third-party engines.
-    EngineKind engine_kind{Config::EngineKind::kNone};
-
-    enum class EngineKind {
-      kNone = -1,          // Use the native Fluid facility.
-      kAnakin,             // Use Anakin for inference.
-      kTensorRT,           // Use TensorRT for inference.
-      kAutoMixedAnakin,    // Automatically mix Fluid with Anakin.
-      kAutoMixedTensorRT,  // Automatically mix Fluid with TensorRT.
-    };
   };
 };
 
+struct NativeConfig : public PaddlePredictor::Config {
+  bool use_gpu{false};
+  int device;
+  float fraction_of_gpu_memory;
+  std::string prog_file;
+  std::string param_file;
+  bool share_variables;
+};
+
 // A factory to help create difference predictor.
-template <typename ConfigT>
+template <typename ConfigT,
+          PaddlePredictor::EngineKind engine =
+              PaddlePredictor::EngineKind::kNative>
 std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
 
 }  // namespace paddle
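The net effect of this header change is that callers now pick an engine at compile time through the factory's second template parameter, which defaults to the native Fluid engine. A minimal caller-side sketch under the header as shown above; the model path is a placeholder and error handling is elided:

#include "paddle/contrib/inference/paddle_inference_api.h"

int main() {
  paddle::NativeConfig config;
  config.model_dir = "/path/to/model";  // placeholder path, not from the commit
  config.use_gpu = false;
  config.device = 0;
  config.share_variables = true;

  // The engine argument defaults to EngineKind::kNative, so
  // CreatePaddlePredictor<paddle::NativeConfig>(config) is equivalent.
  auto predictor = paddle::CreatePaddlePredictor<
      paddle::NativeConfig,
      paddle::PaddlePredictor::EngineKind::kNative>(config);

  return predictor == nullptr;  // non-zero exit when creation fails
}

Folding the engine choice into a template parameter keeps the factory a single declaration while letting each engine specialize it, which is exactly what the .cc change below does for kNative.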
paddle/contrib/inference/paddle_inference_api_impl.cc

@@ -54,7 +54,7 @@ std::string num2str(T a) {
 }
 }  // namespace
 
-bool PaddlePredictorImpl::Init() {
+bool NativePaddlePredictor::Init() {
   VLOG(3) << "Predictor::init()";
 
   // TODO(panyx0718): Should CPU vs GPU device be decided by id?
@@ -96,7 +96,7 @@ bool PaddlePredictorImpl::Init() {
   return true;
 }
 
-bool PaddlePredictorImpl::Run(const std::vector<PaddleTensor> &inputs,
+bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
                               std::vector<PaddleTensor> *output_data) {
   VLOG(3) << "Predictor::predict";
   Timer timer;
@@ -133,58 +133,19 @@ bool PaddlePredictorImpl::Run(const std::vector<PaddleTensor> &inputs,
   return true;
 }
 
-std::unique_ptr<PaddlePredictor> PaddlePredictorImpl::Clone() {
+std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
   VLOG(3) << "Predictor::clone";
-  std::unique_ptr<PaddlePredictor> cls(new PaddlePredictorImpl(config_));
-  if (!cls->InitShared()) {
-    LOG(ERROR) << "fail to call InitShared";
+  std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));
+
+  if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init()) {
+    LOG(ERROR) << "fail to call Init";
     return nullptr;
   }
   // fix manylinux compile error.
   return std::move(cls);
 }
 
-// TODO(panyx0718): Consider merge with Init()?
-bool PaddlePredictorImpl::InitShared() {
-  VLOG(3) << "Predictor::init_shared";
-  // 1. Define place, executor, scope
-  if (this->config_.device >= 0) {
-    place_ = platform::CUDAPlace();
-  } else {
-    place_ = platform::CPUPlace();
-  }
-  this->executor_.reset(new framework::Executor(this->place_));
-  this->scope_.reset(new framework::Scope());
-  // Initialize the inference program
-  if (!this->config_.model_dir.empty()) {
-    // Parameters are saved in separate files sited in
-    // the specified `dirname`.
-    this->inference_program_ = inference::Load(
-        this->executor_.get(), this->scope_.get(), this->config_.model_dir);
-  } else if (!this->config_.prog_file.empty() &&
-             !this->config_.param_file.empty()) {
-    // All parameters are saved in a single file.
-    // The file names should be consistent with that used
-    // in Python API `fluid.io.save_inference_model`.
-    this->inference_program_ = inference::Load(this->executor_.get(),
-                                               this->scope_.get(),
-                                               this->config_.prog_file,
-                                               this->config_.param_file);
-  }
-  this->ctx_ = this->executor_->Prepare(*this->inference_program_, 0);
-  // 3. create variables
-  // TODO(panyx0718): why test share_variables.
-  if (config_.share_variables) {
-    this->executor_->CreateVariables(
-        *this->inference_program_, this->scope_.get(), 0);
-  }
-  // 4. Get the feed_target_names and fetch_target_names
-  this->feed_target_names_ = this->inference_program_->GetFeedTargetNames();
-  this->fetch_target_names_ = this->inference_program_->GetFetchTargetNames();
-  return true;
-}
-
-bool PaddlePredictorImpl::SetFeed(const std::vector<PaddleTensor> &inputs,
+bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                   std::vector<framework::LoDTensor> *feeds) {
   VLOG(3) << "Predictor::set_feed";
   if (inputs.size() != feed_target_names_.size()) {
@@ -213,7 +174,7 @@ bool PaddlePredictorImpl::SetFeed(const std::vector<PaddleTensor> &inputs,
   return true;
 }
 
-bool PaddlePredictorImpl::GetFetch(
+bool NativePaddlePredictor::GetFetch(
     const std::vector<framework::LoDTensor> &fetchs,
     std::vector<PaddleTensor> *outputs) {
   VLOG(3) << "Predictor::get_fetch";
@@ -280,9 +241,11 @@ bool PaddlePredictorImpl::GetFetch(
 }
 
 template <>
-std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(
-    const ConfigImpl &config) {
-  VLOG(3) << "create PaddlePredictorImpl";
+std::unique_ptr<PaddlePredictor>
+CreatePaddlePredictor<NativeConfig, PaddlePredictor::EngineKind::kNative>(
+    const NativeConfig &config) {
+  VLOG(3) << "create NativePaddlePredictor";
   if (config.use_gpu) {
     // 1. GPU memeroy
     std::vector<std::string> flags;
     if (config.fraction_of_gpu_memory >= 0.0f ||
@@ -294,9 +257,10 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(
       VLOG(3) << "set flag: " << flag;
       framework::InitGflags(flags);
     }
   }
 
-  std::unique_ptr<PaddlePredictor> predictor(new PaddlePredictorImpl(config));
-  if (!dynamic_cast<PaddlePredictorImpl *>(predictor.get())->Init()) {
+  std::unique_ptr<PaddlePredictor> predictor(
+      new NativePaddlePredictor(config));
+  if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init()) {
     return nullptr;
   }
   return std::move(predictor);
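Behavior-wise, dropping InitShared() means Clone() now builds a fresh NativePaddlePredictor from the same config_ and runs the full Init() path, so each clone owns its own executor, scope, and loaded program. A hedged sketch (C++14 init-capture; the helper name is ours, not from the commit) of the one-predictor-per-thread pattern this Clone() contract supports:

#include <thread>
#include <vector>

#include "paddle/contrib/inference/paddle_inference_api.h"

// Hypothetical helper: fan the same inputs out to four clones, one per
// thread. A single predictor is not documented to be thread-safe, but
// Clone() is meant to hand each thread its own safe instance.
void RunOnClones(paddle::PaddlePredictor *predictor,
                 const std::vector<paddle::PaddleTensor> &inputs) {
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    workers.emplace_back([&inputs, clone = predictor->Clone()] {
      std::vector<paddle::PaddleTensor> outputs;
      clone->Run(inputs, &outputs);  // each thread drives its own clone
    });
  }
  for (auto &worker : workers) worker.join();
}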
paddle/contrib/inference/paddle_inference_api_impl.h

@@ -29,17 +29,10 @@
 
 namespace paddle {
 
-struct ConfigImpl : public PaddlePredictor::Config {
-  int device;
-  float fraction_of_gpu_memory;
-  std::string prog_file;
-  std::string param_file;
-  bool share_variables;
-};
-
-class PaddlePredictorImpl : public PaddlePredictor {
+class NativePaddlePredictor : public PaddlePredictor {
  public:
-  explicit PaddlePredictorImpl(const ConfigImpl &config) : config_(config) {}
+  explicit NativePaddlePredictor(const NativeConfig &config)
+      : config_(config) {}
 
   bool Init();
@@ -48,16 +41,15 @@ class PaddlePredictorImpl : public PaddlePredictor {
   std::unique_ptr<PaddlePredictor> Clone() override;
 
-  ~PaddlePredictorImpl() override{};
+  ~NativePaddlePredictor() override{};
 
  private:
-  bool InitShared() override;
   bool SetFeed(const std::vector<PaddleTensor> &input_datas,
                std::vector<framework::LoDTensor> *feeds);
   bool GetFetch(const std::vector<framework::LoDTensor> &fetchs,
                 std::vector<PaddleTensor> *output_data);
 
-  ConfigImpl config_;
+  NativeConfig config_;
   platform::Place place_;
   std::unique_ptr<framework::Executor> executor_;
   std::unique_ptr<framework::Scope> scope_;
paddle/contrib/inference/test_paddle_inference_api_impl.cc

@@ -40,19 +40,20 @@ PaddleTensor LodTensorToPaddleTensor(framework::LoDTensor* t) {
   return pt;
 }
 
-ConfigImpl GetConfig() {
-  ConfigImpl config;
+NativeConfig GetConfig() {
+  NativeConfig config;
   config.model_dir = FLAGS_dirname + "word2vec.inference.model";
   LOG(INFO) << "dirname " << config.model_dir;
   config.fraction_of_gpu_memory = 0.15;
+  config.use_gpu = true;
   config.device = 0;
   config.share_variables = true;
   return config;
 }
 
 TEST(paddle_inference_api_impl, word2vec) {
-  ConfigImpl config = GetConfig();
-  std::unique_ptr<PaddlePredictor> predictor = CreatePaddlePredictor(config);
+  NativeConfig config = GetConfig();
+  auto predictor = CreatePaddlePredictor<NativeConfig>(config);
 
   framework::LoDTensor first_word, second_word, third_word, fourth_word;
   framework::LoD lod{{0, 1}};
@@ -104,7 +105,7 @@ TEST(paddle_inference_api_impl, image_classification) {
   int batch_size = 2;
   bool use_mkldnn = false;
   bool repeat = false;
-  ConfigImpl config = GetConfig();
+  NativeConfig config = GetConfig();
   config.model_dir =
       FLAGS_dirname + "image_classification_resnet.inference.model";
@@ -133,7 +134,7 @@ TEST(paddle_inference_api_impl, image_classification) {
       is_combined, use_mkldnn);
 
-  std::unique_ptr<PaddlePredictor> predictor = CreatePaddlePredictor(config);
+  auto predictor = CreatePaddlePredictor(config);
   std::vector<PaddleTensor> paddle_tensor_feeds;
   paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&input));
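With these edits the tests exercise exactly the public surface an embedder sees: NativeConfig plus the templated factory. For orientation, a sketch of building a single-word feed by hand instead of via the test's LodTensorToPaddleTensor helper; the PaddleTensor/PaddleBuf field names follow this commit's paddle_inference_api.h as best we can tell, and the static buffer is an illustration-only shortcut around ownership:

#include <cstdint>

#include "paddle/contrib/inference/paddle_inference_api.h"

// Hedged sketch: a one-element INT64 tensor such as a word2vec word id.
paddle::PaddleTensor MakeWordTensor(int64_t word_id) {
  static int64_t buffer;  // illustration only; real code manages this memory
  buffer = word_id;

  paddle::PaddleTensor tensor;
  tensor.shape = {1, 1};                 // batch of one, a single id
  tensor.data.data = &buffer;            // PaddleBuf here is a raw, non-owning blob
  tensor.data.length = sizeof(int64_t);
  tensor.dtype = paddle::PaddleDType::INT64;
  return tensor;
}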
paddle/fluid/inference/CMakeLists.txt

@@ -5,14 +5,19 @@ cc_library(paddle_fluid_api
     SRCS io.cc
     DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB})
 
-# Create static library
 get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
-cc_library(paddle_fluid DEPS ${fluid_modules})
+
+if(WITH_CONTRIB)
+  set(fluid_modules "${fluid_modules}" paddle_inference_api)
+endif()
+
+# Create static library
+cc_library(paddle_fluid DEPS ${fluid_modules} paddle_fluid_api)
 
 # Create shared library
 cc_library(paddle_fluid_shared SHARED
            SRCS io.cc
-           DEPS ${fluid_modules})
+           DEPS ${fluid_modules} paddle_fluid_api)
 
 set_target_properties(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid)
 if(NOT APPLE)
   # TODO(liuyiqun): Temporarily disable the link flag because it is not support on Mac.