Commit 5de14c6b (unverified)

refine inference api (#13518)

Authored by Yan Chunwei on Sep 25, 2018; committed via GitHub on Sep 25, 2018.
Parent: 79463ae7

Showing 13 changed files with 171 additions and 93 deletions (+171 -93)
Files changed (13):

paddle/fluid/inference/api/analysis_predictor.cc                   +14  -5
paddle/fluid/inference/api/analysis_predictor.h                     +2  -2
paddle/fluid/inference/api/api_anakin_engine.cc                    +11  -7
paddle/fluid/inference/api/api_anakin_engine.h                      +2  -0
paddle/fluid/inference/api/api_impl.cc                              +7  -4
paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc         +12  -4
paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc   +4  -2
paddle/fluid/inference/api/paddle_inference_api.h                  +94 -48
paddle/fluid/inference/tests/api/anakin_mobilenet_tester.cc         +6  -5
paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc              +6  -5
paddle/fluid/inference/tests/api/analyzer_ner_tester.cc             +4  -4
paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc            +5  -5
paddle/fluid/inference/tests/api/tester_helper.h                    +4  -2
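
The pattern across these files: the experimental configs (AnalysisConfig, AnakinConfig, and TensorRTConfig, which is renamed to MixedRTConfig) move into the new namespace paddle::contrib, and single-parameter CreatePaddlePredictor&lt;ConfigT&gt; specializations are added so call sites no longer spell out the engine kind. A minimal sketch of a call site after this change (model path hypothetical, error handling elided):

#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::contrib::AnalysisConfig config;      // now lives under paddle::contrib
  config.model_dir = "./mobilenet_v2.model";   // hypothetical model directory
  config.use_gpu = false;
  // The engine kind no longer appears at the call site; the new
  // CreatePaddlePredictor<contrib::AnalysisConfig> specialization forwards
  // to PaddleEngineKind::kAnalysis internally.
  auto predictor = paddle::CreatePaddlePredictor(config);
  return predictor == nullptr ? 1 : 0;
}
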
paddle/fluid/inference/api/analysis_predictor.cc

@@ -71,7 +71,7 @@ bool AnalysisPredictor::Init(
     inference_program_ = paddle::inference::Load(
         executor_.get(), scope_.get(), config_.prog_file, config_.param_file);
   } else {
-    LOG(ERROR) << "fail to load inference model.";
+    LOG(ERROR) << "fail to load inference model from " << config_.model_dir;
     return false;
   }
@@ -109,8 +109,9 @@ void AnalysisPredictor::OptimizeInferenceProgram() {
   }
   argument_.origin_program_desc.reset(
       new ProgramDesc(*inference_program_->Proto()));
-  PADDLE_ENFORCE(config_.ir_mode == AnalysisConfig::IrPassMode::kExclude,
-                 "Only kExclude is supported yet.");
+  PADDLE_ENFORCE(
+      config_.ir_mode == contrib::AnalysisConfig::IrPassMode::kExclude,
+      "Only kExclude is supported yet.");
   Analyzer().DisableIrPasses(config_.ir_passes).Run(&argument_);
   CHECK(argument_.transformed_program_desc);
@@ -126,8 +127,9 @@ void AnalysisPredictor::OptimizeInferenceProgram() {
 }
 
 template <>
-std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
-    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig& config) {
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
+    contrib::AnalysisConfig, PaddleEngineKind::kAnalysis>(
+    const contrib::AnalysisConfig& config) {
   VLOG(3) << "create AnalysisConfig";
   if (config.use_gpu) {
     // 1. GPU memeroy
@@ -154,4 +156,11 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   return predictor;
 }
 
+template <>
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<contrib::AnalysisConfig>(
+    const contrib::AnalysisConfig& config) {
+  return CreatePaddlePredictor<contrib::AnalysisConfig,
+                               PaddleEngineKind::kAnalysis>(config);
+}
+
 }  // namespace paddle
paddle/fluid/inference/api/analysis_predictor.h

@@ -30,7 +30,7 @@ using framework::proto::ProgramDesc;
  */
 class AnalysisPredictor : public NativePaddlePredictor {
  public:
-  explicit AnalysisPredictor(const AnalysisConfig& config)
+  explicit AnalysisPredictor(const contrib::AnalysisConfig& config)
       : NativePaddlePredictor(config), config_(config) {}
 
   bool Init(const std::shared_ptr<framework::Scope>& parent_scope);
@@ -46,7 +46,7 @@ class AnalysisPredictor : public NativePaddlePredictor {
   Argument& analysis_argument() { return argument_; }
 
  private:
-  AnalysisConfig config_;
+  contrib::AnalysisConfig config_;
   Argument argument_;
 };
paddle/fluid/inference/api/api_anakin_engine.cc

@@ -31,21 +31,24 @@
 namespace paddle {
 
+using paddle::contrib::AnakinConfig;
+
 template <typename Target>
 PaddleInferenceAnakinPredictor<Target>::PaddleInferenceAnakinPredictor(
-    const AnakinConfig &config) {
+    const contrib::AnakinConfig &config) {
   CHECK(Init(config));
 }
 template <>
 PaddleInferenceAnakinPredictor<anakin::X86>::PaddleInferenceAnakinPredictor(
-    const AnakinConfig &config) {
+    const contrib::AnakinConfig &config) {
   omp_set_dynamic(0);
   omp_set_num_threads(1);
   mkl_set_num_threads(1);
   CHECK(Init(config));
 }
 template <typename Target>
-bool PaddleInferenceAnakinPredictor<Target>::Init(const AnakinConfig &config) {
+bool PaddleInferenceAnakinPredictor<Target>::Init(
+    const contrib::AnakinConfig &config) {
   if (!(graph_.load(config.model_file))) {
     VLOG(3) << "fail to load graph from " << config.model_file;
     return false;
@@ -200,10 +203,11 @@ template class PaddleInferenceAnakinPredictor<anakin::X86>;
 // A factory to help create difference predictor.
 template <>
 std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
-    AnakinConfig, PaddleEngineKind::kAnakin>(const AnakinConfig &config) {
+    contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
+    const contrib::AnakinConfig &config) {
   VLOG(3) << "Anakin Predictor create.";
-  if (config.target_type == AnakinConfig::NVGPU) {
+  if (config.target_type == contrib::AnakinConfig::NVGPU) {
 #ifdef PADDLE_WITH_CUDA
     VLOG(3) << "Anakin Predictor create on [ NVIDIA GPU ].";
     std::unique_ptr<PaddlePredictor> x(
@@ -213,7 +217,7 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
     LOG(ERROR) << "AnakinConfig::NVGPU could not used in ONLY-CPU environment";
     return nullptr;
 #endif
-  } else if (config.target_type == AnakinConfig::X86) {
+  } else if (config.target_type == contrib::AnakinConfig::X86) {
     VLOG(3) << "Anakin Predictor create on [ Intel X86 ].";
     std::unique_ptr<PaddlePredictor> x(
         new PaddleInferenceAnakinPredictor<anakin::X86>(config));
paddle/fluid/inference/api/api_anakin_engine.h

@@ -29,6 +29,8 @@ limitations under the License. */
 namespace paddle {
 
+using contrib::AnakinConfig;
+
 template <typename Target>
 class PaddleInferenceAnakinPredictor : public PaddlePredictor {
  public:
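
The `using contrib::AnakinConfig;` added above is what lets the rest of this header and its .cc keep referring to the unqualified name after the struct's move into paddle::contrib. A self-contained illustration of the pattern with toy names (not Paddle code):

#include <iostream>

namespace lib {
namespace contrib {
struct Config { int device{0}; };  // the type's new, experimental home
}  // namespace contrib

using contrib::Config;  // re-export: the old unqualified spelling still compiles

void Describe(const Config& c) { std::cout << c.device << "\n"; }
}  // namespace lib

int main() {
  lib::Config a;           // both spellings name the same type
  lib::contrib::Config b;
  lib::Describe(a);
  lib::Describe(b);
  return 0;
}
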
paddle/fluid/inference/api/api_impl.cc

@@ -101,14 +101,11 @@ bool NativePaddlePredictor::Init(
     inference_program_ = paddle::inference::Load(
         executor_.get(), scope_.get(), config_.prog_file, config_.param_file);
   } else {
-    LOG(ERROR) << "fail to load inference model.";
+    LOG(ERROR) << "fail to load inference model from " << config_.model_dir;
     return false;
   }
 
   ctx_ = executor_->Prepare(*inference_program_, 0);
-  if (config_._use_mkldnn) {
-    executor_->EnableMKLDNN(*inference_program_);
-  }
   executor_->CreateVariables(*inference_program_,
                              sub_scope_ ? sub_scope_ : scope_.get(), 0);
@@ -330,4 +327,10 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
 #endif
 }
 
+template <>
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<NativeConfig>(
+    const NativeConfig &config) {
+  return CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+}
+
 }  // namespace paddle
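
Besides the clearer load-failure message, this file gains the CreatePaddlePredictor&lt;NativeConfig&gt; specialization and sheds the `_use_mkldnn` special case (the flag resurfaces on contrib::AnalysisConfig in the header diff below). A sketch of end-to-end native inference under the refined API (paths and shapes hypothetical; PaddleTensor field names and Run's signature assumed from this release):

#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::NativeConfig config;
  config.model_dir = "./word2vec.inference.model";  // hypothetical path
  config.use_gpu = false;
  // Engine kind defaults to kNative via the specialization added above.
  auto predictor = paddle::CreatePaddlePredictor(config);
  if (!predictor) return 1;

  float input[4] = {1.f, 2.f, 3.f, 4.f};  // hypothetical input data
  paddle::PaddleTensor tensor;
  tensor.shape = std::vector<int>({4, 1});
  tensor.data = paddle::PaddleBuf(input, sizeof(input));  // external memory
  tensor.dtype = paddle::PaddleDType::FLOAT32;

  std::vector<paddle::PaddleTensor> outputs;
  return predictor->Run({tensor}, &outputs) ? 0 : 1;
}
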
paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc

@@ -25,10 +25,11 @@ using inference::analysis::Argument;
 using inference::Singleton;
 using inference::analysis::Analyzer;
 using framework::proto::ProgramDesc;
+using paddle::contrib::MixedRTConfig;
 
 class TensorRTSubgraphPredictor : public NativePaddlePredictor {
  public:
-  explicit TensorRTSubgraphPredictor(const TensorRTConfig& config)
+  explicit TensorRTSubgraphPredictor(const MixedRTConfig& config)
       : NativePaddlePredictor(config), config_(config) {}
 
   bool Init(const std::shared_ptr<framework::Scope>& parent_scope) {
@@ -115,13 +116,13 @@ class TensorRTSubgraphPredictor : public NativePaddlePredictor {
   }
 
  private:
-  TensorRTConfig config_;
+  MixedRTConfig config_;
 };
 
 template <>
 std::unique_ptr<PaddlePredictor>
-CreatePaddlePredictor<TensorRTConfig, PaddleEngineKind::kAutoMixedTensorRT>(
-    const TensorRTConfig& config) {
+CreatePaddlePredictor<MixedRTConfig, PaddleEngineKind::kAutoMixedTensorRT>(
+    const MixedRTConfig& config) {
   VLOG(3) << "create TensorRTSubgraphPredictor";
   if (config.use_gpu) {
     // 1. GPU memeroy
@@ -150,6 +151,13 @@ CreatePaddlePredictor<TensorRTConfig, PaddleEngineKind::kAutoMixedTensorRT>(
   return std::move(predictor);
 }
 
+template <>
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<MixedRTConfig>(
+    const MixedRTConfig& config) {
+  return CreatePaddlePredictor<MixedRTConfig,
+                               PaddleEngineKind::kAutoMixedTensorRT>(config);
+}
+
 }  // namespace paddle
 
 USE_TRT_CONVERTER(elementwise_add_weight);
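
The rename from TensorRTConfig to contrib::MixedRTConfig matches what the engine does: it mixes Fluid execution with TensorRT subgraphs rather than running TensorRT alone. A sketch of a call site after the rename (path and values hypothetical, mirroring the tester below; min_subgraph_size is the field shown in the header diff):

#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::contrib::MixedRTConfig config;
  config.model_dir = "./word2vec.inference.model";  // hypothetical path
  config.use_gpu = true;                 // the TRT path requires CUDA
  config.fraction_of_gpu_memory = 0.3;
  config.min_subgraph_size = 1;          // subgraphs this large go to TRT
  // The one-parameter specialization added above forwards to
  // PaddleEngineKind::kAutoMixedTensorRT.
  auto predictor = paddle::CreatePaddlePredictor(config);
  return predictor == nullptr ? 1 : 0;
}
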
paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc

@@ -20,6 +20,8 @@
 namespace paddle {
 
+using contrib::MixedRTConfig;
+
 DEFINE_string(dirname, "", "Directory of the inference model.");
 
 void CompareTensorRTWithFluid(bool enable_tensorrt) {
@@ -32,7 +34,7 @@ void CompareTensorRTWithFluid(bool enable_tensorrt) {
   config0.fraction_of_gpu_memory = 0.3;
   config0.device = 0;
 
-  TensorRTConfig config1;
+  MixedRTConfig config1;
   config1.model_dir = FLAGS_dirname + "word2vec.inference.model";
   config1.use_gpu = true;
   config1.fraction_of_gpu_memory = 0.3;
@@ -42,7 +44,7 @@ void CompareTensorRTWithFluid(bool enable_tensorrt) {
   auto predictor0 =
       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config0);
   auto predictor1 =
-      CreatePaddlePredictor<TensorRTConfig,
+      CreatePaddlePredictor<MixedRTConfig,
                             PaddleEngineKind::kAutoMixedTensorRT>(config1);
 
   for (int batch_id = 0; batch_id < 1; batch_id++) {
paddle/fluid/inference/api/paddle_inference_api.h

@@ -28,34 +28,61 @@ limitations under the License. */
 namespace paddle {
 
+// Data type.
 enum PaddleDType {
   FLOAT32,
   INT64,
+  // TODO(Superjomn) support more data types if needed.
 };
 
+/*
+ * Memory menage for PaddleTensor.
+ * The PaddleBuf holds a buffer for data input or output. The memory can be
+ * allocated by user or by PaddleBuf itself, but in any case, the PaddleBuf
+ * should be reused for better performance.
+ *
+ * For user allocated memory, the following API can be used:
+ * - PaddleBuf(void* data, size_t length) to set an external memory by
+ * specifying
+ *   the memory address and length.
+ * - Reset(void* data, size_t length) to reset the PaddleBuf with an external
+ * memory.
+ * ATTENTION, for user allocated memory, deallocation should be done by users
+ * externally after the program finished. The PaddleBuf won't do any allocation
+ * or deallocation.
+ *
+ * To have the PaddleBuf allocate and manage the memory:
+ * - PaddleBuf(size_t length) will allocate a memory of size `length`.
+ * - Resize(size_t length) resize the memory to no less than `length`, ATTENTION
+ *   if the allocated memory is larger than `length`, nothing will done.
+ */
 class PaddleBuf {
  public:
-  PaddleBuf() = default;
-  PaddleBuf(PaddleBuf&& other);
-  // Copy only available when memory is managed externally.
-  explicit PaddleBuf(const PaddleBuf&);
-  PaddleBuf& operator=(const PaddleBuf&);
-  PaddleBuf& operator=(PaddleBuf&&);
-  // Do not own the memory.
-  PaddleBuf(void* data, size_t length)
-      : data_(data), length_(length), memory_owned_{false} {}
-  // Own memory.
+  // PaddleBuf allocate memory internally, and manage it.
   explicit PaddleBuf(size_t length)
       : data_(new char[length]), length_(length), memory_owned_(true) {}
-  // Resize to `length` bytes.
+  // Set external memory, the PaddleBuf won't manage it.
+  PaddleBuf(void* data, size_t length)
+      : data_(data), length_(length), memory_owned_{false} {}
+  // Copy only available when memory is managed externally.
+  explicit PaddleBuf(const PaddleBuf&);
+
+  // Resize the memory.
   void Resize(size_t length);
-  // Reset to external memory.
+  // Reset to external memory, with address and length set.
   void Reset(void* data, size_t length);
+  // Tell whether the buffer is empty.
   bool empty() const { return length_ == 0; }
+  // Get the memory address.
   void* data() const { return data_; }
+  // Get the memory length.
   size_t length() const { return length_; }
 
   ~PaddleBuf() { Free(); }
+  PaddleBuf& operator=(const PaddleBuf&);
+  PaddleBuf& operator=(PaddleBuf&&);
+  PaddleBuf() = default;
+  PaddleBuf(PaddleBuf&& other);
 
  private:
   void Free();
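
The new class comment spells out PaddleBuf's two ownership modes, and the reordered public section groups the constructors accordingly. A short sketch exercising both modes (sizes hypothetical):

#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  // Externally owned: PaddleBuf records the pointer but never frees it;
  // deallocation stays with the caller, per the ATTENTION note above.
  float raw[256] = {0.f};
  paddle::PaddleBuf external(raw, sizeof(raw));

  // Internally owned: allocated by PaddleBuf and freed in its destructor.
  paddle::PaddleBuf owned(1024);
  owned.Resize(2048);  // grows to >= 2048; shrinking requests are no-ops

  // Reset rebinds to external memory, address and length together.
  owned.Reset(raw, sizeof(raw));

  return (!external.empty() && owned.length() == sizeof(raw)) ? 0 : 1;
}
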
@@ -64,6 +91,7 @@ class PaddleBuf {
   bool memory_owned_{true};
 };
 
+// Basic input and output data structure for PaddlePredictor.
 struct PaddleTensor {
   PaddleTensor() = default;
   std::string name;  // variable name.
@@ -73,19 +101,8 @@ struct PaddleTensor {
   std::vector<std::vector<size_t>> lod;  // Tensor+LoD equals LoDTensor
 };
 
-enum class PaddleEngineKind {
-  kNative = 0,         // Use the native Fluid facility.
-  kAnakin,             // Use Anakin for inference.
-  kAutoMixedTensorRT,  // Automatically mix Fluid with TensorRT.
-  kAnalysis
-  // TODO(Superjomn) support following engines latter.
-  // kTensorRT, // Use TensorRT for inference.
-  // kAutoMixedAnakin, // Automatically mix Fluid with Anakin.
-};
-
 /*
- * A simple Inference API for Paddle. Currently this API can be used by
- * non-sequence scenerios.
+ * A simple Inference API for Paddle.
  */
 class PaddlePredictor {
  public:
@@ -120,26 +137,53 @@ struct NativeConfig : public PaddlePredictor::Config {
   // GPU related fields.
   bool use_gpu{false};
   int device{0};
-  float fraction_of_gpu_memory{-1.f};  // Negative to notify initialization.
-  // NOTE: NOT use it, just for the internal test, will discard later
-  bool _use_mkldnn{false};
-  // Specify the variable's name of each input.
-  bool specify_input_name{false};
+  float fraction_of_gpu_memory{-1.f};  // Change to a float in (0,1] if needed.
 
+  // Specify the exact path of program and parameter files.
   std::string prog_file;
   std::string param_file;
+
+  // Specify the variable's name of each input if input tensors don't follow the
+  // `feeds` and `fetches` of the phase `save_inference_model`.
+  bool specify_input_name{false};
 };
 
-// Configurations for Anakin engine.
-struct AnakinConfig : public PaddlePredictor::Config {
-  enum TargetType { NVGPU = 0, X86 };
-  int device;
-  std::string model_file;
-  int max_batch_size{-1};
-  TargetType target_type;
-};
+// A factory to help create different predictors.
+//
+// Usage:
+//
+// NativeConfig config;
+// ... // change the configs.
+// auto native_predictor = CreatePaddlePredictor(config);
+//
+// FOR EXTENSION DEVELOPER:
+// Different predictors are designated by config type. Similar configs can be
+// merged, but there shouldn't be a huge config containing different fields for
+// more than one kind of predictors.
+template <typename ConfigT>
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
 
-struct TensorRTConfig : public NativeConfig {
+// NOTE The following APIs are too trivial, we will discard it in the following
+// versions.
+enum class PaddleEngineKind {
+  kNative = 0,         // Use the native Fluid facility.
+  kAutoMixedTensorRT,  // Automatically mix Fluid with TensorRT.
+  kAnalysis,           // More optimization.
+  kAnakin              // Use Anakin for inference, not mature yet.
+};
+
+template <typename ConfigT, PaddleEngineKind engine>
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
+
+// ==
+//
+// -----------------------------------------------------------------------------------
+// NOTE: The following APIs are not mature yet, we are still working on them.
+
+namespace contrib {
+
+// Accelerate GPU computation with TensorRT engine.
+struct MixedRTConfig : public NativeConfig {
   // Determine whether a subgraph will be executed by TRT.
   int min_subgraph_size{1};
   // While TensorRT allows an engine optimized for a given max batch size
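
The reshuffled NativeConfig reads as two alternatives for locating a model: a model_dir (inherited from PaddlePredictor::Config) or exact prog_file/param_file paths, with specify_input_name only needed when inputs don't follow save_inference_model's feeds. A sketch of the file-pair form (paths hypothetical):

#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::NativeConfig config;
  // Exact program/parameter paths instead of a model directory.
  config.prog_file = "./model/__model__";   // hypothetical paths
  config.param_file = "./model/params";
  config.use_gpu = true;
  config.fraction_of_gpu_memory = 0.15f;    // a float in (0,1] when set
  config.specify_input_name = false;        // inputs follow the saved feeds order
  auto predictor = paddle::CreatePaddlePredictor(config);  // defaults to kNative
  return predictor == nullptr ? 1 : 0;
}
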
@@ -154,7 +198,6 @@ struct TensorRTConfig : public NativeConfig {
 // NOTE WIP, not stable yet.
 struct AnalysisConfig : public NativeConfig {
-  //
   enum class IrPassMode {
     kSystem,   // Use system default passes, not customize.
     kInclude,  // Specify the passes in `ir_passes`.
@@ -165,18 +208,21 @@ struct AnalysisConfig : public NativeConfig {
   IrPassMode ir_mode{IrPassMode::kExclude};
   // attention lstm fuse works only on some specific models, disable as default.
   std::vector<std::string> ir_passes{"attention_lstm_fuse_pass"};
+
+  // NOTE this is just for internal development, please not use it.
+  bool _use_mkldnn{false};
 };
 
-// A factory to help create different predictors.
-//
-// FOR EXTENSION DEVELOPER:
-// Different predictors are designated by config type and engine kind. Similar
-// configs can be merged, but there shouldn't be a huge config containing
-// different fields for more than one kind of predictors.
-//
-// Similarly, each engine kind should map to a unique predictor implementation.
-template <typename ConfigT, PaddleEngineKind engine = PaddleEngineKind::kNative>
-std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
+// Configurations for Anakin engine.
+struct AnakinConfig : public PaddlePredictor::Config {
+  enum TargetType { NVGPU = 0, X86 };
+  int device;
+  std::string model_file;
+  int max_batch_size{-1};
+  TargetType target_type;
+};
+
+}  // namespace contrib
 
 int PaddleDtypeSize(PaddleDType dtype);
paddle/fluid/inference/tests/api/anakin_mobilenet_tester.cc

@@ -22,10 +22,10 @@ DEFINE_string(model, "", "Directory of the inference model(mobile_v2).");
 namespace paddle {
 
-AnakinConfig GetConfig() {
-  AnakinConfig config;
+contrib::AnakinConfig GetConfig() {
+  contrib::AnakinConfig config;
   // using AnakinConfig::X86 if you need to use cpu to do inference
-  config.target_type = AnakinConfig::NVGPU;
+  config.target_type = contrib::AnakinConfig::NVGPU;
   config.model_file = FLAGS_model;
   config.device = 0;
   config.max_batch_size = 1;
@@ -33,9 +33,10 @@ AnakinConfig GetConfig() {
 }
 
 TEST(inference, anakin) {
-  AnakinConfig config = GetConfig();
+  auto config = GetConfig();
   auto predictor =
-      CreatePaddlePredictor<AnakinConfig, PaddleEngineKind::kAnakin>(config);
+      CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
+          config);
 
   float data[1 * 3 * 224 * 224] = {1.0f};
   PaddleTensor tensor;
paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc

@@ -97,10 +97,10 @@ void Data::get_batch_data(
 namespace paddle {
 
-AnakinConfig GetConfig() {
-  AnakinConfig config;
+contrib::AnakinConfig GetConfig() {
+  contrib::AnakinConfig config;
   // using AnakinConfig::X86 if you need to use cpu to do inference
-  config.target_type = AnakinConfig::X86;
+  config.target_type = contrib::AnakinConfig::X86;
   config.model_file = FLAGS_model;
   config.device = 0;
   config.max_batch_size = 1000;  // the max number of token
@@ -121,9 +121,10 @@ void set_tensor(std::string name, std::vector<int> shape,
 }
 
 void single_test() {
-  AnakinConfig config = GetConfig();
+  auto config = GetConfig();
   auto predictor =
-      CreatePaddlePredictor<AnakinConfig, PaddleEngineKind::kAnakin>(config);
+      CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
+          config);
 
   int max_batch_size = 1000;
   std::string feature_file = FLAGS_datapath;
paddle/fluid/inference/tests/api/analyzer_ner_tester.cc

@@ -95,7 +95,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   }
 }
 
-void SetConfig(AnalysisConfig *cfg) {
+void SetConfig(contrib::AnalysisConfig *cfg) {
   cfg->prog_file = FLAGS_infer_model + "/__model__";
   cfg->param_file = FLAGS_infer_model + "/param";
   cfg->use_gpu = false;
@@ -117,7 +117,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 TEST(Analyzer_Chinese_ner, profile) {
-  AnalysisConfig cfg;
+  contrib::AnalysisConfig cfg;
   SetConfig(&cfg);
   std::vector<PaddleTensor> outputs;
@@ -141,7 +141,7 @@ TEST(Analyzer_Chinese_ner, profile) {
 // Check the fuse status
 TEST(Analyzer_Chinese_ner, fuse_statis) {
-  AnalysisConfig cfg;
+  contrib::AnalysisConfig cfg;
   SetConfig(&cfg);
 
   int num_ops;
@@ -155,7 +155,7 @@ TEST(Analyzer_Chinese_ner, fuse_statis) {
 // Compare result of NativeConfig and AnalysisConfig
 TEST(Analyzer_Chinese_ner, compare) {
-  AnalysisConfig cfg;
+  contrib::AnalysisConfig cfg;
   SetConfig(&cfg);
 
   std::vector<std::vector<PaddleTensor>> input_slots_all;
paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc

@@ -149,7 +149,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   }
 }
 
-void SetConfig(AnalysisConfig *cfg) {
+void SetConfig(contrib::AnalysisConfig *cfg) {
   cfg->prog_file = FLAGS_infer_model + "/__model__";
   cfg->param_file = FLAGS_infer_model + "/param";
   cfg->use_gpu = false;
@@ -172,7 +172,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 TEST(Analyzer_rnn1, profile) {
-  AnalysisConfig cfg;
+  contrib::AnalysisConfig cfg;
   SetConfig(&cfg);
   std::vector<PaddleTensor> outputs;
@@ -183,7 +183,7 @@ TEST(Analyzer_rnn1, profile) {
 // Check the fuse status
 TEST(Analyzer_rnn1, fuse_statis) {
-  AnalysisConfig cfg;
+  contrib::AnalysisConfig cfg;
   SetConfig(&cfg);
 
   int num_ops;
@@ -198,7 +198,7 @@ TEST(Analyzer_rnn1, fuse_statis) {
 // Compare result of NativeConfig and AnalysisConfig
 TEST(Analyzer_rnn1, compare) {
-  AnalysisConfig cfg;
+  contrib::AnalysisConfig cfg;
   SetConfig(&cfg);
 
   std::vector<std::vector<PaddleTensor>> input_slots_all;
@@ -208,7 +208,7 @@ TEST(Analyzer_rnn1, compare) {
 // Test Multi-Thread.
 TEST(Analyzer_rnn1, multi_thread) {
-  AnalysisConfig cfg;
+  contrib::AnalysisConfig cfg;
   SetConfig(&cfg);
   std::vector<PaddleTensor> outputs;
paddle/fluid/inference/tests/api/tester_helper.h

@@ -38,6 +38,8 @@ DEFINE_bool(use_analysis, true,
 namespace paddle {
 namespace inference {
 
+using contrib::AnalysisConfig;
+
 void CompareResult(const std::vector<PaddleTensor> &outputs,
                    const std::vector<PaddleTensor> &ref_outputs) {
   EXPECT_GT(outputs.size(), 0UL);
@@ -77,8 +79,8 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
 std::unique_ptr<PaddlePredictor> CreateTestPredictor(
     const AnalysisConfig &config, bool use_analysis = true) {
   if (use_analysis) {
-    return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
-        config);
+    return CreatePaddlePredictor<contrib::AnalysisConfig,
+                                 PaddleEngineKind::kAnalysis>(config);
   } else {
     return CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(
         config);
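
With the alias in place, CreateTestPredictor gives every analyzer test a single switch between the analysis and native engines. A sketch of a test using it (model path hypothetical; FLAGS_use_analysis is the DEFINE_bool shown at the top of this header):

#include "paddle/fluid/inference/tests/api/tester_helper.h"

namespace paddle {
namespace inference {

TEST(TesterHelper, create_predictor) {
  contrib::AnalysisConfig cfg;
  cfg.model_dir = "./rnn1.model";  // hypothetical path
  // One flag flips all tests between kAnalysis and kNative.
  auto predictor = CreateTestPredictor(cfg, FLAGS_use_analysis);
  EXPECT_NE(predictor.get(), nullptr);
}

}  // namespace inference
}  // namespace paddle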