AnalysisConfig remove contrib namespace (#15540)

Commit 65517908 (unverified), parent 7bc8481c.
Authored by Yan Chunwei on Jan 29, 2019; committed via GitHub on Jan 29, 2019.
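For user code, the change amounts to dropping one namespace qualifier: `paddle::contrib::AnalysisConfig` becomes `paddle::AnalysisConfig`. A minimal before/after sketch (the model directory is a placeholder, not a path from this commit):

```cpp
#include "paddle/fluid/inference/api/paddle_analysis_config.h"

int main() {
  // Before this commit:
  //   paddle::contrib::AnalysisConfig config("./mobilenet");
  // After this commit, the class lives directly in namespace paddle:
  paddle::AnalysisConfig config("./mobilenet");  // "./mobilenet" is a placeholder
  return 0;
}
```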
Showing 24 changed files with 78 additions and 97 deletions (+78 −97).
paddle/fluid/inference/analysis/argument.h (+1 −1)
paddle/fluid/inference/analysis/helper.h (+1 −1)
paddle/fluid/inference/analysis/ir_pass_manager.cc (+1 −1)
paddle/fluid/inference/api/analysis_config.cc (+25 −25)
paddle/fluid/inference/api/analysis_predictor.cc (+4 −5)
paddle/fluid/inference/api/analysis_predictor.h (+1 −2)
paddle/fluid/inference/api/analysis_predictor_tester.cc (+0 −1)
paddle/fluid/inference/api/api_impl_tester.cc (+1 −1)
paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc (+1 −1)
paddle/fluid/inference/api/demo_ci/vis_demo.cc (+0 −1)
paddle/fluid/inference/api/paddle_analysis_config.h (+0 −6)
paddle/fluid/inference/api/paddle_api.h (+1 −1)
paddle/fluid/inference/tensorrt/trt_int8_calibrator.h (+4 −4)
paddle/fluid/inference/tests/api/analyzer_dam_tester.cc (+5 −6)
paddle/fluid/inference/tests/api/analyzer_lac_tester.cc (+0 −2)
paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc (+4 −5)
paddle/fluid/inference/tests/api/analyzer_ner_tester.cc (+4 −5)
paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc (+4 −5)
paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc (+4 −4)
paddle/fluid/inference/tests/api/analyzer_vis_tester.cc (+0 −1)
paddle/fluid/inference/tests/api/config_printer.h (+2 −3)
paddle/fluid/inference/tests/api/tester_helper.h (+3 −3)
paddle/fluid/inference/tests/api/trt_models_tester.cc (+12 −12)
paddle/fluid/pybind/inference_api.cc (+0 −1)
paddle/fluid/inference/analysis/argument.h

```diff
@@ -132,7 +132,7 @@ struct Argument {
   DECL_ARGUMENT_FIELD(tensorrt_workspace_size, TensorRtWorkspaceSize, int);
   DECL_ARGUMENT_FIELD(tensorrt_min_subgraph_size, TensorRtMinSubgraphSize, int);
   DECL_ARGUMENT_FIELD(tensorrt_precision_mode, TensorRtPrecisionMode,
-                      contrib::AnalysisConfig::Precision);
+                      AnalysisConfig::Precision);

   // Memory optimized related.
   DECL_ARGUMENT_FIELD(enable_memory_optim, EnableMemoryOptim, bool);
```
paddle/fluid/inference/analysis/helper.h

```diff
@@ -32,7 +32,7 @@ limitations under the License. */
 #ifdef _WIN32
 #include <direct.h>
 #include <io.h>
-#define GCC_ATTRIBUTE(attr__) ;
+#define GCC_ATTRIBUTE(attr__)
 #define MKDIR(path) _mkdir(path)
 #else
 #include <unistd.h>
```
paddle/fluid/inference/analysis/ir_pass_manager.cc

```diff
@@ -71,7 +71,7 @@ void IRPassManager::CreatePasses(Argument *argument,
                new framework::ProgramDesc *(&argument->main_program()));
       bool enable_int8 = argument->tensorrt_precision_mode() ==
-                         contrib::AnalysisConfig::Precision::kInt8;
+                         AnalysisConfig::Precision::kInt8;

       pass->Set("enable_int8", new bool(enable_int8));
       std::string model_opt_cache_dir =
```
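The `enable_int8` flag read here is driven by the precision mode the user sets on the config. A hedged user-side sketch (workspace, batch, and subgraph sizes are illustrative values, not defaults taken from this commit):

```cpp
paddle::AnalysisConfig config("./mobilenet");  // placeholder model dir
config.EnableUseGpu(100 /* memory pool MB */, 0 /* device id */);
// With kInt8, IRPassManager::CreatePasses() above computes enable_int8 = true
// and passes it to the TensorRT subgraph pass.
config.EnableTensorRtEngine(1 << 20 /* workspace_size */, 1 /* max_batch_size */,
                            3 /* min_subgraph_size */,
                            paddle::AnalysisConfig::Precision::kInt8);
```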
paddle/fluid/inference/api/analysis_config.cc

```diff
@@ -22,7 +22,7 @@
 namespace paddle {

-PassStrategy *contrib::AnalysisConfig::pass_builder() const {
+PassStrategy *AnalysisConfig::pass_builder() const {
   if (!pass_builder_.get()) {
     if (use_gpu_) {
       LOG(INFO) << "Create GPU IR passes";
@@ -42,27 +42,27 @@ PassStrategy *contrib::AnalysisConfig::pass_builder() const {
   return pass_builder_.get();
 }

-contrib::AnalysisConfig::AnalysisConfig(const std::string &model_dir) {
+AnalysisConfig::AnalysisConfig(const std::string &model_dir) {
   model_dir_ = model_dir;

   Update();
 }
-contrib::AnalysisConfig::AnalysisConfig(const std::string &prog_file,
-                                        const std::string &params_file) {
+AnalysisConfig::AnalysisConfig(const std::string &prog_file,
+                               const std::string &params_file) {
   prog_file_ = prog_file;
   params_file_ = params_file;

   Update();
 }
-void contrib::AnalysisConfig::SetModel(const std::string &prog_file_path,
-                                       const std::string &params_file_path) {
+void AnalysisConfig::SetModel(const std::string &prog_file_path,
+                              const std::string &params_file_path) {
   prog_file_ = prog_file_path;
   params_file_ = params_file_path;

   Update();
 }
-void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
-                                           int device_id) {
+void AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
+                                  int device_id) {
 #ifdef PADDLE_WITH_CUDA
   use_gpu_ = true;
   memory_pool_init_size_mb_ = memory_pool_init_size_mb;
@@ -74,13 +74,13 @@ void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
   Update();
 }

-void contrib::AnalysisConfig::DisableGpu() {
+void AnalysisConfig::DisableGpu() {
   use_gpu_ = false;

   Update();
 }

-contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) {
+AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
 #define CP_MEMBER(member__) member__ = other.member__;

   // Model related.
@@ -130,7 +130,7 @@ contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) {
   Update();
 }

-void contrib::AnalysisConfig::EnableMKLDNN() {
+void AnalysisConfig::EnableMKLDNN() {
 #ifdef PADDLE_WITH_MKLDNN
   pass_builder()->EnableMKLDNN();
   use_mkldnn_ = true;
@@ -142,9 +142,9 @@ void contrib::AnalysisConfig::EnableMKLDNN() {
   Update();
 }

-void contrib::AnalysisConfig::EnableTensorRtEngine(
+void AnalysisConfig::EnableTensorRtEngine(
     int workspace_size, int max_batch_size, int min_subgraph_size,
-    contrib::AnalysisConfig::Precision precision_mode) {
+    AnalysisConfig::Precision precision_mode) {
 #ifdef PADDLE_WITH_CUDA
   if (!use_gpu()) {
     LOG(ERROR) << "To use TensorRT engine, please call EnableGpu() first";
@@ -165,7 +165,7 @@ void contrib::AnalysisConfig::EnableTensorRtEngine(
 }

 // TODO(Superjomn) refactor this, buggy.
-void contrib::AnalysisConfig::Update() {
+void AnalysisConfig::Update() {
   auto info = SerializeInfoCache();
   if (info == serialized_info_cache_) return;
@@ -225,7 +225,7 @@ void contrib::AnalysisConfig::Update() {
   }
 }

-std::string contrib::AnalysisConfig::SerializeInfoCache() {
+std::string AnalysisConfig::SerializeInfoCache() {
   std::stringstream ss;
   ss << model_dir_;
   ss << prog_file_;
@@ -260,14 +260,14 @@ std::string contrib::AnalysisConfig::SerializeInfoCache() {
   return ss.str();
 }

-void contrib::AnalysisConfig::SetCpuMathLibraryNumThreads(
+void AnalysisConfig::SetCpuMathLibraryNumThreads(
     int cpu_math_library_num_threads) {
   cpu_math_library_num_threads_ = cpu_math_library_num_threads;

   Update();
 }

-float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
+float AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
 #ifdef PADDLE_WITH_CUDA
   // Get the GPU memory details and calculate the fraction of memory for the
   // GPU memory pool.
@@ -282,8 +282,8 @@ float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
 #endif
 }

-void contrib::AnalysisConfig::EnableMemoryOptim(
-    bool static_optim, bool force_update_static_cache) {
+void AnalysisConfig::EnableMemoryOptim(bool static_optim,
+                                       bool force_update_static_cache) {
   enable_memory_optim_ = true;
   static_memory_optim_ = static_optim;
   static_memory_optim_force_update_ = force_update_static_cache;
@@ -291,14 +291,14 @@ void contrib::AnalysisConfig::EnableMemoryOptim(
   Update();
 }

-bool contrib::AnalysisConfig::enable_memory_optim() const {
+bool AnalysisConfig::enable_memory_optim() const {
   return enable_memory_optim_;
 }

-void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer,
-                                             size_t prog_buffer_size,
-                                             const char *param_buffer,
-                                             size_t param_buffer_size) {
+void AnalysisConfig::SetModelBuffer(const char *prog_buffer,
+                                    size_t prog_buffer_size,
+                                    const char *param_buffer,
+                                    size_t param_buffer_size) {
   prog_file_ = std::string(prog_buffer, prog_buffer + prog_buffer_size);
   params_file_ = std::string(param_buffer, param_buffer + param_buffer_size);
   model_from_memory_ = true;
@@ -306,7 +306,7 @@ void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer,
   Update();
 }

-NativeConfig contrib::AnalysisConfig::ToNativeConfig() const {
+NativeConfig AnalysisConfig::ToNativeConfig() const {
   NativeConfig config;
   config.model_dir = model_dir_;
   config.prog_file = prog_file_;
```
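Taken together, the de-namespaced setters read as below. A minimal sketch that uses only methods visible in this file's diff; the model paths are placeholders:

```cpp
#include "paddle/fluid/inference/api/paddle_analysis_config.h"

void ConfigureCpuPredictor() {
  paddle::AnalysisConfig config;
  config.SetModel("./model/__model__", "./model/__params__");  // placeholder paths
  config.DisableGpu();
  config.SetCpuMathLibraryNumThreads(4);
  config.EnableMemoryOptim(false /* static_optim */,
                           false /* force_update_static_cache */);
  // Fall back to the legacy engine configuration when needed.
  paddle::NativeConfig native_config = config.ToNativeConfig();
  (void)native_config;
}
```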
paddle/fluid/inference/api/analysis_predictor.cc

```diff
@@ -47,7 +47,6 @@ DECLARE_bool(profile);
 namespace paddle {

-using contrib::AnalysisConfig;
 using inference::Singleton;
 #if PADDLE_WITH_TENSORRT
 using inference::tensorrt::TRTInt8Calibrator;
@@ -731,10 +730,10 @@ std::string AnalysisPredictor::GetSeriazlizedProgram() const {
 }

 template <>
-std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<contrib::AnalysisConfig>(
-    const contrib::AnalysisConfig &config) {
-  return CreatePaddlePredictor<contrib::AnalysisConfig,
-                               PaddleEngineKind::kAnalysis>(config);
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
+    const AnalysisConfig &config) {
+  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
+      config);
 }

 }  // namespace paddle
```
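With the specialization above, predictor creation is spelled the same way minus the namespace. A sketch assuming a placeholder model directory:

```cpp
#include <memory>

#include "paddle/fluid/inference/api/paddle_analysis_config.h"
#include "paddle/fluid/inference/api/paddle_api.h"

std::unique_ptr<paddle::PaddlePredictor> MakePredictor() {
  paddle::AnalysisConfig config("./mobilenet");  // placeholder model dir
  // Dispatches to the kAnalysis engine via the template specialization above.
  return paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(config);
}
```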
paddle/fluid/inference/api/analysis_predictor.h

```diff
@@ -33,7 +33,6 @@ using inference::analysis::Argument;
 using inference::analysis::Analyzer;
 using framework::proto::ProgramDesc;
 using framework::NaiveExecutor;
-using contrib::AnalysisConfig;

 /** \brief This predictor is based on the original native predictor with IR and
  * Analysis support.
@@ -123,7 +122,7 @@ class AnalysisPredictor : public PaddlePredictor {
 #endif

  private:
-  contrib::AnalysisConfig config_;
+  AnalysisConfig config_;
   Argument argument_;
   std::unique_ptr<NaiveExecutor> executor_;
   platform::Place place_;
```
paddle/fluid/inference/api/analysis_predictor_tester.cc

```diff
@@ -24,7 +24,6 @@
 DEFINE_string(dirname, "", "dirname to tests.");

 namespace paddle {
-using contrib::AnalysisConfig;

 TEST(AnalysisPredictor, analysis_off) {
   AnalysisConfig config;
```
paddle/fluid/inference/api/api_impl_tester.cc

```diff
@@ -295,7 +295,7 @@ TEST(inference_api_native, image_classification_gpu) {
 #endif

 TEST(PassBuilder, Delete) {
-  contrib::AnalysisConfig config;
+  AnalysisConfig config;
   config.DisableGpu();
   config.pass_builder()->DeletePass("attention_lstm_fuse_pass");
   const auto &passes = config.pass_builder()->AllPasses();
```
paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc

```diff
@@ -36,7 +36,7 @@ namespace demo {
  */
 void Main() {
   std::unique_ptr<PaddlePredictor> predictor;
-  paddle::contrib::AnalysisConfig config;
+  paddle::AnalysisConfig config;
   config.EnableUseGpu(100, 0);
   config.SetModel(FLAGS_modeldir + "/__model__",
                   FLAGS_modeldir + "/__params__");
```
paddle/fluid/inference/api/demo_ci/vis_demo.cc

```diff
@@ -34,7 +34,6 @@ DEFINE_bool(use_gpu, false, "Whether use gpu.");
 namespace paddle {
 namespace demo {

-using contrib::AnalysisConfig;
 /*
  * Use the native and analysis fluid engine to inference the demo.
  */
```
paddle/fluid/inference/api/paddle_analysis_config.h

```diff
@@ -29,11 +29,6 @@
 namespace paddle {

 class AnalysisPredictor;
-// ==
-//
-// -----------------------------------------------------------------------------------
-// NOTE: The following APIs are not mature yet, we are still working on them.
-namespace contrib {

 // NOTE WIP, not stable yet.
 struct AnalysisConfig {
@@ -260,5 +255,4 @@ struct AnalysisConfig {
   mutable std::unique_ptr<PassStrategy> pass_builder_;
 };

-}  // namespace contrib
 }  // namespace paddle
```
paddle/fluid/inference/api/paddle_api.h

```diff
@@ -221,7 +221,7 @@ class PaddlePredictor {
   virtual std::string GetSeriazlizedProgram() const {
     assert(false);  // Force raise error.
     return "NotImplemented";
-  };
+  }

 /** The common configs for all the predictors.
  */
```
paddle/fluid/inference/tensorrt/trt_int8_calibrator.h

```diff
@@ -13,16 +13,16 @@
 // limitations under the License.

 #pragma once

+#include <NvInfer.h>
+#include <cuda_runtime_api.h>
+
 #include <atomic>
 #include <memory>
-#include <mutex>
+#include <mutex>  // NOLINT
 #include <string>
 #include <unordered_map>
 #include <utility>
 #include <vector>

-#include <NvInfer.h>
-#include <cuda_runtime_api.h>
-
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/inference/tensorrt/engine.h"
 #include "paddle/fluid/platform/place.h"
```
paddle/fluid/inference/tests/api/analyzer_dam_tester.cc

```diff
@@ -19,7 +19,6 @@ DEFINE_int32(max_turn_num, 9,
 namespace paddle {
 namespace inference {

-using contrib::AnalysisConfig;

 constexpr int32_t kMaxTurnLen = 50;
@@ -165,7 +164,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   input_slots->push_back(std::move(response_mask_tensor));
 }

-void SetConfig(contrib::AnalysisConfig *cfg) {
+void SetConfig(AnalysisConfig *cfg) {
   cfg->SetModel(FLAGS_infer_model + "/__model__", FLAGS_infer_model + "/param");
   cfg->SwitchSpecifyInputNames();
   cfg->SwitchIrOptim(true);
@@ -187,7 +186,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 void profile(bool use_mkldnn = false) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   if (use_mkldnn) {
@@ -223,7 +222,7 @@ TEST(Analyzer_dam, profile_mkldnn) { profile(true /* use_mkldnn */); }
 // Check the fuse status
 TEST(Analyzer_dam, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   int num_ops;
@@ -256,7 +255,7 @@ void compare(bool use_mkldnn = false) {
 TEST(Analyzer_dam, compare_with_static_memory_optim) {
   // The small dam will core in CI, but works in local.
   if (FLAGS_max_turn_num == 9) {
-    contrib::AnalysisConfig cfg, cfg1;
+    AnalysisConfig cfg, cfg1;
     DataRecord data(FLAGS_infer_data, FLAGS_batch_size);

     std::vector<std::vector<PaddleTensor>> input_slots_all;
@@ -282,7 +281,7 @@ TEST(Analyzer_dam, compare_with_static_memory_optim) {
 TEST(Analyzer_dam, compare_with_dynamic_memory_optim) {
   // The small dam will core in CI, but works in local.
   if (FLAGS_max_turn_num == 9) {
-    contrib::AnalysisConfig cfg, cfg1;
+    AnalysisConfig cfg, cfg1;
     DataRecord data(FLAGS_infer_data, FLAGS_batch_size);

     std::vector<std::vector<PaddleTensor>> input_slots_all;
```
paddle/fluid/inference/tests/api/analyzer_lac_tester.cc

```diff
@@ -18,8 +18,6 @@ namespace paddle {
 namespace inference {
 namespace analysis {

-using contrib::AnalysisConfig;
-
 struct DataRecord {
   std::vector<int64_t> data;
   std::vector<size_t> lod;
```
paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc

```diff
@@ -16,7 +16,6 @@
 namespace paddle {
 namespace inference {
-using contrib::AnalysisConfig;

 struct DataRecord {
   std::vector<std::vector<int64_t>> query, title;
@@ -75,7 +74,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   }
 }

-void SetConfig(contrib::AnalysisConfig *cfg) {
+void SetConfig(AnalysisConfig *cfg) {
   cfg->SetModel(FLAGS_infer_model);
   cfg->DisableGpu();
   cfg->SwitchSpecifyInputNames();
@@ -95,7 +94,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 void profile(bool use_mkldnn = false) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);
   std::vector<PaddleTensor> outputs;
@@ -130,7 +129,7 @@ TEST(Analyzer_MM_DNN, profile_mkldnn) { profile(true /* use_mkldnn */); }
 // Check the fuse status
 TEST(Analyzer_MM_DNN, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   int num_ops;
@@ -141,7 +140,7 @@ TEST(Analyzer_MM_DNN, fuse_statis) {
 // Compare result of NativeConfig and AnalysisConfig
 void compare(bool use_mkldnn = false) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   if (use_mkldnn) {
```
paddle/fluid/inference/tests/api/analyzer_ner_tester.cc

```diff
@@ -16,7 +16,6 @@
 namespace paddle {
 namespace inference {
-using contrib::AnalysisConfig;

 struct DataRecord {
   std::vector<std::vector<int64_t>> word, mention;
@@ -76,7 +75,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data) {
   }
 }

-void SetConfig(contrib::AnalysisConfig *cfg, bool memory_load = false) {
+void SetConfig(AnalysisConfig *cfg, bool memory_load = false) {
   if (memory_load) {
     std::string buffer_prog, buffer_param;
     ReadBinaryFile(FLAGS_infer_model + "/__model__", &buffer_prog);
@@ -105,7 +104,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 void profile(bool memory_load = false) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg, memory_load);
   std::vector<PaddleTensor> outputs;
@@ -136,7 +135,7 @@ TEST(Analyzer_Chinese_ner, profile_memory_load) {
 // Check the fuse status
 TEST(Analyzer_Chinese_ner, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   int num_ops;
@@ -152,7 +151,7 @@ TEST(Analyzer_Chinese_ner, fuse_statis) {
 // Compare result of NativeConfig and AnalysisConfig
 TEST(Analyzer_Chinese_ner, compare) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   std::vector<std::vector<PaddleTensor>> input_slots_all;
```
paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc

```diff
@@ -16,7 +16,6 @@
 namespace paddle {
 namespace inference {
-using contrib::AnalysisConfig;

 struct DataRecord {
   std::vector<std::vector<int64_t>> query_basic, query_phrase, title_basic,
@@ -103,7 +102,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   }
 }

-void SetConfig(contrib::AnalysisConfig *cfg) {
+void SetConfig(AnalysisConfig *cfg) {
   cfg->SetModel(FLAGS_infer_model);
   cfg->DisableGpu();
   cfg->SwitchSpecifyInputNames();
@@ -123,7 +122,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 TEST(Analyzer_Pyramid_DNN, profile) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);
   std::vector<PaddleTensor> outputs;
@@ -147,7 +146,7 @@ TEST(Analyzer_Pyramid_DNN, profile) {
 // Check the fuse status
 TEST(Analyzer_Pyramid_DNN, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   int num_ops;
@@ -158,7 +157,7 @@ TEST(Analyzer_Pyramid_DNN, fuse_statis) {
 // Compare result of NativeConfig and AnalysisConfig
 TEST(Analyzer_Pyramid_DNN, compare) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   std::vector<std::vector<PaddleTensor>> input_slots_all;
```
paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc

```diff
@@ -223,7 +223,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 TEST(Analyzer_rnn1, profile) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);
   cfg.DisableGpu();
   cfg.SwitchIrDebug();
@@ -237,7 +237,7 @@ TEST(Analyzer_rnn1, profile) {
 // Check the fuse status
 TEST(Analyzer_rnn1, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   int num_ops;
@@ -254,7 +254,7 @@ TEST(Analyzer_rnn1, fuse_statis) {
 // Compare result of NativeConfig and AnalysisConfig
 TEST(Analyzer_rnn1, compare) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   std::vector<std::vector<PaddleTensor>> input_slots_all;
@@ -276,7 +276,7 @@ TEST(Analyzer_rnn1, compare_determine) {
 // Test Multi-Thread.
 TEST(Analyzer_rnn1, multi_thread) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);
   std::vector<PaddleTensor> outputs;
```
paddle/fluid/inference/tests/api/analyzer_vis_tester.cc

```diff
@@ -20,7 +20,6 @@ limitations under the License. */
 namespace paddle {
 namespace inference {
 namespace analysis {
-using contrib::AnalysisConfig;

 struct Record {
   std::vector<float> data;
```
paddle/fluid/inference/tests/api/config_printer.h

```diff
@@ -58,9 +58,8 @@ std::ostream &operator<<(std::ostream &os, const NativeConfig &config) {
   return os;
 }

-std::ostream &operator<<(std::ostream &os,
-                         const contrib::AnalysisConfig &config) {
-  os << GenSpaces(num_spaces) << "contrib::AnalysisConfig {\n";
+std::ostream &operator<<(std::ostream &os, const AnalysisConfig &config) {
+  os << GenSpaces(num_spaces) << "AnalysisConfig {\n";
   num_spaces++;
   os << config.ToNativeConfig();
   if (!config.model_from_memory()) {
```
paddle/fluid/inference/tests/api/tester_helper.h

```diff
@@ -65,7 +65,7 @@ float Random(float low, float high) {
 void PrintConfig(const PaddlePredictor::Config *config, bool use_analysis) {
   const auto *analysis_config =
-      reinterpret_cast<const contrib::AnalysisConfig *>(config);
+      reinterpret_cast<const AnalysisConfig *>(config);
   if (use_analysis) {
     LOG(INFO) << *analysis_config;
     return;
@@ -109,9 +109,9 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
 std::unique_ptr<PaddlePredictor> CreateTestPredictor(
     const PaddlePredictor::Config *config, bool use_analysis = true) {
   const auto *analysis_config =
-      reinterpret_cast<const contrib::AnalysisConfig *>(config);
+      reinterpret_cast<const AnalysisConfig *>(config);
   if (use_analysis) {
-    return CreatePaddlePredictor<contrib::AnalysisConfig>(*analysis_config);
+    return CreatePaddlePredictor<AnalysisConfig>(*analysis_config);
   }
   auto native_config = analysis_config->ToNativeConfig();
   return CreatePaddlePredictor<NativeConfig>(native_config);
```
paddle/fluid/inference/tests/api/trt_models_tester.cc

```diff
@@ -42,9 +42,9 @@ void SetConfig(ConfigType* config, std::string model_dir, bool use_gpu,
 }

 template <>
-void SetConfig<contrib::AnalysisConfig>(contrib::AnalysisConfig* config,
-                                        std::string model_dir, bool use_gpu,
-                                        bool use_tensorrt, int batch_size) {
+void SetConfig<AnalysisConfig>(AnalysisConfig* config, std::string model_dir,
+                               bool use_gpu, bool use_tensorrt,
+                               int batch_size) {
   if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
     config->SetModel(model_dir + "/" + FLAGS_prog_filename,
                      model_dir + "/" + FLAGS_param_filename);
@@ -75,11 +75,11 @@ void profile(std::string model_dir, bool use_analysis, bool use_tensorrt) {
   std::vector<PaddleTensor> outputs;
   if (use_analysis || use_tensorrt) {
-    contrib::AnalysisConfig config;
+    AnalysisConfig config;
     config.EnableUseGpu(100, 0);
     config.pass_builder()->TurnOnDebug();
-    SetConfig<contrib::AnalysisConfig>(&config, model_dir, true, use_tensorrt,
-                                       FLAGS_batch_size);
+    SetConfig<AnalysisConfig>(&config, model_dir, true, use_tensorrt,
+                              FLAGS_batch_size);
     TestPrediction(reinterpret_cast<PaddlePredictor::Config *>(&config),
                    inputs_all, &outputs, FLAGS_num_threads, true);
   } else {
@@ -99,18 +99,18 @@ void compare(std::string model_dir, bool use_tensorrt) {
     SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");
   }

-  contrib::AnalysisConfig analysis_config;
-  SetConfig<contrib::AnalysisConfig>(&analysis_config, model_dir, true,
-                                     use_tensorrt, FLAGS_batch_size);
+  AnalysisConfig analysis_config;
+  SetConfig<AnalysisConfig>(&analysis_config, model_dir, true, use_tensorrt,
+                            FLAGS_batch_size);
   CompareNativeAndAnalysis(
       reinterpret_cast<const PaddlePredictor::Config *>(&analysis_config),
       inputs_all);
 }

 void compare_continuous_input(std::string model_dir, bool use_tensorrt) {
-  contrib::AnalysisConfig analysis_config;
-  SetConfig<contrib::AnalysisConfig>(&analysis_config, model_dir, true,
-                                     use_tensorrt, FLAGS_batch_size);
+  AnalysisConfig analysis_config;
+  SetConfig<AnalysisConfig>(&analysis_config, model_dir, true, use_tensorrt,
+                            FLAGS_batch_size);
   auto config =
       reinterpret_cast<const PaddlePredictor::Config *>(&analysis_config);
   auto native_pred = CreateTestPredictor(config, false);
```
paddle/fluid/pybind/inference_api.cc

```diff
@@ -33,7 +33,6 @@ using paddle::PaddlePredictor;
 using paddle::NativeConfig;
 using paddle::NativePaddlePredictor;
 using paddle::AnalysisPredictor;
-using paddle::contrib::AnalysisConfig;

 static void BindPaddleDType(py::module *m);
 static void BindPaddleBuf(py::module *m);
```