Unverified commit 65517908, authored by Yan Chunwei, committed by GitHub

AnalysisConfig remove contrib namespace (#15540)

Parent 7bc8481c
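For downstream users, the practical effect of this commit is the shorter name: AnalysisConfig now lives directly in namespace paddle instead of paddle::contrib. A minimal sketch of the updated usage, assuming the umbrella header paddle/fluid/inference/api/paddle_inference_api.h and a placeholder model directory:

// Minimal sketch of the renamed API; the model path is a placeholder.
// Before this commit:
//   paddle::contrib::AnalysisConfig config("/path/to/model_dir");
// After this commit, the contrib namespace is gone:
#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::AnalysisConfig config("/path/to/model_dir");  // placeholder path
  config.DisableGpu();
  auto predictor =
      paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(config);
  return predictor != nullptr ? 0 : 1;
}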
@@ -132,7 +132,7 @@ struct Argument {
   DECL_ARGUMENT_FIELD(tensorrt_workspace_size, TensorRtWorkspaceSize, int);
   DECL_ARGUMENT_FIELD(tensorrt_min_subgraph_size, TensorRtMinSubgraphSize, int);
   DECL_ARGUMENT_FIELD(tensorrt_precision_mode, TensorRtPrecisionMode,
-                      contrib::AnalysisConfig::Precision);
+                      AnalysisConfig::Precision);

   // Memory optimized related.
   DECL_ARGUMENT_FIELD(enable_memory_optim, EnableMemoryOptim, bool);
......
@@ -32,7 +32,7 @@ limitations under the License. */
 #ifdef _WIN32
 #include <direct.h>
 #include <io.h>
-#define GCC_ATTRIBUTE(attr__) ;
+#define GCC_ATTRIBUTE(attr__)
 #define MKDIR(path) _mkdir(path)
 #else
 #include <unistd.h>
......
@@ -71,7 +71,7 @@ void IRPassManager::CreatePasses(Argument *argument,
         new framework::ProgramDesc *(&argument->main_program()));
     bool enable_int8 = argument->tensorrt_precision_mode() ==
-                       contrib::AnalysisConfig::Precision::kInt8;
+                       AnalysisConfig::Precision::kInt8;
     pass->Set("enable_int8", new bool(enable_int8));
     std::string model_opt_cache_dir =
......
@@ -22,7 +22,7 @@
 namespace paddle {

-PassStrategy *contrib::AnalysisConfig::pass_builder() const {
+PassStrategy *AnalysisConfig::pass_builder() const {
   if (!pass_builder_.get()) {
     if (use_gpu_) {
       LOG(INFO) << "Create GPU IR passes";
@@ -42,27 +42,27 @@ PassStrategy *contrib::AnalysisConfig::pass_builder() const {
   return pass_builder_.get();
 }

-contrib::AnalysisConfig::AnalysisConfig(const std::string &model_dir) {
+AnalysisConfig::AnalysisConfig(const std::string &model_dir) {
   model_dir_ = model_dir;
   Update();
 }

-contrib::AnalysisConfig::AnalysisConfig(const std::string &prog_file,
-                                        const std::string &params_file) {
+AnalysisConfig::AnalysisConfig(const std::string &prog_file,
+                               const std::string &params_file) {
   prog_file_ = prog_file;
   params_file_ = params_file;
   Update();
 }

-void contrib::AnalysisConfig::SetModel(const std::string &prog_file_path,
-                                       const std::string &params_file_path) {
+void AnalysisConfig::SetModel(const std::string &prog_file_path,
+                              const std::string &params_file_path) {
   prog_file_ = prog_file_path;
   params_file_ = params_file_path;
   Update();
 }

-void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
-                                           int device_id) {
+void AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
+                                  int device_id) {
 #ifdef PADDLE_WITH_CUDA
   use_gpu_ = true;
   memory_pool_init_size_mb_ = memory_pool_init_size_mb;
@@ -74,13 +74,13 @@ void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
   Update();
 }

-void contrib::AnalysisConfig::DisableGpu() {
+void AnalysisConfig::DisableGpu() {
   use_gpu_ = false;
   Update();
 }

-contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) {
+AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
 #define CP_MEMBER(member__) member__ = other.member__;

   // Model related.
@@ -130,7 +130,7 @@ contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) {
   Update();
 }

-void contrib::AnalysisConfig::EnableMKLDNN() {
+void AnalysisConfig::EnableMKLDNN() {
 #ifdef PADDLE_WITH_MKLDNN
   pass_builder()->EnableMKLDNN();
   use_mkldnn_ = true;
@@ -142,9 +142,9 @@ void contrib::AnalysisConfig::EnableMKLDNN() {
   Update();
 }

-void contrib::AnalysisConfig::EnableTensorRtEngine(
+void AnalysisConfig::EnableTensorRtEngine(
     int workspace_size, int max_batch_size, int min_subgraph_size,
-    contrib::AnalysisConfig::Precision precision_mode) {
+    AnalysisConfig::Precision precision_mode) {
 #ifdef PADDLE_WITH_CUDA
   if (!use_gpu()) {
     LOG(ERROR) << "To use TensorRT engine, please call EnableGpu() first";
@@ -165,7 +165,7 @@ void contrib::AnalysisConfig::EnableTensorRtEngine(
 }

 // TODO(Superjomn) refactor this, buggy.
-void contrib::AnalysisConfig::Update() {
+void AnalysisConfig::Update() {
   auto info = SerializeInfoCache();
   if (info == serialized_info_cache_) return;
@@ -225,7 +225,7 @@ void contrib::AnalysisConfig::Update() {
   }
 }

-std::string contrib::AnalysisConfig::SerializeInfoCache() {
+std::string AnalysisConfig::SerializeInfoCache() {
   std::stringstream ss;
   ss << model_dir_;
   ss << prog_file_;
@@ -260,14 +260,14 @@ std::string contrib::AnalysisConfig::SerializeInfoCache() {
   return ss.str();
 }

-void contrib::AnalysisConfig::SetCpuMathLibraryNumThreads(
+void AnalysisConfig::SetCpuMathLibraryNumThreads(
     int cpu_math_library_num_threads) {
   cpu_math_library_num_threads_ = cpu_math_library_num_threads;
   Update();
 }

-float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
+float AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
 #ifdef PADDLE_WITH_CUDA
   // Get the GPU memory details and calculate the fraction of memory for the
   // GPU memory pool.
@@ -282,8 +282,8 @@ float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
 #endif
 }

-void contrib::AnalysisConfig::EnableMemoryOptim(
-    bool static_optim, bool force_update_static_cache) {
+void AnalysisConfig::EnableMemoryOptim(bool static_optim,
+                                       bool force_update_static_cache) {
   enable_memory_optim_ = true;
   static_memory_optim_ = static_optim;
   static_memory_optim_force_update_ = force_update_static_cache;
@@ -291,14 +291,14 @@ void contrib::AnalysisConfig::EnableMemoryOptim(
   Update();
 }

-bool contrib::AnalysisConfig::enable_memory_optim() const {
+bool AnalysisConfig::enable_memory_optim() const {
   return enable_memory_optim_;
 }

-void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer,
-                                             size_t prog_buffer_size,
-                                             const char *param_buffer,
-                                             size_t param_buffer_size) {
+void AnalysisConfig::SetModelBuffer(const char *prog_buffer,
+                                    size_t prog_buffer_size,
+                                    const char *param_buffer,
+                                    size_t param_buffer_size) {
   prog_file_ = std::string(prog_buffer, prog_buffer + prog_buffer_size);
   params_file_ = std::string(param_buffer, param_buffer + param_buffer_size);
   model_from_memory_ = true;
@@ -306,7 +306,7 @@ void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer,
   Update();
 }

-NativeConfig contrib::AnalysisConfig::ToNativeConfig() const {
+NativeConfig AnalysisConfig::ToNativeConfig() const {
   NativeConfig config;
   config.model_dir = model_dir_;
   config.prog_file = prog_file_;
......
@@ -47,7 +47,6 @@ DECLARE_bool(profile);
 namespace paddle {

-using contrib::AnalysisConfig;
 using inference::Singleton;
 #if PADDLE_WITH_TENSORRT
 using inference::tensorrt::TRTInt8Calibrator;
@@ -731,10 +730,10 @@ std::string AnalysisPredictor::GetSeriazlizedProgram() const {
 }

 template <>
-std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<contrib::AnalysisConfig>(
-    const contrib::AnalysisConfig &config) {
-  return CreatePaddlePredictor<contrib::AnalysisConfig,
-                               PaddleEngineKind::kAnalysis>(config);
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
+    const AnalysisConfig &config) {
+  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
+      config);
 }

 }  // namespace paddle
......
@@ -33,7 +33,6 @@ using inference::analysis::Argument;
 using inference::analysis::Analyzer;
 using framework::proto::ProgramDesc;
 using framework::NaiveExecutor;
-using contrib::AnalysisConfig;

 /** \brief This predictor is based on the original native predictor with IR and
  * Analysis support.
@@ -123,7 +122,7 @@ class AnalysisPredictor : public PaddlePredictor {
 #endif

  private:
-  contrib::AnalysisConfig config_;
+  AnalysisConfig config_;
   Argument argument_;
   std::unique_ptr<NaiveExecutor> executor_;
   platform::Place place_;
......
@@ -24,7 +24,6 @@
 DEFINE_string(dirname, "", "dirname to tests.");

 namespace paddle {
-using contrib::AnalysisConfig;

 TEST(AnalysisPredictor, analysis_off) {
   AnalysisConfig config;
......
@@ -295,7 +295,7 @@ TEST(inference_api_native, image_classification_gpu) {
 #endif

 TEST(PassBuilder, Delete) {
-  contrib::AnalysisConfig config;
+  AnalysisConfig config;
   config.DisableGpu();
   config.pass_builder()->DeletePass("attention_lstm_fuse_pass");
   const auto& passes = config.pass_builder()->AllPasses();
......
@@ -36,7 +36,7 @@ namespace demo {
  */
 void Main() {
   std::unique_ptr<PaddlePredictor> predictor;
-  paddle::contrib::AnalysisConfig config;
+  paddle::AnalysisConfig config;
   config.EnableUseGpu(100, 0);
   config.SetModel(FLAGS_modeldir + "/__model__",
                   FLAGS_modeldir + "/__params__");
......
@@ -34,7 +34,6 @@ DEFINE_bool(use_gpu, false, "Whether use gpu.");
 namespace paddle {
 namespace demo {

-using contrib::AnalysisConfig;
 /*
  * Use the native and analysis fluid engine to inference the demo.
  */
......
@@ -29,11 +29,6 @@
 namespace paddle {

 class AnalysisPredictor;
-// ==
-//
-// -----------------------------------------------------------------------------------
-// NOTE: The following APIs are not mature yet, we are still working on them.
-namespace contrib {

 // NOTE WIP, not stable yet.
 struct AnalysisConfig {
@@ -260,5 +255,4 @@ struct AnalysisConfig {
   mutable std::unique_ptr<PassStrategy> pass_builder_;
 };

-}  // namespace contrib
 }  // namespace paddle
@@ -221,7 +221,7 @@ class PaddlePredictor {
   virtual std::string GetSeriazlizedProgram() const {
     assert(false);  // Force raise error.
     return "NotImplemented";
-  };
+  }

   /** The common configs for all the predictors.
    */
......
@@ -13,16 +13,16 @@
 // limitations under the License.

 #pragma once

+#include <NvInfer.h>
+#include <cuda_runtime_api.h>
 #include <atomic>
 #include <memory>
-#include <mutex>
+#include <mutex>  // NOLINT
 #include <string>
 #include <unordered_map>
 #include <utility>
 #include <vector>
-#include <NvInfer.h>
-#include <cuda_runtime_api.h>
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/inference/tensorrt/engine.h"
 #include "paddle/fluid/platform/place.h"
......
@@ -19,7 +19,6 @@ DEFINE_int32(max_turn_num, 9,
 namespace paddle {
 namespace inference {

-using contrib::AnalysisConfig;
 constexpr int32_t kMaxTurnLen = 50;
@@ -165,7 +164,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   input_slots->push_back(std::move(response_mask_tensor));
 }

-void SetConfig(contrib::AnalysisConfig *cfg) {
+void SetConfig(AnalysisConfig *cfg) {
   cfg->SetModel(FLAGS_infer_model + "/__model__", FLAGS_infer_model + "/param");
   cfg->SwitchSpecifyInputNames();
   cfg->SwitchIrOptim(true);
@@ -187,7 +186,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 void profile(bool use_mkldnn = false) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   if (use_mkldnn) {
@@ -223,7 +222,7 @@ TEST(Analyzer_dam, profile_mkldnn) { profile(true /* use_mkldnn */); }

 // Check the fuse status
 TEST(Analyzer_dam, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   int num_ops;
@@ -256,7 +255,7 @@ void compare(bool use_mkldnn = false) {
 TEST(Analyzer_dam, compare_with_static_memory_optim) {
   // The small dam will core in CI, but works in local.
   if (FLAGS_max_turn_num == 9) {
-    contrib::AnalysisConfig cfg, cfg1;
+    AnalysisConfig cfg, cfg1;
     DataRecord data(FLAGS_infer_data, FLAGS_batch_size);

     std::vector<std::vector<PaddleTensor>> input_slots_all;
@@ -282,7 +281,7 @@ TEST(Analyzer_dam, compare_with_static_memory_optim) {
 TEST(Analyzer_dam, compare_with_dynamic_memory_optim) {
   // The small dam will core in CI, but works in local.
   if (FLAGS_max_turn_num == 9) {
-    contrib::AnalysisConfig cfg, cfg1;
+    AnalysisConfig cfg, cfg1;
     DataRecord data(FLAGS_infer_data, FLAGS_batch_size);

     std::vector<std::vector<PaddleTensor>> input_slots_all;
......
@@ -18,8 +18,6 @@ namespace paddle {
 namespace inference {
 namespace analysis {

-using contrib::AnalysisConfig;
-
 struct DataRecord {
   std::vector<int64_t> data;
   std::vector<size_t> lod;
......
@@ -16,7 +16,6 @@
 namespace paddle {
 namespace inference {
-using contrib::AnalysisConfig;

 struct DataRecord {
   std::vector<std::vector<int64_t>> query, title;
@@ -75,7 +74,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   }
 }

-void SetConfig(contrib::AnalysisConfig *cfg) {
+void SetConfig(AnalysisConfig *cfg) {
   cfg->SetModel(FLAGS_infer_model);
   cfg->DisableGpu();
   cfg->SwitchSpecifyInputNames();
@@ -95,7 +94,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 void profile(bool use_mkldnn = false) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   std::vector<PaddleTensor> outputs;
@@ -130,7 +129,7 @@ TEST(Analyzer_MM_DNN, profile_mkldnn) { profile(true /* use_mkldnn */); }

 // Check the fuse status
 TEST(Analyzer_MM_DNN, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   int num_ops;
@@ -141,7 +140,7 @@ TEST(Analyzer_MM_DNN, fuse_statis) {
 // Compare result of NativeConfig and AnalysisConfig
 void compare(bool use_mkldnn = false) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   if (use_mkldnn) {
......
@@ -16,7 +16,6 @@
 namespace paddle {
 namespace inference {
-using contrib::AnalysisConfig;

 struct DataRecord {
   std::vector<std::vector<int64_t>> word, mention;
@@ -76,7 +75,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data) {
   }
 }

-void SetConfig(contrib::AnalysisConfig *cfg, bool memory_load = false) {
+void SetConfig(AnalysisConfig *cfg, bool memory_load = false) {
   if (memory_load) {
     std::string buffer_prog, buffer_param;
     ReadBinaryFile(FLAGS_infer_model + "/__model__", &buffer_prog);
@@ -105,7 +104,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 void profile(bool memory_load = false) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg, memory_load);

   std::vector<PaddleTensor> outputs;
@@ -136,7 +135,7 @@ TEST(Analyzer_Chinese_ner, profile_memory_load) {

 // Check the fuse status
 TEST(Analyzer_Chinese_ner, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   int num_ops;
@@ -152,7 +151,7 @@ TEST(Analyzer_Chinese_ner, fuse_statis) {
 // Compare result of NativeConfig and AnalysisConfig
 TEST(Analyzer_Chinese_ner, compare) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   std::vector<std::vector<PaddleTensor>> input_slots_all;
......
@@ -16,7 +16,6 @@
 namespace paddle {
 namespace inference {
-using contrib::AnalysisConfig;

 struct DataRecord {
   std::vector<std::vector<int64_t>> query_basic, query_phrase, title_basic,
@@ -103,7 +102,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   }
 }

-void SetConfig(contrib::AnalysisConfig *cfg) {
+void SetConfig(AnalysisConfig *cfg) {
   cfg->SetModel(FLAGS_infer_model);
   cfg->DisableGpu();
   cfg->SwitchSpecifyInputNames();
@@ -123,7 +122,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 TEST(Analyzer_Pyramid_DNN, profile) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   std::vector<PaddleTensor> outputs;
@@ -147,7 +146,7 @@ TEST(Analyzer_Pyramid_DNN, profile) {

 // Check the fuse status
 TEST(Analyzer_Pyramid_DNN, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   int num_ops;
@@ -158,7 +157,7 @@ TEST(Analyzer_Pyramid_DNN, fuse_statis) {
 // Compare result of NativeConfig and AnalysisConfig
 TEST(Analyzer_Pyramid_DNN, compare) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   std::vector<std::vector<PaddleTensor>> input_slots_all;
......
@@ -223,7 +223,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 TEST(Analyzer_rnn1, profile) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);
   cfg.DisableGpu();
   cfg.SwitchIrDebug();
@@ -237,7 +237,7 @@ TEST(Analyzer_rnn1, profile) {

 // Check the fuse status
 TEST(Analyzer_rnn1, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   int num_ops;
@@ -254,7 +254,7 @@ TEST(Analyzer_rnn1, fuse_statis) {
 // Compare result of NativeConfig and AnalysisConfig
 TEST(Analyzer_rnn1, compare) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   std::vector<std::vector<PaddleTensor>> input_slots_all;
@@ -276,7 +276,7 @@ TEST(Analyzer_rnn1, compare_determine) {

 // Test Multi-Thread.
 TEST(Analyzer_rnn1, multi_thread) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
   SetConfig(&cfg);

   std::vector<PaddleTensor> outputs;
......
@@ -20,7 +20,6 @@ limitations under the License. */
 namespace paddle {
 namespace inference {
 namespace analysis {
-using contrib::AnalysisConfig;

 struct Record {
   std::vector<float> data;
......
@@ -58,9 +58,8 @@ std::ostream &operator<<(std::ostream &os, const NativeConfig &config) {
   return os;
 }

-std::ostream &operator<<(std::ostream &os,
-                         const contrib::AnalysisConfig &config) {
-  os << GenSpaces(num_spaces) << "contrib::AnalysisConfig {\n";
+std::ostream &operator<<(std::ostream &os, const AnalysisConfig &config) {
+  os << GenSpaces(num_spaces) << "AnalysisConfig {\n";
   num_spaces++;
   os << config.ToNativeConfig();
   if (!config.model_from_memory()) {
......
@@ -65,7 +65,7 @@ float Random(float low, float high) {
 void PrintConfig(const PaddlePredictor::Config *config, bool use_analysis) {
   const auto *analysis_config =
-      reinterpret_cast<const contrib::AnalysisConfig *>(config);
+      reinterpret_cast<const AnalysisConfig *>(config);
   if (use_analysis) {
     LOG(INFO) << *analysis_config;
     return;
@@ -109,9 +109,9 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
 std::unique_ptr<PaddlePredictor> CreateTestPredictor(
     const PaddlePredictor::Config *config, bool use_analysis = true) {
   const auto *analysis_config =
-      reinterpret_cast<const contrib::AnalysisConfig *>(config);
+      reinterpret_cast<const AnalysisConfig *>(config);
   if (use_analysis) {
-    return CreatePaddlePredictor<contrib::AnalysisConfig>(*analysis_config);
+    return CreatePaddlePredictor<AnalysisConfig>(*analysis_config);
   }
   auto native_config = analysis_config->ToNativeConfig();
   return CreatePaddlePredictor<NativeConfig>(native_config);
......
@@ -42,9 +42,9 @@ void SetConfig(ConfigType* config, std::string model_dir, bool use_gpu,
 }

 template <>
-void SetConfig<contrib::AnalysisConfig>(contrib::AnalysisConfig* config,
-                                        std::string model_dir, bool use_gpu,
-                                        bool use_tensorrt, int batch_size) {
+void SetConfig<AnalysisConfig>(AnalysisConfig* config, std::string model_dir,
+                               bool use_gpu, bool use_tensorrt,
+                               int batch_size) {
   if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
     config->SetModel(model_dir + "/" + FLAGS_prog_filename,
                      model_dir + "/" + FLAGS_param_filename);
@@ -75,11 +75,11 @@ void profile(std::string model_dir, bool use_analysis, bool use_tensorrt) {
   std::vector<PaddleTensor> outputs;
   if (use_analysis || use_tensorrt) {
-    contrib::AnalysisConfig config;
+    AnalysisConfig config;
     config.EnableUseGpu(100, 0);
     config.pass_builder()->TurnOnDebug();
-    SetConfig<contrib::AnalysisConfig>(&config, model_dir, true, use_tensorrt,
-                                       FLAGS_batch_size);
+    SetConfig<AnalysisConfig>(&config, model_dir, true, use_tensorrt,
+                              FLAGS_batch_size);
     TestPrediction(reinterpret_cast<PaddlePredictor::Config*>(&config),
                    inputs_all, &outputs, FLAGS_num_threads, true);
   } else {
@@ -99,18 +99,18 @@ void compare(std::string model_dir, bool use_tensorrt) {
     SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");
   }

-  contrib::AnalysisConfig analysis_config;
-  SetConfig<contrib::AnalysisConfig>(&analysis_config, model_dir, true,
-                                     use_tensorrt, FLAGS_batch_size);
+  AnalysisConfig analysis_config;
+  SetConfig<AnalysisConfig>(&analysis_config, model_dir, true, use_tensorrt,
+                            FLAGS_batch_size);
   CompareNativeAndAnalysis(
       reinterpret_cast<const PaddlePredictor::Config*>(&analysis_config),
       inputs_all);
 }

 void compare_continuous_input(std::string model_dir, bool use_tensorrt) {
-  contrib::AnalysisConfig analysis_config;
-  SetConfig<contrib::AnalysisConfig>(&analysis_config, model_dir, true,
-                                     use_tensorrt, FLAGS_batch_size);
+  AnalysisConfig analysis_config;
+  SetConfig<AnalysisConfig>(&analysis_config, model_dir, true, use_tensorrt,
+                            FLAGS_batch_size);
   auto config =
       reinterpret_cast<const PaddlePredictor::Config*>(&analysis_config);
   auto native_pred = CreateTestPredictor(config, false);
......
@@ -33,7 +33,6 @@ using paddle::PaddlePredictor;
 using paddle::NativeConfig;
 using paddle::NativePaddlePredictor;
 using paddle::AnalysisPredictor;
-using paddle::contrib::AnalysisConfig;

 static void BindPaddleDType(py::module *m);
 static void BindPaddleBuf(py::module *m);
......