Commit f5c0221c authored by superjomn

clean CreatePaddlePredictor

test=develop

Parent ae8b1c32
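Every call site below drops the explicit PaddleEngineKind template argument in favor of a one-parameter CreatePaddlePredictor<ConfigT>(config). A minimal sketch of how such a convenience overload can forward to the two-parameter form follows; the DefaultEngineKind trait and the stub bodies are illustrative assumptions, not the verbatim Paddle implementation.

#include <memory>

namespace paddle {

enum class PaddleEngineKind { kNative, kAnalysis, kAutoMixedTensorRT, kAnakin };

struct NativeConfig {};    // stand-ins for the real config structs
struct AnalysisConfig {};

class PaddlePredictor {
 public:
  virtual ~PaddlePredictor() = default;
};

// Two-parameter form, matching the old call sites; the stub body stands in
// for the real engine construction.
template <typename ConfigT, PaddleEngineKind engine>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT&) {
  return std::unique_ptr<PaddlePredictor>(new PaddlePredictor);
}

// Hypothetical trait mapping each config type to its default engine kind.
template <typename ConfigT>
struct DefaultEngineKind;
template <>
struct DefaultEngineKind<NativeConfig> {
  static constexpr PaddleEngineKind value = PaddleEngineKind::kNative;
};
template <>
struct DefaultEngineKind<AnalysisConfig> {
  static constexpr PaddleEngineKind value = PaddleEngineKind::kAnalysis;
};

// One-parameter convenience overload used by the updated call sites. The
// engine kind cannot be deduced from the function argument, so this overload
// is selected whenever only the config type is supplied.
template <typename ConfigT>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config) {
  return CreatePaddlePredictor<ConfigT, DefaultEngineKind<ConfigT>::value>(config);
}

}  // namespace paddle

int main() {
  paddle::NativeConfig config;
  auto predictor = paddle::CreatePaddlePredictor<paddle::NativeConfig>(config);
}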
@@ -51,9 +51,7 @@ void TestWord2vecPrediction(const std::string& model_path) {
   config.model_dir = model_path;
   config.use_gpu = false;
   config.device = 0;
-  auto predictor =
-      ::paddle::CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(
-          config);
+  auto predictor = ::paddle::CreatePaddlePredictor<NativeConfig>(config);
   // One single batch
......
@@ -27,9 +27,7 @@ TEST(AnalysisPredictor, ZeroCopy) {
   config.model_dir = FLAGS_dirname + "/word2vec.inference.model";
   config.use_feed_fetch_ops = false;
-  auto predictor =
-      CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
-          config);
+  auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
   auto w0 = predictor->GetInputTensor("firstw");
   auto w1 = predictor->GetInputTensor("secondw");
......
@@ -41,11 +41,8 @@ void CompareTensorRTWithFluid(bool enable_tensorrt) {
   config1.device = 0;
   config1.max_batch_size = 10;
-  auto predictor0 =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config0);
-  auto predictor1 =
-      CreatePaddlePredictor<MixedRTConfig,
-                            PaddleEngineKind::kAutoMixedTensorRT>(config1);
+  auto predictor0 = CreatePaddlePredictor<NativeConfig>(config0);
+  auto predictor1 = CreatePaddlePredictor<MixedRTConfig>(config1);
   for (int batch_id = 0; batch_id < 1; batch_id++) {
     //# 2. Prepare input.
......
@@ -34,9 +34,7 @@ contrib::AnakinConfig GetConfig() {
 TEST(inference, anakin) {
   auto config = GetConfig();
-  auto predictor =
-      CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
-          config);
+  auto predictor = CreatePaddlePredictor<contrib::AnakinConfig>(config);
   float data[1 * 3 * 224 * 224] = {1.0f};
   PaddleTensor tensor;
......
@@ -308,18 +308,14 @@ TEST(Analyzer_rnn1, ZeroCopy) {
   PaddlePlace place;
   int output_size{0};
-  auto predictor =
-      CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
-          config);
+  auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
   config.use_feed_fetch_ops = true;
   auto native_predictor =
       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
   config.use_feed_fetch_ops = true;  // the analysis predictor needs feed/fetch.
-  auto analysis_predictor =
-      CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
-          config);
+  auto analysis_predictor = CreatePaddlePredictor<AnalysisConfig>(config);
 #define NEW_TENSOR(name__) \
   auto name__##_tensor = predictor->GetInputTensor(#name__);
......
@@ -77,8 +77,7 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
 std::unique_ptr<PaddlePredictor> CreateTestPredictor(
     const AnalysisConfig &config, bool use_analysis = true) {
   if (use_analysis) {
-    return CreatePaddlePredictor<contrib::AnalysisConfig,
-                                 PaddleEngineKind::kAnalysis>(config);
+    return CreatePaddlePredictor<contrib::AnalysisConfig>(config);
   } else {
     return CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(
         config);
......
@@ -51,11 +51,8 @@ void CompareTensorRTWithFluid(int batch_size, std::string model_dirname) {
   config1.model_dir = model_dirname;
   config1.max_batch_size = batch_size;
-  auto predictor0 =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config0);
-  auto predictor1 =
-      CreatePaddlePredictor<MixedRTConfig,
-                            PaddleEngineKind::kAutoMixedTensorRT>(config1);
+  auto predictor0 = CreatePaddlePredictor<NativeConfig>(config0);
+  auto predictor1 = CreatePaddlePredictor<MixedRTConfig>(config1);
   // Prepare inputs
   int height = 224;
   int width = 224;
......