// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <algorithm>
#include <map>
#include <string>
#include <vector>
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/inference/analysis/analyzer.h"
#include "paddle/fluid/inference/api/api_impl.h"
#include "paddle/fluid/inference/api/details/reset_tensor_array.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/string/printf.h"

namespace paddle {

using inference::analysis::Argument;
using inference::analysis::Analyzer;
using framework::proto::ProgramDesc;
using framework::NaiveExecutor;
using contrib::AnalysisConfig;

/* This predictor is based on the original native predictor, extended with IR
 * and Analysis support. It optimizes the IR and the parameters at runtime.
 * TODO(Superjomn) Replace the native predictor?
 */
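/* A minimal usage sketch of this predictor through the public C++ API.
 * CreatePaddlePredictor and the config fields are declared in
 * paddle_inference_api.h; the model path below is hypothetical:
 *
 *   contrib::AnalysisConfig config;
 *   config.model_dir = "./my_model";  // hypothetical directory with the model
 *   auto predictor = CreatePaddlePredictor<contrib::AnalysisConfig,
 *                                          PaddleEngineKind::kAnalysis>(config);
 *
 *   std::vector<PaddleTensor> inputs, outputs;
 *   // ... fill `inputs` with named, shaped, typed tensors ...
 *   predictor->Run(inputs, &outputs);
 */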
class AnalysisPredictor : public PaddlePredictor {
 public:
  explicit AnalysisPredictor(const AnalysisConfig &config) : config_(config) {}

  // Initialize the predictor: create or reuse the scope, load and analyze the
  // program, and prepare the executor.
  bool Init(const std::shared_ptr<framework::Scope> &parent_scope,
            const std::shared_ptr<framework::ProgramDesc> &program = nullptr);

  // Run inference through the copying interface: the inputs are copied into
  // the feed variables, the program is executed, and the fetch results are
  // copied into `output_data`.
  bool Run(const std::vector<PaddleTensor> &inputs,
           std::vector<PaddleTensor> *output_data,
           int batch_size = -1) override;

  // Zero-copy interface: expose the underlying feed/fetch tensors directly so
  // callers can read and write them without the copies that Run performs.
  std::unique_ptr<ZeroCopyTensor> GetInputTensor(
      const std::string &name) override;
  std::unique_ptr<ZeroCopyTensor> GetOutputTensor(
      const std::string &name) override;

  bool ZeroCopyRun() override;
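  // A sketch of the zero-copy flow (an assumption here: the config must be
  // set up so the feed/fetch ops are skipped and the variables are bound
  // directly; the tensor names "x" and "out" are illustrative):
  //
  //   auto input = predictor->GetInputTensor("x");
  //   input->Reshape({batch_size, input_dim});
  //   float *in_data = input->mutable_data<float>(PaddlePlace::kCPU);
  //   // ... write one batch into in_data ...
  //   predictor->ZeroCopyRun();
  //   auto output = predictor->GetOutputTensor("out");
  //   // ... read the results back out of `output` ...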

  // Locate and cache the feed/fetch ops of the program, so feeding and
  // fetching need not search for them on every Run.
  void PrepareFeedFetch();

  // Run the analysis passes over the loaded program: IR optimization and
  // parameter processing.
  void OptimizeInferenceProgram();

  Argument &analysis_argument() { return argument_; }

  // Clone a predictor that reuses this predictor's loaded model; a common
  // pattern is one clone per worker thread.
  std::unique_ptr<PaddlePredictor> Clone() override;

  framework::Scope *scope() { return executor_->scope(); }
  framework::ProgramDesc &program() { return *inference_program_; }

 protected:
  // Load the program description according to the config (a model directory
  // or explicit program/parameter files).
  bool LoadProgramDesc();

  // Copy the user-provided inputs into the feed variables of `scope`.
  bool SetFeed(const std::vector<PaddleTensor> &input_datas,
               framework::Scope *scope);
  // Copy the fetch variables of `scope` back into `output_data`.
  bool GetFetch(std::vector<PaddleTensor> *output_data,
                framework::Scope *scope);
  template <typename T>
  void GetFetchOne(const framework::LoDTensor &fetchs,
                   PaddleTensor *output_data);

  ~AnalysisPredictor();

 private:
  contrib::AnalysisConfig config_;
  Argument argument_;
  std::unique_ptr<NaiveExecutor> executor_;
  platform::Place place_;
  std::shared_ptr<framework::Scope> scope_;
  framework::Scope *sub_scope_{nullptr};
  std::shared_ptr<framework::ProgramDesc> inference_program_;
  std::vector<framework::OpDesc *> feeds_;
  std::map<std::string, size_t> feed_names_;
  std::vector<framework::OpDesc *> fetchs_;
  // Memory buffer for the feed inputs. Creating temporary LoDTensors on every
  // Run call causes serious concurrency problems, so they are cached here and
  // reused.
  std::vector<framework::LoDTensor> feed_tensors_;
  details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
};

}  // namespace paddle