// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/inference/analysis/analyzer.h"
#include "paddle/fluid/inference/api/api_impl.h"
#include "paddle/fluid/inference/api/details/reset_tensor_array.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/string/printf.h"
#ifdef PADDLE_WITH_TESTING
#include <gtest/gtest.h>
#include <gtest/gtest_prod.h>
#endif
namespace paddle {

using inference::analysis::Argument;
using inference::analysis::Analyzer;
using framework::proto::ProgramDesc;
using framework::NaiveExecutor;

/** \brief This predictor is based on the original native predictor, with IR
 * and Analysis support added.
 *
 * It optimizes the IR and the parameters at runtime.
 *
 * TODO(Superjomn) Replace the native predictor?
 */
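// A minimal usage sketch of this predictor, assuming a directory-style model
// (the exact AnalysisConfig setters may differ slightly between Paddle
// versions; "/path/to/model_dir" is a hypothetical path):
//
//   AnalysisConfig config;
//   config.SetModel("/path/to/model_dir");
//   auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
//
//   std::vector<PaddleTensor> inputs, outputs;
//   // ... fill `inputs` with real data ...
//   predictor->Run(inputs, &outputs);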
class AnalysisPredictor : public PaddlePredictor {
 public:
  explicit AnalysisPredictor(const AnalysisConfig &config) : config_(config) {
    predictor_id_ = inference::GetUniqueId();
  }
  ~AnalysisPredictor();

  bool Init(const std::shared_ptr<framework::Scope> &parent_scope,
            const std::shared_ptr<framework::ProgramDesc> &program = nullptr);

  bool Run(const std::vector<PaddleTensor> &inputs,
           std::vector<PaddleTensor> *output_data,
           int batch_size = -1) override;

  std::vector<std::string> GetInputNames();
  std::vector<std::string> GetOutputNames();

  std::unique_ptr<ZeroCopyTensor> GetInputTensor(
      const std::string &name) override;
  std::unique_ptr<ZeroCopyTensor> GetOutputTensor(
      const std::string &name) override;

  bool ZeroCopyRun() override;
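  // Illustrative sketch of the zero-copy API above. It assumes the config was
  // created with feed/fetch ops disabled (e.g. via
  // config.SwitchUseFeedFetchOps(false)); the shape {1, 3, 224, 224} is only
  // an example:
  //
  //   auto in_names = predictor->GetInputNames();
  //   auto input = predictor->GetInputTensor(in_names[0]);
  //   input->Reshape({1, 3, 224, 224});
  //   float *data = input->mutable_data<float>(PaddlePlace::kCPU);
  //   // ... fill `data` ...
  //   predictor->ZeroCopyRun();
  //   auto output = predictor->GetOutputTensor(predictor->GetOutputNames()[0]);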

  void CreateFeedFetchVar(framework::Scope *scope);
  void PrepareFeedFetch();

  void PrepareArgument();
  void OptimizeInferenceProgram();

  Argument &analysis_argument() { return argument_; }

  std::unique_ptr<PaddlePredictor> Clone() override;

  framework::Scope *scope() { return scope_.get(); }
  framework::ProgramDesc &program() { return *inference_program_; }

  std::string GetSerializedProgram() const override;

  bool MkldnnQuantize();

  // Save the optimized program to `model` and the parameters to `params`
  // under the given directory.
  void SaveOptimModel(const std::string &dir);

 protected:
  // For memory optimization.
  bool need_collect_var_shapes_for_memory_optim();
  void CollectVarShapes();
  void SerializeBatchVarShapes(const std::string &path);

  bool PrepareProgram(const std::shared_ptr<framework::ProgramDesc> &program);
  bool PrepareScope(const std::shared_ptr<framework::Scope> &parent_scope);
  bool CreateExecutor();
  bool PrepareExecutor();

  bool LoadProgramDesc();
  bool LoadParameters();

  bool SetFeed(const std::vector<PaddleTensor> &input_datas,
               framework::Scope *scope);
  bool GetFetch(std::vector<PaddleTensor> *output_data,
                framework::Scope *scope);
  template <typename T>
  void GetFetchOne(const framework::LoDTensor &fetchs,
                   PaddleTensor *output_data);
  // PreSet and PostReset for MKL-DNN multi-threading and dynamic-shape inputs.
  // Used in AnalysisPredictor::Run(); AnalysisPredictor::ZeroCopyRun() is not
  // supported yet.
  void MkldnnPreSet(const std::vector<PaddleTensor> &inputs);
  void MkldnnPostReset();

#if PADDLE_WITH_TENSORRT
  // When the Paddle-TRT INT8 engine is used, calibration table data has to be
  // generated first. The calibration table records the value range of every
  // op's inputs and outputs. The whole process consists of several steps:
  //
  // 1. Build a 32-bit engine, run it on the calibration set, and record a
  //    histogram of the activation-value distribution for each tensor.
  // 2. Build a calibration table from the histograms.
  //
  // After step 2, the calibration table has to be stored on disk; see the
  // configuration sketch below.
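  //
  // A hedged configuration sketch for triggering this calibration flow (the
  // exact EnableTensorRtEngine parameter list varies across Paddle versions;
  // the values shown are illustrative defaults, not a prescription):
  //
  //   AnalysisConfig config;
  //   config.EnableUseGpu(100 /* initial GPU memory in MB */, 0 /* device */);
  //   config.EnableTensorRtEngine(1 << 20 /* workspace_size */,
  //                               1 /* max_batch_size */,
  //                               3 /* min_subgraph_size */,
  //                               AnalysisConfig::Precision::kInt8,
  //                               false /* use_static */,
  //                               true /* use_calib_mode */);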
  bool SaveTrtCalibToDisk();
#endif

// Some more detailed tests are made friends of the predictor, so that all the
// internals can be tested.
#if PADDLE_WITH_TESTING
  FRIEND_TEST(AnalysisPredictor, analysis_off);
  FRIEND_TEST(AnalysisPredictor, analysis_on);
  FRIEND_TEST(AnalysisPredictor, with_gpu);
#endif

 private:
  AnalysisConfig config_;
  Argument argument_;
  std::unique_ptr<NaiveExecutor> executor_;
  platform::Place place_;
  std::shared_ptr<framework::Scope> scope_;
  framework::Scope *sub_scope_{nullptr};
  std::shared_ptr<framework::ProgramDesc> inference_program_;
  std::vector<framework::OpDesc *> feeds_;
  std::map<std::string, size_t> feed_names_;
  // Sorted by the feed index.
  std::map<size_t, std::string> idx2feeds_;
  std::vector<framework::OpDesc *> fetches_;
  std::map<size_t, std::string> idx2fetches_;

156 157 158 159 160 161 162 163 164 165
#if PADDLE_WITH_MKLDNN
  // Helper class to perform quantization
  class MkldnnQuantizer;
  MkldnnQuantizer *mkldnn_quantizer_{nullptr};

#if PADDLE_WITH_TESTING
  friend class MkldnnQuantizerTest;
#endif
#endif

  // Memory buffer for the feed inputs. Creating temporary LoDTensors on every
  // run causes serious concurrency problems, wrong results and memory leaks,
  // so they are cached here.
  std::vector<framework::LoDTensor> feed_tensors_;
  details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
  // A mutex to make Clone() thread safe.
  std::mutex clone_mutex_;

  // For memory optimization.
  const size_t max_shape_collect_count_{1000};
  int need_collect_var_shapes_{-1};  // -1 for default, 0 for false, 1 for true.
  std::vector<std::map<std::string, std::vector<int>>> batch_var_shapes_;
  int predictor_id_;

 private:
  // Status flags that help to determine the internal state of the predictor.
  bool status_is_cloned_{false};
  bool status_use_gpu_{false};
};

}  // namespace paddle