// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/inference/analysis/analyzer.h"
#include "paddle/fluid/inference/api/api_impl.h"
#include "paddle/fluid/inference/api/details/reset_tensor_array.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/string/printf.h"
#ifdef PADDLE_WITH_TESTING
#include <gtest/gtest.h>
#include <gtest/gtest_prod.h>
#endif
namespace paddle {

using inference::analysis::Argument;
using inference::analysis::Analyzer;
using framework::proto::ProgramDesc;
using framework::NaiveExecutor;

/** \brief This predictor is based on the original native predictor, with IR
 * and Analysis support.
 *
 * It optimizes the IR and the parameters at runtime.
 *
 * TODO(Superjomn) Replace the Native predictor?
 */
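// A minimal usage sketch (illustrative only; the exact AnalysisConfig setters
// vary across versions, and the model path below is a placeholder):
//
//   AnalysisConfig config;
//   config.SetModel("/path/to/model_dir");
//   auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
//
//   std::vector<PaddleTensor> inputs, outputs;
//   // ... fill `inputs` with data ...
//   predictor->Run(inputs, &outputs);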
class AnalysisPredictor : public PaddlePredictor {
 public:
  explicit AnalysisPredictor(const AnalysisConfig &config) : config_(config) {
    predictor_id_ = inference::GetUniqueId();
  }
  ~AnalysisPredictor();

  bool Init(const std::shared_ptr<framework::Scope> &parent_scope,
            const std::shared_ptr<framework::ProgramDesc> &program = nullptr);

  bool Run(const std::vector<PaddleTensor> &inputs,
           std::vector<PaddleTensor> *output_data,
           int batch_size = -1) override;

  std::unique_ptr<ZeroCopyTensor> GetInputTensor(
      const std::string &name) override;
  std::unique_ptr<ZeroCopyTensor> GetOutputTensor(
      const std::string &name) override;

  bool ZeroCopyRun() override;
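
  // A zero-copy usage sketch (illustrative only; it assumes the config was
  // built with SwitchUseFeedFetchOps(false), and that this Paddle version
  // provides ZeroCopyTensor::copy_from_cpu / copy_to_cpu):
  //
  //   auto input = predictor->GetInputTensor("x");  // "x" is a placeholder
  //   input->Reshape({batch, channels, height, width});
  //   input->copy_from_cpu(input_data.data());
  //   predictor->ZeroCopyRun();
  //   auto output = predictor->GetOutputTensor("out");
  //   output->copy_to_cpu(result.data());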

  void CreateFeedFetchVar(framework::Scope *scope);
  void PrepareFeedFetch();

  void OptimizeInferenceProgram();

  Argument &analysis_argument() { return argument_; }

  std::unique_ptr<PaddlePredictor> Clone() override;

  framework::Scope *scope() { return scope_.get(); }
  framework::ProgramDesc &program() { return *inference_program_; }

  void SetMkldnnThreadID(int tid);

  std::string GetSerializedProgram() const override;

 protected:
  // For memory optimization.
  bool need_collect_var_shapes_for_memory_optim();
  void CollectVarShapes();
  void SerializeBatchVarShapes(const std::string &path);

  bool PrepareProgram(const std::shared_ptr<framework::ProgramDesc> &program);
  bool PrepareScope(const std::shared_ptr<framework::Scope> &parent_scope);
  bool CreateExecutor();
  bool PrepareExecutor();

  bool LoadProgramDesc();
  bool LoadParameters();

  bool SetFeed(const std::vector<PaddleTensor> &input_datas,
               framework::Scope *scope);
  bool GetFetch(std::vector<PaddleTensor> *output_data,
                framework::Scope *scope);
  template <typename T>
  void GetFetchOne(const framework::LoDTensor &fetchs,
                   PaddleTensor *output_data);

#if PADDLE_WITH_TENSORRT
  // When we use the Paddle-TRT INT8 engine, we need to generate the
  // calibration table data first. The calibration table contains the value
  // range of each op's input and output tensors. The whole process can be
  // divided into several steps:
  //
  // 1. Build a 32-bit engine, run it on the calibration set, and record a
  //    histogram of the distribution of activation values for each tensor.
  // 2. Build a calibration table from the histograms.
  //
  // After step 2, the calibration table is stored on disk.
  bool SaveTrtCalibToDisk();
#endif
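
  // A sketch of the INT8 calibration workflow described above (illustrative
  // only; the exact EnableTensorRtEngine parameter list is
  // version-dependent):
  //
  //   AnalysisConfig config;
  //   // ... enable TensorRT with AnalysisConfig::Precision::kInt8 ...
  //   auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
  //   for (auto &batch : calibration_set) {  // `calibration_set`: hypothetical
  //     predictor->Run(batch, &outputs);
  //   }
  //   predictor.reset();  // the calibration table is saved on destruction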

// Some more detailed tests are made friends of the predictor, so that all
// the internal details can be tested.
#if PADDLE_WITH_TESTING
  FRIEND_TEST(AnalysisPredictor, analysis_off);
  FRIEND_TEST(AnalysisPredictor, analysis_on);
  FRIEND_TEST(AnalysisPredictor, with_gpu);
#endif

 private:
  AnalysisConfig config_;
  Argument argument_;
  std::unique_ptr<NaiveExecutor> executor_;
  platform::Place place_;
  std::shared_ptr<framework::Scope> scope_;
  framework::Scope *sub_scope_{nullptr};
  std::shared_ptr<framework::ProgramDesc> inference_program_;
  std::vector<framework::OpDesc *> feeds_;
  std::map<std::string, size_t> feed_names_;
  std::vector<framework::OpDesc *> fetches_;
  // Memory buffer for the feed inputs. Temporary LoDTensors would cause
  // serious concurrency problems, wrong results, and memory leaks, so we
  // cache them here.
  std::vector<framework::LoDTensor> feed_tensors_;
  details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
  // A mutex that helps to make Clone thread safe.
  std::mutex clone_mutex_;

  // For memory optimization.
  const size_t max_shape_collect_count_{1000};
  int need_collect_var_shapes_{-1};  // -1 for default, 0 for false, 1 for true.
  std::vector<std::map<std::string, std::vector<int>>> batch_var_shapes_;
  int predictor_id_;

 private:
  // Status flags that record the state inside the predictor.
  bool status_program_optimized_{false};
  bool status_is_cloned_{false};
  bool status_use_gpu_{false};
  bool status_ir_optim_enabled_{false};
};

}  // namespace paddle