// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/op_compatible_info.h"
#include "paddle/fluid/inference/analysis/analyzer.h"
#include "paddle/fluid/inference/api/api_impl.h"
#include "paddle/fluid/inference/api/details/reset_tensor_array.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/platform/device/gpu/gpu_types.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/string/printf.h"
#ifdef PADDLE_WITH_TESTING
#include <gtest/gtest.h>
#include <gtest/gtest_prod.h>
#endif

namespace paddle_infer {
using float16 = paddle::platform::float16;
}
///
/// \file analysis_predictor.h
///
/// \brief Compared to NativePredictor, AnalysisPredictor is a high-performance
/// predictor that includes many optimizations
///
/// \author paddle-infer@baidu.com
/// \date 2020-01-01
/// \since 1.7.0
///

namespace paddle {

using inference::analysis::Argument;
using inference::analysis::Analyzer;
using framework::proto::ProgramDesc;
using framework::NaiveExecutor;

///
/// \class AnalysisPredictor
///
/// \brief The analysis predictor is based on the original native predictor,
/// with IR and Analysis support added. It optimizes the IR and the parameters
/// at runtime.
///
/// The predictor has the following typical uses:
///
/// Get predictor
/// \code{cpp}
///   auto predictor = CreatePaddlePredictor(config);
/// \endcode
///
/// Get input or output names
/// \code{cpp}
///   auto input_names = predictor->GetInputNames();
///   auto output_names = predictor->GetOutputNames();
/// \endcode
///
/// Get input or output tensors
/// \code{cpp}
///   auto input_t = predictor->GetInputTensor(input_names[0]);
///   auto output_t = predictor->GetOutputTensor(output_names[0]);
/// \endcode
///
/// Run predictor
/// \code{cpp}
///   predictor->ZeroCopyRun();
/// \endcode
///
class AnalysisPredictor : public PaddlePredictor {
 public:
  ///
  /// \brief Construct a new Analysis Predictor object
  ///
  /// \param[in] config the AnalysisConfig used to create the predictor
  ///
  explicit AnalysisPredictor(const AnalysisConfig &config) : config_(config) {
    if (config_.shape_range_info_collected()) {
      config_.SwitchIrOptim(false);
      config_.EnableMemoryOptim(false);
    }
    predictor_id_ = inference::GetUniqueId();
  }
  ///
  /// \brief Destroy the Analysis Predictor object
  ///
  ~AnalysisPredictor();

  ///
  /// \brief Initialize predictor
  ///
  /// Initializing the predictor mainly includes the following tasks: preparing
  /// the scope, creating the executor, preparing the program, initializing the
  /// variables required by the executor, and getting the feed_target_names and
  /// fetch_target_names, etc.
  ///
  /// \param[in] parent_scope parent scope
  /// \param[in] program program
  /// \return Whether the init function executed successfully
  ///
  bool Init(const std::shared_ptr<framework::Scope> &parent_scope,
            const std::shared_ptr<framework::ProgramDesc> &program = nullptr);

  ///
  /// \brief Run the prediction engine. Deprecated; please use ZeroCopyRun
  /// instead.
  ///
  /// \param[in] inputs input tensors
  /// \param[out] output_data output tensors
  /// \param[in] batch_size data's batch size
  /// \return Whether the function executed successfully
  ///
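  ///
  /// A minimal usage sketch (an assumption for illustration: a single float
  /// input named "x" of shape {1, 3}; adapt the name and shape to your model):
  /// \code{cpp}
  ///   std::vector<float> data{1.f, 2.f, 3.f};
  ///   PaddleTensor input;
  ///   input.name = "x";
  ///   input.shape = {1, 3};
  ///   input.dtype = PaddleDType::FLOAT32;
  ///   input.data = PaddleBuf(data.data(), data.size() * sizeof(float));
  ///   std::vector<PaddleTensor> outputs;
  ///   predictor->Run({input}, &outputs);
  /// \endcode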
  bool Run(const std::vector<PaddleTensor> &inputs,
           std::vector<PaddleTensor> *output_data,
           int batch_size = -1) override;

  ///
  /// \brief Get the input names
  ///
  /// \return input names
  ///
  std::vector<std::string> GetInputNames();
  ///
  /// \brief Get the output names
  ///
  /// \return output names
  ///
  std::vector<std::string> GetOutputNames();

  ///
  /// \brief Get the Input Tensor object
  ///
  /// \param[in] name input name
  /// \return input tensor
  ///
  std::unique_ptr<ZeroCopyTensor> GetInputTensor(
      const std::string &name) override;
  ///
  /// \brief Get the Output Tensor object
  ///
  /// \param[in] name output name
  /// \return output tensor
  ///
  std::unique_ptr<ZeroCopyTensor> GetOutputTensor(
      const std::string &name) override;
  ///
  /// \brief Get all input names and their corresponding shapes
  ///
  /// \return the map of input names and shapes
  ///
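  ///
  /// A small usage sketch (hedged):
  /// \code{cpp}
  ///   for (const auto &name_and_shape : predictor->GetInputTensorShape()) {
  ///     // name_and_shape.first: input name; name_and_shape.second: its shape
  ///   }
  /// \endcode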
  std::map<std::string, std::vector<int64_t>> GetInputTensorShape() override;

  ///
  /// \brief Run the prediction engine
  ///
  /// \return Whether the function executed successfully
  ///
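  ///
  /// A hedged end-to-end sketch of the zero-copy flow (the input shape and the
  /// host buffers in_data/out_data are illustrative assumptions):
  /// \code{cpp}
  ///   auto input_t = predictor->GetInputTensor(predictor->GetInputNames()[0]);
  ///   input_t->Reshape({1, 3});
  ///   input_t->copy_from_cpu(in_data.data());
  ///   predictor->ZeroCopyRun();
  ///   auto output_t =
  ///       predictor->GetOutputTensor(predictor->GetOutputNames()[0]);
  ///   output_t->copy_to_cpu(out_data.data());
  /// \endcode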
  bool ZeroCopyRun() override;

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // Note: Can only be used under thread_local semantics.
  bool ExpRunWithExternalStream(const gpuStream_t stream);
#endif
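
  // A hedged sketch of the external-stream path (a CUDA build is assumed;
  // error handling omitted, and the stream lifetime is managed by the caller):
  //   cudaStream_t stream;
  //   cudaStreamCreate(&stream);
  //   predictor->ExpRunWithExternalStream(stream);
  //   cudaStreamSynchronize(stream);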

  ///
  /// \brief Create feed fetch variables
  ///
  /// \param[in] scope Scope needed to create variables
  ///
  void CreateFeedFetchVar(framework::Scope *scope);
  ///
  /// \brief Determine the model's inputs and outputs based on the program's
  /// feed and fetch ops
  ///
  void PrepareFeedFetch();

  ///
  /// \brief Set predictor's argument according to config, which mainly includes
  /// execution information and graph optimization related pass information
  ///
  void PrepareArgument();
  ///
  /// \brief According to argument information, execute the relevant pass
  /// to get the optimized model program
  ///
  void OptimizeInferenceProgram();

  ///
  /// \brief Clear the intermediate tensors of the predictor
  ///
  ///
  void ClearIntermediateTensor();

  ///
  /// \brief Release all temporary tensors to shrink the size of the memory
  /// pool. The memory pool is considered to be composed of a list of chunks;
  /// a chunk can be released if it is not occupied.
  ///
  /// \return Number of bytes released. It may be smaller than the actual
  /// released memory, because part of the memory is not managed by the
  /// MemoryPool.
  ///
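  ///
  /// A usage sketch (hedged): clear the intermediate tensors first, then
  /// shrink the pool.
  /// \code{cpp}
  ///   predictor->ClearIntermediateTensor();
  ///   uint64_t released_bytes = predictor->TryShrinkMemory();
  /// \endcode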
  uint64_t TryShrinkMemory() override;

  ///
  /// \brief Get the argument used by predictor
  ///
  /// \return the argument obtained by config
  ///
  Argument &analysis_argument() { return argument_; }
  ///
  /// \brief Clone to get a new predictor. Thread safe.
  ///
  /// \return the cloned predictor
  ///
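  ///
  /// A hedged multi-threading sketch: each worker thread uses its own clone.
  /// \code{cpp}
  ///   auto worker = predictor->Clone();
  ///   std::thread t([&] {
  ///     // feed the worker's own input tensors here, then run:
  ///     worker->ZeroCopyRun();
  ///   });
  ///   t.join();
  /// \endcode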
  std::unique_ptr<PaddlePredictor> Clone() override;
  ///
  /// \brief Get the scope used by predictor
  ///
  /// \return scope
  ///
  framework::Scope *scope() { return scope_.get(); }
  ///
  /// \brief Get the inference program
  ///
  /// \return the inference program
  ///
  framework::ProgramDesc &program() { return *inference_program_; }

  ///
  /// \brief Get the serialized program
  ///
  /// \return the serialized program
  ///
  std::string GetSerializedProgram() const override;

  ///
  /// \brief Initialize mkldnn quantizer and execute mkldnn quantization pass
  ///
  /// \return Whether the function executed successfully
  ///
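  ///
  /// A hedged sketch (requires an MKLDNN build with quantization enabled in
  /// the config; assumes direct access to the AnalysisPredictor instance):
  /// \code{cpp}
  ///   auto *analysis_pred = static_cast<AnalysisPredictor *>(predictor.get());
  ///   if (!analysis_pred->MkldnnQuantize()) { /* quantization failed */ }
  /// \endcode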
  bool MkldnnQuantize();

  ///
  /// \brief Save the program to the model file and the parameters to the
  /// params file
  ///
  /// \param[in] dir path to save the model
  ///
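  ///
  /// Usage sketch (the directory path is an illustrative assumption):
  /// \code{cpp}
  ///   predictor->SaveOptimModel("./optimized_model");
  /// \endcode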
  void SaveOptimModel(const std::string &dir);

 protected:
  ///
  /// \brief Prepare the predictor's required program: load the model
  /// information, optimize the graph, and create the variables required by
  /// the executor, etc.
  ///
  /// \param[in] program paddle program
  /// \return Whether the function executed successfully
  ///
  bool PrepareProgram(const std::shared_ptr<framework::ProgramDesc> &program);
  ///
  /// \brief Prepare the scope environment; each predictor has its own scope
  ///
  /// \param[in] parent_scope The scope of the predictor to be cloned, or null
  /// \return Whether the function executed successfully
  ///
  bool PrepareScope(const std::shared_ptr<framework::Scope> &parent_scope);
  ///
  /// \brief Create an Executor object
  ///
  /// \return Whether the function executed successfully
  ///
  bool CreateExecutor();
  ///
  /// \brief According to the model's program, the executor creates ops
  ///
  /// \return Whether the function executed successfully
  ///
  bool PrepareExecutor();

  ///
  /// \brief Load model program.
  ///
  /// \return Whether the function executed successfully
  ///
  bool LoadProgramDesc();
  ///
  /// \brief Load model parameters.
  ///
  /// \return Whether the function executed successfully
  ///
  bool LoadParameters();

  ///
  /// \brief Prepare input data, only used in Run()
  ///
  /// \param[in] input_datas input tensors
  /// \param[in] scope the scope used by predictor
  /// \return Whether the function executed successfully
  ///
  bool SetFeed(const std::vector<PaddleTensor> &input_datas,
               framework::Scope *scope);
  ///
  /// \brief Get the output data, only used in Run()
  ///
  /// \param[out] output_data output tensors
  /// \param[in] scope the scope used by predictor
  /// \return Whether the function executed successfully
  ///
  bool GetFetch(std::vector<PaddleTensor> *output_data,
                framework::Scope *scope);
  ///
  /// \brief Get the output data, only used in GetFetch()
  ///
  /// \param[in] fetchs tensor produced by the fetch op
  /// \param[out] output_data output tensor
  ///
  template <typename T>
  void GetFetchOne(const framework::LoDTensor &fetchs,
                   PaddleTensor *output_data);
  ///
  /// \brief PreSet for Mkldnn multi-thread and dynamic shape input.
  ///
  /// Used in AnalysisPredictor::Run(); AnalysisPredictor::ZeroCopyRun() is not
  /// supported yet.
  ///
  /// \param[in] inputs tensors
  ///
  void MkldnnPreSet(const std::vector<PaddleTensor> &inputs);

  ///
  /// \brief PreSet for Mkldnn multi-thread and dynamic shape input.
  ///
  /// Used in AnalysisPredictor::Run(); AnalysisPredictor::ZeroCopyRun() is not
  /// supported yet.
  ///
  /// \param[in] inputs tensor shape
  ///
  void MkldnnPreSet(const std::vector<std::vector<int>> &inputs_shape);

  ///
  /// \brief PostReset for Mkldnn multi-thread and dynamic shape input.
  ///
  /// Used in AnalysisPredictor::Run(); AnalysisPredictor::ZeroCopyRun() is not
  /// supported yet.
  ///
  void MkldnnPostReset();

#if PADDLE_WITH_TENSORRT
  ///
  /// \brief Save the calibration table to disk.
  ///
  /// When we use the Paddle-TRT INT8 engine, we first need to generate
  /// calibration table data; the calibration table records the value range of
  /// each op's inputs and outputs. The whole process can be divided into
  /// several steps:
  /// 1. Builds a 32-bit (FP32) engine, runs it on the calibration set, and
  ///  records a histogram of the activation-value distribution for each tensor.
  /// 2. Builds a calibration table from the histograms.
  /// After step 2, we need to store the calibration table on disk.
  ///
  /// \return Whether the function executed successfully
  ///
  bool SaveTrtCalibToDisk();
#endif

// Some more detailed tests are made friends of the predictor so that all the
// details can be tested.
#if PADDLE_WITH_TESTING
  FRIEND_TEST(AnalysisPredictor, analysis_off);
  FRIEND_TEST(AnalysisPredictor, analysis_on);
  FRIEND_TEST(AnalysisPredictor, with_gpu);
#endif

 private:
  void StatisticShapeRangeInfo();
  void CollectShapeRangeInfo();

 private:
  AnalysisConfig config_;
  Argument argument_;
  std::unique_ptr<NaiveExecutor> executor_;
  platform::Place place_;
  std::shared_ptr<framework::Scope> scope_;
  framework::Scope *sub_scope_{nullptr};
  std::shared_ptr<framework::ProgramDesc> inference_program_;
  framework::OpCompatibleMap op_compatible_map_;
  std::vector<framework::OpDesc *> feeds_;
  std::map<std::string, size_t> feed_names_;
  // Sorted according to the idx.
  std::map<size_t, std::string> idx2feeds_;
  std::vector<framework::OpDesc *> fetches_;
  std::map<size_t, std::string> idx2fetches_;

#if PADDLE_WITH_MKLDNN
  // Helper class to perform quantization
  class MkldnnQuantizer;
  MkldnnQuantizer *mkldnn_quantizer_{nullptr};

#if PADDLE_WITH_TESTING
  friend class MkldnnQuantizerTest;
#endif
#endif

  // Memory buffer for feed inputs. Temporary LoDTensors would cause serious
  // concurrency problems, wrong results, and memory leaks, so cache them here.
  std::vector<framework::LoDTensor> feed_tensors_;
  details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
  // A mutex to help make Clone thread safe.
  std::mutex clone_mutex_;

  // For memory optimization.
  const size_t max_shape_collect_count_{1000};
  int need_collect_var_shapes_{-1};  // -1 for default, 0 for false, 1 for true.
  std::vector<std::map<std::string, std::vector<int>>> batch_var_shapes_;
  int predictor_id_;

 private:
  // Some status flags that help to determine the state inside the predictor.
  bool status_is_cloned_{false};

  std::map<std::string, std::vector<std::vector<int32_t>>> shape_info_;
  int clone_num_{1};
};

}  // namespace paddle