// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "paddle/phi/common/data_type.h"
#if defined(PADDLE_WITH_DISTRIBUTE) && defined(PADDLE_WITH_PSCORE)
#include "paddle/fluid/distributed/fleet_executor/fleet_executor.h"
#endif
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/op_compatible_info.h"
#include "paddle/fluid/inference/analysis/analyzer.h"
#include "paddle/fluid/inference/api/api_impl.h"
#include "paddle/fluid/inference/api/details/reset_tensor_array.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/resource_manager.h"
#include "paddle/fluid/platform/device/gpu/gpu_types.h"
#include "paddle/fluid/string/printf.h"
#include "paddle/phi/core/dense_tensor.h"
#ifdef PADDLE_WITH_TESTING
#include <gtest/gtest.h>
#include <gtest/gtest_prod.h>
#endif

namespace paddle_infer {
namespace experimental {
class InternalUtils;
};
}  // namespace paddle_infer
///
/// \file analysis_predictor.h
///
/// \brief Compared to NativePredictor, AnalysisPredictor is a high-performance
/// predictor that includes many optimizations
///
/// \author paddle-infer@baidu.com
/// \date 2020-01-01
/// \since 1.7.0
///

namespace paddle {

using framework::NaiveExecutor;
using framework::proto::ProgramDesc;
using inference::analysis::Analyzer;
using inference::analysis::Argument;

///
/// \class AnalysisPredictor
///
/// \brief The analysis predictor is based on the original native predictor with
/// IR and Analysis support. It optimizes the IR and parameters at runtime.
///
/// The predictor has the following typical uses:
///
/// Get predictor
/// \code{cpp}
///   auto predictor = CreatePaddlePredictor(config);
/// \endcode
///
/// Get input or output names
/// \code{cpp}
///   auto input_names = predictor->GetInputNames();
///   auto output_names = predictor->GetOutputNames();
/// \endcode
///
/// Get input or output tensors
/// \code{cpp}
///   auto input_t = predictor->GetInputTensor(input_names[0]);
///   auto output_t = predictor->GetOutputTensor(output_names[0]);
/// \endcode
///
/// Run predictor
/// \code{cpp}
///   predictor->ZeroCopyRun();
/// \endcode
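///
/// Fill input data, run, and fetch output data (a minimal end-to-end sketch;
/// the shape and buffers below are illustrative and assume the usual
/// ZeroCopyTensor Reshape/copy_from_cpu/copy_to_cpu helpers)
/// \code{cpp}
///   std::vector<float> input_data(1 * 3 * 224 * 224, 0.f);
///   input_t->Reshape({1, 3, 224, 224});
///   input_t->copy_from_cpu(input_data.data());
///   predictor->ZeroCopyRun();
///   int out_num = 1;
///   for (int d : output_t->shape()) out_num *= d;
///   std::vector<float> output_data(out_num);
///   output_t->copy_to_cpu(output_data.data());
/// \endcode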
///
class AnalysisPredictor : public PaddlePredictor {
 public:
  ///
  /// \brief Construct a new Analysis Predictor object
  ///
  /// \param[in] config the AnalysisConfig used to create the predictor
  ///
  explicit AnalysisPredictor(const AnalysisConfig &config) : config_(config) {
    if (config_.shape_range_info_collected()) {
      config_.SwitchIrOptim(false);
    }
    int trt_identifier = config_.trt_engine_memory_sharing_identifier_;
    if (trt_identifier > 0) {
      // NOTE(liuyuanle): For convenience, we set the id of the predictor to
      // negative sharing_identifier directly. In the future, this may affect
      // the meaning of negative predictor id.
      predictor_id_ = -trt_identifier;
      LOG(WARNING)
          << "Since engine context memory sharing among multiple predictors "
             "is enabled in Paddle-TRT, we set the ids of these predictors to "
             "the negative sharing_identifier you specified: "
          << predictor_id_;
    } else {
      predictor_id_ = inference::GetUniqueId();
    }
  }
  ///
  /// \brief Destroy the Analysis Predictor object
  ///
  ~AnalysisPredictor();

  ///
  /// \brief Initialize predictor
  ///
  /// Initializing predictor mainly includes the following tasks:
  /// preparing scope, creating executor, preparing program, initializing the
  /// variables required by the executor, getting the feed_target_names and
  /// fetch_target_names, etc.
  ///
  /// \param[in] parent_scope parent scope
  /// \param[in] program program
  /// \return Whether the init function executed successfully
  ///
  bool Init(const std::shared_ptr<framework::Scope> &parent_scope,
            const std::shared_ptr<framework::ProgramDesc> &program = nullptr);

  ///
  /// \brief Run the prediction engine. Deprecated. Please refer to ZeroCopyRun
  ///
  /// \param[in] inputs input tensors
  /// \param[out] output_data output tensors
  /// \param[in] batch_size data's batch size
  /// \return Whether the function executed successfully
  ///
  bool Run(const std::vector<PaddleTensor> &inputs,
           std::vector<PaddleTensor> *output_data,
           int batch_size = -1) override;

  ///
  /// \brief Run the prediction engine (Recommended).
  ///
  /// \param[in] inputs input tensors
  /// \param[out] outputs output tensors
  /// \return Whether the function executed successfully
  ///
  bool Run(const std::vector<paddle::Tensor> &inputs,
           std::vector<paddle::Tensor> *outputs) override;

  ///
  /// \brief Get the input names
  ///
  /// \return input names
  ///
  std::vector<std::string> GetInputNames() override;
  ///
  /// \brief Get the output names
  ///
  /// \return output names
  ///
  std::vector<std::string> GetOutputNames() override;

  ///
  /// \brief Get the Input Tensor object
  ///
  /// \param[in] name input name
  /// \return input tensor
  ///
  std::unique_ptr<ZeroCopyTensor> GetInputTensor(
      const std::string &name) override;
  ///
  /// \brief Get the Output Tensor object
  ///
  /// \param[in] name output name
  /// \return output tensor
  ///
  std::unique_ptr<ZeroCopyTensor> GetOutputTensor(
      const std::string &name) override;
  ///
  /// \brief Get all input names and their corresponding shapes
  ///
  /// \return the map of input names and shapes
  ///
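  /// A minimal usage sketch (a dimension may be reported as -1 when it is only
  /// known at run time):
  /// \code{cpp}
  ///   for (auto &name_and_shape : predictor->GetInputTensorShape()) {
  ///     LOG(INFO) << name_and_shape.first << " has "
  ///               << name_and_shape.second.size() << " dimensions";
  ///   }
  /// \endcode
  ///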
  std::map<std::string, std::vector<int64_t>> GetInputTensorShape() override;
  ///
  /// \brief Get all input names and their corresponding types
  ///
  /// \return the map of input names and types
  ///
  std::map<std::string, paddle_infer::DataType> GetInputTypes() override;
  ///
  /// \brief Get all output names and their corresponding shapes
  ///
  /// \return the map of output names and shapes
  ///
  std::map<std::string, std::vector<int64_t>> GetOutputTensorShape() override;
  ///
  /// \brief Get all output names and their corresponding types
  ///
  /// \return the map of output names and types
  ///
  std::map<std::string, paddle_infer::DataType> GetOutputTypes() override;

  ///
  /// \brief Run the prediction engine
  ///
  /// \return Whether the function executed successfully
  ///
  bool ZeroCopyRun() override;

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // Note: Can only be used under thread_local semantics.
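  //
  // A minimal usage sketch (CUDA build; the stream is created and owned by
  // the caller and must stay valid for the duration of the run):
  //   cudaStream_t external_stream;
  //   cudaStreamCreate(&external_stream);
  //   predictor->ExpRunWithExternalStream(external_stream);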
  bool ExpRunWithExternalStream(const gpuStream_t stream);
#endif

  ///
  /// \brief Get the execution stream on devices with a concept of stream,
  /// otherwise returns nullptr.
  ///
  /// \return The execution stream or nullptr (CPU).
  ///
  void *GetExecStream() const override;

  ///
  /// \brief Create feed fetch variables
  ///
  /// \param[in] scope Scope needed to create variables
  ///
  void CreateFeedFetchVar(framework::Scope *scope);
  ///
  /// \brief Determine the model's inputs and outputs based on the program's
  /// feed and fetch ops
  ///
  void PrepareFeedFetch();

  ///
  /// \brief Set predictor's argument according to config, which mainly includes
  /// execution information and graph optimization related pass information
  ///
  void PrepareArgument();
  ///
  /// \brief According to argument information, execute the relevant pass
  /// to get the optimized model program
  ///
  void OptimizeInferenceProgram();

  ///
  /// \brief Clear the intermediate tensors of the predictor
  ///
  void ClearIntermediateTensor() override;

  ///
  /// \brief Release all temporary tensors to shrink the size of the memory
  /// pool. The memory pool is considered to be composed of a list of chunks;
  /// if a chunk is not occupied, it can be released.
  ///
  /// \return Number of bytes released. It may be smaller than the actual
  /// released memory, because part of the memory is not managed by the
  /// MemoryPool.
  ///
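  /// A minimal usage sketch (typically paired with ClearIntermediateTensor()
  /// so that the released chunks are actually unoccupied):
  /// \code{cpp}
  ///   predictor->ClearIntermediateTensor();
  ///   uint64_t released_bytes = predictor->TryShrinkMemory();
  /// \endcode
  ///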
  uint64_t TryShrinkMemory() override;

  ///
  /// \brief Get the argument used by predictor
  ///
  /// \return the argument obtained by config
  ///
  Argument &analysis_argument() { return *argument_; }
  ///
  /// \brief Clone to get a new predictor. Thread safe.
  ///
  /// \return a new predictor
  ///
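  /// A minimal usage sketch (each worker thread runs inference on its own
  /// cloned predictor; the default stream is used here):
  /// \code{cpp}
  ///   auto thread_predictor = predictor->Clone();
  ///   thread_predictor->ZeroCopyRun();
  /// \endcode
  ///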
  std::unique_ptr<PaddlePredictor> Clone(void *stream = nullptr) override;
  ///
  /// \brief Get the scope used by predictor
  ///
  /// \return scope
  ///
  framework::Scope *scope() { return scope_.get(); }
  ///
  /// \brief Get the inference program
  ///
  /// \return the inference program
  ///
  framework::ProgramDesc &program() { return *inference_program_; }

  ///
  /// \brief Get the serialized program
  ///
  /// \return the serialized program
  ///
  std::string GetSerializedProgram() const override;

  ///
  /// \brief Get the fusion_statis_t
  ///
  /// \return the fusion_statis_t
  ///
  Argument::fusion_statis_t fusion_statis() { return fusion_statis_; }

  ///
  /// \brief Register an output hook function to operate on the intermediate
  /// tensor of an op's output. When using this function, memory reuse should
  /// be turned off. The hook function signature is void(const std::string&,
  /// const std::string&, const Tensor&). Here, the first parameter is the op's
  /// type, the second parameter is the output variable name of the op, and the
  /// third parameter is the output tensor with that variable name.
  ///
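  /// A minimal usage sketch (the hook below only logs the op type and the
  /// output variable name; any callable matching the documented signature can
  /// be registered):
  /// \code{cpp}
  ///   predictor->RegisterOutputHook(
  ///       [](const std::string &op_type, const std::string &var_name,
  ///          const auto &tensor) {
  ///         LOG(INFO) << op_type << " produced " << var_name;
  ///       });
  /// \endcode
  ///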
  void RegisterOutputHook(const Exp_OutputHookFunc &hookfunc) override;

  ///
  /// \brief Initialize mkldnn quantizer and execute mkldnn quantization pass
  ///
  /// \return Whether the function executed successfully
  ///
  bool MkldnnQuantize();

  ///
  /// \brief Save the optimized program to a model file and the parameters to
  /// a params file
  ///
  /// \param[in] dir path to save the model
  ///
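  /// A minimal usage sketch (the output directory below is illustrative):
  /// \code{cpp}
  ///   predictor->SaveOptimModel("./optimized_model");
  /// \endcode
  ///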
  void SaveOptimModel(const std::string &dir);

 protected:
  ///
  /// \brief Prepare the programs the predictor requires, including loading the
  /// model, optimizing the graph, and creating the variables needed by the
  /// executor, etc.
  ///
  /// \param[in] program paddle program
  /// \return Whether the function executed successfully
  ///
  bool PrepareProgram(const std::shared_ptr<framework::ProgramDesc> &program);
  ///
  /// \brief Prepare scope environment, each predictor has its own scope
  ///
  /// \param[in] parent_scope The scope of the predictor to be cloned, or null
  /// \return Whether the function executed successfully
  ///
  bool PrepareScope(const std::shared_ptr<framework::Scope> &parent_scope);
  ///
  /// \brief Create an Executor object
  ///
  /// \return Whether the function executed successfully
  ///
  bool CreateExecutor();
  ///
  /// \brief According to the model's program, the executor creates ops
  ///
  /// \return Whether the function executed successfully
  ///
  bool PrepareExecutor();

  ///
  /// \brief Load model program.
  ///
  /// \return Whether the function executed successfully
  ///
  bool LoadProgramDesc();
  ///
  /// \brief Load model parameters.
  ///
  /// \return Whether the function executed successfully
  ///
  bool LoadParameters();

  ///
  /// \brief Prepare input data, only used in Run()
  ///
  /// \param[in] input_datas input tensors
  /// \param[in] scope the scope used by predictor
  /// \return Whether the function executed successfully
  ///
  bool SetFeed(const std::vector<PaddleTensor> &input_datas,
               framework::Scope *scope);

  ///
  /// \brief Prepare input data, only used in Run()
  ///
  /// \param[in] inputs input tensors
  /// \param[in] scope the scope used by predictor
  /// \return Whether the function executed successfully
  ///
  bool SetFeed(const std::vector<paddle::Tensor> &inputs,
               framework::Scope *scope);

  ///
  /// \brief Get the output data, only used in Run()
  ///
  /// \param[out] output_data output tensors
  /// \param[in] scope the scope used by predictor
  /// \return Whether the function executed successfully
  ///
  bool GetFetch(std::vector<PaddleTensor> *output_data,
                framework::Scope *scope);

  ///
  /// \brief Get the output data, only used in Run()
  ///
  /// \param[out] outputs output tensors
  /// \param[in] scope the scope used by predictor
  /// \return Whether the function executed successfully
  ///
  bool GetFetch(std::vector<paddle::Tensor> *outputs, framework::Scope *scope);

  ///
  /// \brief Get the output data, only used in GetFetch()
  ///
  /// \param[in] fetchs the tensor produced by the fetch op
  /// \param[out] output_data output tensor
  ///
  template <typename T>
  void GetFetchOne(const phi::DenseTensor &fetchs, PaddleTensor *output_data);
  ///
  /// \brief PreSet for Mkldnn multi-thread and dynamic shape input.
  ///
  /// Used in AnalysisPredictor::Run(); AnalysisPredictor::ZeroCopyRun() is not
  /// supported yet.
  ///
  /// \param[in] inputs tensors
  ///
  void MkldnnPreSet(const std::vector<PaddleTensor> &inputs);
  ///
  /// \brief PreSet for Mkldnn multi-thread and dynamic shape input.
  ///
  /// Used in AnalysisPredictor::Run().
  ///
  /// \param[in] inputs tensors
  ///
  void MkldnnPreSet(const std::vector<paddle::Tensor> &inputs);

  ///
  /// \brief PreSet for Mkldnn multi-thread and dynamic shape input.
  ///
  /// Used in AnalysisPredictor::Run(); AnalysisPredictor::ZeroCopyRun() is not
  /// supported yet.
  ///
  /// \param[in] inputs_shape tensor shapes
  ///
  void MkldnnPreSet(const std::vector<std::vector<int>> &inputs_shape);

  ///
  /// \brief PostReset for Mkldnn multi-thread and dynamic shape input.
  ///
  /// Used in AnalysisPredictor::Run(); AnalysisPredictor::ZeroCopyRun() is not
  /// supported yet.
  ///
  void MkldnnPostReset();

#ifdef PADDLE_WITH_TENSORRT
  ///
  /// \brief save calibration table
  ///
  /// When we use the Paddle-TRT INT8 engine, we need to generate calibration
  /// table data first. The calibration table contains the range for each op's
  /// input and output. The whole process can be divided into several steps:
  /// 1. Builds a 32-bit engine, runs it on the calibration set, and records a
  ///  histogram for each tensor of the distribution of activation values.
  /// 2. Builds a calibration table from the histograms.
  /// After step 2, we need to store the calibration table on disk.
  ///
  /// \return Whether the function executed successfully
  ///
  bool SaveTrtCalibToDisk();
#endif

// Some more detailed tests are made friends of the predictor, so that all the
// internal details can be tested.
#if PADDLE_WITH_TESTING
  FRIEND_TEST(AnalysisPredictor, analysis_off);
  FRIEND_TEST(AnalysisPredictor, analysis_on);
  FRIEND_TEST(AnalysisPredictor, with_gpu);
#endif

 protected:
  const void *GetDeviceContexts() const override;

 private:
  void StatisticShapeRangeInfo();
  void CollectShapeRangeInfo();

  void InitPlace();
  void InitDeviceContexts();
  void InitResourceManager(void *stream);

#if defined(PADDLE_WITH_DISTRIBUTE) && defined(PADDLE_WITH_PSCORE)
  // fleet exe related

  ///
  /// \brief prepare for fleet executor to run
  ///
  /// Used in AnalysisPredictor::Init(),
  ///
  bool PrepareFleetExecutor();

  ///
  /// \brief init NCCL env for multi gpus inference
  ///
  /// Used in AnalysisPredictor::PrepareFleetExecutor()
  ///
  bool CommInit();

  ///
  /// \brief read the config to init NCCL env
  ///
  /// Used in AnalysisPredictor::CommInit()
  ///
  /// \param[in] ring_id_to_ranks: a ptr to ring_id_to_ranks
  /// \param[in] rank_to_ring_ids: a ptr to rank_to_ring_ids
  ///
  bool LoadConverterConfig(
      std::map<int64_t, std::vector<int64_t>> *ring_id_to_ranks,
      std::map<int64_t, std::vector<int64_t>> *rank_to_ring_ids);

  ///
  /// \brief add ops and run them with NaiveExecutor to init NCCL env
  ///
  /// Used in AnalysisPredictor::CommInit()
  ///
  /// \param[in] tmp_var_name: var name to hold NCCL unique id
  /// \param[in] nranks: number of ranks in one comm group
  /// \param[in] rank: relative rank of current rank in the comm group
  /// \param[in] peer_endpoints: group's peers' endpoints
  /// \param[in] block: the block to insert comm ops
  /// \param[in] ring_id: the ring id to be used to init NCCL env
  ///
  void InsertCommOp(std::string tmp_var_name,
                    int nranks,
                    int rank,
                    const std::vector<std::string> &peer_endpoints,
                    framework::BlockDesc *block,
                    int ring_id);
#endif

 private:
  AnalysisConfig config_;
  std::unique_ptr<Argument> argument_;
  Argument::fusion_statis_t fusion_statis_;
  std::unique_ptr<NaiveExecutor> executor_;
  platform::Place place_;
  std::shared_ptr<framework::Scope> scope_;
  framework::Scope *sub_scope_{nullptr};
  std::shared_ptr<framework::ProgramDesc> inference_program_;
  framework::OpCompatibleMap op_compatible_map_;
  std::vector<framework::OpDesc *> feeds_;
  std::map<std::string, size_t> feed_names_;
  // Sorted according to the idx.
  std::map<size_t, std::string> idx2feeds_;
  std::vector<framework::OpDesc *> fetches_;
  std::map<size_t, std::string> idx2fetches_;

  phi::DataType model_precision_{phi::DataType::FLOAT32};

#if PADDLE_WITH_MKLDNN
  // Helper class to perform quantization
  class MkldnnQuantizer;
  MkldnnQuantizer *mkldnn_quantizer_{nullptr};

#if PADDLE_WITH_TESTING
  friend class MkldnnQuantizerTest;
#endif
#endif

  // Memory buffer for feed inputs. Temporary LoDTensors would cause serious
  // concurrency problems, wrong results and memory leaks, so cache them here.
  std::vector<phi::DenseTensor> feed_tensors_;
  details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
  // A mutex to help make Clone thread safe.
  std::mutex clone_mutex_;

  // For memory optimization.
  const size_t max_shape_collect_count_{1000};
  int need_collect_var_shapes_{-1};  // -1 for default, 0 for false, 1 for true.
  std::vector<std::map<std::string, std::vector<int>>> batch_var_shapes_;
  int predictor_id_;
  int root_predictor_id_{-1};

 private:
  std::vector<Exp_OutputHookFunc> hookfuncs_;

  // Some status flags that help to determine the state of the predictor.
  bool status_is_cloned_{false};

  std::map<std::string, std::vector<std::vector<int32_t>>> shape_info_;
  std::map<std::string, std::vector<std::vector<int32_t>>> shape_tensor_value_;
  static int clone_num_;

  bool private_context_{false};
  void *predictor_stream_{nullptr};
  std::map<phi::Place, std::shared_future<std::unique_ptr<phi::DeviceContext>>>
      device_contexts_;

#if defined(PADDLE_WITH_DISTRIBUTE) && defined(PADDLE_WITH_PSCORE)
  // fleet executor related
  distributed::FleetExecutorDesc executor_desc_;
  std::shared_ptr<distributed::FleetExecutor> fleet_exe_;
  std::shared_ptr<distributed::TaskNode> task_node_;
#endif
  friend class paddle_infer::experimental::InternalUtils;
};

}  // namespace paddle