// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/analysis_predictor.h"
#include <glog/logging.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/framework/version.h"
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef PADDLE_WITH_MKLML
#include "paddle/fluid/platform/dynload/mklml.h"
#endif

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/inference/api/mkldnn_quantizer.h"
#endif

#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

namespace paddle {

using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
using inference::tensorrt::TRTCalibratorEngine;
using inference::tensorrt::TRTCalibratorEngineManager;
#endif

namespace {
bool IsPersistable(const framework::VarDesc *var) {
  if (var->Persistable() &&
      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
      var->GetType() != framework::proto::VarType::FETCH_LIST &&
      var->GetType() != framework::proto::VarType::RAW) {
    return true;
  }
  return false;
}
}  // namespace

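// Copies a user-provided PaddleTensor into a framework::LoDTensor on the given
// place: a plain memcpy on CPU, an asynchronous host-to-device copy on GPU.
// The LoD information is carried over as well.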
bool PaddleTensorToLoDTensor(const PaddleTensor &pt, framework::LoDTensor *t,
                             const platform::Place &place) {
  framework::DDim ddim = framework::make_ddim(pt.shape);
  void *input_ptr;
  if (pt.dtype == PaddleDType::INT64) {
    input_ptr = t->mutable_data<int64_t>(ddim, place);
  } else if (pt.dtype == PaddleDType::FLOAT32) {
    input_ptr = t->mutable_data<float>(ddim, place);
  } else if (pt.dtype == PaddleDType::INT32) {
    input_ptr = t->mutable_data<int32_t>(ddim, place);
  } else {
    LOG(ERROR) << "unsupported feed type " << pt.dtype;
    return false;
  }

  PADDLE_ENFORCE_NOT_NULL(
      input_ptr,
      paddle::platform::errors::Fatal(
          "Cannot convert to LoDTensor because LoDTensor creation failed."));
  PADDLE_ENFORCE_NOT_NULL(
      pt.data.data(),
      paddle::platform::errors::InvalidArgument(
          "The data contained in the input PaddleTensor is illegal."));

  if (platform::is_cpu_place(place)) {
    // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
    std::memcpy(static_cast<void *>(input_ptr), pt.data.data(),
                pt.data.length());
  } else {
#ifdef PADDLE_WITH_CUDA
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto *dev_ctx =
        static_cast<const platform::CUDADeviceContext *>(pool.Get(place));
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, place);
    memory::Copy(dst_gpu_place, static_cast<void *>(input_ptr),
                 platform::CPUPlace(), pt.data.data(), pt.data.length(),
                 dev_ctx->stream());
#else
    PADDLE_THROW(paddle::platform::errors::Fatal(
        "Not compiled with CUDA, should not reach here."));
#endif
  }
  // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
  framework::LoD lod;
  for (auto &level : pt.lod) {
    lod.emplace_back(level);
  }
  t->set_lod(lod);
  return true;
}

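// Initializes the predictor: prepares the scope, the executor and the
// (optionally optimized) program, then builds the feed/fetch bindings used by
// Run().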
bool AnalysisPredictor::Init(
    const std::shared_ptr<framework::Scope> &parent_scope,
    const std::shared_ptr<framework::ProgramDesc> &program) {
  VLOG(3) << "Predictor::init()";
  if (config_.with_profile_) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    auto tracking_device = config_.use_gpu() ? platform::ProfilerState::kAll
                                             : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  } else {
    LOG(INFO) << "Profiler is deactivated, and no profiling report will be "
                 "generated.";
  }

  // no matter with or without MKLDNN
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());

  if (!PrepareScope(parent_scope)) {
    return false;
  }
  if (!CreateExecutor()) {
    return false;
  }
  if (!PrepareProgram(program)) {
    return false;
  }

  // Prepare executor, create local variables.
  if (!PrepareExecutor()) {
    return false;
  }

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();

  return true;
}

bool AnalysisPredictor::PrepareScope(
    const std::shared_ptr<framework::Scope> &parent_scope) {
  if (parent_scope) {
    PADDLE_ENFORCE_NOT_NULL(
        parent_scope,
        "Both program and parent_scope should be set in Clone mode.");
    scope_ = parent_scope;
    status_is_cloned_ = true;
  } else {
    paddle::framework::InitDevices(false);
    scope_.reset(new paddle::framework::Scope());
    status_is_cloned_ = false;
  }
  sub_scope_ = &scope_->NewScope();
  return true;
}
bool AnalysisPredictor::PrepareProgram(
    const std::shared_ptr<framework::ProgramDesc> &program) {
  if (!program) {
    if (!LoadProgramDesc()) return false;
    // If not cloned, the parameters should be loaded.
    // If config_.ir_optim() is True, the parameters are loaded in
    // OptimizeInferenceProgram(), but other persistable variables
    // (like RAW type vars) are not created in the scope.
    // If config_.ir_optim() is False, the parameters are loaded in
    // LoadParameters(), but the other persistable variables still need to be
    // created. So in both cases, create the persistable variables first.
    if (!CheckOperatorCompatible()) {
      LOG(WARNING) << "WARNING: Results may DIFFER! "
                      "Please use the corresponding version of the model and "
                      "prediction library, and do not use the develop branch.";
    }
    executor_->CreateVariables(*inference_program_, 0, true, sub_scope_);

    // If enable_ir_optim_ is false, the analysis passes (op fusion, graph
    // analysis, TRT subgraph, MKL-DNN, etc.) will not be executed.
    OptimizeInferenceProgram();
  } else {
    // If the program is passed in from outside, there is no need to optimize
    // it; this logic is used in the clone scenario.
    inference_program_ = program;
  }

  executor_->CreateVariables(*inference_program_, 0, false, sub_scope_);

  return true;
}
bool AnalysisPredictor::CreateExecutor() {
  if (config_.use_gpu()) {
    status_use_gpu_ = true;
    place_ = paddle::platform::CUDAPlace(config_.gpu_device_id());
#ifdef PADDLE_WITH_CUDA
    if (config_.thread_local_stream_enabled()) {
      auto *ctx = static_cast<platform::CUDADeviceContext *>(
          platform::DeviceContextPool::Instance().Get(place_));
      VLOG(3) << "The prediction process will be completed using a separate "
                 "normal-priority stream on each thread.";
      ctx->ResetThreadContext(platform::stream::Priority::kNormal);
    }
#endif
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  executor_.reset(new paddle::framework::NaiveExecutor(place_));
  return true;
}
bool AnalysisPredictor::PrepareExecutor() {
  executor_->Prepare(sub_scope_, *inference_program_, 0,
                     config_.use_feed_fetch_ops_);

  PADDLE_ENFORCE_NOT_NULL(sub_scope_);

  return true;
}

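// When an MKL-DNN cache capacity is configured, switch MKL-DNN into the
// cache-clearing session and record the current input shapes before running.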
void AnalysisPredictor::MkldnnPreSet(const std::vector<PaddleTensor> &inputs) {
#ifdef PADDLE_WITH_MKLDNN
  VLOG(2) << "AnalysisPredictor::Run get_cur_mkldnn_session_id="
          << platform::get_cur_mkldnn_session_id();
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    VLOG(2) << "In mkldnn cache clear mode.";
    platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_CacheClearing);
    platform::set_cur_input_shape_cache_capacity(
        config_.mkldnn_cache_capacity_);
    // Set current_input_shape for caching dynamic shape.
    std::stringstream ss;
    for (size_t i = 0; i < inputs.size(); ++i) {
      for (size_t j = 0; j < inputs[i].shape.size(); ++j) {
        ss << inputs[i].shape[j] << "-";
      }
    }
    VLOG(2) << "Set input shape=" << ss.str();
    platform::set_cur_input_shape_str(ss.str());
  }
#endif
}

void AnalysisPredictor::MkldnnPostReset() {
#ifdef PADDLE_WITH_MKLDNN
  // In cache clearing mode.
  if (config_.mkldnn_cache_capacity_ > 0) {
    if (VLOG_IS_ON(2)) {
      auto shape_blob_size = static_cast<platform::MKLDNNDeviceContext *>(
                                 (&platform::DeviceContextPool::Instance())
                                     ->Get(platform::CPUPlace()))
                                 ->GetShapeBlobSize();
      CHECK_LE(shape_blob_size,
               static_cast<size_t>(config_.mkldnn_cache_capacity_));
    }
    paddle::platform::set_cur_mkldnn_session_id(
        platform::kMKLDNNSessionID_Default);
    platform::set_cur_input_shape_cache_capacity(0);
    platform::set_cur_input_shape_str("");
  }
#endif
}

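// Runs one prediction: copies the inputs into the feed variables, executes the
// program, collects the fetch outputs, and resets per-batch state such as
// tensor-array containers.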
bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPreSet(inputs);
#endif
  VLOG(3) << "Predictor::predict";
  inference::Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
  PADDLE_ENFORCE_NOT_NULL(scope, "The scope should not be nullptr.");
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "fail to set feed";
    return false;
  }

  // Run the inference program.
  // If the variables are shared, we do not need to create them here.
  executor_->Run();

  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "fail to get fetches";
    return false;
  }

  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // All the containers in the scope will be held during inference, but the
  // operators assume that a container is reset after each batch.
  // Here is a bugfix: collect all the container variables and reset them to a
  // bool; the next time, the operator will call MutableData and construct a
  // new container, so that the container is empty for each batch.
  if (sub_scope_) {
    tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
  }
  tensor_array_batch_cleaner_.ResetNoTensorVars();

  // recover the cpu_math_library_num_threads to 1, in order to avoid thread
  // conflict when integrating it into deployment service.
  paddle::platform::SetNumThreads(1);
#ifdef PADDLE_WITH_MKLDNN
  if (config_.use_mkldnn_) MkldnnPostReset();
#endif
#if defined(PADDLE_WITH_MKLML)
  // Frees unused memory allocated by the Intel® MKL Memory Allocator to
  // avoid memory leak. See:
  // https://software.intel.com/en-us/mkl-developer-reference-c-mkl-free-buffers
  platform::dynload::MKL_Free_Buffers();
#endif
  return true;
}

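// Copies the user-provided inputs into the "feed" variables of the scope,
// matching them by name when specify_input_name_ is set, otherwise by the
// "col" attribute of the corresponding feed op.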
bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size() << " but get "
               << inputs.size();
    return false;
  }

  // Cache the inputs memory for better concurrency performance.
  feed_tensors_.resize(inputs.size());

  for (size_t i = 0; i < inputs.size(); ++i) {
    framework::LoDTensor *input = &feed_tensors_[i];
    if (!PaddleTensorToLoDTensor(inputs[i], input, place_)) {
      return false;
    }
    int idx = -1;
    if (config_.specify_input_name_) {
      auto name = inputs[i].name;
      if (feed_names_.find(name) == feed_names_.end()) {
        LOG(ERROR) << "feed names from program do not have name: [" << name
                   << "] from specified input";
      }
      idx = feed_names_[name];
    } else {
      idx = BOOST_GET_CONST(int, feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, *input, "feed", idx);
  }
  return true;
}

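// Copies one fetched LoDTensor into the user-facing PaddleTensor, filling in
// its shape, data buffer and LoD.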
template <typename T>
void AnalysisPredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                    PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The fetched tensor output by the fetch op should always be in CPU memory,
  // so just copy.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                 framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetches_.size());
  for (size_t i = 0; i < fetches_.size(); ++i) {
    int idx = BOOST_GET_CONST(int, fetches_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::FetchType &fetch_var =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto &fetch = BOOST_GET(framework::LoDTensor, fetch_var);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    output->name = fetches_[idx]->Input("X")[0];
    if (type == framework::proto::VarType::FP32) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == framework::proto::VarType::INT64) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else if (type == framework::proto::VarType::INT32) {
      GetFetchOne<int32_t>(fetch, output);
      output->dtype = PaddleDType::INT32;
    } else {
      LOG(ERROR) << "unknown type, only support float32, int64 and int32 now.";
    }
  }
  return true;
}

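// Copies the relevant AnalysisConfig settings into the Argument that drives
// the analysis passes (TensorRT/Lite subgraphs, MKL-DNN, quantization, memory
// optimization, and so on).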
void AnalysisPredictor::PrepareArgument() {
  argument_.SetUseGPU(config_.use_gpu());
  argument_.SetUseFcPadding(config_.use_fc_padding());
  argument_.SetGPUDeviceId(config_.gpu_device_id());
  argument_.SetEnableAnalysisOptim(config_.enable_ir_optim_);
  argument_.SetEnableMemoryOptim(config_.enable_memory_optim());
  argument_.SetModelFromMemory(config_.model_from_memory_);
  // Analyze inference_program
  argument_.SetPredictorID(predictor_id_);
  argument_.SetOptimCacheDir(config_.opt_cache_dir_);
  if (!config_.model_dir().empty()) {
    argument_.SetModelDir(config_.model_dir());
  } else {
    PADDLE_ENFORCE(
        !config_.params_file().empty(),
        "Either model_dir or (param_file, prog_file) should be set.");
    PADDLE_ENFORCE(!config_.prog_file().empty());
    std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

    argument_.SetModelProgramPath(config_.prog_file());
    argument_.SetModelParamsPath(config_.params_file());
  }

  if (config_.use_gpu() && config_.tensorrt_engine_enabled()) {
    LOG(INFO) << "TensorRT subgraph engine is enabled";
    argument_.SetUseTensorRT(true);
    argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_);
    argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_);
    argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_);
    argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_);
    argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_);
    argument_.SetTensorRtUseCalibMode(config_.trt_use_calib_mode_);
    argument_.SetMinInputShape(config_.min_input_shape_);
    argument_.SetMaxInputShape(config_.max_input_shape_);
    argument_.SetOptimInputShape(config_.optim_input_shape_);
    argument_.SetCloseTrtPluginFp16(config_.disable_trt_plugin_fp16_);
  }

  if (config_.lite_engine_enabled()) {
    argument_.SetLitePrecisionMode(config_.lite_precision_mode_);
    argument_.SetLitePassesFilter(config_.lite_passes_filter_);
    argument_.SetLiteOpsFilter(config_.lite_ops_filter_);
    LOG(INFO) << "Lite subgraph engine is enabled";
  }

  if (config_.use_mkldnn_) {
    LOG(INFO) << "MKLDNN is enabled";
    argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_);
  }

#ifdef PADDLE_WITH_MKLDNN
  if (config_.mkldnn_quantizer_enabled()) {
    LOG(INFO) << "Quantization is enabled";
    argument_.SetQuantizeEnabledOpTypes(
        config_.mkldnn_quantizer_config()->enabled_op_types());
    argument_.SetQuantizeExcludedOpIds(
        config_.mkldnn_quantizer_config()->excluded_op_ids());
  }
#endif

  auto passes = config_.pass_builder()->AllPasses();
  if (!config_.ir_optim()) {
    passes.clear();
    LOG(INFO) << "ir_optim is turned off, no IR pass will be executed";
  }
  argument_.SetDisableLogs(config_.glog_info_disabled());
  argument_.SetIrAnalysisPasses(passes);
  argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses());
  argument_.SetScopeNotOwned(scope_.get());
}

// NOTE All the members in AnalysisConfig should be copied to Argument.
void AnalysisPredictor::OptimizeInferenceProgram() {
  PrepareArgument();
  Analyzer().Run(&argument_);

  PADDLE_ENFORCE(argument_.scope_valid());
  VLOG(5) << "to prepare executor";
  ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
  inference_program_.reset(
      new framework::ProgramDesc(argument_.ir_analyzed_program()));
  // The config and the argument take a lot of storage; once the predictor
  // settings are complete, we release that storage.
  argument_.PartiallyRelease();
  config_.PartiallyRelease();
  LOG(INFO) << "======= optimize end =======";
}

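// Factory specialization that builds an AnalysisPredictor from an
// AnalysisConfig: it initializes the GPU-related gflags once per process,
// creates the predictor, and runs MKL-DNN quantization when requested.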
template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
  if (config.glog_info_disabled()) {
    FLAGS_logtostderr = 1;
    FLAGS_minloglevel = 2;  // GLOG_ERROR
  }
  VLOG(3) << "create AnalysisConfig";
  PADDLE_ENFORCE(config.is_valid(),
                 "Note: Each config can only be used for one predictor.");

  if (config.use_gpu()) {
    static std::once_flag gflags_initialized;
    static bool process_level_allocator_enabled;

    std::call_once(gflags_initialized, [&]() {
      std::vector<std::string> gflags;
      PADDLE_ENFORCE_GE(
          config.memory_pool_init_size_mb(), 0.f,
          platform::errors::InvalidArgument(
              "The size of memory pool should not be negative."));
      PADDLE_ENFORCE_GE(
          config.gpu_device_id(), 0,
          platform::errors::InvalidArgument(
              "Invalid device id (%d). The device id should not be negative.",
              config.gpu_device_id()));
      gflags.push_back("dummy");

      float fraction_of_gpu_memory = config.fraction_of_gpu_memory_for_pool();
      if (fraction_of_gpu_memory > 0.95f) {
        LOG(ERROR)
            << "Allocate too much memory for the GPU memory pool, assigned "
            << config.memory_pool_init_size_mb() << " MB";
        LOG(ERROR) << "Try to shrink the value by setting "
                      "AnalysisConfig::EnableGpu(...)";
      }

      if (fraction_of_gpu_memory >= 0.0f || fraction_of_gpu_memory <= 0.95f) {
        std::string flag = "--fraction_of_gpu_memory_to_use=" +
                           std::to_string(fraction_of_gpu_memory);
        VLOG(3) << "set flag: " << flag;
        gflags.push_back(flag);
        gflags.push_back("--cudnn_deterministic=True");
      }

      if (config.thread_local_stream_enabled()) {
        gflags.push_back("--allocator_strategy=thread_local");
        process_level_allocator_enabled = false;
      } else {
        gflags.push_back("--allocator_strategy=naive_best_fit");
        process_level_allocator_enabled = true;
      }

      if (framework::InitGflags(gflags)) {
        VLOG(3) << "The following gpu analysis configurations only take effect "
                   "for the first predictor: ";
        for (size_t i = 1; i < gflags.size(); ++i) {
          VLOG(3) << gflags[i];
        }
      } else {
        LOG(WARNING) << "The one-time configuration of analysis predictor "
                        "failed, which may be due to native predictor called "
                        "first and its configurations taken effect.";
      }
    });

    if (config.thread_local_stream_enabled() &&
        process_level_allocator_enabled) {
      LOG(FATAL) << "When binding threads and streams, using process-level "
                    "allocators will lead to undefined results due to "
                    "asynchronous memory operations. The thread and stream "
                    "binding configuration of all predictors should be the "
                    "same in a single process.";
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new AnalysisPredictor(config));
  // Each config can only be used for one predictor.
  config.SetInValid();
  auto predictor_p = dynamic_cast<AnalysisPredictor *>(predictor.get());

  if (!predictor_p->Init(nullptr)) {
    return nullptr;
  }

  if (config.mkldnn_quantizer_enabled() && !predictor_p->MkldnnQuantize()) {
    return nullptr;
  }

  return predictor;
}

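// Runs MKL-DNN post-training quantization on the loaded program; only
// available when Paddle is built with MKL-DNN support.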
bool AnalysisPredictor::MkldnnQuantize() {
#if PADDLE_WITH_MKLDNN
  if (!mkldnn_quantizer_)
    mkldnn_quantizer_ = new AnalysisPredictor::MkldnnQuantizer(
        *this, config_.mkldnn_quantizer_config());
  return mkldnn_quantizer_->Quantize();
#else
  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
  return false;
#endif
}

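// Scans block 0 of the program for feed/fetch ops and builds the name <->
// column-index maps used when setting inputs and reading outputs.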
void AnalysisPredictor::PrepareFeedFetch() {
  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
  CreateFeedFetchVar(sub_scope_);
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = BOOST_GET_CONST(int, op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
      idx2feeds_[idx] = op->Output("Out")[0];
    } else if (op->Type() == "fetch") {
      int idx = BOOST_GET_CONST(int, op->GetAttr("col"));
      if (fetches_.size() <= static_cast<size_t>(idx)) {
        fetches_.resize(idx + 1);
      }
      fetches_[idx] = op;
      idx2fetches_[idx] = op->Input("X")[0];
    }
  }
}

void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  auto *var = scope->Var("feed");
  var->GetMutable<framework::FeedList>();
  var = scope->Var("fetch");
  var->GetMutable<framework::FetchList>();
}

std::vector<std::string> AnalysisPredictor::GetInputNames() {
  std::vector<std::string> input_names;
  for (auto &item : idx2feeds_) {
    input_names.push_back(item.second);
  }
  return input_names;
}

std::map<std::string, std::vector<int64_t>>
AnalysisPredictor::GetInputTensorShape() {
  std::map<std::string, std::vector<int64_t>> input_shapes;
  std::vector<std::string> names = GetInputNames();
  for (std::string name : names) {
    auto *var = inference_program_->Block(0).FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var, "input %s does not exist.", name);
    input_shapes[name] = var->GetShape();
  }
  return input_shapes;
}

std::vector<std::string> AnalysisPredictor::GetOutputNames() {
  std::vector<std::string> output_names;
  for (auto &item : idx2fetches_) {
    output_names.push_back(item.second);
  }
  return output_names;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = true;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }

  return res;
}

std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
    const std::string &name) {
  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
  std::unique_ptr<ZeroCopyTensor> res(
      new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
  res->input_or_output_ = false;
  res->SetName(name);
  if (platform::is_cpu_place(place_)) {
    res->SetPlace(PaddlePlace::kCPU);
  } else {
    auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, place_);
    res->SetPlace(PaddlePlace::kGPU, gpu_place.GetDeviceId());
  }
  return res;
}

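// Executes the program without feed/fetch ops; inputs and outputs are
// exchanged directly through ZeroCopyTensor handles bound to the scope.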
bool AnalysisPredictor::ZeroCopyRun() {
  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
  executor_->Run();
  // Fix TensorArray reuse not cleaned bug.
  tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
  tensor_array_batch_cleaner_.ResetTensorArray();

  // recover the cpu_math_library_num_threads to 1, in order to avoid thread
  // conflict when integrating it into deployment service.
  paddle::platform::SetNumThreads(1);
#if defined(PADDLE_WITH_MKLML)
  // Frees unused memory allocated by the Intel® MKL Memory Allocator to
  // avoid memory leak. See:
  // https://software.intel.com/en-us/mkl-developer-reference-c-mkl-free-buffers
  platform::dynload::MKL_Free_Buffers();
#endif
  return true;
}

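// Loads the serialized ProgramDesc, either from the __model__ file on disk or
// from the in-memory buffer passed through config_.prog_file() when the model
// is held in memory.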
bool AnalysisPredictor::LoadProgramDesc() {
  // Initialize the inference program
  std::string filename;
  if (!config_.model_dir().empty()) {
    filename = config_.model_dir() + "/__model__";
  } else if (!config_.prog_file().empty() && !config_.params_file().empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with that used
    // in Python API `fluid.io.save_inference_model`.
    filename = config_.prog_file();
  } else {
    if (config_.model_dir().empty() && config_.prog_file().empty()) {
      LOG(ERROR)
          << "Either model_dir or (prog_file, param_file) should be set.";
      return false;
    }
    LOG(ERROR) << string::Sprintf(
        "not valid model path '%s' or program path '%s'.", config_.model_dir(),
        config_.params_file());
    return false;
  }

  // Create ProgramDesc
  framework::proto::ProgramDesc proto;
  if (!config_.model_from_memory()) {
    std::string pb_content;
    // Read binary
    std::ifstream fin(filename, std::ios::in | std::ios::binary);
    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
                   filename);
    fin.seekg(0, std::ios::end);
    pb_content.resize(fin.tellg());
    fin.seekg(0, std::ios::beg);
    fin.read(&(pb_content.at(0)), pb_content.size());
    fin.close();

    proto.ParseFromString(pb_content);
  } else {
    proto.ParseFromString(config_.prog_file());
  }
  inference_program_.reset(new framework::ProgramDesc(proto));
  return true;
}

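// Loads the persistable parameters by building a small temporary program of
// load / load_combine ops and running it with a NaiveExecutor on the
// predictor's place.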
bool AnalysisPredictor::LoadParameters() {
  PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                          "The inference program should be loaded first.");

  const auto &global_block = inference_program_->MutableBlock(0);

  // create a temporary program to load parameters.

  std::unique_ptr<framework::ProgramDesc> load_program(
      new framework::ProgramDesc());
  framework::BlockDesc *load_block = load_program->MutableBlock(0);
  std::vector<std::string> params;

  for (auto *var : global_block->AllVars()) {
    if (IsPersistable(var)) {
      VLOG(3) << "persistable variable's name: " << var->Name();

      framework::VarDesc *new_var = load_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      if (!config_.params_file().empty()) {
        params.push_back(new_var->Name());
      } else {
        // append_op
        framework::OpDesc *op = load_block->AppendOp();
        op->SetType("load");
        op->SetOutput("Out", {new_var->Name()});
        op->SetAttr("file_path", {config_.model_dir() + "/" + new_var->Name()});
        op->CheckAttrs();
      }
    }
  }

  if (!config_.params_file().empty()) {
    // sort paramlist to have consistent ordering
    std::sort(params.begin(), params.end());
    // append just the load_combine op
    framework::OpDesc *op = load_block->AppendOp();
    op->SetType("load_combine");
    op->SetOutput("Out", params);
    op->SetAttr("file_path", {config_.params_file()});
    op->CheckAttrs();
  }

  // Use NaiveExecutor to Load parameters.
  framework::NaiveExecutor e(place_);
  e.Prepare(scope_.get(), *load_program, 0, false);
  e.Run();
  VLOG(3) << "get " << scope_->LocalVarNames().size() << " vars after load";

  return true;
}

#if PADDLE_WITH_TENSORRT
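// Waits for the TRT INT8 calibration threads to finish and writes the
// resulting calibration table into the model's optimization cache directory.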
bool AnalysisPredictor::SaveTrtCalibToDisk() {
  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
                 "This func can be invoked only in trt mode");
  auto &block = inference_program_->Block(0);
  for (auto &op_desc : block.AllOps()) {
    if (op_desc->Type() == "tensorrt_engine") {
      std::string engine_name =
          BOOST_GET_CONST(std::string, op_desc->GetAttr("engine_key"));
      if (!Singleton<TRTCalibratorEngineManager>::Global().Has(engine_name)) {
        LOG(ERROR) << "You should run the predictor(with trt) on the real data "
                      "to generate calibration info";
        return false;
      }
      TRTCalibratorEngine *calib_engine =
          Singleton<TRTCalibratorEngineManager>::Global().Get(engine_name);
      LOG(INFO) << "Wait for calib threads done.";
      calib_engine->calib_->waitAndSetDone();
      LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot "
                   "of time...";
      calib_engine->thr_->join();
      std::string calibration_table_data =
          calib_engine->calib_->getCalibrationTableAsString();

      if (calibration_table_data.empty()) {
        LOG(ERROR) << "the calibration table is empty.";
        return false;
      }

      std::string model_opt_cache_dir =
          argument_.Has("model_dir")
              ? argument_.model_dir()
              : inference::analysis::GetDirRoot(argument_.model_program_path());

      std::string calibration_table_data_path =
          inference::analysis::GetTrtCalibPath(
              inference::analysis::GetOrCreateModelOptCacheDir(
                  model_opt_cache_dir),
              engine_name);

      std::ofstream ofile(calibration_table_data_path, std::ios::out);
      LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file "
                << calibration_table_data_path;
      ofile << calibration_table_data;
      ofile.close();
    }
  }
  // Free all calibrator resources.
  Singleton<TRTCalibratorEngineManager>::Global().DeleteALL();
  return true;
}
#endif

AnalysisPredictor::~AnalysisPredictor() {
#if PADDLE_WITH_TENSORRT
  if (config_.tensorrt_engine_enabled() &&
      config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&
      Singleton<TRTCalibratorEngineManager>::Global().Has()) {
    SaveTrtCalibToDisk();
  }
#endif
  if (config_.with_profile_) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }

#if PADDLE_WITH_MKLDNN
  if (mkldnn_quantizer_) {
    delete mkldnn_quantizer_;
    mkldnn_quantizer_ = nullptr;
  }
#endif
}

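// Creates a new predictor that shares this predictor's scope and program;
// guarded by clone_mutex_ so that concurrent clones are safe.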
std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
  std::lock_guard<std::mutex> lk(clone_mutex_);
  auto *x = new AnalysisPredictor(config_);
  x->Init(scope_, inference_program_);
  return std::unique_ptr<PaddlePredictor>(x);
}

std::string AnalysisPredictor::GetSerializedProgram() const {
  return inference_program_->Proto()->SerializeAsString();
}

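// Checks every operator used by the model against the op-compatibility map so
// that version mismatches between the model and the prediction library are
// reported before running.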
bool AnalysisPredictor::CheckOperatorCompatible() {
  if (!inference_program_) {
    LOG(FATAL) << "Inference program version check failed because the program "
                  "does not exist.";
    return false;
  }
  bool res = true;
  op_compatible_map_.ReadFromProto(*inference_program_->OpCompatibleMap());
  const auto &version = framework::DumpVersion(framework::kCurProgramVersion);
  LOG(INFO) << "MODEL VERSION: "
            << framework::DumpVersion(inference_program_->Version());
  LOG(INFO) << "PREDICTOR VERSION: " << version;
  std::set<std::string> op_types;
  for (size_t i = 0; i < inference_program_->Size(); ++i) {
    const auto &block = inference_program_->Block(i);
    for (const auto *op : block.AllOps()) {
      op_types.insert(op->Type());
    }
  }
  for (const auto type : op_types) {
    auto compatible_type =
        op_compatible_map_.IsRequireMiniVersion(type, version);
    if (compatible_type != framework::OpCompatibleType::compatible) {
      if (!framework::kCurProgramVersion) {
        LOG(WARNING) << " - Version incompatible ("
                     << static_cast<int>(compatible_type) << ") " << type;
      }
      res = false;
    }
  }
  return res;
}

// Saves the optimized program and its parameters under the given directory.
void AnalysisPredictor::SaveOptimModel(const std::string &dir) {
  // save model
  std::string model_name = dir + "/model";
  std::ofstream outfile;
  outfile.open(model_name, std::ios::out | std::ios::binary);
  std::string inference_prog_desc = GetSerializedProgram();
  outfile << inference_prog_desc;
  // save params
  framework::ProgramDesc save_program;
  auto *save_block = save_program.MutableBlock(0);

  const framework::ProgramDesc &main_program = program();
  const framework::BlockDesc &global_block = main_program.Block(0);
  std::vector<std::string> save_var_list;
  for (framework::VarDesc *var : global_block.AllVars()) {
    if (IsPersistable(var)) {
      framework::VarDesc *new_var = save_block->Var(var->Name());
      new_var->SetShape(var->GetShape());
      new_var->SetDataType(var->GetDataType());
      new_var->SetType(var->GetType());
      new_var->SetLoDLevel(var->GetLoDLevel());
      new_var->SetPersistable(true);

      save_var_list.push_back(new_var->Name());
    }
  }
  std::sort(save_var_list.begin(), save_var_list.end());
  auto *op = save_block->AppendOp();
  op->SetType("save_combine");
  op->SetInput("X", save_var_list);
  op->SetAttr("file_path", dir + "/params");
  op->CheckAttrs();

  platform::CPUPlace place;
  framework::Executor exe(place);
  exe.Run(save_program, scope(), 0, true, true);
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
    const AnalysisConfig &config) {
  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
      config);
}

}  // namespace paddle

#if PADDLE_WITH_TENSORRT
USE_TRT_CONVERTER(elementwise_add_weight);
USE_TRT_CONVERTER(elementwise_add_tensor);
USE_TRT_CONVERTER(elementwise_sub_tensor);
USE_TRT_CONVERTER(elementwise_div_tensor);
USE_TRT_CONVERTER(elementwise_mul_tensor);
USE_TRT_CONVERTER(elementwise_max_tensor);
USE_TRT_CONVERTER(elementwise_min_tensor);
USE_TRT_CONVERTER(elementwise_pow_tensor);
USE_TRT_CONVERTER(mul);
USE_TRT_CONVERTER(conv2d);
USE_TRT_CONVERTER(relu);
USE_TRT_CONVERTER(sigmoid);
USE_TRT_CONVERTER(tanh);
USE_TRT_CONVERTER(fc);
USE_TRT_CONVERTER(pool2d);
USE_TRT_CONVERTER(softmax);
USE_TRT_CONVERTER(batch_norm);
USE_TRT_CONVERTER(concat);
USE_TRT_CONVERTER(dropout);
USE_TRT_CONVERTER(pad);
USE_TRT_CONVERTER(hard_sigmoid);
USE_TRT_CONVERTER(hard_swish);
USE_TRT_CONVERTER(split);
USE_TRT_CONVERTER(prelu);
USE_TRT_CONVERTER(conv2d_transpose);
USE_TRT_CONVERTER(leaky_relu);
USE_TRT_CONVERTER(shuffle_channel);
USE_TRT_CONVERTER(swish);
USE_TRT_CONVERTER(instance_norm);
USE_TRT_CONVERTER(layer_norm);
USE_TRT_CONVERTER(gelu);
USE_TRT_CONVERTER(multihead_matmul);
USE_TRT_CONVERTER(fused_embedding_eltwise_layernorm);
USE_TRT_CONVERTER(skip_layernorm);
USE_TRT_CONVERTER(slice);
USE_TRT_CONVERTER(scale);
#endif