// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "lite/api/paddle_place.h"
#include "lite/core/scope.h"
#include "lite/core/tensor.h"
#include "lite/core/types.h"
#include "lite/model_parser/cpp/block_desc.h"
#include "lite/model_parser/desc_apis.h"
#include "lite/utils/all.h"
#include "lite/utils/variant.h"
/*
 * This file contains all the argument parameter data structures for operators.
 */

namespace paddle {
namespace lite {
namespace operators {

struct ParamBase {
 public:
  const std::vector<const Tensor*>* input_tensor_ptrs() { return nullptr; }
  std::vector<Tensor*>* output_tensor_ptrs() { return nullptr; }

 protected:
  std::shared_ptr<std::vector<const Tensor*>> input_tensor_ptrs_cache_{nullptr};
  std::shared_ptr<std::vector<Tensor*>> output_tensor_ptrs_cache_{nullptr};
};

using param_t = Any;
#define WITH_INT8_CONFIG             \
  bool enable_int8{false};           \
  float input_scale{1.0};            \
  std::vector<float> weight_scale{}; \
  float output_scale{1.0};           \
  int bit_length{8};
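// Usage note: a param struct pulls these int8 fields in by placing the macro
// inside its body. For example (FooParam is a hypothetical struct used only
// for illustration):
//
//   struct FooParam : ParamBase {
//     WITH_INT8_CONFIG
//   };
//
// which expands to the five members declared by the macro above:
// enable_int8, input_scale, weight_scale, output_scale and bit_length.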

/// ----------------------- Functional operators ------------------------------
struct FeedParam : ParamBase {
  std::vector<lite::Tensor>* feed_list{};
  lite::Tensor* out{};
  int col;
};

struct FetchParam : ParamBase {
  const lite::Tensor* input{};
  std::vector<lite::Tensor>* fetch_list{};
  int col;
};

// Helper op for lite framework
struct IoCopyParam : ParamBase {
  const lite::Tensor* x{};
  lite::Tensor* y{};
  int process_type{0};
};

struct LayoutParam : ParamBase {
  const lite::Tensor* x{};
  lite::Tensor* y{};
  int process_type{0};
};

struct CalibParam : ParamBase {
  const lite::Tensor* input{};
  lite::Tensor* output{};
  float scale;
};

struct SubgraphParam : ParamBase {
  std::vector<std::string> input_names{};
  std::vector<std::string> output_names{};
  std::vector<std::string> input_data_names{};
  std::vector<std::string> output_data_names{};
  int sub_block_idx{-1};
  cpp::BlockDesc* sub_block_desc{nullptr};
  Scope* scope{nullptr};
};

/// -------------------------- NN operators ------------------------------------

struct FcParam : ParamBase {
  lite::Tensor* input{nullptr};
  lite::Tensor* w{nullptr};
  lite::Tensor* bias{nullptr};
  lite::Tensor* output{nullptr};
  lite::DDim in_mat_dims;
  int in_num_col_dims{1};
  std::string activation_type{""};
  bool padding_weights{false};
  // for int8
  WITH_INT8_CONFIG
  ///////////////////////////////////////////////////////////////////////////////////
  // get a vector of input tensors
  const std::vector<const Tensor*>* input_tensor_ptrs() {
    if (!input_tensor_ptrs_cache_) {
      input_tensor_ptrs_cache_.reset(new std::vector<const Tensor*>({input}));
    }
    return input_tensor_ptrs_cache_.get();
  }
  // get a vector of output tensors
  const std::vector<Tensor*>* output_tensor_ptrs() {
    if (!output_tensor_ptrs_cache_) {
      output_tensor_ptrs_cache_.reset(new std::vector<lite::Tensor*>({output}));
    }
    return output_tensor_ptrs_cache_.get();
  }
};
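// Usage sketch (illustrative only, not part of the library API): the accessors
// above build their pointer vectors lazily on the first call and return the
// cached vectors afterwards. The tensors named below are hypothetical.
//
//   FcParam fc_param;
//   fc_param.input = &input_tensor;    // some lite::Tensor owned by the caller
//   fc_param.output = &output_tensor;  // some lite::Tensor owned by the caller
//   const std::vector<const Tensor*>* ins = fc_param.input_tensor_ptrs();
//   const std::vector<Tensor*>* outs = fc_param.output_tensor_ptrs();
//   // ins holds {input} and outs holds {output}; repeated calls reuse the
//   // same cached vectors.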

struct SearchSeqFcParam : ParamBase {
  lite::Tensor* x{nullptr};
  lite::Tensor* w{nullptr};
  lite::Tensor* b{nullptr};
  lite::Tensor* out{nullptr};
  int out_size;
};

// For Interpolate Op
struct InterpolateParam : ParamBase {
  lite::Tensor* X{};
  lite::Tensor* OutSize{};
  lite::Tensor* Out{};
  std::vector<const lite::Tensor*> SizeTensor;
  lite::Tensor* Scale{};

  float scale{0.f};
  int out_h{-1};
  int out_w{-1};
  bool align_corners{true};
  int align_mode{1};
  std::string interp_method{"Nearest"};
  DataLayoutType data_layout{DATALAYOUT(kNCHW)};
};

// For Mul Op
struct MulParam : ParamBase {
  const lite::Tensor* x{};
  const lite::Tensor* y{};
  lite::Tensor* output{};

  int x_num_col_dims{1};
  int y_num_col_dims{1};
  // for int8
  WITH_INT8_CONFIG
};

struct MulGradParam : ParamBase {
  const lite::Tensor* x{};
  const lite::Tensor* y{};
  const lite::Tensor* output_grad{};
  lite::Tensor* x_grad{};
  lite::Tensor* y_grad{};

  int x_num_col_dims{1};
  int y_num_col_dims{1};
};

// For ReduceMean Op
struct ReduceMeanParam : ParamBase {
  lite::Tensor* X{};
  lite::Tensor* Out{};

  std::vector<int> dim;
  bool keep_dim{false};
};

// For Stack Op
struct StackParam : ParamBase {
  std::vector<lite::Tensor*> X;
  lite::Tensor* Out{};

  int axis{0};
};

// For Power Op
struct PowerParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};

  float scale{};
  float shift{};
  float power{};
};

struct ShuffleChannelParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};

  int group;
};

// For Yolobox
struct YoloBoxParam : ParamBase {
  lite::Tensor* X{};
  lite::Tensor* ImgSize{};
  lite::Tensor* Boxes{};
  lite::Tensor* Scores{};

  std::vector<int> anchors{};
  int class_num{0};
  float conf_thresh{0.f};
  int downsample_ratio{0};
};

// For Scale Op
struct ScaleParam : ParamBase {
  lite::Tensor* x{};
  lite::Tensor* output{};

  float scale{1.};
  float bias{};
  bool bias_after_scale{true};
};

// For Softmax op
struct SoftmaxParam : ParamBase {
  lite::Tensor* x{};
  lite::Tensor* output{};
  int axis{-1};
  ///////////////////////////////////////////////////////////////////////////////////
  // get a vector of input tensors
  const std::vector<const Tensor*>* input_tensor_ptrs() {
    if (!input_tensor_ptrs_cache_) {
      input_tensor_ptrs_cache_.reset(new std::vector<const Tensor*>({x}));
    }
    return input_tensor_ptrs_cache_.get();
  }
  // get a vector of output tensors
  const std::vector<Tensor*>* output_tensor_ptrs() {
    if (!output_tensor_ptrs_cache_) {
      output_tensor_ptrs_cache_.reset(new std::vector<lite::Tensor*>({output}));
    }
    return output_tensor_ptrs_cache_.get();
  }
};

// For Reshape and Reshape2 Op
struct ReshapeParam : ParamBase {
  const lite::Tensor* x{};
  std::vector<const lite::Tensor*> shape_tensor_vct{};
  const lite::Tensor* shape_tensor{};
  std::vector<int> shape_vct{};
  lite::Tensor* output{};

  lite::Tensor* xshape{};
  bool inplace{false};
};

// For Concat op
struct ConcatParam : ParamBase {
  std::vector<lite::Tensor*> x{};
  lite::Tensor* output{};
  int axis{0};
  lite::Tensor* axis_tensor{};
};

/// ----------------------- activation operators ----------------------
struct ActivationParam : ParamBase {
  const lite::Tensor* X{};
  float Leaky_relu_alpha{0};   // leaky_relu param
  float Relu_clipped_coef{6};  // relu_clipped param
  std::string Prelu_mode{
      "channel"};  // prelu param, can be "all", "channel" or "element"
  lite::Tensor* Prelu_alpha{};  // prelu param
  float Swish_beta;             // swish param
  float hard_sigmoid_slope{0.2};
  float hard_sigmoid_offset{0.5};
  lite::Tensor* Out{};
  bool has_active{false};
  lite_api::ActivationType active_type;
};

struct ActivationGradParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Out{};
  // for backward
  lite::Tensor* X_grad{};
  const lite::Tensor* Out_grad{};
};

// For Convolution op
struct ConvParam : ParamBase {
  lite::Tensor* x{};
  lite::Tensor* filter{};
  lite::Tensor* bias{nullptr};
  lite::Tensor* residualData{nullptr};
  lite::Tensor* output{};
  std::vector<int> strides{1, 1};
  /* The paddings type was changed from std::vector<int> to
   * std::shared_ptr<std::vector<int>> so that the padding can be modified
   * dynamically and the kernel param and operator param stay in sync
   * (see the usage sketch after this struct).
   */
  std::shared_ptr<std::vector<int>> paddings;
  int groups{1};
  /* The dilations type was changed from std::vector<int> to
   * std::shared_ptr<std::vector<int>> so that the dilations can be modified
   * dynamically and the kernel param and operator param stay in sync.
   */
  std::shared_ptr<std::vector<int>> dilations;
  bool fuse_relu_before_depthwise_conv{false};
  bool use_mkldnn{false};
  bool fuse_relu{false};  // only used in mkldnn kernel
  bool use_quantizer{
      false};  // set true for op that should be quantized, only used for cpu
  bool fuse_residual_connection{false};
  float scale_in{1.0f};           // only used with mkl-dnn int8
  float scale_out{1.0f};          // only used with mkl-dnn int8
  float scale_in_eltwise{1.0f};   // only used with mkl-dnn int8
  float scale_weights{1.0f};      // only used with mkl-dnn int8
  bool force_fp32_output{false};  // only used in mkl-dnn int8
  std::string data_format{"Anylayout"};
  // for activation
  ActivationParam activation_param;
  // support var_length or not
  bool var_length{false};
  // only used in conv_transpose.
  std::vector<int> output_size;
  // for int8
  WITH_INT8_CONFIG

  ///////////////////////////////////////////////////////////////////////////////////
  // get a vector of input tensors
  const std::vector<const Tensor*>* input_tensor_ptrs() {
    if (!input_tensor_ptrs_cache_) {
      input_tensor_ptrs_cache_.reset(new std::vector<const Tensor*>({x}));
    }
    return input_tensor_ptrs_cache_.get();
  }
  // get a vector of output tensors
  const std::vector<Tensor*>* output_tensor_ptrs() {
    if (!output_tensor_ptrs_cache_) {
      output_tensor_ptrs_cache_.reset(new std::vector<lite::Tensor*>({output}));
    }
    return output_tensor_ptrs_cache_.get();
  }
};
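// Usage sketch (illustrative only, not part of the library API): because
// paddings and dilations are shared_ptrs, an operator param and a kernel param
// that hold the same pointer observe each other's updates, which is how the
// two stay in sync. The variable names below are hypothetical.
//
//   ConvParam op_param;
//   op_param.paddings =
//       std::make_shared<std::vector<int>>(std::vector<int>{1, 1, 1, 1});
//   ConvParam kernel_param = op_param;  // copies the pointer, not the vector
//   (*op_param.paddings)[0] = 2;        // dynamically modify the padding
//   // kernel_param.paddings now also reads {2, 1, 1, 1}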

// For BatchNorm op
struct BatchNormParam : ParamBase {
  lite::Tensor* x{};
  lite::Tensor* bias{};
  lite::Tensor* scale{};
  lite::Tensor* mean{};
  lite::Tensor* variance{};
  lite::Tensor* y{};
  lite::Tensor* mean_out{};
  lite::Tensor* variance_out{};
  lite::Tensor* saved_mean{};
  lite::Tensor* saved_variance{};
  bool is_test{true};
  bool use_global_stats{false};
  float epsilon;
  float momentum;
  DataLayoutType data_layout{DATALAYOUT(kNCHW)};
};

// For Pooling op
struct PoolParam : ParamBase {
  lite::Tensor* x{};
  lite::Tensor* output{};
  std::string pooling_type{""};
  std::vector<int> ksize{};
  bool global_pooling{
      false};  // if true, kernel size and paddings will be ignored
  std::vector<int> strides{1, 1};
  /* The paddings type was changed from std::vector<int> to
   * std::shared_ptr<std::vector<int>> so that the padding can be modified
   * dynamically and the kernel param and operator param stay in sync.
   */
  std::shared_ptr<std::vector<int>> paddings;
  bool exclusive{true};
  bool adaptive{false};
  bool ceil_mode{false};
  bool use_quantizer{false};
  std::string data_format{"AnyLayout"};
  // for int8
  WITH_INT8_CONFIG
};

// For Dropout op
struct DropoutParam : ParamBase {
  const lite::Tensor* x{};
  lite::Tensor* output{};
  lite::Tensor* mask{};
  float dropout_prob{.5f};
  bool is_test{false};
  bool fix_seed{false};
  int seed{0};
  std::string dropout_implementation{"downgrade_in_infer"};
};

// For Split op
struct SplitParam : ParamBase {
  lite::Tensor* x{};
  std::vector<lite::Tensor*> output{};
  lite::Tensor* axis_tensor{nullptr};
  std::vector<lite::Tensor*> sections_tensor_list{};

  int axis{-1};
  int num{0};
  std::vector<int> sections;
};

// For Transpose op
struct TransposeParam : ParamBase {
  const lite::Tensor* x{};
  lite::Tensor* output{};
  lite::Tensor* xshape{};

  std::vector<int> axis;
  bool use_mkldnn{false};
  std::string data_format{"AnyLayout"};
};

/// ----------------------- element wise operators ----------------------
struct ElementwiseParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  int axis{-1};  // for broadcasting.
  // for int8
  WITH_INT8_CONFIG
  float x_input_scale{1.0};
  float y_input_scale{1.0};
  ///////////////////////////////////////////////////////////////////////////////////
  // get a vector of input tensors
  const std::vector<const Tensor*>* input_tensor_ptrs() {
    if (!input_tensor_ptrs_cache_) {
      input_tensor_ptrs_cache_.reset(new std::vector<const Tensor*>({X, Y}));
    }
    return input_tensor_ptrs_cache_.get();
  }
  // get a vector of output tensors
  const std::vector<Tensor*>* output_tensor_ptrs() {
    if (!output_tensor_ptrs_cache_) {
      output_tensor_ptrs_cache_.reset(new std::vector<lite::Tensor*>({Out}));
    }
    return output_tensor_ptrs_cache_.get();
  }
};

struct ElementwiseGradParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  const lite::Tensor* OutGrad{};
  lite::Tensor* XGrad{};
  lite::Tensor* YGrad{};
  int axis{-1};  // for broadcasting.
};

struct FusionElementwiseActivationParam : public ElementwiseParam {
  std::string act_type;
};

struct FusionElementwiseActivationGradParam : public ElementwiseGradParam {
  std::string act_type;
};

/// ----------------------- mean operators ----------------------
struct MeanParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct MeanGradParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Out_grad{};
  // for backward
  lite::Tensor* X_grad{};
};

/// ----------------------- fill_constant operators ----------------------
struct FillConstantParam : ParamBase {
  int dtype{static_cast<int>(VarDescAPI::VarDataType::FP32)};
  std::vector<int64_t> shape{};
  lite::Tensor* shape_tensor{nullptr};
  std::vector<lite::Tensor*> shape_tensor_list{};

  float value{0.0f};
  // useless for x86, keep it for compatibility
  bool force_cpu{false};
  lite::Tensor* out{};
};

struct FillConstantBatchSizeLikeParam : ParamBase {
  const lite::Tensor* input{nullptr};
  lite::Tensor* out{nullptr};

  std::vector<int> shape{};
  int input_dim_idx{0};
  int output_dim_idx{0};
  int dtype{static_cast<int>(VarDescAPI::VarDataType::FP32)};
  float value{0.0f};
  // useless for x86, keep it for compatibility
  bool force_cpu{false};
};

//
struct FakeQuantizeMovingAvgMaxAbsParam : ParamBase {
  const lite::Tensor* x{};
  const lite::Tensor* in_scale{};
  const lite::Tensor* in_accum{};
  const lite::Tensor* in_state{};
  lite::Tensor* out{};
  lite::Tensor* out_scale{};
  lite::Tensor* out_state{};
  lite::Tensor* out_accum{};
  int bit_length;
  bool is_test{true};
  float moving_rate{0.9};
};

struct FakeDequantizeMaxAbsParam : ParamBase {
  const lite::Tensor* x{};
  const lite::Tensor* in_scale{};
  lite::Tensor* out{};
  float max_range;
};

struct FakeChannelWiseDequantizeMaxAbsParam : ParamBase {
  const lite::Tensor* x{};
  std::vector<const lite::Tensor*> scale_tensors{};
  lite::Tensor* out{};
  std::vector<int> quant_bits;
};

/// ----------------------- sgd operators ----------------------
struct SGDParam : ParamBase {
  int dtype{static_cast<int>(VarDescAPI::VarDataType::FP32)};

  const lite::Tensor* Param{};
  const lite::Tensor* LearningRate{};
  const lite::Tensor* Grad{};
  lite::Tensor* ParamOut{};
};

/// ----------------------- uniform_random operators ----------------------
struct UniformRandomParam : ParamBase {
  std::vector<int64_t> shape{};
  float min{-1.0f};
  float max{1.0f};
  int seed{0};
  int dtype{static_cast<int>(VarDescAPI::VarDataType::FP32)};
  lite::Tensor* Out{};
};
/// ----------------------- negative operators --------------
struct NegativeParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};
/// ----------------------- pad2d operators ----------------------
struct Pad2dParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  std::vector<int> paddings{0, 0, 0, 0};
  std::string mode{"constant"};
  float pad_value = 0.f;
  std::string data_format{"NCHW"};
};

/// ----------------------- Crop operators ----------------------
struct CropParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  std::vector<int> offsets;
  std::vector<int> shape;
};

/// ----------------------- argmax operators ----------------------
struct ArgmaxParam : ParamBase {
  lite::Tensor* X{};
  lite::Tensor* Out{};
  int Axis{0};
};

/// ----------------------- axpy operators ----------------------
struct AxpyParam : ParamBase {
  lite::Tensor* Scale{};
  lite::Tensor* X{};
  lite::Tensor* Bias{};
  lite::Tensor* Out{};
};
/// ----------------------- GRU unit operators ----------------------
struct GRUUnitParam : ParamBase {
  enum ActType { identity, sigmoid, tanh, relu };
  const lite::Tensor* input{nullptr};
  const lite::Tensor* hidden_prev{nullptr};
  const lite::Tensor* weight{nullptr};
  const lite::Tensor* bias{nullptr};
  lite::Tensor* gate{nullptr};
  lite::Tensor* reset_hidden_prev{nullptr};
  lite::Tensor* hidden{nullptr};

  int gate_activation{ActType::sigmoid};
  int activation{ActType::tanh};
  bool origin_mode{false};
};

/// ------------------------------ lrn operators ------------------------------
struct LrnParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  int n{5};
  float alpha{1e-4};
  float beta{0.75};
  float k{1.};
  std::string norm_region{"AcrossChannels"};
};

/// ----------------------- decode_bboxes operators ----------------------
struct DecodeBboxesParam : ParamBase {
  const lite::Tensor* loc_data{};
  const lite::Tensor* prior_data{};
  lite::Tensor* bbox_data{};

  int batch_num;
  int num_priors;
  int num_loc_classes{0};
  int background_label_id{0};
  bool share_location{true};
  bool variance_encoded_in_target;
  // code_type: corner, center_size, corner_size
  std::string code_type;
};

/// ----------------------- box_coder operators ----------------------
struct BoxCoderParam : ParamBase {
  const lite::Tensor* prior_box{};
  const lite::Tensor* prior_box_var{};
  const lite::Tensor* target_box{};
  lite::Tensor* proposals{};
  // code_type: encode_center_size and decode_center_size
  std::string code_type{"encode_center_size"};
  bool box_normalized{true};
  int axis{0};
  std::vector<float> variance{};
};

/// ----------------------- multiclass_nms operators ----------------------
struct MulticlassNmsParam : ParamBase {
  const lite::Tensor* bboxes{};
  const lite::Tensor* scores{};
  lite::Tensor* out{};
  lite::Tensor* index{};
  int background_label{0};
  float score_threshold{};
  int nms_top_k{};
  float nms_threshold{0.3};
  float nms_eta{1.0};
  int keep_top_k;
  bool normalized{true};
};

/// ----------------------- priorbox operators ----------------------
struct PriorBoxParam : ParamBase {
  lite::Tensor* input{};
  lite::Tensor* image{};
  lite::Tensor* boxes{};
  lite::Tensor* variances{};

  bool flip;
  bool clip;
  std::vector<float> min_sizes;
  std::vector<float> max_sizes;
  std::vector<float> aspect_ratios;
  std::vector<float> variances_;
  int img_w{0};
  int img_h{0};
  float step_w{0};
  float step_h{0};
  float offset{0.5};
  int prior_num{0};
  // priortype: prior_min, prior_max, prior_com
  std::vector<std::string> order;
  bool min_max_aspect_ratios_order{false};
};

struct DensityPriorBoxParam : public PriorBoxParam {
  std::vector<float> fixed_sizes;
  std::vector<float> fixed_ratios;
  std::vector<int> density_sizes;
};
/// ----------------------- GRU operators ----------------------
struct GRUParam : ParamBase {
  const lite::Tensor* input{nullptr};
  const lite::Tensor* h0{nullptr};
  const lite::Tensor* weight{nullptr};
  const lite::Tensor* bias{nullptr};
  lite::Tensor* batch_gate{nullptr};
  lite::Tensor* batch_reset_hidden_prev{nullptr};
  lite::Tensor* batch_hidden{nullptr};
  lite::Tensor* hidden{nullptr};

  std::string gate_activation{"sigmoid"};
  std::string activation{"tanh"};
  bool is_reverse{false};
  bool origin_mode{false};
};

/// ----------------------- BeamSearchDecode operators ----------------------
struct BeamSearchDecodeParam : ParamBase {
  std::vector<lite::Tensor>* ids{nullptr};
  std::vector<lite::Tensor>* scores{nullptr};
  lite::Tensor* sentence_ids{nullptr};
  lite::Tensor* sentence_scores{nullptr};
  int beam_size;
  int end_id;
};

/// ----------------------- LookupTable operators ----------------------
struct LookupTableParam : ParamBase {
  const lite::Tensor* W{nullptr};
  const lite::Tensor* Ids{nullptr};
  lite::Tensor* Out{nullptr};
  int64_t padding_idx{-1};
};

struct LookupTableDequantParam : ParamBase {
  lite::Tensor* W{nullptr};
  lite::Tensor* Ids{nullptr};
  lite::Tensor* Out{nullptr};
  int64_t padding_idx{-1};
};

struct Im2SequenceParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  std::vector<int> kernels{3, 3};
  std::vector<int> strides{1, 1};
  std::vector<int> paddings{0, 0, 0, 0};
  std::vector<int> out_strides{1, 1};
};

struct SequenceSoftmaxParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct NormParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  lite::Tensor* Norm{};
  int axis{1};
  float epsilon{1e-10};
};
struct LayerNormParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Scale{};
  const lite::Tensor* Bias{};
  lite::Tensor* Y{};
  lite::Tensor* Mean{};
  lite::Tensor* Variance{};
  int begin_norm_axis{1};
  float epsilon{1e-5};
};

struct LogicalParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
};

struct CompareParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  bool force_cpu{0};
  int axis{-1};
  lite::Tensor* Out{};
};

struct WhileParam : ParamBase {
  Scope* scope{};
  Tensor* cond{};
  cpp::BlockDesc* sub_block{};
  std::vector<Tensor*> x{};
  std::vector<Tensor*> outs{};
};

struct TopkParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  lite::Tensor* Indices{};
  int K{1};
};

struct IncrementParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  float step{1};
};

struct WriteToArrayParam : ParamBase {
  const lite::Tensor* X{nullptr};
  const lite::Tensor* I{nullptr};
  std::vector<lite::Tensor>* Out{nullptr};
};

struct ReadFromArrayParam : ParamBase {
  const std::vector<lite::Tensor>* X{nullptr};
  const lite::Tensor* I{nullptr};
  lite::Tensor* Out{nullptr};
};

struct BeamSearchParam : ParamBase {
  const lite::Tensor* pre_ids{};
  const lite::Tensor* pre_scores{};
  const lite::Tensor* ids{};
  const lite::Tensor* scores{};
  lite::Tensor* selected_ids{};
  lite::Tensor* selected_scores{};
  lite::Tensor* parent_idx{};
  int level;
  int beam_size;
  int end_id;
  bool is_accumulated;
};

struct SequencePoolParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  std::string pool_type{"AVERAGE"};
#ifdef LITE_WITH_X86
  float pad_value{0.0};
  lite::Tensor* MaxIndex{};
#endif
};

struct SequenceConvParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Filter{};
  lite::Tensor* Out{};
  int contextStart{0};
  int contextStride{1};
  int contextLength;
};

struct SequencePoolConcatParam : ParamBase {
  std::vector<lite::Tensor*> X{};
  lite::Tensor* Out{};
  std::vector<std::string> pool_type{};
};

struct SearchGroupPaddingParam : ParamBase {
  lite::Tensor* x{};
  lite::Tensor* out_emb_padding{};
  lite::Tensor* out_new{};
  lite::Tensor* out_padding{};
  int pad_id;
};

struct SequenceReshapeParam : ParamBase {
  lite::Tensor* x{};
  lite::Tensor* output{};
  int new_dim;
};

struct SequenceExpandParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  int ref_level{-1};
};

struct SequenceExpandAsParam : ParamBase {
  const lite::Tensor* x{nullptr};
  const lite::Tensor* y{nullptr};
  lite::Tensor* out{nullptr};
};

struct SequenceReverseParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct SequenceConcatParam : ParamBase {
  std::vector<lite::Tensor*> X{};
  lite::Tensor* Out{};
};

struct AttentionPaddingMaskParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  int pad_id;
  float mask;
  lite::Tensor* Out{};
  lite::Tensor* pad_begin{};
};

struct SequenceArithmeticParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  int op_type{1};
  lite::Tensor* Out{};
};

struct ReduceMaxParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  std::vector<int> dim{};
  bool keep_dim{false};
};

struct LodResetParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  std::vector<int> target_lod;
  bool append;
};

struct IsEmptyParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct ReduceParam : ParamBase {
  lite::Tensor* x{};
  lite::Tensor* output{};
  std::vector<int> dim{0};
  bool keep_dim{false};
  bool reduce_all{false};
};

struct VarConv2DParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* ROW{};
  const lite::Tensor* COLUMN{};
  const lite::Tensor* W{};
  lite::Tensor* Out{};
  lite::Tensor* Col{};

  int input_channel;
  int output_channel;
  int stride_h;
  int stride_w;
  int kernel_h;
  int kernel_w;

  bool fuse_relu{false};
};

/// ----------------------- shape operators ----------------------
struct ShapeParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct CastParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  int out_dtype{2};
  int in_dtype{2};
};

struct SliceParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  std::vector<int> axes{};
  std::vector<int> starts{};
  std::vector<int> ends{};
  std::vector<int> decrease_axis{};
  std::vector<int> infer_flags{};
  std::vector<lite::Tensor*> StartsTensorList{};
  std::vector<lite::Tensor*> EndsTensorList{};
  lite::Tensor* StartsTensor{nullptr};
  lite::Tensor* EndsTensor{nullptr};
};

struct AffineChannelParam : ParamBase {
  const lite::Tensor* X{};  // X is 4D tensor
  const lite::Tensor* Scale{};
  const lite::Tensor* Bias{};
  std::string data_layout{"NCHW"};  // optional string from: NHWC, NCHW.
  lite::Tensor* Out{};
};

struct AnchorGeneratorParam : ParamBase {
  const lite::Tensor* Input{};
  std::vector<float> anchor_sizes{};
  std::vector<float> aspect_ratios{};
  std::vector<float> stride{};
  std::vector<float> variances{{0.1, 0.1, 0.2, 0.2}};
  float offset{0.5};

  lite::Tensor* Anchors{};
  lite::Tensor* Variances{};
};

struct GenerateProposalsParam : ParamBase {
  // inputs
  const lite::Tensor* Scores{};
  const lite::Tensor* BboxDeltas{};
  const lite::Tensor* ImInfo{};
  lite::Tensor* Anchors{};
  lite::Tensor* Variances{};

  // attrs
  int pre_nms_topN{6000};
  int post_nms_topN{1000};
  float nms_thresh{0.5};
  float min_size{0.1};
  float eta{1.0};

  // outputs
  lite::Tensor* RpnRois{};
  lite::Tensor* RpnRoiProbs{};
};
/// ----------------------- squeeze operators ----------------------
struct SqueezeParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  lite::Tensor* XShape{};
  std::vector<int> axes{};
};

struct UnsqueezeParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  lite::Tensor* XShape{};
  std::vector<int> axes{};
  const lite::Tensor* axes_tensor{};
  std::vector<const lite::Tensor*> axes_tensor_vct{};
};

/// ----------------------- expand operators ----------------------
struct ExpandParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  std::vector<int> expand_times{};
};

/// ----------------------- matmul operators ----------------------
struct MatMulParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  bool transpose_X{false};
  bool transpose_Y{false};
  float alpha{1.0f};
};

struct GatherParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* Index{};
  lite::Tensor* Out{};
};

/// ----------------------- assign operators -----------------------
struct AssignParam : ParamBase {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

/// ----------------------- roi_align operators -----------------------
struct RoiAlignParam : ParamBase {
  lite::Tensor* X{};
  lite::Tensor* ROIs{};
  lite::Tensor* Out{};
  float spatial_scale{1.0};
  int pooled_height{1};
  int pooled_width{1};
  int sampling_ratio{-1};
};

/// ----------------------- box_clip operators -----------------------
struct BoxClipParam : ParamBase {
  const lite::Tensor* Input{};
  const lite::Tensor* ImInfo{};
  lite::Tensor* Output{};
};

struct RangeParam : ParamBase {
  const lite::Tensor* Start{nullptr};
  const lite::Tensor* End{nullptr};
  const lite::Tensor* Step{nullptr};
  lite::Tensor* Out{nullptr};
};

/// ----------------------- assign_value operators -----------------------
struct AssignValueParam : ParamBase {
  std::vector<int> shape{};
  int dtype{};
  std::vector<float> fp32_values{};
  std::vector<int> int32_values{};
  lite::Tensor* Out{};
};

/// --------------- sequence_topk_avg_pooling operators ------------------
struct SequenceTopkAvgPoolingParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* ROW{};
  const lite::Tensor* COLUMN{};
  lite::Tensor* Out{};
  lite::Tensor* pos{};
  int channel_num{};
  std::vector<int> topks{};
};

/// --------------- search_fc operators ------------------
struct SearchFcParam : ParamBase {
  const lite::Tensor* X{};
  const lite::Tensor* W{};
  const lite::Tensor* b{};
  lite::Tensor* Out{};
  int out_size{};
};
/// --------------------- match_matrix_tensor operators --------------------
struct MatchMatrixTensorParam : ParamBase {
  const lite::Tensor* x{};
  const lite::Tensor* y{};
  const lite::Tensor* w{};
  lite::Tensor* out{};
  lite::Tensor* tmp{};

  int dim_t;
};

/// --------------------- search_seq_depadding operators --------------------
struct SearchSeqDepaddingParam : ParamBase {
  const lite::Tensor* pad{};
  const lite::Tensor* src{};
  lite::Tensor* out{};
};

/// --------------------- search_grnn operators --------------------
struct SearchGrnnParam : ParamBase {
  const lite::Tensor* x{};
  const lite::Tensor* wi{};
  const lite::Tensor* wh{};
  int num_input;
  int num_hidden;

  lite::Tensor* out{};
  lite::Tensor* tmp_buffer{};
  lite::Tensor* idx_sorted_by_width{};
  lite::Tensor* layout_input{};
};

struct SplitLodTensorParam : ParamBase {
  const lite::Tensor* x{};
  const lite::Tensor* mask{};
  lite::Tensor* out_true{};
  lite::Tensor* out_false{};
  int level{};
};

struct MergeLodTensorParam : ParamBase {
  const lite::Tensor* x{};
  const lite::Tensor* mask{};
  const lite::Tensor* in_true{};
  const lite::Tensor* in_false{};
  lite::Tensor* out{};
  int level{};
};

struct ConditionalBlockParam : ParamBase {
  const lite::Tensor* cond{};
  std::vector<lite::Tensor*> x{};
  std::vector<lite::Tensor*> outs{};
  cpp::BlockDesc* sub_block{};
  Scope* scope{};
  bool is_scalar_condition{};
};

struct CollectFpnProposalsParam : ParamBase {
  std::vector<lite::Tensor*> multi_level_rois{};
  std::vector<lite::Tensor*> multi_level_scores{};
  lite::Tensor* fpn_rois{};
  int post_nms_topN{};
};

struct DistributeFpnProposalsParam : ParamBase {
  const lite::Tensor* fpn_rois{};
  std::vector<lite::Tensor*> multi_fpn_rois{};
  lite::Tensor* restore_index{};
  int min_level{};
  int max_level{};
  int refer_level{};
  int refer_scale{};
};

/// --------------------- instance_norm operators --------------------
struct InstanceNormParam : ParamBase {
  lite::Tensor* x{};
  lite::Tensor* out{};
  lite::Tensor* bias{};
  lite::Tensor* scale{};
  lite::Tensor* saved_mean{};
  lite::Tensor* saved_variance{};
  float epsilon;
};
/// --------------------- grid sampler operators --------------------
struct GridSamplerParam : ParamBase {
  lite::Tensor* x{};
  lite::Tensor* out{};
  lite::Tensor* grid{};
};
struct LstmParam : ParamBase {
  lite::Tensor* Input{};
  lite::Tensor* Weight{};
  lite::Tensor* Bias{};
  lite::Tensor* Hidden{};
  lite::Tensor* Cell{};
  lite::Tensor* BatchGate{};
  lite::Tensor* BatchCellPreAct{};
  lite::Tensor* H0{nullptr};
  lite::Tensor* C0{nullptr};
  bool use_peepholes;
  bool is_reverse;
  std::string gate_activation;
  std::string cell_activation;
  std::string candidate_activation;
};

struct CrfDecodingParam : ParamBase {
  lite::Tensor* emission{};
  lite::Tensor* transition{};
  lite::Tensor* label{};
  lite::Tensor* length{};
  lite::Tensor* viterbi_path{};
};

}  // namespace operators
}  // namespace lite
}  // namespace paddle