// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "lite/api/paddle_place.h"
#include "lite/core/scope.h"
#include "lite/core/tensor.h"
#include "lite/core/types.h"
#include "lite/model_parser/cpp/block_desc.h"
#include "lite/model_parser/desc_apis.h"
#include "lite/utils/all.h"
/*
 * This file contains all the argument parameter data structures for operators.
 */

namespace paddle {
namespace lite {
namespace operators {

using param_t = Any;
#define WITH_INT8_CONFIG             \
  bool enable_int8{false};           \
  float input_scale{1.0};            \
  std::vector<float> weight_scale{}; \
  float output_scale{1.0};           \
  int bit_length{8};
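
// Illustrative expansion (a sketch, not additional code in this header):
// a struct that places WITH_INT8_CONFIG in its body, e.g.
//
//   struct MulParam { /* ... */ WITH_INT8_CONFIG };
//
// ends up with the quantization fields spelled out:
//
//   struct MulParam {
//     /* ... */
//     bool enable_int8{false};
//     float input_scale{1.0};
//     std::vector<float> weight_scale{};
//     float output_scale{1.0};
//     int bit_length{8};
//   };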

/// ----------------------- Functional operators ------------------------------
struct FeedParam {
  std::vector<lite::Tensor>* feed_list{};
  lite::Tensor* out{};
  int col;
};

struct FetchParam {
  const lite::Tensor* input{};
  std::vector<lite::Tensor>* fetch_list{};
  int col;
};

// Helper op for lite framework
struct IoCopyParam {
  const lite::Tensor* x{};
  lite::Tensor* y{};
};

struct LayoutParam {
  const lite::Tensor* x{};
  lite::Tensor* y{};
  int process_type{0};
};

struct CalibParam {
  const lite::Tensor* input{};
  lite::Tensor* output{};
  float scale;
};

struct SubgraphParam {
  std::vector<std::string> input_names{};
  std::vector<std::string> output_names{};
  std::vector<std::string> input_data_names{};
  std::vector<std::string> output_data_names{};
  int sub_block_idx{-1};
  cpp::BlockDesc* sub_block_desc{nullptr};
  Scope* scope{nullptr};
};

/// -------------------------- NN operators ------------------------------------

struct FcParam {
  lite::Tensor* input{nullptr};
  lite::Tensor* w{nullptr};
  lite::Tensor* bias{nullptr};
  lite::Tensor* output{nullptr};
  lite::DDim in_mat_dims;
  int in_num_col_dims{1};
  std::string activation_type{""};
  bool padding_weights{false};
  // for int8
  WITH_INT8_CONFIG
};

struct SearchSeqFcParam {
  lite::Tensor* x{nullptr};
  lite::Tensor* w{nullptr};
  lite::Tensor* b{nullptr};
  lite::Tensor* out{nullptr};
  int out_size;
};

// For Interpolate Op
struct InterpolateParam {
  lite::Tensor* X{};
  lite::Tensor* OutSize{};
  lite::Tensor* Out{};
  std::vector<const lite::Tensor*> SizeTensor;
  lite::Tensor* Scale{};

  float scale{0.f};
  int out_h{-1};
  int out_w{-1};
  bool align_corners{true};
  int align_mode{1};
  std::string interp_method{"Nearest"};
  DataLayoutType data_layout{DATALAYOUT(kNCHW)};
};

// For Mul Op
struct MulParam {
  const lite::Tensor* x{};
  const lite::Tensor* y{};
  lite::Tensor* output{};

  int x_num_col_dims{1};
  int y_num_col_dims{1};
  // for int8
  WITH_INT8_CONFIG
};

struct MulGradParam {
  const lite::Tensor* x{};
  const lite::Tensor* y{};
  const lite::Tensor* output_grad{};
  lite::Tensor* x_grad{};
  lite::Tensor* y_grad{};

  int x_num_col_dims{1};
  int y_num_col_dims{1};
};

// For ReduceMean Op
struct ReduceMeanParam {
  lite::Tensor* X{};
  lite::Tensor* Out{};

  std::vector<int> dim;
  bool keep_dim{false};
};

// For Stack Op
struct StackParam {
  std::vector<lite::Tensor*> X;
  lite::Tensor* Out{};

  int axis{0};
};

// For Power Op
struct PowerParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};

  float scale{};
  float shift{};
  float power{};
};

struct ShuffleChannelParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};

  int group;
};

// For Yolobox
struct YoloBoxParam {
  lite::Tensor* X{};
  lite::Tensor* ImgSize{};
  lite::Tensor* Boxes{};
  lite::Tensor* Scores{};

  std::vector<int> anchors{};
  int class_num{0};
  float conf_thresh{0.f};
  int downsample_ratio{0};
};

// For Scale Op
struct ScaleParam {
  lite::Tensor* x{};
  lite::Tensor* output{};

  float scale{1.};
  float bias{};
  bool bias_after_scale{true};
};

// For Softmax op
struct SoftmaxParam {
  lite::Tensor* x{};
  lite::Tensor* output{};
  int axis{-1};
};

// For Reshape and Reshape2 Op
struct ReshapeParam {
  const lite::Tensor* x{};
  std::vector<const lite::Tensor*> shape_tensor_vct{};
  const lite::Tensor* shape_tensor{};
  std::vector<int> shape_vct{};
  lite::Tensor* output{};

  lite::Tensor* xshape{};
  bool inplace{false};
};
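
// Note on ReshapeParam (an assumption based on the usual Paddle reshape
// convention, not spelled out in this header): when several shape sources
// are provided, the runtime typically prefers shape_tensor_vct, then
// shape_tensor, and falls back to the static shape_vct attribute.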

// For Concat op
struct ConcatParam {
  std::vector<lite::Tensor*> x{};
  lite::Tensor* output{};
  int axis{0};
  lite::Tensor* axis_tensor{};
};

/// ----------------------- activation operators ----------------------
struct ActivationParam {
  const lite::Tensor* X{};
  float Leaky_relu_alpha{0};   // leaky_relu param
  float Relu_clipped_coef{6};  // relu_clipped param
  std::string Prelu_mode{
      "channel"};  // prelu param, can be "all", "channel" or "element"
  lite::Tensor* Prelu_alpha{};  // prelu param
  float Swish_beta;             // swish param
  float hard_sigmoid_slope{0.2};
  float hard_sigmoid_offset{0.5};
  lite::Tensor* Out{};
  bool has_active{false};
  lite_api::ActivationType active_type;
};
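
// Dispatch sketch (illustrative only; the kernel code below is assumed,
// not defined here): a kernel with a fused activation would typically
// gate on `has_active` and branch on `active_type`, e.g.
//
//   if (act_param.has_active) {
//     switch (act_param.active_type) {
//       case lite_api::ActivationType::kRelu:
//         // apply ReLU to the computed output
//         break;
//       case lite_api::ActivationType::kLeakyRelu:
//         // apply LeakyReLU using act_param.Leaky_relu_alpha
//         break;
//       default:
//         break;
//     }
//   }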

struct ActivationGradParam {
  const lite::Tensor* X{};
  const lite::Tensor* Out{};
  // for backward
  lite::Tensor* X_grad{};
  const lite::Tensor* Out_grad{};
};

// For Convolution op
struct ConvParam {
  lite::Tensor* x{};
  lite::Tensor* filter{};
  lite::Tensor* bias{nullptr};
  lite::Tensor* residualData{nullptr};
  lite::Tensor* output{};
  std::vector<int> strides{1, 1};
  /* The type of paddings changed from std::vector<int> to
   * std::shared_ptr<std::vector<int>> so that paddings can be modified
   * dynamically and the kernel param and the operator param stay in sync.
   */
  std::shared_ptr<std::vector<int>> paddings;
  int groups{1};
  /* The type of dilations changed from std::vector<int> to
   * std::shared_ptr<std::vector<int>> so that dilations can be modified
   * dynamically and the kernel param and the operator param stay in sync.
   */
  std::shared_ptr<std::vector<int>> dilations;
  bool fuse_relu_before_depthwise_conv{false};
  bool use_mkldnn{false};
  bool fuse_relu{false};  // only used in mkldnn kernel
  bool use_quantizer{
      false};  // set true for ops that should be quantized; only used on CPU
  bool fuse_residual_connection{false};
  float scale_in{1.0f};           // only used with mkl-dnn int8
  float scale_out{1.0f};          // only used with mkl-dnn int8
  float scale_in_eltwise{1.0f};   // only used with mkl-dnn int8
  float scale_weights{1.0f};      // only used with mkl-dnn int8
  bool force_fp32_output{false};  // only used in mkl-dnn int8
  std::string data_format{"Anylayout"};
  // for activation
  ActivationParam activation_param;
  // support var_length or not
  bool var_length{false};
  // only used in conv_transpose.
  std::vector<int> output_size;
  // for int8
  WITH_INT8_CONFIG
};
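
// Sharing sketch (illustrative only): because `paddings` and `dilations`
// are shared_ptrs, copying a ConvParam aliases the same vectors, so an
// update made through the operator param is observed by the kernel param:
//
//   ConvParam op_param;
//   op_param.paddings = std::make_shared<std::vector<int>>(4, 0);
//   ConvParam kernel_param = op_param;  // copies the pointer, not the data
//   (*op_param.paddings)[0] = 1;        // visible via kernel_param.paddings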

// For BatchNorm op
struct BatchNormParam {
  lite::Tensor* x{};
  lite::Tensor* bias{};
  lite::Tensor* scale{};
  lite::Tensor* mean{};
  lite::Tensor* variance{};
  lite::Tensor* y{};
  lite::Tensor* mean_out{};
  lite::Tensor* variance_out{};
  lite::Tensor* saved_mean{};
  lite::Tensor* saved_variance{};
  bool is_test{true};
  bool use_global_stats{false};
  float epsilon;
  float momentum;
  DataLayoutType data_layout{DATALAYOUT(kNCHW)};
};

// For Pooling op
struct PoolParam {
  lite::Tensor* x{};
  lite::Tensor* output{};
  std::string pooling_type{""};
  std::vector<int> ksize{};
  bool global_pooling{
      false};  // if true, kernel size and paddings will be ignored
  std::vector<int> strides{1, 1};
  /* The type of paddings changed from std::vector<int> to
   * std::shared_ptr<std::vector<int>> so that paddings can be modified
   * dynamically and the kernel param and the operator param stay in sync.
   */
  std::shared_ptr<std::vector<int>> paddings;
  bool exclusive{true};
  bool adaptive{false};
  bool ceil_mode{false};
  bool use_quantizer{false};
  std::string data_format{"AnyLayout"};
  // for int8
  WITH_INT8_CONFIG
};

// For Dropout op
struct DropoutParam {
  const lite::Tensor* x{};
  lite::Tensor* output{};
  lite::Tensor* mask{};
  float dropout_prob{.5f};
  bool is_test{false};
  bool fix_seed{false};
  int seed{0};
  std::string dropout_implementation{"downgrade_in_infer"};
};

// For Split op
struct SplitParam {
  lite::Tensor* x{};
  std::vector<lite::Tensor*> output{};
  lite::Tensor* axis_tensor{nullptr};
  std::vector<lite::Tensor*> sections_tensor_list{};

  int axis{-1};
  int num{0};
  std::vector<int> sections;
};

// For Transpose op
struct TransposeParam {
  const lite::Tensor* x{};
  lite::Tensor* output{};
  lite::Tensor* xshape{};

  std::vector<int> axis;
  bool use_mkldnn{false};
  std::string data_format{"AnyLayout"};
};

/// ----------------------- element wise operators ----------------------
struct ElementwiseParam {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  int axis{-1};  // for broadcasting.
  // for int8
  WITH_INT8_CONFIG
  float x_input_scale{1.0};
  float y_input_scale{1.0};
};

struct ElementwiseGradParam {
  const lite::Tensor* Y{};
  const lite::Tensor* Out_grad{};
  lite::Tensor* X_grad{};
  lite::Tensor* Y_grad{};
  int axis{-1};  // for broadcasting.
};

struct FusionElementwiseActivationParam : public ElementwiseParam {
  std::string act_type;
};

struct FusionElementwiseActivationGradParam : public ElementwiseGradParam {
  std::string act_type;
};

/// ----------------------- mean operators ----------------------
struct MeanParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct MeanGradParam {
  const lite::Tensor* X{};
  const lite::Tensor* Out_grad{};
  // for backward
  lite::Tensor* X_grad{};
};

/// ----------------------- fill_constant operators ----------------------
struct FillConstantParam {
  int dtype{static_cast<int>(VarDescAPI::VarDataType::FP32)};
  std::vector<int64_t> shape{};
  lite::Tensor* shape_tensor{nullptr};
  std::vector<lite::Tensor*> shape_tensor_list{};

  float value{0.0f};
  // useless for x86, keep it for compatibility
  bool force_cpu{false};
  lite::Tensor* Out{};
};
struct FillConstantBatchLikeParam {
  int dtype{static_cast<int>(VarDescAPI::VarDataType::FP32)};
  std::vector<int64_t> shape{};
  float value{0.0f};
  // useless for x86, keep it for compatibility
  bool force_cpu{false};
  lite::Tensor* out{};
  const lite::Tensor* input{};
  int input_dim_idx{0};
  int output_dim_idx{0};
};

struct FillConstantBatchSizeLikeParam {
  lite::Tensor* Input{nullptr};
  lite::Tensor* Out{nullptr};

  std::vector<int> shape;
  int input_dim_idx{0};
  int output_dim_idx{0};
  int dtype{static_cast<int>(VarDescAPI::VarDataType::FP32)};
  float value{0.0f};
};

/// ------------------ fake quantize/dequantize operators ---------------------
struct FakeQuantizeMovingAvgMaxAbsParam {
  const lite::Tensor* x{};
  const lite::Tensor* in_scale{};
  const lite::Tensor* in_accum{};
  const lite::Tensor* in_state{};
  lite::Tensor* out{};
  lite::Tensor* out_scale{};
  lite::Tensor* out_state{};
  lite::Tensor* out_accum{};
  int bit_length;
  bool is_test{true};
  float moving_rate{0.9};
};

struct FakeDequantizeMaxAbsParam {
  const lite::Tensor* x{};
  const lite::Tensor* in_scale{};
  lite::Tensor* out{};
  float max_range;
};

struct FakeChannelWiseDequantizeMaxAbsParam {
  const lite::Tensor* x{};
  std::vector<const lite::Tensor*> scale_tensors{};
  lite::Tensor* out{};
  std::vector<int> quant_bits;
};

/// ----------------------- sgd operators ----------------------
struct SGDParam {
  int dtype{static_cast<int>(VarDescAPI::VarDataType::FP32)};

  const lite::Tensor* Param{};
  const lite::Tensor* LearningRate{};
  const lite::Tensor* Grad{};
  lite::Tensor* ParamOut{};
};

/// ----------------------- uniform_random operators ----------------------
struct UniformRandomParam {
  std::vector<int64_t> shape{};
  float min{-1.0f};
  float max{1.0f};
  int seed{0};
  int dtype{static_cast<int>(VarDescAPI::VarDataType::FP32)};
  lite::Tensor* Out{};
};
/// ----------------------- negative operators --------------
struct NegativeParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};
/// ----------------------- pad2d operators ----------------------
struct Pad2dParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  std::vector<int> paddings{0, 0, 0, 0};
  std::string mode{"constant"};
  float pad_value = 0.f;
  std::string data_format{"NCHW"};
};

/// ----------------------- Crop operators ----------------------
struct CropParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  std::vector<int> offsets;
  std::vector<int> shape;
};

///----------------------- argmax operators ----------------------
struct ArgmaxParam {
  lite::Tensor* X{};
  lite::Tensor* Out{};
  int Axis{0};
};

///----------------------- axpy operators ----------------------
struct AxpyParam {
  lite::Tensor* Scale{};
  lite::Tensor* X{};
  lite::Tensor* Bias{};
  lite::Tensor* Out{};
};
/// ----------------------- GRU unit operators ----------------------
struct GRUUnitParam {
  enum ActType { identity, sigmoid, tanh, relu };
  const lite::Tensor* input{nullptr};
  const lite::Tensor* hidden_prev{nullptr};
  const lite::Tensor* weight{nullptr};
  const lite::Tensor* bias{nullptr};
  lite::Tensor* gate{nullptr};
  lite::Tensor* reset_hidden_prev{nullptr};
  lite::Tensor* hidden{nullptr};

  int gate_activation{ActType::sigmoid};
  int activation{ActType::tanh};
  bool origin_mode{false};
};

/// ------------------------------ lrn operators ------------------------------
struct LrnParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  int n{5};
  float alpha{1e-4};
  float beta{0.75};
  float k{1.};
  std::string norm_region{"AcrossChannels"};
};

/// ----------------------- decode_bboxes operators ----------------------
struct DecodeBboxesParam {
  const lite::Tensor* loc_data{};
  const lite::Tensor* prior_data{};
  lite::Tensor* bbox_data{};

  int batch_num;
  int num_priors;
  int num_loc_classes{0};
  int background_label_id{0};
  bool share_location{true};
  bool variance_encoded_in_target;
  // code_type: corner, center_size, corner_size
  std::string code_type;
};

/// ----------------------- box_coder operators ----------------------
struct BoxCoderParam {
  const lite::Tensor* prior_box{};
  const lite::Tensor* prior_box_var{};
  const lite::Tensor* target_box{};
  lite::Tensor* proposals{};
  // code_type: encode_center_size and decode_center_size
  std::string code_type{"encode_center_size"};
  bool box_normalized{true};
  int axis{0};
  std::vector<float> variance{};
};

/// ----------------------- multiclass_nms operators ----------------------
struct MulticlassNmsParam {
  const lite::Tensor* bboxes{};
  const lite::Tensor* scores{};
  lite::Tensor* out{};
  lite::Tensor* index{};
  int background_label{0};
  float score_threshold{};
  int nms_top_k{};
  float nms_threshold{0.3};
  float nms_eta{1.0};
  int keep_top_k;
  bool normalized{true};
};

/// ----------------------- priorbox operators ----------------------
struct PriorBoxParam {
  lite::Tensor* input{};
  lite::Tensor* image{};
  lite::Tensor* boxes{};
  lite::Tensor* variances{};

  bool flip;
  bool clip;
  std::vector<float> min_sizes;
  std::vector<float> max_sizes;
  std::vector<float> aspect_ratios;
  std::vector<float> variances_;
  int img_w{0};
  int img_h{0};
  float step_w{0};
  float step_h{0};
  float offset{0.5};
  int prior_num{0};
  // prior type: prior_min, prior_max, prior_com
  std::vector<std::string> order;
  bool min_max_aspect_ratios_order{false};
};

struct DensityPriorBoxParam : public PriorBoxParam {
  std::vector<float> fixed_sizes;
  std::vector<float> fixed_ratios;
  std::vector<int> density_sizes;
};
/// ----------------------- GRU operators ----------------------
struct GRUParam {
  const lite::Tensor* input{nullptr};
  const lite::Tensor* h0{nullptr};
  const lite::Tensor* weight{nullptr};
  const lite::Tensor* bias{nullptr};
  lite::Tensor* batch_gate{nullptr};
  lite::Tensor* batch_reset_hidden_prev{nullptr};
  lite::Tensor* batch_hidden{nullptr};
  lite::Tensor* hidden{nullptr};

  std::string gate_activation{"sigmoid"};
  std::string activation{"tanh"};
  bool is_reverse{false};
  bool origin_mode{false};
};

/// ----------------------- BeamSearchDecode operators ----------------------
struct BeamSearchDecodeParam {
  std::vector<lite::Tensor>* ids{nullptr};
  std::vector<lite::Tensor>* scores{nullptr};
  lite::Tensor* sentence_ids{nullptr};
  lite::Tensor* sentence_scores{nullptr};
  int beam_size;
  int end_id;
};

/// ----------------------- LookupTable operators ----------------------
struct LookupTableParam {
  lite::Tensor* W{nullptr};
  lite::Tensor* Ids{nullptr};
  lite::Tensor* Out{nullptr};
  int64_t padding_idx{-1};
};

struct Im2SequenceParam {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  std::vector<int> kernels{3, 3};
  std::vector<int> strides{1, 1};
  std::vector<int> paddings{0, 0, 0, 0};
  std::vector<int> out_strides{1, 1};
};

struct SequenceSoftmaxParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct NormParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  lite::Tensor* Norm{};
  int axis{1};
  float epsilon{1e-10};
};
struct LayerNormParam {
  const lite::Tensor* X{};
  const lite::Tensor* Scale{};
  const lite::Tensor* Bias{};
  lite::Tensor* Y{};
  lite::Tensor* Mean{};
  lite::Tensor* Variance{};
  int begin_norm_axis{1};
  float epsilon{1e-5};
};

struct LogicalParam {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
};

struct CompareParam {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  bool force_cpu{0};
  int axis{-1};
  lite::Tensor* Out{};
};

struct WhileParam {
  Scope* scope{};
  Tensor* cond{};
  cpp::BlockDesc* sub_block{};
  std::vector<Tensor*> x{};
  std::vector<Tensor*> outs{};
};

struct TopkParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  lite::Tensor* Indices{};
  int K{1};
};

struct IncrementParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  float step{1};
};

struct WriteToArrayParam {
  const lite::Tensor* X{};
  const lite::Tensor* I{};
  std::vector<lite::Tensor>* Out{};
};

struct ReadFromArrayParam {
  std::vector<lite::Tensor>* X{};
  lite::Tensor* I{};
  lite::Tensor* Out{};
};

struct BeamSearchParam {
  const lite::Tensor* pre_ids{};
  const lite::Tensor* pre_scores{};
  const lite::Tensor* ids{};
  const lite::Tensor* scores{};
  lite::Tensor* selected_ids{};
  lite::Tensor* selected_scores{};
  lite::Tensor* parent_idx{};
  int level;
  int beam_size;
  int end_id;
  bool is_accumulated;
};

struct SequencePoolParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  std::string pool_type{"AVERAGE"};
#ifdef LITE_WITH_X86
  float pad_value{0.0};
  lite::Tensor* MaxIndex{};
#endif
};

struct SequencePoolConcatParam {
  std::vector<lite::Tensor*> X{};
  lite::Tensor* Out{};
  std::vector<std::string> pool_type{};
};

struct SearchGroupPaddingParam {
  lite::Tensor* x{};
  lite::Tensor* out_emb_padding{};
  lite::Tensor* out_new{};
  lite::Tensor* out_padding{};
  int pad_id;
};

struct SequenceReshapeParam {
  lite::Tensor* x{};
  lite::Tensor* output{};
  int new_dim;
};

struct SequenceExpandParam {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  int ref_level{-1};
};

struct SequenceExpandAsParam {
  const lite::Tensor* x{nullptr};
  const lite::Tensor* y{nullptr};
  lite::Tensor* out{nullptr};
};

struct SequenceReverseParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct SequenceConcatParam {
  std::vector<lite::Tensor*> X{};
  lite::Tensor* Out{};
};

struct AttentionPaddingMaskParam {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  int pad_id;
  float mask;
  lite::Tensor* Out{};
  lite::Tensor* pad_begin{};
};

struct SequenceArithmeticParam {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  int op_type{1};
  lite::Tensor* Out{};
};

struct ReduceMaxParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  std::vector<int> dim{};
  bool keep_dim{false};
};

struct LodResetParam {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  std::vector<int> target_lod;
  bool append;
};

struct IsEmptyParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct ReduceParam {
  lite::Tensor* x{};
  lite::Tensor* output{};
  std::vector<int> dim{0};
  bool keep_dim{false};
  bool reduce_all{false};
};

struct VarConv2DParam {
  const lite::Tensor* X{};
  const lite::Tensor* ROW{};
  const lite::Tensor* COLUMN{};
  const lite::Tensor* W{};
  lite::Tensor* Out{};
  lite::Tensor* Col{};

  int input_channel;
  int output_channel;
  int stride_h;
  int stride_w;
  int kernel_h;
  int kernel_w;

  bool fuse_relu{false};
};

/// ----------------------- shape operators ----------------------
struct ShapeParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct CastParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  int out_dtype{2};
  int in_dtype{2};
};

struct SliceParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  std::vector<int> axes{};
  std::vector<int> starts{};
  std::vector<int> ends{};
  std::vector<int> decrease_axis{};
  std::vector<int> infer_flags{};
  std::vector<lite::Tensor*> StartsTensorList{};
  std::vector<lite::Tensor*> EndsTensorList{};
  lite::Tensor* StartsTensor{nullptr};
  lite::Tensor* EndsTensor{nullptr};
};

struct AffineChannelParam {
  const lite::Tensor* X{};  // X is a 4D tensor
  const lite::Tensor* Scale{};
  const lite::Tensor* Bias{};
  std::string data_layout{"NCHW"};  // optional string from: NHWC, NCHW.
  lite::Tensor* Out{};
};

struct AnchorGeneratorParam {
  const lite::Tensor* Input{};
  std::vector<float> anchor_sizes{};
  std::vector<float> aspect_ratios{};
  std::vector<float> stride{};
  std::vector<float> variances{{0.1, 0.1, 0.2, 0.2}};
  float offset{0.5};

  lite::Tensor* Anchors{};
  lite::Tensor* Variances{};
};

struct GenerateProposalsParam {
  // inputs
  const lite::Tensor* Scores{};
  const lite::Tensor* BboxDeltas{};
  const lite::Tensor* ImInfo{};
  lite::Tensor* Anchors{};
  lite::Tensor* Variances{};

  // attrs
  int pre_nms_topN{6000};
  int post_nms_topN{1000};
  float nms_thresh{0.5};
  float min_size{0.1};
  float eta{1.0};

  // outputs
  lite::Tensor* RpnRois{};
  lite::Tensor* RpnRoiProbs{};
};
/// ----------------------- squeeze operators ----------------------
struct SqueezeParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  lite::Tensor* XShape{};
  std::vector<int> axes{};
};

struct UnsqueezeParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  lite::Tensor* XShape{};
  std::vector<int> axes{};
  const lite::Tensor* axes_tensor{};
  std::vector<const lite::Tensor*> axes_tensor_vct{};
};

/// ----------------------- expand operators ----------------------
struct ExpandParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
  std::vector<int> expand_times{};
};

/// ----------------------- matmul operators ----------------------
struct MatMulParam {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  bool transpose_X{false};
  bool transpose_Y{false};
  float alpha{1.0f};
};

struct GatherParam {
  const lite::Tensor* X{};
  const lite::Tensor* Index{};
  lite::Tensor* Out{};
};

/// ----------------------- assign operators -----------------------
struct AssignParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

/// ----------------------- roi_align operators -----------------------
struct RoiAlignParam {
  lite::Tensor* X{};
  lite::Tensor* ROIs{};
  lite::Tensor* Out{};
  float spatial_scale{1.0};
  int pooled_height{1};
  int pooled_width{1};
  int sampling_ratio{-1};
};

/// ----------------------- box_clip operators -----------------------
struct BoxClipParam {
  const lite::Tensor* Input{};
  const lite::Tensor* ImInfo{};
  lite::Tensor* Output{};
};

struct RangeParam {
  const lite::Tensor* Start{nullptr};
  const lite::Tensor* End{nullptr};
  const lite::Tensor* Step{nullptr};
  lite::Tensor* Out{nullptr};
};

/// ----------------------- assign_value operators -----------------------
struct AssignValueParam {
  std::vector<int> shape{};
  int dtype{};
  std::vector<float> fp32_values{};
  std::vector<int> int32_values{};
  lite::Tensor* Out{};
};

/// --------------- sequence_topk_avg_pooling operators ------------------
struct SequenceTopkAvgPoolingParam {
  const lite::Tensor* X{};
  const lite::Tensor* ROW{};
  const lite::Tensor* COLUMN{};
  lite::Tensor* Out{};
  lite::Tensor* pos{};
  int channel_num{};
  std::vector<int> topks{};
};

/// --------------- search_fc operators ------------------
struct SearchFcParam {
  const lite::Tensor* X{};
  const lite::Tensor* W{};
  const lite::Tensor* b{};
  lite::Tensor* Out{};
  int out_size{};
};
/// --------------------- match_matrix_tensor operators --------------------
struct MatchMatrixTensorParam {
  const lite::Tensor* x{};
  const lite::Tensor* y{};
  const lite::Tensor* w{};
  lite::Tensor* out{};
  lite::Tensor* tmp{};

  int dim_t;
};

/// --------------------- search_seq_depadding operators --------------------
struct SearchSeqDepaddingParam {
  const lite::Tensor* pad{};
  const lite::Tensor* src{};
  lite::Tensor* out{};
};

/// --------------------- search_grnn operators --------------------
struct SearchGrnnParam {
  const lite::Tensor* x{};
  const lite::Tensor* wi{};
  const lite::Tensor* wh{};
  int num_input;
  int num_hidden;

  lite::Tensor* out{};
  lite::Tensor* tmp_buffer{};
  lite::Tensor* idx_sorted_by_width{};
  lite::Tensor* layout_input{};
};

struct SplitLodTensorParam {
  const lite::Tensor* x{};
  const lite::Tensor* mask{};
  lite::Tensor* out_true{};
  lite::Tensor* out_false{};
  int level{};
};

struct MergeLodTensorParam {
  const lite::Tensor* x{};
  const lite::Tensor* mask{};
  const lite::Tensor* in_true{};
  const lite::Tensor* in_false{};
  lite::Tensor* out{};
  int level{};
};

struct ConditionalBlockParam {
  const lite::Tensor* cond{};
  std::vector<lite::Tensor*> x{};
  std::vector<lite::Tensor*> outs{};
  cpp::BlockDesc* sub_block{};
  Scope* scope{};
  bool is_scalar_condition{};
};

struct CollectFpnProposalsParam {
  std::vector<lite::Tensor*> multi_level_rois{};
  std::vector<lite::Tensor*> multi_level_scores{};
  lite::Tensor* fpn_rois{};
  int post_nms_topN{};
};

struct DistributeFpnProposalsParam {
  const lite::Tensor* fpn_rois{};
  std::vector<lite::Tensor*> multi_fpn_rois{};
  lite::Tensor* restore_index{};
  int min_level{};
  int max_level{};
  int refer_level{};
  int refer_scale{};
};

/// --------------------- instance_norm operators --------------------
struct InstanceNormParam {
  lite::Tensor* x{};
  lite::Tensor* out{};
  lite::Tensor* bias{};
  lite::Tensor* scale{};
  lite::Tensor* saved_mean{};
  lite::Tensor* saved_variance{};
  float epsilon;
};
/// --------------------- grid sampler operators --------------------
struct GridSamplerParam {
  lite::Tensor* x{};
  lite::Tensor* out{};
  lite::Tensor* grid{};
};

}  // namespace operators
}  // namespace lite
}  // namespace paddle