// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <vector>
#include "paddle/fluid/lite/core/compatible_tensor.h"
#include "paddle/fluid/lite/utils/all.h"

/*
 * This file contains the argument parameter data structures for all operators.
 */

namespace paddle {
namespace lite {
namespace operators {

using param_t = Any;  // type-erased holder for any operator's parameter struct

/// ----------------------- Functional operators ------------------------------
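// Parameters for the feed op, which forwards one tensor from the program's
// feed list into the graph.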
struct FeedParam {
  const std::vector<lite::Tensor>* feed_list{};
  lite::Tensor* out{};
  int col;  // index of the tensor to feed within the feed list
};

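// Parameters for the fetch op, which copies its input tensor into the
// program's fetch list so the result can be read back.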
struct FetchParam {
  const lite::Tensor* input{};
  std::vector<lite::Tensor>* fetch_list{};
  int col;  // slot in the fetch list to write the result to
};

// Helper op for the lite framework: copies a tensor between targets
// (e.g. host and device memory).
struct IoCopyParam {
  const lite::Tensor* x{};
  lite::Tensor* y{};
};

/// -------------------------- NN operators ------------------------------------

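// Parameters for the fc op. The input is flattened to a 2-D matrix by
// collapsing dimensions [0, in_num_col_dims) into the height and the
// remaining dimensions into the width; the op then computes
//   output = input * w + bias.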
struct FcParam {
  lite::Tensor* input{};
  lite::Tensor* w{};
  lite::Tensor* bias{};
  lite::Tensor* output{};
  lite::DDim in_mat_dims;  // input shape after flattening to a 2-D matrix
  int in_num_col_dims{1};
};

struct ReluParam {
  lite::Tensor* input{};
  lite::Tensor* output{};
};

// For Mul Op
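// x is flattened to a 2-D matrix using x_num_col_dims (same rule as
// in_num_col_dims in FcParam), y using y_num_col_dims; output = x * y.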
struct MulParam {
  lite::Tensor* x{};
  lite::Tensor* y{};
  lite::Tensor* output{};

  int x_num_col_dims{1};
  int y_num_col_dims{1};
};

// For Scale Op
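// Computes output = scale * x + bias when bias_after_scale is true,
// and output = scale * (x + bias) otherwise.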
struct ScaleParam {
  lite::Tensor* x{};
  lite::Tensor* output{};

  float scale{1.};
  float bias{};
  bool bias_after_scale{true};
};

/// ----------------------- element wise operators ----------------------
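// axis is the index in X's dimensions at which Y's dimensions begin to
// align for broadcasting; -1 (the default) aligns Y with the trailing
// dimensions of X.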
struct ElementwiseParam {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  int axis{-1};  // for broadcasting.
};

struct ElementwiseGradParam {
  // X_grad and Y_grad are written by the op; Out_grad is the incoming gradient.
  lite::Tensor* X_grad{};
  lite::Tensor* Y_grad{};
  const lite::Tensor* Out_grad{};
  int axis{-1};  // for broadcasting.
};

/// ----------------------- activation operators ----------------------
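// Generic input/output pair shared by unary activation ops.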
struct ActivationParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct ActivationGradParam {
  const lite::Tensor* X{};
  const lite::Tensor* Out{};
  // for backward
  lite::Tensor* X_grad{};
  const lite::Tensor* Out_grad{};
};

}  // namespace operators
}  // namespace lite
}  // namespace paddle