// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <vector>
#include "paddle/fluid/lite/core/compatible_tensor.h"
#include "paddle/fluid/lite/utils/all.h"

/*
 * This file contains all the argument parameter data structures for operators.
 */

namespace paddle {
namespace lite {
namespace operators {

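// Type-erased holder (lite's Any) able to carry any of the parameter
// structs defined below.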
using param_t = Any;

/// ----------------------- Functional operators ------------------------------
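// For the feed op: copies the `col`-th tensor of `feed_list` into `out`.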
struct FeedParam {
  const std::vector<lite::Tensor>* feed_list{};
  lite::Tensor* out{};
  int col;
};

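// For the fetch op: copies `input` into slot `col` of `fetch_list`.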
struct FetchParam {
  const lite::Tensor* input{};
  std::vector<lite::Tensor>* fetch_list{};
  int col;
};

// Helper op for the lite framework: copies a tensor between targets
// (e.g. host and device memory).
struct IoCopyParam {
  const lite::Tensor* x{};
  lite::Tensor* y{};
};

/// -------------------------- NN operators ------------------------------------

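// For the fc op: output = input * w + bias. `input` is flattened into a
// 2-D matrix whose rows come from its leading `in_num_col_dims`
// dimensions; `in_mat_dims` presumably records that matrix shape.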
struct FcParam {
  lite::Tensor* input{};
  lite::Tensor* w{};
  lite::Tensor* bias{};
  lite::Tensor* output{};
  lite::DDim in_mat_dims;
  int in_num_col_dims{1};
};

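// For the relu op: output = max(input, 0), applied elementwise.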
struct ReluParam {
  lite::Tensor* input{};
  lite::Tensor* output{};
};

// For the Mul op: output = x * y, with x and y first flattened into 2-D
// matrices; the leading x_num_col_dims / y_num_col_dims dimensions of
// each operand form the matrix rows.
struct MulParam {
  lite::Tensor* x{};
  lite::Tensor* y{};
  lite::Tensor* output{};

  int x_num_col_dims{1};
  int y_num_col_dims{1};
};

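// Gradients of the Mul op: computes x_grad and y_grad from output_grad,
// using the same flattening rule as MulParam.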
struct MulGradParam {
  const lite::Tensor* x{};
  const lite::Tensor* y{};
  const lite::Tensor* output_grad{};
  lite::Tensor* x_grad{};
  lite::Tensor* y_grad{};

  int x_num_col_dims{1};
  int y_num_col_dims{1};
};

// For the Scale op: output = scale * x + bias when bias_after_scale is
// true, otherwise output = scale * (x + bias).
struct ScaleParam {
  lite::Tensor* x{};
  lite::Tensor* output{};

  float scale{1.};
  float bias{};
  bool bias_after_scale{true};
};

/// ----------------------- elementwise operators ----------------------
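// Out = X (op) Y with broadcasting: Y's dimensions are matched against
// X's starting at dimension `axis`; axis == -1 aligns Y with the
// trailing dimensions of X.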
struct ElementwiseParam {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  int axis{-1};  // for broadcasting.
};

struct ElementwiseGradParam {
  const lite::Tensor* Y{};
  const lite::Tensor* Out_grad{};
  lite::Tensor* X_grad{};
  lite::Tensor* Y_grad{};
  int axis{-1};  // for broadcasting.
};

/// ----------------------- activation operators ----------------------
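// Shared by unary activation ops (e.g. relu, sigmoid): Out = f(X).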
struct ActivationParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct ActivationGradParam {
  const lite::Tensor* X{};
  const lite::Tensor* Out{};
  // for backward
  lite::Tensor* X_grad{};
  const lite::Tensor* Out_grad{};
};

/// ----------------------- mean operators ----------------------
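// For the mean op: Out is a scalar tensor holding the mean of all
// elements of X.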
struct MeanParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct MeanGradParam {
  const lite::Tensor* X{};
  const lite::Tensor* Out_grad{};
  // for backward
  lite::Tensor* X_grad{};
};

/// ----------------------- fill_constant operators ----------------------
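// Creates tensor `Out` with the given `shape` and element type `dtype`,
// every element set to `value`.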
struct FillConstantParam {
  int dtype{framework::proto::VarType::FP32};
  std::vector<int64_t> shape{};
  float value{0.0f};
  // Unused on x86; kept for compatibility.
  bool force_cpu{false};
  lite::Tensor* Out{};
};

/// ----------------------- sgd operators ----------------------
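// Vanilla SGD update: ParamOut = Param - LearningRate * Grad.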
struct SGDParam {
  int dtype{framework::proto::VarType::FP32};

  const lite::Tensor* Param{};
  const lite::Tensor* LearningRate{};
  const lite::Tensor* Grad{};
  lite::Tensor* ParamOut{};
};

}  // namespace operators
}  // namespace lite
}  // namespace paddle