/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/hostdevice.h"

namespace paddle {
namespace operators {
namespace math {

#define FLT_MAX \
  __FLT_MAX__  // It might need to be placed in another file, but I'm still
               // wondering where to put it.

/*
 * \brief Extracting simple operations from pooling.
 *        Both MaxPool and AvgPool need "initial", "compute" and "finalize"
 * operations.
 *        MaxPool initializes the temporary variable to the negative maximum to
 * find the maximum value in the pooling field.
 *        AvgPool initializes the temporary variable to zero to accumulate all
 * values in the pooling field, and finally takes the average.
 *        MaxPoolGrad and AvgPoolGrad are the corresponding gradient operations.
 */
template <class T>
class MaxPool {
 public:
  DEVICE inline T initial() { return static_cast<T>(-FLT_MAX); }
  DEVICE inline void compute(T& y, const T& x) { y = y > x ? y : x; }
  DEVICE inline void finalize(T& y, const T& pool_field) {}
};

template <class T>
class AvgPool {
 public:
  DEVICE inline T initial() { return static_cast<T>(0); }
  DEVICE inline void compute(T& y, const T& x) { y += x; }
  DEVICE inline void finalize(T& y, const T& pool_field) { y /= pool_field; }
};

template <class T>
class MaxPoolGrad {
 public:
  DEVICE inline void compute(const T& x, const T& y, const T& dy, T& dx,
                             T scale) {
    dx += dy * (x == y);
  }
};

template <class T>
class AvgPoolGrad {
 public:
  DEVICE inline void compute(const T& x, const T& y, const T& dy, T& dx,
                             T scale) {
    dx += (scale * dy);
  }
};
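// Illustrative sketch (not part of the original interface): how a pooling
// kernel is expected to drive the element-wise functors above over a single
// pooling window. The raw-pointer layout and the window bounds
// (hstart/hend/wstart/wend, input_width) are hypothetical names used only for
// this example; the real loops live in the corresponding .cc/.cu
// implementations. The *Grad functors are driven analogously in the backward
// loops, with `scale` typically set to 1 / pool_size for average pooling.
template <typename T, typename PoolProcess>
DEVICE inline T PoolOneWindow(const T* input_data, const int input_width,
                              const int hstart, const int hend,
                              const int wstart, const int wend,
                              PoolProcess pool_process) {
  // Start from the functor-specific identity value (-FLT_MAX for MaxPool,
  // 0 for AvgPool).
  T ele = pool_process.initial();
  // Fold every element of the pooling window into the accumulator.
  for (int h = hstart; h < hend; ++h) {
    for (int w = wstart; w < wend; ++w) {
      pool_process.compute(ele, input_data[h * input_width + w]);
    }
  }
  // Post-process the accumulator (no-op for MaxPool, divide by the window
  // size for AvgPool).
  int pool_size = (hend - hstart) * (wend - wstart);
  pool_process.finalize(ele, static_cast<T>(pool_size));
  return ele;
}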

/*
 * \brief Getting pooling results and calculating the gradient.
 *
 * In pool2d, all tensors are in NCHW format, where N is the batch size, C is
 * the number of channels, and H and W are the height and width of the feature.
 * In pool3d, all tensors are in NCDHW format, where N is the batch size, C is
 * the number of channels, and D, H and W are the depth, height and width of
 * the feature.
 *
 * In max pooling, it is possible that the pooling region contains multiple
 * maximum elements. In this case, we should compute the gradient of the first
 * maximum element only.
 * This is different from average pooling, so the max pooling gradient has
 * dedicated functors: MaxPool2dGradFunctor, MaxPool3dGradFunctor.
 */
template <typename DeviceContext, typename PoolProcess, typename T>
class Pool2dFunctor {
 public:
  void operator()(const DeviceContext& context, const framework::Tensor& input,
                  std::vector<int>& ksize, std::vector<int>& strides,
                  std::vector<int>& paddings, PoolProcess pool_compute,
                  framework::Tensor* output);
};
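// Usage sketch (hypothetical, for illustration only): a pool2d kernel would
// typically pair Pool2dFunctor with one of the element-wise functors above;
// dev_ctx, in_x, out, ksize, strides and paddings are assumed to be provided
// by the calling operator.
//
//   paddle::operators::math::Pool2dFunctor<
//       platform::CPUDeviceContext, paddle::operators::math::AvgPool<float>,
//       float>
//       pool2d_forward;
//   paddle::operators::math::AvgPool<float> pool_process;
//   pool2d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process,
//                  out);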

template <typename DeviceContext, typename PoolProcess, typename T>
class Pool2dGradFunctor {
 public:
  void operator()(const DeviceContext& context, const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad, std::vector<int>& ksize,
                  std::vector<int>& strides, std::vector<int>& paddings,
                  PoolProcess pool_compute, framework::Tensor* input_grad);
};

template <typename DeviceContext, class T>
class MaxPool2dGradFunctor {
 public:
  void operator()(const DeviceContext& context, const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad, std::vector<int>& ksize,
                  std::vector<int>& strides, std::vector<int>& paddings,
                  framework::Tensor* input_grad);
};

template <typename DeviceContext, typename PoolProcess, typename T>
class Pool3dFunctor {
 public:
  void operator()(const DeviceContext& context, const framework::Tensor& input,
                  std::vector<int>& ksize, std::vector<int>& strides,
                  std::vector<int>& paddings, PoolProcess pool_compute,
                  framework::Tensor* output);
};

template <typename DeviceContext, typename PoolProcess, typename T>
class Pool3dGradFunctor {
 public:
  void operator()(const DeviceContext& context, const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad, std::vector<int>& ksize,
                  std::vector<int>& strides, std::vector<int>& paddings,
                  PoolProcess pool_compute, framework::Tensor* input_grad);
};

template <typename DeviceContext, class T>
class MaxPool3dGradFunctor {
 public:
  void operator()(const DeviceContext& context, const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad, std::vector<int>& ksize,
                  std::vector<int>& strides, std::vector<int>& paddings,
                  framework::Tensor* input_grad);
};

/*
 * \brief Getting max pooling results and the corresponding max index, and
 * calculating the gradient.
 * In up-sampling pooling, it is necessary to know the index of the max
 * element.
 * In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in
 * NCDHW format.
 */
template <typename DeviceContext, typename T1, typename T2>
class MaxPool2dWithIndexFunctor {
 public:
  void operator()(const DeviceContext& context, const framework::Tensor& input,
                  std::vector<int>& ksize, std::vector<int>& strides,
                  std::vector<int>& paddings, framework::Tensor* output,
                  framework::Tensor* mask);
};
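// Usage sketch (hypothetical): the forward functor fills both `output` and
// `mask`; `mask` has the same shape as `output` and records where each maximum
// was found (typically its flattened position inside the input feature map),
// so the gradient functor below can route gradients through `mask` without
// re-scanning the pooling window.
//
//   math::MaxPool2dWithIndexFunctor<platform::CPUDeviceContext, float, int>
//       pool2d_with_index;
//   pool2d_with_index(dev_ctx, *in_x, ksize, strides, paddings, out, mask);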

template <typename DeviceContext, typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor {
 public:
  void operator()(const DeviceContext& context,
                  const framework::Tensor& output_grad,
                  const framework::Tensor& mask, std::vector<int>& ksize,
                  std::vector<int>& strides, std::vector<int>& paddings,
                  framework::Tensor* input_grad);
};

template <typename DeviceContext, typename T1, typename T2>
class MaxPool3dWithIndexFunctor {
 public:
  void operator()(const DeviceContext& context, const framework::Tensor& input,
                  std::vector<int>& ksize, std::vector<int>& strides,
                  std::vector<int>& paddings, framework::Tensor* output,
                  framework::Tensor* mask);
};

template <typename DeviceContext, typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor {
 public:
  void operator()(const DeviceContext& context,
                  const framework::Tensor& output_grad,
                  const framework::Tensor& mask, std::vector<int>& ksize,
                  std::vector<int>& strides, std::vector<int>& paddings,
                  framework::Tensor* input_grad);
};

}  // namespace math
}  // namespace operators
}  // namespace paddle