maxouting.cc
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/math/maxouting.h"
#include <cfloat>  // for FLT_MAX

namespace paddle {
namespace operators {
namespace math {

// All tensors are in NCHW format, and groups must be greater than 1.
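// The input holds output_channels * groups channels. For every output element
// MaxOut takes the maximum over the `groups` input channels mapped to it:
//   output(n, c, h, w) = max_{g in [0, groups)} input(n, c * groups + g, h, w)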
template <typename T>
class MaxOutFunctor<platform::CPUPlace, T> {
 public:
  void operator()(const platform::DeviceContext& context,
                  const framework::Tensor& input,
                  framework::Tensor* output,
                  int groups) {
    const int batch_size = input.dims()[0];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_channels = output->dims()[1];
    int fea_size = input_height * input_width;
    // c_size is the number of output elements per sample (output_channels * H * W).
    int c_size = fea_size * output_channels;
    const T* input_data = input.data<T>();
    T* output_data = output->mutable_data<T>(context.GetPlace());

    for (int i = 0; i < batch_size; ++i) {
      int new_bindex = c_size * i;
      for (int c = 0; c < output_channels; ++c) {
        int new_cindex = fea_size * c;
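        // For each spatial position, take the maximum over the `groups`
        // input channels that feed output channel c.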
        for (int f = 0; f < fea_size; ++f) {
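          // Running maximum over the group, starting from a very small value.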
          T ele = static_cast<T>(-FLT_MAX);
          for (int ph = 0; ph < groups; ++ph) {
            T x = input_data[(new_bindex + new_cindex) * groups +
                             ph * fea_size + f];
            ele = ele > x ? ele : x;
          }
          output_data[new_bindex + new_cindex + f] = ele;
        }
      }
    }
  }
};

template <typename T>
class MaxOutGradFunctor<platform::CPUPlace, T> {
 public:
  void operator()(const platform::DeviceContext& context,
                  const framework::Tensor& input,
                  framework::Tensor* input_grad,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad,
                  int groups) {
    const int batch_size = input.dims()[0];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_channels = output.dims()[1];
    int fea_size = input_height * input_width;
    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());

    for (int i = 0; i < batch_size; ++i) {
      int blen = fea_size * output_channels * i;
      for (int c = 0; c < output_channels; ++c) {
        int clen = fea_size * c;
        for (int f = 0; f < fea_size; ++f) {
          int input_idx0 = (blen + clen) * groups + f;
          int output_idx = blen + clen + f;
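          // Route the gradient to the first input element in the group that
          // equals the output value (the arg-max). Elements after the match
          // are left untouched, so input_grad is assumed to be
          // zero-initialized by the caller.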
          for (int g = 0; g < groups; ++g) {
            int input_idx = input_idx0 + fea_size * g;
            input_grad_data[input_idx] = 0;
            if (input_data[input_idx] == output_data[output_idx]) {
              input_grad_data[input_idx] += output_grad_data[output_idx];
              break;
            }
          }
        }
      }
    }
  }
};

template class MaxOutGradFunctor<platform::CPUPlace, float>;
template class MaxOutGradFunctor<platform::CPUPlace, double>;
template class MaxOutFunctor<platform::CPUPlace, float>;
template class MaxOutFunctor<platform::CPUPlace, double>;

}  // namespace math
}  // namespace operators
}  // namespace paddle