/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <string>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/transform.h"
namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using platform::Transform;

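// CPU forward kernel for PReLU: out = x when x > 0, alpha * x otherwise.
// The "mode" attribute selects how alpha is shared: one weight per channel
// ("channel"), one weight per element of a single sample ("element"), or a
// single scalar weight for every other mode (the final else branch).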
template <typename DeviceContext, typename T>
class PReluKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<Tensor>("X");
    auto* alpha = context.Input<Tensor>("Alpha");
    auto* out = context.Output<Tensor>("Out");

    const T* x_ptr = x->data<T>();
    T* o_ptr = out->mutable_data<T>(context.GetPlace());

    const T* alpha_ptr = alpha->data<T>();
    auto& mode = context.Attr<std::string>("mode");
    auto& data_format = context.Attr<std::string>("data_format");

    int numel = x->numel();
    auto dim = x->dims();
    int index = 0;
    int i = 0;
    if (mode == "channel") {
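      // One alpha weight per channel. For NCHW the channel of flat index i
      // is (i / temp) % C, where temp is the product of the spatial dims;
      // for NHWC (the else branch) it is simply i % C.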
      if (data_format == "NCHW") {
        int temp = 1;
        for (int j = 2; j < dim.size(); j++) {
          temp *= dim[j];
        }
        for (i = 0; i < numel; i++) {
          index = (i / temp) % dim[1];
          o_ptr[i] = x_ptr[i] > 0 ? x_ptr[i] : alpha_ptr[index] * x_ptr[i];
        }
      } else {
        for (i = 0; i < numel; i++) {
          index = i % dim[dim.size() - 1];
          o_ptr[i] = x_ptr[i] > 0 ? x_ptr[i] : alpha_ptr[index] * x_ptr[i];
        }
      }
    } else if (mode == "element") {
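      // One alpha weight per element of a sample: temp is the product of
      // all non-batch dims, so the alpha index wraps once per batch item.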
      int temp = 1;
      for (int j = 1; j < dim.size(); j++) {
        temp *= dim[j];
      }
      for (i = 0; i < numel; i++) {
        index = i % temp;
        o_ptr[i] = x_ptr[i] > 0 ? x_ptr[i] : alpha_ptr[index] * x_ptr[i];
      }
    } else {
      for (i = 0; i < numel; i++) {
        o_ptr[i] = x_ptr[i] > 0 ? x_ptr[i] : alpha_ptr[0] * x_ptr[i];
      }
    }
  }
};

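// CPU backward kernel for PReLU. Given dout = dL/dOut it computes dL/dX
// and, when the corresponding gradient outputs exist, dL/dAlpha. Only
// elements with x <= 0 are scaled by alpha, so only they contribute to
// the alpha gradient.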
template <typename DeviceContext, typename T>
class PReluGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<Tensor>("X");
    auto* dx = context.Output<Tensor>(framework::GradVarName("X"));
    auto* dout = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* dalpha = context.Output<Tensor>(framework::GradVarName("Alpha"));
    auto* alpha = context.Input<Tensor>("Alpha");
    const T* alpha_ptr = alpha->data<T>();
    const T* x_ptr = x->data<T>();
    const T* dout_ptr = dout->data<T>();
    std::string mode = context.Attr<std::string>("mode");
    auto& data_format = context.Attr<std::string>("data_format");
    int numel = x->numel();
    auto dim = x->dims();
    int index = 0;
    int i = 0;
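    // Gradient w.r.t. the input: dx = dout where x > 0, alpha * dout
    // otherwise, using the same alpha indexing as the forward pass.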
    if (dx) {
      T* dx_ptr = dx->mutable_data<T>(context.GetPlace());
      if (mode == "channel") {
        if (data_format == "NCHW") {
          int temp = 1;
          for (int j = 2; j < dim.size(); j++) {
            temp *= dim[j];
          }
          for (i = 0; i < numel; i++) {
            index = (i / temp) % dim[1];
            dx_ptr[i] =
                x_ptr[i] > 0 ? dout_ptr[i] : alpha_ptr[index] * dout_ptr[i];
          }
        } else {
          for (i = 0; i < numel; i++) {
            index = i % dim[dim.size() - 1];
            dx_ptr[i] =
                x_ptr[i] > 0 ? dout_ptr[i] : alpha_ptr[index] * dout_ptr[i];
          }
        }
      } else if (mode == "element") {
        int temp = 1;
        for (int j = 1; j < dim.size(); j++) {
          temp *= dim[j];
        }
        for (i = 0; i < numel; i++) {
          index = i % temp;
          dx_ptr[i] =
              x_ptr[i] > 0 ? dout_ptr[i] : alpha_ptr[index] * dout_ptr[i];
        }
      } else {
        for (i = 0; i < numel; i++) {
          dx_ptr[i] = x_ptr[i] > 0 ? dout_ptr[i] : alpha_ptr[0] * dout_ptr[i];
        }
      }
    }

    index = 0;
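    // Gradient w.r.t. alpha: accumulate x * dout over every element with
    // x <= 0 that shares a given alpha weight; the buffer is zeroed first
    // because the loops below only ever add into it.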
    if (dalpha) {
      T* dalpha_ptr = dalpha->mutable_data<T>(context.GetPlace());
      memset(dalpha_ptr, 0, sizeof(T) * dalpha->numel());

      if (mode == "channel") {
        if (data_format == "NCHW") {
          int temp = 1;
          for (int j = 2; j < dim.size(); j++) {
            temp *= dim[j];
          }
          for (i = 0; i < numel; i++) {
            index = (i / temp) % dim[1];
            dalpha_ptr[index] += x_ptr[i] > 0 ? 0 : x_ptr[i] * dout_ptr[i];
          }
        } else {
          for (i = 0; i < numel; i++) {
            index = i % dim[dim.size() - 1];
            dalpha_ptr[index] += x_ptr[i] > 0 ? 0 : x_ptr[i] * dout_ptr[i];
          }
        }
      } else if (mode == "element") {
        int temp = 1;
        for (int j = 1; j < dim.size(); j++) {
          temp *= dim[j];
        }
        for (i = 0; i < numel; i++) {
          index = i % temp;
          dalpha_ptr[index] += x_ptr[i] > 0 ? 0 : x_ptr[i] * dout_ptr[i];
        }
      } else {
        for (i = 0; i < numel; i++) {
          dalpha_ptr[0] += x_ptr[i] > 0 ? 0 : x_ptr[i] * dout_ptr[i];
        }
      }
    }

    // TODO(Guanzhong): add GPU kernels
  }
};
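
// A typical CPU registration for these kernels lives in the matching
// prelu_op.cc; a rough sketch (the exact kernel/type list below is an
// assumption, not taken from this header):
//
//   REGISTER_OP_CPU_KERNEL(
//       prelu, ops::PReluKernel<paddle::platform::CPUDeviceContext, float>);
//   REGISTER_OP_CPU_KERNEL(
//       prelu_grad,
//       ops::PReluGradKernel<paddle::platform::CPUDeviceContext, float>);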

}  // namespace operators
}  // namespace paddle