/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>
#include <vector>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/prelu.h"
#include "paddle/fluid/operators/prelu_op.h"
#include "paddle/fluid/operators/reduce_ops/cub_reduce.h"
#include "paddle/fluid/platform/cuda_primitives.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

#define CUDA_NUM_THREADS 1024

// Number of blocks needed to cover N elements at CUDA_NUM_THREADS threads
// per block (ceiling division).
inline static int PADDLE_GET_BLOCKS(const int N) {
  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

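// PReLU forward: out = x when x > 0, otherwise alpha * x. The "mode"
// attribute controls how alpha is shared: one value per channel ("channel"),
// one per input element ("element"), or a single scalar (default).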
template <typename DeviceContext, typename T>
class CUDAPReluKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<Tensor>("X");
    auto* alpha = context.Input<Tensor>("Alpha");
    auto* out = context.Output<Tensor>("Out");

    const T* x_ptr = x->data<T>();
    T* o_ptr = out->mutable_data<T>(context.GetPlace());

    const T* alpha_ptr = alpha->data<T>();
    auto& mode = context.Attr<std::string>("mode");

    int numel = x->numel();
    auto dim = x->dims();

    VLOG(4) << "dim[0]:" << dim[0] << ", dim[1]:" << dim[1]
            << ", numel:" << numel;

    if (mode == "channel") {
      math::PreluChannelWiseDirectCUDAFunctor<T> prelu_channel_wise;
      prelu_channel_wise(context.cuda_device_context().stream(), x_ptr,
                         alpha_ptr, o_ptr, dim[0], dim[1], numel);
    } else if (mode == "element") {
      math::PreluElementWiseDirectCUDAFunctor<T> prelu_element_wise;
      prelu_element_wise(context.cuda_device_context().stream(), x_ptr,
                         alpha_ptr, o_ptr, dim[0], numel);
    } else {
      math::PreluScalarDirectCUDAFunctor<T> prelu_scalar;
      prelu_scalar(context.cuda_device_context().stream(), x_ptr, alpha_ptr,
                   o_ptr, numel);
    }
  }
};

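// Kernel-side counterpart of the string "mode" attribute.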
enum PRELU_MODE { Element, Channel, Scalar };

// Per-element PReLU gradient:
//   dx     = dy   if x > 0, otherwise alpha * dy
//   dalpha = 0    if x > 0, otherwise x * dy
// dalpha is produced once per input element here and reduced to Alpha's
// shape by the caller.
template <typename T>
__global__ void PReluOpGradKernel(const T* x_ptr, const T* alpha_ptr,
                                  const T* dy_ptr, T* dx_ptr, T* dalpha_ptr,
                                  size_t channel_num, size_t plane_size,
                                  size_t spatial_size, size_t numel,
                                  PRELU_MODE mode) {
  CUDA_KERNEL_LOOP(index, numel) {
    // Select the alpha that applies to this element.
    T scale;
    if (mode == Element) {
      size_t element_index = index % spatial_size;
      scale = alpha_ptr[element_index];
    } else if (mode == Channel) {
      size_t temp = index / plane_size;
      size_t channel_index = temp % channel_num;
      scale = alpha_ptr[channel_index];
    } else {
      scale = alpha_ptr[0];
    }
    T x = x_ptr[index];
    T dy = dy_ptr[index];
    T zero = static_cast<T>(0);
    if (dx_ptr != nullptr) dx_ptr[index] = (x > zero) ? dy : scale * dy;
    if (dalpha_ptr != nullptr) dalpha_ptr[index] = (x > zero) ? zero : x * dy;
  }
}

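// Launches PReluOpGradKernel over the flattened input. Assumes an NCHW-like
// layout: input_dims[0] is the batch size and input_dims[1] the channel
// count.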
template <typename T>
class PreluOpGradFunctor {
 public:
  void operator()(gpuStream_t stream, const T* x, const T* alpha, const T* dy,
                  T* dx, T* dalpha, const framework::DDim& input_dims,
                  PRELU_MODE mode) {
    size_t numel = 1;
    for (size_t i = 0; i < input_dims.size(); ++i) {
      numel *= input_dims[i];
    }
    size_t plane_size = numel / input_dims[0] / input_dims[1];
    size_t spatial_size = numel / input_dims[0];

    PReluOpGradKernel<
        T><<<PADDLE_GET_BLOCKS(numel), CUDA_NUM_THREADS, 0, stream>>>(
        x, alpha, dy, dx, dalpha, input_dims[1], plane_size, spatial_size,
        numel, mode);
  }
};

// Identity element-wise transform for TensorReduce: the reduction applied to
// the staged alpha gradients below is a plain sum.
struct IdentityFunctor {
  template <typename T>
  HOSTDEVICE inline T operator()(const T& x) const {
    return x;
  }
};

template <typename DeviceContext, typename T>
class CUDAPReluGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<Tensor>("X");
    auto* alpha = context.Input<Tensor>("Alpha");
    auto* dx = context.Output<Tensor>(framework::GradVarName("X"));
    auto* dy = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* dalpha = context.Output<Tensor>(framework::GradVarName("Alpha"));

    const T* x_ptr = x->data<T>();
    const T* alpha_ptr = alpha->data<T>();
    const T* dy_ptr = dy->data<T>();
    T* dx_ptr = dx ? dx->mutable_data<T>(context.GetPlace()) : nullptr;
    T* dalpha_ptr =
        dalpha ? dalpha->mutable_data<T>(context.GetPlace()) : nullptr;

    if (!dx && !dalpha) return;

    auto& mode = context.Attr<std::string>("mode");

    auto dim = x->dims();
    auto stream = context.cuda_device_context().stream();

    // The kernel writes one alpha gradient per input element, so stage it in
    // a temporary tensor of X's shape and reduce it to Alpha's shape below.
    T* dalpha_tmp_ptr;
    Tensor dalpha_tmp;
    if (dalpha_ptr == nullptr) {
      dalpha_tmp_ptr = dalpha_ptr;
    } else {
      auto& dev_ctx = context.template device_context<DeviceContext>();
      dalpha_tmp = context.AllocateTmpTensor<T, DeviceContext>(dim, dev_ctx);
      dalpha_tmp_ptr = dalpha_tmp.mutable_data<T>(context.GetPlace());
    }

    PRELU_MODE m;
    if (mode == "element") {
      m = Element;
    } else if (mode == "channel") {
      m = Channel;
    } else {
      m = Scalar;
    }
    PreluOpGradFunctor<T> prelu_grad;
    prelu_grad(stream, x_ptr, alpha_ptr, dy_ptr, dx_ptr, dalpha_tmp_ptr, dim,
               m);

    if (dalpha_tmp_ptr == nullptr) return;

    // Reduce the per-element alpha gradient over every axis along which
    // alpha is shared: in "channel" mode all axes except the channel axis,
    // in "element" mode only the batch axis.
    std::vector<int> reduce_dims;
    for (size_t i = 0; i < dim.size(); i++) {
      if (mode == "channel" && i == 1) continue;
      if (mode == "element" && i != 0) continue;
      reduce_dims.push_back(i);
    }

    // Sum the staged per-element gradient down to Alpha's shape.
    TensorReduce<T, T, cub::Sum, IdentityFunctor>(
        dalpha_tmp, dalpha, reduce_dims, static_cast<T>(0), cub::Sum(),
        IdentityFunctor(), stream);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
    prelu, ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, float>,
    ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, plat::float16>,
    ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
    prelu_grad,
    ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, float>,
    ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext,
                             plat::float16>,
    ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, double>);