/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/operators/math.h"
#include "paddle/fluid/operators/math/cross_entropy.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"

namespace paddle {
namespace operators {
namespace math {

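// Hard-label cross entropy over N samples with D classes:
// Y[i] = -log(X[i * D + label[i]]); samples whose label equals
// ignore_index contribute zero loss.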
template <typename T, typename LabelT>
__global__ void CrossEntropyKernel(T* Y, const T* X, const LabelT* label,
                                   const int N, const int D,
                                   const int ignore_index) {
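  // Grid-stride loop over the N samples.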
  CUDA_KERNEL_LOOP(i, N) {
    auto lbl = static_cast<int64_t>(label[i]);
    PADDLE_ENFORCE(lbl >= 0 && lbl < D || lbl == ignore_index,
                   "The value of label[%d] expected >= 0 and < %ld, or == %ld, "
                   "but got %ld. Please check input value.",
                   i, D, ignore_index, lbl);
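    // An ignored sample yields zero loss; otherwise the loss is -log of
    // the predicted probability of the true class, with TolerableValue
    // clamping inf/nan results.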
    Y[i] = ignore_index == lbl
               ? static_cast<T>(0)
               : -math::TolerableValue<T>()(real_log(X[i * D + lbl]));
  }
}

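// Soft-label cross entropy, launched with one block per sample: each
// thread accumulates label[j] * log(prob[j]) over a strided slice of the
// class dimension, the partials are reduced across the block, and thread
// 0 writes the negated sum as the loss.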
template <typename T>
__global__ void SoftCrossEntropyKernel(T* Y, const T* X, const T* label,
                                       const int class_num) {
  int tid = threadIdx.x;
  T val(0);

  int idx = blockIdx.x * class_num + tid;
  int end = blockIdx.x * class_num + class_num;
  for (; idx < end; idx += blockDim.x) {
    val += math::TolerableValue<T>()(real_log(X[idx])) * label[idx];
  }

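  // Sum the per-thread partials across the block; thread 0 writes the loss.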
  val = paddle::platform::reduceSum(val, tid, blockDim.x);
  if (threadIdx.x == 0) {
    Y[blockIdx.x] = -val;
  }
}

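// Type-erased launcher for the hard-label kernel: framework::VisitDataType
// calls apply<U>() with the concrete label type U, so the labels can be
// held as a void* until their runtime dtype is known.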
template <typename T>
struct HardLabelCrossEntropyCUDAFunctorImpl {
 public:
  HardLabelCrossEntropyCUDAFunctorImpl(T* loss_data, const T* prob_data,
                                       const void* label_data,
                                       const int batch_size,
                                       const int class_num,
                                       const int ignore_index,
                                       const int block_size, gpuStream_t stream)
      : loss_data_(loss_data),
        prob_data_(prob_data),
        label_data_(label_data),
        batch_size_(batch_size),
        class_num_(class_num),
        ignore_index_(ignore_index),
        block_size_(block_size),
        stream_(stream) {}

  template <typename U>
  void apply() const {
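    // One thread per sample; round the grid size up to cover the batch.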
    int grid_size = (batch_size_ + block_size_ - 1) / block_size_;
    CrossEntropyKernel<T, U><<<grid_size, block_size_, 0, stream_>>>(
        loss_data_, prob_data_, static_cast<const U*>(label_data_), batch_size_,
        class_num_, ignore_index_);
  }

 private:
  T* loss_data_;
  const T* prob_data_;
  const void* label_data_;
  const int batch_size_;
  const int class_num_;
  const int ignore_index_;
  const int block_size_;
  gpuStream_t stream_;
};

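// Computes the per-sample cross-entropy loss of `prob` against `labels`.
// Soft labels are per-class probabilities of the same type as `prob`;
// hard labels are integer class indices whose dtype is dispatched at
// runtime.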
template <typename DeviceContext, typename T>
void CrossEntropyFunctor<DeviceContext, T>::operator()(
    const DeviceContext& ctx, framework::Tensor* out,
    const framework::Tensor* prob, const framework::Tensor* labels,
    const bool softLabel, const int ignore_index, const int axis_dim) {
  const T* prob_data = prob->data<T>();
  T* loss_data = out->mutable_data<T>(ctx.GetPlace());

  int batch_size = prob->dims()[0];
  int class_num = prob->dims()[1];
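// HIP (ROCm) kernels are capped at a smaller block size than CUDA here.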
#ifdef __HIPCC__
  constexpr int kMaxBlockDim = 256;
#else
  constexpr int kMaxBlockDim = 512;
#endif

  if (softLabel) {
    const T* label_data = labels->data<T>();
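    // Largest power of two <= class_num, capped at kMaxBlockDim,
    // presumably so the block-level reduceSum sees a power-of-two
    // thread count.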
    int block = class_num > kMaxBlockDim
                    ? kMaxBlockDim
                    : pow(2, static_cast<int>(std::log2(class_num)));

    SoftCrossEntropyKernel<T><<<batch_size, block, 0, ctx.stream()>>>(
        loss_data, prob_data, label_data, class_num);
  } else {
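    // Hard labels: dispatch on the runtime label dtype so the kernel is
    // instantiated with the matching integer type.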
    HardLabelCrossEntropyCUDAFunctorImpl<T> functor(
        loss_data, prob_data, labels->data(), batch_size, class_num,
        ignore_index, kMaxBlockDim, ctx.stream());
    framework::VisitDataType(framework::TransToProtoVarType(labels->dtype()),
                             functor);
  }
}

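// Explicit instantiations for the supported element types on both the
// legacy CUDADeviceContext and phi::GPUContext.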
template class CrossEntropyFunctor<platform::CUDADeviceContext, float>;
template class CrossEntropyFunctor<platform::CUDADeviceContext, double>;
template class CrossEntropyFunctor<platform::CUDADeviceContext,
                                   platform::float16>;

template class CrossEntropyFunctor<phi::GPUContext, float>;
template class CrossEntropyFunctor<phi::GPUContext, double>;
template class CrossEntropyFunctor<phi::GPUContext, platform::float16>;

}  // namespace math
}  // namespace operators
}  // namespace paddle