/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math.h"
#include "paddle/fluid/operators/math/cross_entropy.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/for_range.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

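// Forward kernel: computes the cross-entropy loss Y from the probability
// input X and the ground-truth Label. Tensors of rank > 2 are flattened to
// 2-D views (the last dimension is the class dimension) before the
// device-specific CrossEntropyFunctor is invoked.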
template <typename DeviceContext, typename T>
class CrossEntropyOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<Tensor>("X");
    auto* labels = ctx.Input<Tensor>("Label");
    auto* y = ctx.Output<Tensor>("Y");
    y->mutable_data<T>(ctx.GetPlace());

    int rank = x->dims().size();
    Tensor x_2d = framework::ReshapeToMatrix(*x, rank - 1);
    Tensor labels_2d = framework::ReshapeToMatrix(*labels, rank - 1);
    Tensor y_2d = framework::ReshapeToMatrix(*y, rank - 1);

    math::CrossEntropyFunctor<DeviceContext, T>()(
        ctx.template device_context<DeviceContext>(), &y_2d, &x_2d, &labels_2d,
        ctx.Attr<bool>("soft_label"), ctx.Attr<int>("ignore_index"));
  }
};

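// Gradient functor for the soft-label case. With the forward loss
// y_i = -sum_j label_ij * log(x_ij), the per-element gradient is
// dx_ij = -label_ij * dy_i / x_ij; each invocation handles one flattened
// element of dX.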
template <typename T>
class XeSoftlabelGradFunctor {
 public:
  XeSoftlabelGradFunctor(T* dx,
                         const T* dy,     // NOLINT
                         const T* x,      // NOLINT
                         const T* label,  // NOLINT
                         size_t num_classes)
      : dx_(dx), dy_(dy), x_(x), label_(label), num_classes_(num_classes) {}

  HOSTDEVICE void operator()(size_t i) {
    auto row_ids = i / num_classes_;
    dx_[i] = -label_[i] * dy_[row_ids] / x_[i];
  }

 private:
  T* dx_;
  const T* dy_;
  const T* x_;
  const T* label_;
  size_t num_classes_;
};

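// Gradient functor for the hard-label case. With y_i = -log(x_{i,label_i}),
// the gradient is -dy_i / x_{i,label_i} at the true class and zero for every
// other class; rows whose label equals ignore_index receive an all-zero
// gradient. Each invocation handles one whole sample (row) of dX.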
template <typename T>
class XeGradFunctor {
 public:
  XeGradFunctor(T* dx,
                const T* dy,           // NOLINT
                const T* x,            // NOLINT
                const int64_t* label,  // NOLINT
                size_t num_classes, size_t ignore_index)
      : dx_(dx),
        dy_(dy),
        x_(x),
        label_(label),
        num_classes_(num_classes),
        ignore_index_(ignore_index) {}

  HOSTDEVICE void operator()(size_t sample_id) {
    auto x_is_true_offset = sample_id * num_classes_ + label_[sample_id];
    for (size_t x_offset = sample_id * num_classes_;
         x_offset < (sample_id + 1) * num_classes_; ++x_offset) {
      dx_[x_offset] = (x_offset != x_is_true_offset ||
                       label_[sample_id] == static_cast<int64_t>(ignore_index_))
                          ? static_cast<T>(0)
                          : -dy_[sample_id] / x_[x_offset];
    }
  }

 private:
  T* dx_;
  const T* dy_;
  const T* x_;
  const int64_t* label_;
  size_t num_classes_;
  size_t ignore_index_;
};

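// Backward kernel: dispatches to XeSoftlabelGradFunctor or XeGradFunctor
// according to the "soft_label" attribute and fills dX element-wise through
// platform::ForRange on the chosen device.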
template <typename DeviceContext, typename T>
class CrossEntropyGradientOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<Tensor>("X");
    auto* dy = ctx.Input<Tensor>(framework::GradVarName("Y"));
    auto* label = ctx.Input<Tensor>("Label");
    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    T* dx_data = dx->mutable_data<T>(ctx.GetPlace());

    // The following computation depends only on the size of the last
    // dimension, so it is unnecessary to convert the tensors to 2-D views.
    int rank = x->dims().size();
    int64_t class_num = x->dims()[rank - 1];
    int64_t ignore_index = ctx.Attr<int>("ignore_index");
    if (ctx.Attr<bool>("soft_label")) {
      XeSoftlabelGradFunctor<T> functor(dx_data, dy->data<T>(), x->data<T>(),
                                        label->data<T>(),
                                        static_cast<size_t>(class_num));
      platform::ForRange<DeviceContext> for_range(
          ctx.template device_context<DeviceContext>(),
          static_cast<size_t>(dx->numel()));
      for_range(functor);
    } else {
      XeGradFunctor<T> functor(
          dx_data, dy->data<T>(), x->data<T>(), label->data<int64_t>(),
          static_cast<size_t>(class_num), static_cast<size_t>(ignore_index));
      platform::ForRange<DeviceContext> for_range(
          ctx.template device_context<DeviceContext>(),
          static_cast<size_t>(dy->numel()));
      for_range(functor);
    }
  }
};

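// Hard-label backward functor that avoids reading X: since the forward
// output stores y_i = -log(x_{i,label_i}), exp(y_i) equals 1 / x_{i,label_i},
// so dx = -dy_i * exp(y_i) at the true class and zero elsewhere (including
// rows whose label equals ignore_index).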
template <typename T>
struct HardLabelCrossEntropyBackwardFunctor {
  HardLabelCrossEntropyBackwardFunctor(T* dx, const T* y, const T* dy,
                                       const int64_t* label,
                                       int64_t ignore_index,
                                       int64_t feature_size)
      : dx_(dx),
        y_(y),
        dy_(dy),
        label_(label),
        ignore_index_(ignore_index),
        feature_size_(feature_size) {}

  HOSTDEVICE void operator()(int64_t idx) const {
    auto row_idx = idx / feature_size_;
    auto col_idx = idx % feature_size_;
    auto label = label_[row_idx];
    if (label == col_idx && label != ignore_index_) {
      dx_[idx] = -dy_[row_idx] * real_exp(y_[row_idx]);
    } else {
      dx_[idx] = 0;
    }
  }

  T* dx_;
  const T* y_;
  const T* dy_;
  const int64_t* label_;
  int64_t ignore_index_;
  int64_t feature_size_;
};

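// Forward kernel for the second cross-entropy operator; it supports hard
// labels only, so soft_label is passed to the functor as false.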
template <typename DeviceContext, typename T>
class CrossEntropyOpKernel2 : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x_original = ctx.Input<Tensor>("X");
    int rank = x_original->dims().size();

    auto x = framework::ReshapeToMatrix(*x_original, rank - 1);
    auto label =
        framework::ReshapeToMatrix(*ctx.Input<Tensor>("Label"), rank - 1);
    auto* y = ctx.Output<Tensor>("Y");
    y->mutable_data<T>(ctx.GetPlace());

    auto ignore_index = ctx.Attr<int>("ignore_index");

    math::CrossEntropyFunctor<DeviceContext, T>()(
        ctx.template device_context<DeviceContext>(), y, &x, &label, false,
        ignore_index);
  }
};

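// Backward counterpart of CrossEntropyOpKernel2. It only needs Y, dY, and
// Label (not X), because HardLabelCrossEntropyBackwardFunctor recovers
// 1 / x from exp(y).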
template <typename DeviceContext, typename T>
class CrossEntropyGradientOpKernel2 : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* y = ctx.Input<Tensor>("Y");
    auto* dy = ctx.Input<Tensor>(framework::GradVarName("Y"));
    auto* label = ctx.Input<Tensor>("Label");

    auto* p_dx = dx->mutable_data<T>(ctx.GetPlace());
    auto* p_y = y->data<T>();
    auto* p_dy = dy->data<T>();
    auto* p_label = label->data<int64_t>();

    int64_t ignore_index = ctx.Attr<int>("ignore_index");
    int rank = dx->dims().size();
    int64_t feature_size = dx->dims()[rank - 1];
    int64_t batch_size = framework::product(dx->dims()) / feature_size;

    platform::ForRange<DeviceContext> for_range(
        ctx.template device_context<DeviceContext>(),
        batch_size * feature_size);
    for_range(HardLabelCrossEntropyBackwardFunctor<T>(
        p_dx, p_y, p_dy, p_label, ignore_index, feature_size));
  }
};

}  // namespace operators
}  // namespace paddle