/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/softmax.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using DDim = framework::DDim;

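// Map a possibly negative `axis` into the range [0, rank); e.g. axis = -1
// on a rank-3 tensor resolves to 2.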
static inline int CanonicalAxis(const int axis, const int rank) {
  if (axis < 0) {
    return axis + rank;
  }
  return axis;
}

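// Product of the dimensions before `axis`; e.g. for dims = {2, 3, 4} and
// axis = 2 this returns 2 * 3 = 6.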
static inline int SizeToAxis(const int axis, DDim dims) {
  int size = 1;
  for (int i = 0; i < axis; i++) {
    size *= dims[i];
  }
  return size;
}

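// Product of the dimensions from `axis` onward; e.g. for dims = {2, 3, 4}
// and axis = 2 this returns 4.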
static inline int SizeFromAxis(const int axis, DDim dims) {
  int size = 1;
  for (int i = axis; i < dims.size(); i++) {
    size *= dims[i];
  }
  return size;
}

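// Forward kernel: flattens the input to a 2-D {n, d} view around the
// softmax axis and delegates the computation, parameterized by axis_dim,
// to math::SoftmaxFunctor.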
template <typename DeviceContext, typename T>
class SoftmaxKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* X = context.Input<Tensor>("X");
    auto* Out = context.Output<Tensor>("Out");
    const int rank = X->dims().size();
    const int axis = CanonicalAxis(context.Attr<int>("axis"), rank);
    int axis_dim = X->dims()[axis];

    // allocate memory on device.
    Out->mutable_data<T>(context.GetPlace());

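    // Build zero-copy 2-D views: n collapses the dimensions before `axis`,
    // d collapses `axis` and every dimension after it.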
    const int n = SizeToAxis(axis, X->dims());
    const int d = SizeFromAxis(axis, X->dims());
    Tensor X_2d, Out_2d;
    X_2d.ShareDataWith(*X).Resize({n, d});
    Out_2d.ShareDataWith(*Out).Resize({n, d});

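    // The third template argument of SoftmaxFunctor picks the code path at
    // compile time; inference builds take the specialization with the
    // inference flag set to true.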
#ifdef PADDLE_ON_INFERENCE
    math::SoftmaxFunctor<DeviceContext, T, true>()(
        context.template device_context<DeviceContext>(), axis_dim, &X_2d,
        &Out_2d);
#else
    math::SoftmaxFunctor<DeviceContext, T, false>()(
        context.template device_context<DeviceContext>(), axis_dim, &X_2d,
        &Out_2d);
#endif
  }
};

template <typename DeviceContext, typename T>
class SoftmaxGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* Out = context.Input<Tensor>("Out");
    auto* dOut = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* dX = context.Output<Tensor>(framework::GradVarName("X"));
    const int rank = dX->dims().size();
    const int axis = CanonicalAxis(context.Attr<int>("axis"), rank);
    int axis_dim = dX->dims()[axis];

    // allocate memory on device.
    dX->mutable_data<T>(context.GetPlace());

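    // Same zero-copy {n, d} flattening as in the forward kernel.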
    const int n = SizeToAxis(axis, dX->dims());
    const int d = SizeFromAxis(axis, dX->dims());
    Tensor dX_2d, Out_2d, dOut_2d;
    dX_2d.ShareDataWith(*dX).Resize({n, d});
    Out_2d.ShareDataWith(*Out).Resize({n, d});
    dOut_2d.ShareDataWith(*dOut).Resize({n, d});

    math::SoftmaxGradFunctor<DeviceContext, T>()(
        context.template device_context<DeviceContext>(), axis_dim, &Out_2d,
        &dOut_2d, &dX_2d);
  }
};
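
// A minimal registration sketch (an assumption for illustration; the real
// registrations live in the op's .cc/.cu files):
//
//   namespace ops = paddle::operators;
//   REGISTER_OP_CPU_KERNEL(
//       softmax, ops::SoftmaxKernel<paddle::platform::CPUDeviceContext, float>,
//       ops::SoftmaxKernel<paddle::platform::CPUDeviceContext, double>);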

}  // namespace operators
}  // namespace paddle