/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include "paddle/fluid/operators/elementwise/elementwise_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.cu.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/math/blas.h"

namespace paddle {
namespace operators {

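// Fallback path that supports broadcasting: the operand with the larger rank
// is kept as the "outer" tensor and the other is broadcast along the "axis"
// attribute; AddFunctor vs. InverseAddFunctor preserves the operand order for
// the two broadcast directions.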
template <typename DeviceContext, typename T>
void default_elementwise_add(const framework::ExecutionContext &ctx,
                             const framework::Tensor *x,
                             const framework::Tensor *y, framework::Tensor *z) {
  int axis = ctx.Attr<int>("axis");
  auto x_dims = x->dims();
  auto y_dims = y->dims();
  if (x_dims.size() >= y_dims.size()) {
    ElementwiseComputeEx<AddFunctor<T>, DeviceContext, T>(ctx, x, y, axis,
                                                          AddFunctor<T>(), z);
  } else {
    ElementwiseComputeEx<InverseAddFunctor<T>, DeviceContext, T>(
        ctx, x, y, axis, InverseAddFunctor<T>(), z);
  }
}

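// Fast path for inputs with identical shapes. Only declared here; the actual
// operator() is presumably provided by per-device specializations in the
// corresponding .cc and .cu sources.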
template <typename DeviceContext, typename T, class Enable = void>
struct SameDimsElemwiseAdd {
  void operator()(const framework::ExecutionContext &ctx,
                  const framework::Tensor *x, const framework::Tensor *y,
                  framework::Tensor *z);
};

template <typename DeviceContext, typename T>
class ElementwiseAddKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto *x = ctx.Input<framework::LoDTensor>("X");
    auto *y = ctx.Input<framework::LoDTensor>("Y");
    auto *z = ctx.Output<framework::LoDTensor>("Out");
    z->mutable_data<T>(ctx.GetPlace());
    auto dims_equal = x->dims() == y->dims();
    if (dims_equal) {
      SameDimsElemwiseAdd<DeviceContext, T> same_dims_add;
      same_dims_add(ctx, x, y, z);
    } else {
      default_elementwise_add<DeviceContext, T>(ctx, x, y, z);
    }
  }
};

template <typename T>
struct IdentityGrad {
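  // For out = x + y, d(out)/dx = d(out)/dy = 1, so each input gradient is
  // simply the upstream gradient dout.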
  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout; }
};

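// Generic backward path; ElemwiseExplicitGradCompute also covers the
// broadcast case by reducing dout over the broadcast dimensions of the
// smaller operand.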
template <typename DeviceContext, typename T>
void default_elementwise_add_grad(const framework::ExecutionContext &ctx,
                                  const framework::Tensor *x,
                                  const framework::Tensor *y,
                                  const framework::Tensor *out,
                                  const framework::Tensor *dout,
                                  framework::Tensor *dx,
                                  framework::Tensor *dy) {
  int axis = ctx.Attr<int>("axis");

  ElemwiseExplicitGradCompute<DeviceContext, T, IdentityGrad<T>,
                              IdentityGrad<T>>(ctx, *x, *y, *out, *dout, axis,
                                               dx, dy, IdentityGrad<T>(),
                                               IdentityGrad<T>());
}

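// CPU fast path for floating-point types: with equal shapes, dx and dy are
// both straight copies of dout, which is delegated to BLAS VCOPY.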
template <typename DeviceContext, typename T>
typename std::enable_if<
    std::is_floating_point<T>::value &&
    std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type
elementwise_add_grad(const framework::ExecutionContext &ctx,
                     const framework::Tensor *x, const framework::Tensor *y,
                     const framework::Tensor *out,
                     const framework::Tensor *dout, framework::Tensor *dx,
                     framework::Tensor *dy) {
  auto blas = math::GetBlas<DeviceContext, T>(ctx);
  if (dx) {
    blas.VCOPY(dout->numel(), dout->data<T>(),
               dx->mutable_data<T>(ctx.GetPlace()));
  }

  if (dy) {
    blas.VCOPY(dout->numel(), dout->data<T>(),
               dy->mutable_data<T>(ctx.GetPlace()));
  }
}

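// CPU path for non-floating-point element types, for which BLAS VCOPY does
// not apply; fall back to the generic implementation.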
template <typename DeviceContext, typename T>
typename std::enable_if<
    !std::is_floating_point<T>::value &&
    std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type
elementwise_add_grad(const framework::ExecutionContext &ctx,
                     const framework::Tensor *x, const framework::Tensor *y,
                     const framework::Tensor *out,
                     const framework::Tensor *dout, framework::Tensor *dx,
                     framework::Tensor *dy) {
  default_elementwise_add_grad<DeviceContext, T>(ctx, x, y, out, dout, dx, dy);
}

#ifdef PADDLE_WITH_CUDA
// CUDA specialization, declared here only; the definition lives in the
// corresponding .cu source.
template <typename DeviceContext, typename T>
typename std::enable_if<
    std::is_same<DeviceContext, platform::CUDADeviceContext>::value>::type
elementwise_add_grad(const framework::ExecutionContext &ctx,
                     const framework::Tensor *x, const framework::Tensor *y,
                     const framework::Tensor *out,
                     const framework::Tensor *dout, framework::Tensor *dx,
                     framework::Tensor *dy);
#endif

template <typename DeviceContext, typename T>
class ElementwiseAddGradKernel : public ElemwiseGradKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    ElemwiseGradKernel<T>::Compute(ctx);

    using Tensor = framework::Tensor;

    auto *x = ctx.Input<Tensor>("X");
    auto *y = ctx.Input<Tensor>("Y");
    auto *dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto *dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto *dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
    // "Out" itself is not needed to compute the gradients of an add, so
    // reuse dout in its place.
    auto *out = dout;

    if (dx != nullptr && dy != nullptr && (dx->dims() == dy->dims())) {
      elementwise_add_grad<DeviceContext, T>(ctx, x, y, out, dout, dx, dy);
    } else {
      default_elementwise_add_grad<DeviceContext, T>(ctx, x, y, out, dout, dx,
                                                     dy);
    }
  }
};

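// Double-grad kernel. Addition is linear, so the second-order output is just
// ddout = ddx + ddy; absent ddx/ddy inputs are substituted with
// suitably-shaped zero tensors via GetDoubleGradSafeTensor.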
template <typename DeviceContext, typename T>
class ElementwiseAddDoubleGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    using Tensor = framework::Tensor;

    auto *y = ctx.Input<Tensor>("Y");
    auto *dout = ctx.Input<Tensor>("DOut");
    auto *ddx = ctx.Input<Tensor>("DDX");
    auto *ddy = ctx.Input<Tensor>("DDY");

    auto *ddout = ctx.Output<Tensor>("DDOut");

    // ddOut = ddx + ddy
    if (ddout) {
      Tensor ddx_safe, ddy_safe;
      GetDoubleGradSafeTensor<DeviceContext, T>(ctx, dout, ddx, &ddx_safe);
      GetDoubleGradSafeTensor<DeviceContext, T>(ctx, y, ddy, &ddy_safe);

      ddout->mutable_data<T>(ctx.GetPlace());
      default_elementwise_add<DeviceContext, T>(ctx, &ddx_safe, &ddy_safe,
                                                ddout);
    }
  }
};

}  // namespace operators
}  // namespace paddle