/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/platform/mkldnn_reuse.h"

namespace paddle {
namespace operators {

using framework::DataLayout;
using framework::Tensor;
using mkldnn::memory;
using mkldnn::primitive;
using mkldnn::stream;
using platform::GetMKLDNNFormat;
using platform::MKLDNNDeviceContext;
using platform::to_void_cast;

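// Forward activation kernel: verifies that the input tensor carries the
// MKL-DNN layout and a defined memory format, then runs the bound functor.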
template <typename Functor>
class MKLDNNActivationKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    const auto *x = ctx.Input<Tensor>("X");
    PADDLE_ENFORCE_EQ(x->layout(), DataLayout::kMKLDNN,
                      "Wrong layout set for X tensor");
    PADDLE_ENFORCE_NE(x->format(), MKLDNNMemoryFormat::format_undef,
                      "Wrong format set for X tensor");

    Functor functor;
    functor(ctx);
  }
};

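// Backward activation kernel: verifies the MKL-DNN layout/format of the Out
// gradient and requires is_test to be false, then runs the bound functor.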
template <typename Functor>
class MKLDNNActivationGradKernel
    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    const auto *diff_y = ctx.Input<Tensor>(framework::GradVarName("Out"));
    PADDLE_ENFORCE_EQ(diff_y->layout(), DataLayout::kMKLDNN,
                      "Wrong layout set for Input OutGrad tensor");
    PADDLE_ENFORCE_NE(diff_y->format(), MKLDNNMemoryFormat::format_undef,
                      "Wrong format set for Input OutGrad tensor");

    PADDLE_ENFORCE_EQ(
        ctx.Attr<bool>("is_test"), false,
        "is_test attribute should be set to False in training phase.");

    Functor functor;
    functor(ctx);
  }
};

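// Runs a single MKL-DNN eltwise forward primitive for the given algorithm:
// acquires the src/dst memories and the primitive through
// ActivationMKLDNNHandler, executes it on an eager stream, and records the
// resulting MKL-DNN layout/format on the output tensor.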
template <typename T>
void eltwise_forward(const framework::ExecutionContext &ctx,
                     mkldnn::algorithm algorithm) {
  PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
                 "It must use CPUPlace.");
  auto &dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();

  const auto *x = ctx.Input<Tensor>("X");
  auto *y = ctx.Output<Tensor>("Out");

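  // alpha/beta are optional attributes (e.g. leaky_relu's negative slope);
  // they default to 0 for ops that do not define them.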
  const T alpha = ctx.op().HasAttr("alpha") ? ctx.Attr<T>("alpha") : 0;
  const T beta = ctx.op().HasAttr("beta") ? ctx.Attr<T>("beta") : 0;

  PADDLE_ENFORCE(
      x->dims().size() == 2 || x->dims().size() == 3 || x->dims().size() == 4,
      "Input dim must be with 2, 3 or 4");

  auto src_tz = framework::vectorize<int>(x->dims());

  auto src_format = src_tz.size() == 2 ? MKLDNNMemoryFormat::nc : x->format();

  bool is_test = ctx.Attr<bool>("is_test");

  platform::ActivationMKLDNNHandler<T> handler(
      src_tz, algorithm, alpha, beta, src_format, is_test, dev_ctx,
      ctx.GetPlace(), ctx.op().Input("X"));

  auto src_memory_p = handler.AcquireSrcMemory(x);
  auto dst_memory_p = handler.AcquireDstMemory(y);
  auto activation_p = handler.AcquireActivation(dst_memory_p, src_memory_p);

  // push primitive to stream and wait until it's executed
  std::vector<primitive> pipeline;
  pipeline.push_back(*activation_p);
  stream(stream::kind::eager).submit(pipeline).wait();

  y->set_layout(DataLayout::kMKLDNN);
  y->set_format(GetMKLDNNFormat(*dst_memory_p));
}

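// Runs the matching MKL-DNN eltwise backward primitive: both the forward
// input X and the Out gradient are consumed to produce the X gradient.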
template <typename T>
void eltwise_grad(const framework::ExecutionContext &ctx,
                  mkldnn::algorithm algorithm) {
  auto &dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();

  const auto *x = ctx.Input<Tensor>("X");
  const auto *diff_y = ctx.Input<Tensor>(framework::GradVarName("Out"));
  auto *diff_x = ctx.Output<Tensor>(framework::GradVarName("X"));

  const T alpha = ctx.op().HasAttr("alpha") ? ctx.Attr<T>("alpha") : 0;
  const T beta = ctx.op().HasAttr("beta") ? ctx.Attr<T>("beta") : 0;

  auto diff_dst_tz = framework::vectorize<int>(diff_y->dims());

  // diff_dst and src dims should be the same
  auto src_format =
      diff_dst_tz.size() == 2 ? MKLDNNMemoryFormat::nc : x->format();

  auto diff_y_format =
      diff_dst_tz.size() == 2 ? MKLDNNMemoryFormat::nc : diff_y->format();

  platform::ActivationMKLDNNHandler<T> handler(
      diff_dst_tz, algorithm, alpha, beta, src_format, diff_y_format, dev_ctx,
      ctx.GetPlace(), ctx.op().Input("X"));

  auto src_memory_p = handler.AcquireBackwardSrcMemory(x);
  auto diff_dst_memory_p = handler.AcquireDiffDstMemory(diff_y);
  auto diff_src_memory_p = handler.AcquireDiffSrcMemory(diff_x);
  auto activation_backward_p = handler.AcquireActivationBackward(
      diff_src_memory_p, diff_dst_memory_p, src_memory_p);

  // push primitive to stream and wait until it's executed
  std::vector<primitive> pipeline;
  pipeline.push_back(*activation_backward_p);
  stream(stream::kind::eager).submit(pipeline).wait();

  diff_x->set_layout(DataLayout::kMKLDNN);
  diff_x->set_format(GetMKLDNNFormat(*diff_src_memory_p));
}

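// Thin functors binding a concrete MKL-DNN eltwise algorithm to the generic
// forward/backward kernels above.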
template <typename T, mkldnn::algorithm algorithm>
struct MKLDNNActivationFunc : public BaseActivationFunctor<T> {
  void operator()(const framework::ExecutionContext &ctx) const {
    eltwise_forward<T>(ctx, algorithm);
  }
};

template <typename T, mkldnn::algorithm algorithm>
struct MKLDNNActivationGradFunc : public BaseActivationFunctor<T> {
  void operator()(const framework::ExecutionContext &ctx) const {
    eltwise_grad<T>(ctx, algorithm);
  }
};

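// Per-activation aliases selecting the MKL-DNN eltwise algorithm.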
template <typename T>
using ReluMKLDNNFunctor =
    MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_relu>;

template <typename T>
using TanhMKLDNNFunctor =
    MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_tanh>;

template <typename T>
using SqrtMKLDNNFunctor =
    MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_sqrt>;

template <typename T>
using AbsMKLDNNFunctor =
    MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_abs>;

template <typename T>
using ReluMKLDNNGradFunctor =
    MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_relu>;

template <typename T>
using TanhMKLDNNGradFunctor =
    MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_tanh>;

template <typename T>
using SqrtMKLDNNGradFunctor =
    MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_sqrt>;

template <typename T>
using AbsMKLDNNGradFunctor =
    MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_abs>;
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

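// Registers the forward and backward MKL-DNN kernels (float only) for a
// given activation operator.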
#define REGISTER_ACTIVATION_MKLDNN_KERNEL(act_type, functor, grad_functor) \
  REGISTER_OP_KERNEL(act_type, MKLDNN, ::paddle::platform::CPUPlace,       \
                     ops::MKLDNNActivationKernel<ops::functor<float>>);    \
  REGISTER_OP_KERNEL(                                                      \
      act_type##_grad, MKLDNN, ::paddle::platform::CPUPlace,               \
      ops::MKLDNNActivationGradKernel<ops::grad_functor<float>>);

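// Note: leaky_relu reuses the relu functors; MKL-DNN's eltwise_relu treats
// alpha as the negative slope.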
#define FOR_EACH_MKLDNN_KERNEL_FUNCTOR(__macro)                  \
  __macro(relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor);       \
  __macro(leaky_relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor); \
  __macro(tanh, TanhMKLDNNFunctor, TanhMKLDNNGradFunctor);       \
  __macro(sqrt, SqrtMKLDNNFunctor, SqrtMKLDNNGradFunctor);       \
  __macro(abs, AbsMKLDNNFunctor, AbsMKLDNNGradFunctor);

FOR_EACH_MKLDNN_KERNEL_FUNCTOR(REGISTER_ACTIVATION_MKLDNN_KERNEL);