/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <iostream>
#include "mkldnn.hpp"
#include "paddle/fluid/operators/softmax_op.h"
#include "paddle/fluid/platform/mkldnn_reuse.h"

namespace paddle {
namespace operators {

using paddle::framework::Tensor;
using paddle::platform::MKLDNNDeviceContext;
using paddle::platform::MKLDNNMemDesc;

using mkldnn::memory;  // Note: paddle also has a "memory" namespace
using mkldnn::primitive;
using mkldnn::prop_kind;
using mkldnn::softmax_backward;
using mkldnn::softmax_forward;
using mkldnn::stream;
using platform::to_void_cast;

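// SoftmaxMKLDNNHandler caches the MKL-DNN softmax primitives (and, via the
// base MKLDNNHandler, the memory primitives) in the device context under a
// key derived from the input shape and the output variable name, so that
// repeated runs with the same shapes reuse the primitives instead of
// recreating them on every iteration.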
class SoftmaxMKLDNNHandler : public platform::MKLDNNHandler {
 public:
  SoftmaxMKLDNNHandler(
      std::shared_ptr<mkldnn::softmax_forward::primitive_desc> softmax_pd,
      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
      const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        softmax_pd_(softmax_pd) {}

  SoftmaxMKLDNNHandler(
      std::shared_ptr<mkldnn::softmax_forward::primitive_desc> softmax_pd,
      std::shared_ptr<mkldnn::softmax_backward::primitive_desc> softmax_bwd_pd,
      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
      const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        softmax_pd_(softmax_pd),
        softmax_bwd_pd_(softmax_bwd_pd) {
    // If we are in the Grad operator then update the key with a BWD suffix
    // to distinguish it from the FWD memory primitives
    key_ += "-BWD";
  }

  std::shared_ptr<mkldnn::softmax_forward> AcquireSoftmax(
      std::shared_ptr<mkldnn::memory> dst_memory_p,
      std::shared_ptr<mkldnn::memory> src_memory_p) {
    /*Generate key*/
    auto prim_key = key_ + "@softmax_p";

    auto softmax_p = std::static_pointer_cast<mkldnn::softmax_forward>(
        dev_ctx_.GetBlob(prim_key));
    PADDLE_ENFORCE((softmax_p != nullptr) || (is_reusing_ == false),
                   "Fail to find softmax primitive in device context");
    if (softmax_p == nullptr) {
      softmax_p = std::make_shared<mkldnn::softmax_forward>(
          *softmax_pd_, *(static_cast<mkldnn::memory*>(src_memory_p.get())),
          *(static_cast<mkldnn::memory*>(dst_memory_p.get())));
      dev_ctx_.SetBlob(prim_key, softmax_p);
    } else {
      is_reusing_ = true;
    }

    return softmax_p;
  }

  std::shared_ptr<mkldnn::softmax_backward> AcquireSoftmaxBackward(
      std::shared_ptr<mkldnn::memory> dst_memory_p,
      std::shared_ptr<mkldnn::memory> diff_dst_memory_p,
      std::shared_ptr<mkldnn::memory> diff_src_memory_p) {
    auto prim_key = key_ + "@softmax_bwd_p";
    auto softmax_bwd_p = std::static_pointer_cast<mkldnn::softmax_backward>(
        dev_ctx_.GetBlob(prim_key));
    PADDLE_ENFORCE((softmax_bwd_p != nullptr) || (is_reusing_ == false),
                   "Fail to find softmax backward primitive in device context");
    if (softmax_bwd_p == nullptr) {
      softmax_bwd_p = std::make_shared<mkldnn::softmax_backward>(
          *softmax_bwd_pd_, *dst_memory_p, *diff_dst_memory_p,
          *diff_src_memory_p);
      dev_ctx_.SetBlob(prim_key, softmax_bwd_p);
    } else {
      is_reusing_ = true;
    }

    return softmax_bwd_p;
  }

 private:
  std::shared_ptr<mkldnn::softmax_forward::primitive_desc> softmax_pd_;
  std::shared_ptr<mkldnn::softmax_backward::primitive_desc> softmax_bwd_pd_;
};

template <typename T>
class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
                   "It must use CPUPlace.");
    auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
    auto mkldnn_engine = dev_ctx.GetEngine();
    const Tensor* X = ctx.Input<Tensor>("X");
    Tensor* Out = ctx.Output<Tensor>("Out");
    PADDLE_ENFORCE_EQ(
        X->dims(), Out->dims(),
        "The shape of softmax's input and output must be identical.");

    const int axis = ctx.Attr<int>("axis");
    int rank = X->dims().size();

    // make sure 'Out' holds memory, which will be shared by 'Out_2d' later.
    Out->mutable_data<T>(ctx.GetPlace());

    std::vector<int> perm, shape;
    CalcTransPermAndShapeByAxis(*X, axis, &perm, &shape);

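    // When the requested axis is not the innermost dimension, transpose the
    // data so that the softmax axis becomes innermost, then flatten it to 2D
    // (NC); the MKL-DNN softmax below normalizes along C.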
    Tensor X_2d, Out_2d;
    Tensor X_trans, Out_trans;
    if (axis != -1 && axis != rank - 1) {
      X_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
      Out_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
      TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, *X, &X_trans,
                                                  perm);
      TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, *Out,
                                                  &Out_trans, perm);
      auto dims = X_trans.dims();
      auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
      X_2d.ShareDataWith(X_trans).Resize(flattened_dims);
      Out_2d.ShareDataWith(Out_trans).Resize(flattened_dims);
    } else {
      auto dims = X->dims();
      auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
      X_2d.ShareDataWith(*X).Resize(flattened_dims);
      Out_2d.ShareDataWith(*Out).Resize(flattened_dims);
    }

    const T* input_data = X_2d.data<T>();
    T* output_data = Out_2d.mutable_data<T>(ctx.GetPlace());

    std::vector<int> src_tz = paddle::framework::vectorize2int(X_2d.dims());
    std::vector<int> dst_tz = src_tz;
    // Same memory descriptor to be used for input and output
    memory::dims softmax_tz = {src_tz[0], src_tz[1]};
    // Generate keys for storing/retrieving primitives for this operator
    const std::string key =
        platform::MKLDNNHandler::GetHash(softmax_tz, ctx.op().Output("Out"));
    const std::string key_softmax_pd = key + "@softmax_pd";

    // Currently only NC data format is supported
    auto softmax_md = MKLDNNMemDesc(
        {softmax_tz}, platform::MKLDNNGetDataType<T>(), memory::format::nc);
    // Normalization is done along the innermost dimension, e.g. C out of NC
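    // prop_kind::forward_scoring selects the inference flavour of the forward
    // primitive; the resulting primitive_desc is also reused below as the
    // hint for the backward pass (see SoftmaxMKLDNNGradKernel).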
    auto softmax_desc = softmax_forward::desc(prop_kind::forward_scoring,
                                              softmax_md, 1 /*dim: C*/);
    auto softmax_pd = std::make_shared<mkldnn::softmax_forward::primitive_desc>(
        softmax_desc, mkldnn_engine);
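    // Stash the forward primitive descriptor in the device context so that
    // the grad kernel can look it up again under the same key.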
    dev_ctx.SetBlob(key_softmax_pd, softmax_pd);

    SoftmaxMKLDNNHandler handler(softmax_pd, dev_ctx, mkldnn_engine, key);
    auto softmax_src_memory_p =
        handler.AcquireSrcMemory(softmax_md, to_void_cast<T>(input_data));
    auto softmax_dst_memory_p =
        handler.AcquireDstMemory(softmax_md, to_void_cast<T>(output_data));
    auto softmax_p =
        handler.AcquireSoftmax(softmax_dst_memory_p, softmax_src_memory_p);

    // We cannot use softmax_dst_memory_p to get the prim desc, as it
    // contains flattened (2D) dims while the output tensor can have
    // 2, 3, 4+ dims
    if (axis != -1 && axis != rank - 1) {
      auto output_mem_pd = paddle::platform::create_prim_desc_from_dims(
          shape, mkldnn::memory::format::blocked);
      Out_trans.set_mkldnn_prim_desc(output_mem_pd);
    } else {
      auto output_mem_pd = paddle::platform::create_prim_desc_from_dims(
          paddle::framework::vectorize2int(Out->dims()),
          mkldnn::memory::format::blocked);
      Out->set_mkldnn_prim_desc(output_mem_pd);
    }

    std::vector<primitive> pipeline{
        *(static_cast<softmax_forward::primitive*>(softmax_p.get()))};
    stream(stream::kind::eager).submit(pipeline).wait();

    const bool is_test = ctx.Attr<bool>("is_test");
    if (!is_test) {
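      // Clamp outputs to a small positive threshold during training;
      // presumably this protects downstream ops (e.g. the log in
      // cross-entropy) from exact zeros produced by underflow.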
      T threshold = exp(-64);
      for (int i = 0; i < dst_tz[0] * dst_tz[1]; ++i) {
        output_data[i] =
            output_data[i] < threshold ? threshold : output_data[i];
      }
    }

    if (axis != -1 && axis != rank - 1) {
      TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, Out_trans,
                                                  Out, perm);
    }
  }
};

template <typename T>
class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
                   "It must use CPUPlace.");

    auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
    auto mkldnn_engine = dev_ctx.GetEngine();
    const Tensor* Out = ctx.Input<Tensor>("Out");
    auto* dOut = ctx.template Input<Tensor>(framework::GradVarName("Out"));
    auto* dX =
        ctx.template Output<framework::Tensor>(framework::GradVarName("X"));

    PADDLE_ENFORCE_EQ(
        dOut->dims(), dX->dims(),
        "The shape of softmax_grad's input and output must be identical.");

    const int axis = ctx.Attr<int>("axis");
    int rank = Out->dims().size();

    // make sure 'dX' holds memory, which will be shared by 'dX_2d' later.
    dX->template mutable_data<T>(ctx.GetPlace());

    std::vector<int> perm, shape;
    CalcTransPermAndShapeByAxis(*dX, axis, &perm, &shape);

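    // Mirror the forward kernel: transpose so that the softmax axis is
    // innermost, then flatten dX, Out and dOut to 2D (NC).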
    Tensor dX_2d, Out_2d, dOut_2d;
    Tensor dX_trans, Out_trans, dOut_trans;
    if (axis != -1 && axis != rank - 1) {
      dX_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
      Out_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
      dOut_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
      TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, *dX,
                                                  &dX_trans, perm);
      TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, *Out,
                                                  &Out_trans, perm);
      TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, *dOut,
                                                  &dOut_trans, perm);
      auto dims = dX_trans.dims();
      auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
      dX_2d.ShareDataWith(dX_trans).Resize(flattened_dims);
      Out_2d.ShareDataWith(Out_trans).Resize(flattened_dims);
      dOut_2d.ShareDataWith(dOut_trans).Resize(flattened_dims);
    } else {
      auto dims = dX->dims();
      auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
      dX_2d.ShareDataWith(*dX).Resize(flattened_dims);
      Out_2d.ShareDataWith(*Out).Resize(flattened_dims);
      dOut_2d.ShareDataWith(*dOut).Resize(flattened_dims);
    }

    const T* dst_data = Out_2d.data<T>();
    const T* diff_dst_ptr = dOut_2d.template data<T>();
    T* diff_src_ptr = dX_2d.template mutable_data<T>(ctx.GetPlace());

    std::vector<int> dst_tz = paddle::framework::vectorize2int(Out_2d.dims());
    std::vector<int> src_tz(dst_tz);

    // Same memory descriptor to be used for input and output
    memory::dims softmax_tz = {src_tz[0], src_tz[1]};
    // Currently only supports NC data format
    // Retrieve the forward softmax primitive desc from the device context
    const std::string key =
        platform::MKLDNNHandler::GetHash(softmax_tz, ctx.op().Input("Out"));
    const std::string key_softmax_pd = key + "@softmax_pd";

    auto softmax_pd =
        std::static_pointer_cast<mkldnn::softmax_forward::primitive_desc>(
            dev_ctx.GetBlob(key_softmax_pd));
    PADDLE_ENFORCE(softmax_pd != nullptr,
                   "Fail to find softmax_pd in device context");

    // TODO(jczaja): Add layouts support when there is a need to do so
    // Two-dimensional softmax supports the NC format
    auto data_softmax_md = MKLDNNMemDesc(
        {softmax_tz}, platform::MKLDNNGetDataType<T>(), memory::format::nc);
    auto diff_softmax_md = MKLDNNMemDesc(
        {softmax_tz}, platform::MKLDNNGetDataType<T>(), memory::format::nc);
    // Normalization is done along the innermost dimension, e.g. C out of NC
    auto softmax_bwd_desc =
        softmax_backward::desc(diff_softmax_md, data_softmax_md, 1 /* dim: C*/);
    auto softmax_bwd_pd =
        std::make_shared<mkldnn::softmax_backward::primitive_desc>(
            softmax_bwd_desc, mkldnn_engine, *softmax_pd);

    SoftmaxMKLDNNHandler handler(softmax_pd, softmax_bwd_pd, dev_ctx,
                                 mkldnn_engine, key);
    auto dst_memory_p =
        handler.AcquireDstMemory(data_softmax_md, to_void_cast<T>(dst_data));
    auto diff_dst_memory_p = handler.AcquireDiffDstMemory(
        diff_softmax_md, to_void_cast<T>(diff_dst_ptr));
    auto diff_src_memory_p = handler.AcquireDiffSrcMemory(
        diff_softmax_md, to_void_cast<T>(diff_src_ptr));

    // Get primitive from device context
    auto softmax_bwd_p = handler.AcquireSoftmaxBackward(
        dst_memory_p, diff_dst_memory_p, diff_src_memory_p);

    std::vector<primitive> pipeline{*softmax_bwd_p};
    stream(stream::kind::eager).submit(pipeline).wait();

    if (axis != -1 && axis != rank - 1) {
      TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, dX_trans, dX,
                                                  perm);
    }
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

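// These kernels are registered for the MKLDNN library type on CPUPlace; they
// are dispatched when the softmax / softmax_grad ops run with the use_mkldnn
// attribute enabled.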
REGISTER_OP_KERNEL(softmax, MKLDNN, ::paddle::platform::CPUPlace,
                   ops::SoftmaxMKLDNNKernel<float>);
REGISTER_OP_KERNEL(softmax_grad, MKLDNN, ::paddle::platform::CPUPlace,
                   ops::SoftmaxMKLDNNGradKernel<float>);