/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "boost/optional.hpp"
#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace platform {

using framework::DataLayout;
using framework::Tensor;
using user_function = std::function<std::shared_ptr<float>(const float*)>;
using memory = mkldnn::memory;

template <typename T, typename TForward,
          typename TBackward = mkldnn_dummy_primitive>
class MKLDNNHandlerT {
 public:
  MKLDNNHandlerT(const MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
                 platform::Place cpu_place, const std::string& base_key)
      : dev_ctx_(dev_ctx),
        engine_(engine),
        place_(cpu_place),
        key_common_(base_key),
        fwd_pd_(nullptr),
        bwd_pd_(nullptr) {
    if (platform::MKLDNNDeviceContext::tls().get_cur_mkldnn_session_id() !=
        platform::MKLDNNDeviceContextThreadLocals::kMKLDNNSessionID_Default) {
      key_ = key_common_;
    } else {
      key_ = key_common_ + "-t:" + ThreadIDasStr();
    }
  }

  std::shared_ptr<TForward> AcquireForwardPrimitive() {
    const std::string key_p = key_ + "@forward_p";
    auto forward_p =
        std::static_pointer_cast<TForward>(dev_ctx_.GetBlob(key_p));
    if (forward_p == nullptr) {
      forward_p = std::make_shared<TForward>(*fwd_pd_);
      dev_ctx_.SetBlob(key_p, forward_p);
    }
    return forward_p;
  }

  std::shared_ptr<TBackward> AcquireBackwardPrimitive() {
    const std::string key_p = key_ + "@backward_p";
    auto backward_p =
        std::static_pointer_cast<TBackward>(dev_ctx_.GetBlob(key_p));
    if (backward_p == nullptr) {
      backward_p = std::make_shared<TBackward>(*bwd_pd_);
      dev_ctx_.SetBlob(key_p, backward_p);
    }
    return backward_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(
        fwd_pd_->src_desc(), to_void_cast<T>(input_data), "@src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output) {
    T* ptr = output->mutable_data<T>(place_, fwd_pd_->dst_desc().get_size());
    return this->AcquireMemoryFromPrimitive(fwd_pd_->dst_desc(), ptr,
                                            "@dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      const framework::Tensor* output) {
    const T* output_data = output->data<T>();
    return this->AcquireMemoryFromPrimitive(
        bwd_pd_->dst_desc(), to_void_cast<T>(output_data), "@bwd-dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemory(
      const framework::Tensor* diffdst) {
    const T* ptr = diffdst->data<T>();
    return this->AcquireMemoryFromPrimitive(
        bwd_pd_->diff_dst_desc(), to_void_cast<T>(ptr), "@diff_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemory(
      framework::Tensor* diffsrc) {
    T* ptr =
        diffsrc->mutable_data<T>(place_, bwd_pd_->diff_src_desc().get_size());
    return this->AcquireMemoryFromPrimitive(bwd_pd_->diff_src_desc(), ptr,
                                            "@diff_src_mem_p");
  }

 protected:
  bool isCached() {
    const std::string key_pd = key_common_ + "@forward_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    return (fwd_pd_ != nullptr);
  }

  template <typename... Args>
  void AcquireForwardPrimitiveDescriptor(Args&&... args) {
    // Forward PD has to be passed to Grad op that
    // may be executed by a different thread, hence
    // for that one we use a key that does not contain TID
    const std::string key_pd = key_common_ + "@forward_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (fwd_pd_ == nullptr) {
      static std::mutex acquire_barrier;
      std::lock_guard<std::mutex> block_threads_until_finish_this_job(
          acquire_barrier);
      fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
          dev_ctx_.GetBlob(key_pd));
      if (fwd_pd_ == nullptr) {
        auto fwd_desc = typename TForward::desc(std::forward<Args>(args)...);
        fwd_pd_ = std::make_shared<typename TForward::primitive_desc>(fwd_desc,
                                                                      engine_);
        dev_ctx_.SetBlob(key_pd, fwd_pd_);
      }
    }
  }

  template <typename... Args>
  void AcquireBackwardPrimitiveDescriptor(Args&&... args) {
    const std::string key_fwd_pd = key_common_ + "@forward_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_fwd_pd));
    PADDLE_ENFORCE_NOT_NULL(fwd_pd_);
    const std::string key_pd = key_ + "@backward_pd";
    bwd_pd_ = std::static_pointer_cast<typename TBackward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (bwd_pd_ == nullptr) {
      auto bwd_desc = typename TBackward::desc(std::forward<Args>(args)...);
      bwd_pd_ = std::make_shared<typename TBackward::primitive_desc>(
          bwd_desc, engine_, *fwd_pd_);
      dev_ctx_.SetBlob(key_pd, bwd_pd_);
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, void* ptr, const std::string& suffix) {
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  const MKLDNNDeviceContext& dev_ctx_;
  mkldnn::engine engine_;
  platform::Place place_;
  std::string key_;
  std::string key_common_;
  std::shared_ptr<typename TForward::primitive_desc> fwd_pd_;
  std::shared_ptr<typename TBackward::primitive_desc> bwd_pd_;
};
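// Illustrative usage sketch (not part of this header; the handler type,
// kernel-side variable names and stream wiring below are assumptions): a
// typical forward kernel built on MKLDNNHandlerT acquires the cached
// primitive descriptor, wraps its tensors in mkldnn::memory objects and
// executes the cached primitive, e.g.:
//
//   SomeOpMKLDNNHandler<T> handler(dims, dev_ctx, ctx.GetPlace(), uniq_name);
//   auto src_memory_p = handler.AcquireSrcMemory(input);
//   auto dst_memory_p = handler.AcquireDstMemory(output);
//   auto forward_p = handler.AcquireForwardPrimitive();
//   mkldnn::stream astream(dev_ctx.GetEngine());
//   forward_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory_p},
//                                {MKLDNN_ARG_DST, *dst_memory_p}});
//   astream.wait();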

// TODO(grygielski) this class will be deleted later.
class MKLDNNHandler {
 public:
  MKLDNNHandler(const MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
                const std::string& base_key)
      : dev_ctx_(dev_ctx), engine_(engine), key_common_(base_key) {
    if (platform::MKLDNNDeviceContext::tls().get_cur_mkldnn_session_id() !=
        platform::MKLDNNDeviceContextThreadLocals::kMKLDNNSessionID_Default) {
      key_ = key_common_;
    } else {
      key_ = key_common_ + "-t:" + ThreadIDasStr();
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_diff_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_diff_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, void* ptr, const std::string& suffix) {
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  // This incarnation of AcquireMemory can call a user function, e.g. a custom
  // reorder or preprocessing routine, if needed
  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const mkldnn::memory::desc& md, void* ptr, const std::string& suffix,
      user_function custom_func = {}) {
    /*Generate key*/
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      // Call custom reorder/preprocessing func if available
      if (custom_func) {
        auto reordered_data = custom_func(reinterpret_cast<const float*>(ptr));
        dev_ctx_.SetBlob(local_key + "-custom_reorder", reordered_data);
        ptr = reinterpret_cast<void*>(reordered_data.get());
      }

      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const std::vector<int64_t>& dims, const mkldnn::memory::data_type dtype,
      const MKLDNNMemoryFormat& fmt, void* ptr, const std::string& suffix) {
    /*Generate key*/
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto md = mkldnn::memory::desc(dims, dtype, fmt);

      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const std::shared_ptr<mkldnn::memory>& user_memory_p,
      const std::shared_ptr<mkldnn::memory>& target_memory_p,
      const std::string& suffix,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto local_key = key_ + suffix;
    auto key_reorder_p = key_ + suffix + "reorder_p";

    auto stored_reorder_p = std::static_pointer_cast<mkldnn::reorder>(
        dev_ctx_.GetBlob(key_reorder_p));

    if (stored_reorder_p) {
      pipeline.push_back(*stored_reorder_p);
    } else {
      auto reorder_p =
          std::make_shared<mkldnn::reorder>(*user_memory_p, *target_memory_p);
      dev_ctx_.SetBlob(key_reorder_p, reorder_p);
      mkldnn::stream astream(engine_);
      reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                   {MKLDNN_ARG_TO, *target_memory_p}});
      astream.wait();
    }

    return target_memory_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      mkldnn::memory::desc& md,       // NOLINT
      mkldnn::memory::desc& user_md,  // NOLINT
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      const std::string& suffix,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f}, int mask = 0) {
    // create reorder primitive if the input format is not the preferred one
    auto local_key = key_ + suffix;
    auto key_reorder_p = key_ + suffix + "reorder_p";

    auto target_memory_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));

    mkldnn::stream astream(engine_);

    if (target_memory_p == nullptr) {
      target_memory_p = user_memory_p;
      if (md != user_md) {
        target_memory_p = std::make_shared<mkldnn::memory>(md, engine_);
        std::shared_ptr<mkldnn::reorder::primitive_desc> reorder_pd;
        if (is_INT8) {
          mkldnn::primitive_attr
              attri;  // attribute for int8 weights and bias data reorder.
          attri.set_output_scales(mask, scale_data);

          reorder_pd = std::shared_ptr<mkldnn::reorder::primitive_desc>(
              new mkldnn::reorder::primitive_desc(*user_memory_p,
                                                  *target_memory_p, attri));
        } else {
          reorder_pd = std::shared_ptr<mkldnn::reorder::primitive_desc>(
              new mkldnn::reorder::primitive_desc(*user_memory_p,
                                                  *target_memory_p));
        }
        auto reorder_p =
            std::shared_ptr<mkldnn::reorder>(new mkldnn::reorder(*reorder_pd));
        dev_ctx_.SetBlob(key_reorder_p, reorder_p);

        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
      dev_ctx_.SetBlob(local_key, target_memory_p);
    } else if (!is_persistent) {
      // Make reorder if needed
      auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
          dev_ctx_.GetBlob(key_reorder_p));
      if (reorder_p != nullptr) {
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
    }
    return target_memory_p;
  }

 protected:
  const MKLDNNDeviceContext& dev_ctx_;
  mkldnn::engine engine_;
  std::string key_;
  std::string key_common_;
};

template <typename T>
class BinaryMKLDNNHandler : public platform::MKLDNNHandlerT<T, dnnl::binary> {
 public:
  BinaryMKLDNNHandler(const MKLDNNDeviceContext& dev_ctx,
                      const mkldnn::engine engine, platform::Place cpu_place,
                      const Tensor* x, const Tensor* y, Tensor* z,
                      const std::string uniq_name)
      : platform::MKLDNNHandlerT<T, dnnl::binary>(
            dev_ctx, engine, cpu_place,
            platform::CreateKey(framework::vectorize(x->dims()), uniq_name)) {
    // broadcasting combined with in-place may require a longer key
    auto rankdiff = x->dims().size() - y->dims().size();
    if (rankdiff > 0) {
      this->key_ += std::to_string(rankdiff);
      this->key_common_ += std::to_string(rankdiff);
    }

    if (!this->isCached()) {
      PADDLE_ENFORCE_EQ(
          x->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for X tensor"));
      PADDLE_ENFORCE_NE(
          x->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for X tensor"));

      PADDLE_ENFORCE_EQ(
          y->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for Y tensor"));
      PADDLE_ENFORCE_NE(
          y->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for Y tensor"));

      const auto src_x_tz = framework::vectorize(x->dims());
      const auto src_y_tz = framework::vectorize(y->dims());
      const auto dst_tz = framework::vectorize(z->dims());

      const auto src0_md = dnnl::memory::desc(
          src_x_tz, platform::MKLDNNGetDataType<T>(), x->format());
      auto src1_md = dnnl::memory::desc(
          src_y_tz, platform::MKLDNNGetDataType<T>(), y->format());
      if (rankdiff > 0) {
        std::vector<int64_t> ones(rankdiff, 1);
        std::vector<int64_t> dims1_ex(src_y_tz);
        dims1_ex.insert(dims1_ex.begin(), ones.begin(), ones.end());
        src1_md = src1_md.reshape(dims1_ex);
      }
      const auto dst_md = memory::desc(dst_tz, platform::MKLDNNGetDataType<T>(),
                                       MKLDNNMemoryFormat::any);

      this->AcquireForwardPrimitiveDescriptor(dnnl::algorithm::binary_add,
                                              src0_md, src1_md, dst_md);
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireSecondSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(
        this->fwd_pd_->src1_desc(), to_void_cast<T>(input_data), "@src1_mem_p");
  }
};
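// Hedged sketch of how BinaryMKLDNNHandler is typically driven from an
// elementwise kernel (variable names and the exact execute arguments are
// assumptions, not code from this header):
//
//   BinaryMKLDNNHandler<T> handler(dev_ctx, mkldnn_engine, ctx.GetPlace(),
//                                  x, y, z, ctx.OutputName("Out"));
//   auto src_x_memory = handler.AcquireSrcMemory(x);
//   auto src_y_memory = handler.AcquireSecondSrcMemory(y);
//   auto dst_memory = handler.AcquireDstMemory(z);
//   auto binary_p = handler.AcquireForwardPrimitive();
//   mkldnn::stream astream(mkldnn_engine);
//   binary_p->execute(astream, {{DNNL_ARG_SRC_0, *src_x_memory},
//                               {DNNL_ARG_SRC_1, *src_y_memory},
//                               {DNNL_ARG_DST, *dst_memory}});
//   astream.wait();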

class SumMKLDNNHandler : public MKLDNNHandler {
 public:
  SumMKLDNNHandler(const platform::MKLDNNDeviceContext& dev_ctx,
                   mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key) {}

  std::shared_ptr<mkldnn::sum::primitive_desc> AcquireSumPrimitiveDescriptor(
      const std::vector<std::shared_ptr<mkldnn::memory>>& src_mems,
      const std::vector<float>& scales, const mkldnn::memory::desc& dst_md) {
    const std::string key_sum_pd = key_ + "@sum_pd";

    sum_pd_ = std::static_pointer_cast<mkldnn::sum::primitive_desc>(
        dev_ctx_.GetBlob(key_sum_pd));
    if (sum_pd_ == nullptr) {
      // Get vector of inputs primitive descriptors
      std::vector<mkldnn::memory::desc> src_ds;
      for (auto& input_mem : src_mems) {
        src_ds.push_back(input_mem->get_desc());
      }

A
          new mkldnn::sum::primitive_desc(dst_md, scales, src_ds, engine_));
448 449 450 451 452 453 454
      dev_ctx_.SetBlob(key_sum_pd, sum_pd_);
    }

    return sum_pd_;
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemoryFromPrimitive(void* ptr) {
    return this->AcquireMemoryFromPrimitive(sum_pd_->dst_desc(), ptr,
                                            "@dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireSecondSrcMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_src2_mem_p");
  }

  std::shared_ptr<mkldnn::sum> AcquireSum() {
    auto prim_key = key_ + "@sum_p";
    auto sum_p =
        std::static_pointer_cast<mkldnn::sum>(dev_ctx_.GetBlob(prim_key));
    if (sum_p == nullptr) {
      sum_p = std::make_shared<mkldnn::sum>(*sum_pd_);
      dev_ctx_.SetBlob(prim_key, sum_p);
    }
    return sum_p;
  }

 private:
  std::shared_ptr<mkldnn::sum::primitive_desc> sum_pd_;
};
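// Hedged sketch for SumMKLDNNHandler (the surrounding kernel code and names
// are assumptions): the inputs are first wrapped as user memories, the sum
// primitive descriptor is cached, and the primitive is fed one
// MKLDNN_ARG_MULTIPLE_SRC argument per input:
//
//   SumMKLDNNHandler handler(dev_ctx, mkldnn_engine, key);
//   auto sum_pd = handler.AcquireSumPrimitiveDescriptor(srcs, scales, dst_md);
//   auto dst_mem = handler.AcquireDstMemoryFromPrimitive(output_data);
//   auto sum_p = handler.AcquireSum();
//   std::unordered_map<int, mkldnn::memory> args;
//   for (size_t i = 0; i < srcs.size(); ++i)
//     args.insert({MKLDNN_ARG_MULTIPLE_SRC + static_cast<int>(i), *srcs[i]});
//   args.insert({MKLDNN_ARG_DST, *dst_mem});
//   mkldnn::stream astream(mkldnn_engine);
//   sum_p->execute(astream, args);
//   astream.wait();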

template <typename T>
class ActivationMKLDNNHandler
    : public MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                            mkldnn::eltwise_backward> {
 public:
  ActivationMKLDNNHandler(const std::vector<int64_t>& dims,
                          mkldnn::algorithm algorithm, float alpha, float beta,
                          const MKLDNNMemoryFormat fmt,
                          const platform::MKLDNNDeviceContext& dev_ctx,
                          platform::Place cpu_place,
                          const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                                 mkldnn::eltwise_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dims, "a", algorithm, unique_name)) {
    auto md = mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);

    this->AcquireForwardPrimitiveDescriptor(mkldnn::prop_kind::forward_training,
                                            algorithm, md, alpha, beta);
  }

  ActivationMKLDNNHandler(const std::vector<int64_t>& dims,
                          mkldnn::algorithm algorithm, float alpha, float beta,
                          const MKLDNNMemoryFormat fmt,
                          const MKLDNNMemoryFormat diff_fmt,
                          const platform::MKLDNNDeviceContext& dev_ctx,
                          platform::Place cpu_place,
                          const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                                 mkldnn::eltwise_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dims, "a", algorithm, unique_name)) {
    auto diff_dst_md = platform::MKLDNNMemDesc(
        dims, platform::MKLDNNGetDataType<T>(), diff_fmt);
    auto src_md =
        platform::MKLDNNMemDesc(dims, platform::MKLDNNGetDataType<T>(), fmt);

    this->AcquireBackwardPrimitiveDescriptor(algorithm, diff_dst_md, src_md,
                                             alpha, beta);
  }

  std::shared_ptr<mkldnn::memory> AcquireBackwardSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(this->bwd_pd_->src_desc(),
                                            to_void_cast<T>(input_data),
                                            "@bwd-src_mem_p");
  }
};
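// Hedged forward-path sketch for ActivationMKLDNNHandler (kernel-side names
// are assumptions):
//
//   ActivationMKLDNNHandler<T> handler(dims, algorithm, alpha, beta, fmt,
//                                      dev_ctx, ctx.GetPlace(), uniq_name);
//   auto src_memory_p = handler.AcquireSrcMemory(x);
//   auto dst_memory_p = handler.AcquireDstMemory(out);
//   auto activation_p = handler.AcquireForwardPrimitive();
//   mkldnn::stream astream(dev_ctx.GetEngine());
//   activation_p->execute(astream, {{MKLDNN_ARG_FROM, *src_memory_p},
//                                   {MKLDNN_ARG_TO, *dst_memory_p}});
//   astream.wait();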

template <typename T>
class LRNMKLDNNHandler
    : public MKLDNNHandlerT<T, mkldnn::lrn_forward, mkldnn::lrn_backward> {
 public:
  LRNMKLDNNHandler(const std::vector<int64_t>& dims, const int n,
                   const float alpha, const float beta, const float k,
                   const MKLDNNMemoryFormat fmt, bool is_test,
                   const platform::MKLDNNDeviceContext& dev_ctx,
                   platform::Place cpu_place, const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::lrn_forward, mkldnn::lrn_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dims, unique_name)) {
    auto src_md =
        mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
    this->AcquireForwardPrimitiveDescriptor(
        is_test ? mkldnn::prop_kind::forward_inference
                : mkldnn::prop_kind::forward_training,
        mkldnn::algorithm::lrn_across_channels, src_md, n, alpha, beta, k);
  }

  LRNMKLDNNHandler(const std::vector<int64_t>& dims, const int n,
                   const float alpha, const float beta, const float k,
                   const MKLDNNMemoryFormat fmt,
                   const MKLDNNMemoryFormat diff_fmt,
                   const platform::MKLDNNDeviceContext& dev_ctx,
                   platform::Place cpu_place, const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::lrn_forward, mkldnn::lrn_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dims, unique_name)) {
    auto src_md =
        mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
    auto diff_md =
        mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), diff_fmt);

    this->AcquireBackwardPrimitiveDescriptor(
        mkldnn::algorithm::lrn_across_channels, src_md, diff_md, n, alpha, beta,
        k);
  }

  std::shared_ptr<mkldnn::memory> AcquireWorkspaceMemory(
      framework::Tensor* workspace) {
    T* ptr = workspace->mutable_data<T>(
        this->place_, this->fwd_pd_->workspace_desc().get_size());
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->workspace_desc(),
                                            ptr, "@wrk_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireBackwardWorkspaceMemory(
      const framework::Tensor* workspace) {
    const T* workspace_data = workspace->data<T>();
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->workspace_desc(),
                                            to_void_cast<T>(workspace_data),
                                            "@bwd-wrk_mem_p");
  }
};

template <typename T>
class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
                                                   mkldnn::pooling_backward> {
 public:
  PoolingMKLDNNHandler(
      const std::vector<int64_t>& src_dims,
      const std::vector<int64_t>& dst_dims, const std::vector<int64_t>& ksize,
      const std::vector<int64_t>& strides, const std::vector<int64_t>& paddings,
      const std::string& pooling_type, bool ceil_mode,
      const MKLDNNMemoryFormat fmt, mkldnn::memory::data_type dt, bool is_test,
      const platform::MKLDNNDeviceContext& dev_ctx, platform::Place cpu_place,
      const std::string& unique_name, bool exclude_padding)
      : platform::MKLDNNHandlerT<T, mkldnn::pooling_forward,
                                 mkldnn::pooling_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(src_dims, dt, unique_name)) {
    auto src_md = mkldnn::memory::desc(src_dims, dt, fmt);
    /* create memory descriptor for pooling without specified format
     * ('any') which lets a primitive (pooling in this case) choose
     * the memory format preferred for best performance
     */
    auto dst_md =
        platform::MKLDNNMemDesc(dst_dims, dt, MKLDNNMemoryFormat::any);

    auto mkldnn_paddings = ToMkldnnPadding(paddings);

    if (ceil_mode) {
      CorrectOutputSize(src_dims, dst_dims, ksize, paddings, strides,
                        mkldnn_paddings[1]);
    }
    this->AcquireForwardPrimitiveDescriptor(
        is_test ? mkldnn::prop_kind::forward_inference
                : mkldnn::prop_kind::forward_training,
        pooling_type == "max"
            ? mkldnn::algorithm::pooling_max
            : (exclude_padding
                   ? mkldnn::algorithm::pooling_avg_exclude_padding
                   : mkldnn::algorithm::pooling_avg_include_padding),
        src_md, dst_md, strides, ksize, mkldnn_paddings[0], mkldnn_paddings[1]);
  }

  PoolingMKLDNNHandler(
      const std::vector<int64_t>& diff_dst_dims,
      const std::vector<int64_t>& diff_src_dims,
      const std::vector<int64_t>& ksize, const std::vector<int64_t>& strides,
      const std::vector<int64_t>& paddings, const std::string& pooling_type,
      bool ceil_mode, const MKLDNNMemoryFormat fmt,
      const MKLDNNMemoryFormat diff_dst_fmt, mkldnn::memory::data_type dt,
      const platform::MKLDNNDeviceContext& dev_ctx, platform::Place cpu_place,
      const std::string& unique_name, bool exclude_padding)
      : platform::MKLDNNHandlerT<T, mkldnn::pooling_forward,
                                 mkldnn::pooling_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(diff_src_dims, dt, unique_name)) {
    auto diff_dst_md = mkldnn::memory::desc(
        diff_dst_dims, platform::MKLDNNGetDataType<T>(), diff_dst_fmt);
    auto diff_src_md =
        mkldnn::memory::desc(diff_src_dims, platform::MKLDNNGetDataType<T>(),
                             MKLDNNMemoryFormat::any);

    auto mkldnn_paddings = ToMkldnnPadding(paddings);

    this->AcquireBackwardPrimitiveDescriptor(
        pooling_type == "max"
            ? mkldnn::algorithm::pooling_max
            : (exclude_padding
                   ? mkldnn::algorithm::pooling_avg_exclude_padding
                   : mkldnn::algorithm::pooling_avg_include_padding),
        diff_src_md, diff_dst_md, strides, ksize, mkldnn_paddings[0],
        mkldnn_paddings[1]);
  }

  std::shared_ptr<mkldnn::memory> AcquireWorkspaceMemory(void) {
    mkldnn::memory::desc workspace_md = this->fwd_pd_->workspace_desc();
    // Pooling PD has to be passed to Grad op that
    // may be executed by a different thread, hence
    // for that one we use a key that does not contain TID
    auto local_key = this->key_common_ + "@workspace";
    auto mem_p = std::static_pointer_cast<mkldnn::memory>(
        this->dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      static std::mutex acquire_barrier;
      std::lock_guard<std::mutex> block_threads_until_finish_this_job(
          acquire_barrier);
      mem_p = std::static_pointer_cast<mkldnn::memory>(
          this->dev_ctx_.GetBlob(local_key));
      if (mem_p == nullptr) {
        mem_p = std::make_shared<mkldnn::memory>(workspace_md, this->engine_);
        this->dev_ctx_.SetBlob(local_key, mem_p);
      }
    }
    return mem_p;
  }

 private:
  static inline int ComputeCeiledOutput(int input_size, int kernel_size,
                                        int padding, int stride) {
    return (input_size - kernel_size + 2 * padding) / stride + 1;
  }

  static inline void CorrectOutputSize(
      const std::vector<int64_t>& src_tz, const std::vector<int64_t>& dst_tz,
      const std::vector<int64_t>& kernel_size,
      const std::vector<int64_t>& paddings, const std::vector<int64_t>& strides,
      std::vector<int64_t>& right_bot_padding) {  // NOLINT
    for (size_t i = 0; i < right_bot_padding.size(); i++) {
      int desired_size = ComputeCeiledOutput(src_tz[i + 2], kernel_size[i],
                                             paddings[i], strides[i]);
      if (desired_size != dst_tz[i + 2]) {
        right_bot_padding[i] += strides[i] - 1;
      }
    }
  }
};
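// Hedged forward-path sketch for PoolingMKLDNNHandler (argument and variable
// names are assumptions):
//
//   PoolingMKLDNNHandler<T> handler(src_tz, dst_tz, ksize, strides, paddings,
//                                   pooling_type, ceil_mode, input_format, dt,
//                                   is_test, dev_ctx, ctx.GetPlace(),
//                                   ctx.OutputName("Out"), exclude_padding);
//   auto src_memory = handler.AcquireSrcMemory(input);
//   auto dst_memory = handler.AcquireDstMemory(output);
//   auto pool_p = handler.AcquireForwardPrimitive();
//   // For max pooling in training mode, additionally:
//   //   auto workspace_memory = handler.AcquireWorkspaceMemory();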

template <typename T>
class TransposeMKLDNNHandler : public MKLDNNHandler {
 public:
  TransposeMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                         std::vector<int>& axis,      // NOLINT
                         const platform::MKLDNNDeviceContext& dev_ctx,
                         mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        dims_(dims),
        axis_(axis),
        logical_axis_(dims.size(), 0) {}

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const MKLDNNMemoryFormat& fmt, void* ptr) {
    auto local_key = key_ + "@user_src_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      // Make memory descriptor using input format, unless it
      // cannot be trusted (nchw) then make up memory fmt manually
      for (size_t i = 0; i < logical_axis_.size(); ++i) {
        logical_axis_[i] = i;
      }

      auto src_md = fmt != MKLDNNMemoryFormat::nchw
                        ? platform::MKLDNNMemDesc(
                              dims_, platform::MKLDNNGetDataType<T>(), fmt)
                        : Axis2MemoryDesc(dims_, logical_axis_);
      mem_p = std::make_shared<mkldnn::memory>(src_md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output,
                                                   platform::Place place) {
    auto local_key = key_ + "@user_dst_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto dst_md = Axis2MemoryDesc(dims_, axis_);

      auto dst_data = output->mutable_data<T>(place, dst_md.get_size());

      mem_p = std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      auto dst_data = output->mutable_data<T>(place);
      mem_p->set_data_handle(dst_data);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::reorder> AcquireTranspose(
      std::shared_ptr<mkldnn::memory> dst_memory_p,
      std::shared_ptr<mkldnn::memory> src_memory_p) {
    auto prim_key = key_ + "@transpose_p";
    auto transpose_p =
        std::static_pointer_cast<mkldnn::reorder>(dev_ctx_.GetBlob(prim_key));
    if (transpose_p == nullptr) {
      transpose_p =
          std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
      dev_ctx_.SetBlob(prim_key, transpose_p);
    }
    return transpose_p;
  }

 protected:
  mkldnn::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz,  // NOLINT
                                       std::vector<int>& axis          // NOLINT
                                       ) {
    size_t ndims = axis.size();

    std::vector<int64_t> strides(ndims);
    unsigned int total_stride = 1;
    for (int i = ndims - 1; i >= 0; --i) {
      strides[axis[i]] = total_stride;
      total_stride *= nchw_tz[axis[i]];
    }
    mkldnn::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
                               strides);

    return mem_d;
  }

 private:
  std::vector<int64_t> dims_;
  std::vector<int> axis_;
  std::vector<int> logical_axis_;
};
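// Hedged sketch for TransposeMKLDNNHandler: the transpose is expressed as a
// reorder between two memories whose strides encode the axis permutation
// (kernel-side names are assumptions):
//
//   TransposeMKLDNNHandler<T> handler(nchw_tz, axis, dev_ctx, engine, key);
//   auto src_mem = handler.AcquireSrcMemory(input->format(), input_data);
//   auto dst_mem = handler.AcquireDstMemory(output, ctx.GetPlace());
//   auto transpose_p = handler.AcquireTranspose(dst_mem, src_mem);
//   mkldnn::stream astream(engine);
//   transpose_p->execute(astream, *src_mem, *dst_mem);
//   astream.wait();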

class ReorderMKLDNNHandler : public MKLDNNHandler {
 public:
  ReorderMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                       framework::proto::VarType::Type vtype,
                       mkldnn::memory::data_type dtype,
                       const platform::MKLDNNDeviceContext& dev_ctx,
                       mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        dims_(dims),
        vtype_(vtype),
        dtype_(dtype) {}

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const MKLDNNMemoryFormat& fmt, void* ptr) {
    return this->AcquireMemory(dims_, dtype_, fmt, ptr, "@user_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      framework::Tensor* output, const MKLDNNMemoryFormat& fmt,
      platform::Place place) {
    auto local_key = key_ + "@user_dst_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto dst_md = platform::MKLDNNMemDesc(dims_, dtype_, fmt);

      auto dst_data = output->mutable_data(place, vtype_);

      mem_p = std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      auto dst_data = output->mutable_data(place, vtype_);
      mem_p->set_data_handle(dst_data);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::reorder> AcquireReorder(
      std::shared_ptr<mkldnn::memory> dst_memory_p,
      std::shared_ptr<mkldnn::memory> src_memory_p) {
    auto prim_key = key_ + "@reorder_p";
    auto reorder_p =
        std::static_pointer_cast<mkldnn::reorder>(dev_ctx_.GetBlob(prim_key));
    if (reorder_p == nullptr) {
      reorder_p =
          std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
      dev_ctx_.SetBlob(prim_key, reorder_p);
    }
    return reorder_p;
  }

 private:
  std::vector<int64_t> dims_;
  framework::proto::VarType::Type vtype_;
  mkldnn::memory::data_type dtype_;
};
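// Hedged sketch for ReorderMKLDNNHandler, e.g. when converting a tensor to a
// different memory format (variable names are assumptions):
//
//   ReorderMKLDNNHandler handler(dims, in_var_type, in_mkldnn_dtype, dev_ctx,
//                                engine, key);
//   auto src_mem = handler.AcquireSrcMemory(in->format(), in_data);
//   auto dst_mem = handler.AcquireDstMemory(out, out_format, ctx.GetPlace());
//   auto reorder_p = handler.AcquireReorder(dst_mem, src_mem);
//   mkldnn::stream astream(engine);
//   reorder_p->execute(astream, *src_mem, *dst_mem);
//   astream.wait();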

template <typename T>
struct convolutional_algorithm;

template <>
struct convolutional_algorithm<mkldnn::convolution_forward> {
  static constexpr mkldnn::algorithm T = mkldnn::algorithm::convolution_direct;
};

template <>
struct convolutional_algorithm<mkldnn::deconvolution_forward> {
  static constexpr mkldnn::algorithm T =
      mkldnn::algorithm::deconvolution_direct;
};

template <class forward_t, class backward_data_t, class backward_weights_t>
class ConvMKLDNNTemplateHandler : public MKLDNNHandler {
 public:
  ConvMKLDNNTemplateHandler(const platform::MKLDNNDeviceContext& dev_ctx,
                            mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key) {}

  // TODO(jczaja): remove after conv int8 is adapted
  ConvMKLDNNTemplateHandler(
      std::shared_ptr<typename forward_t::primitive_desc> conv_pd,
      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
      const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key) {
    conv_pd_ = conv_pd;
  }

  ConvMKLDNNTemplateHandler(
      std::shared_ptr<typename forward_t::primitive_desc> conv_pd,
      std::shared_ptr<typename backward_data_t::primitive_desc>
          conv_bwd_data_pd,
      std::shared_ptr<typename backward_weights_t::primitive_desc>
          conv_bwd_weights_pd,
      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
      const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        conv_pd_(conv_pd),
        conv_bwd_weights_pd_(conv_bwd_weights_pd),
        conv_bwd_data_pd_(conv_bwd_data_pd) {
    // If we are in the Grad operator then update the key with a BWD suffix to
    // distinguish it from the FWD memory primitives
    key_ += "-BWD";
  }

  size_t GetDstMemorySize() const { return conv_pd_->dst_desc().get_size(); }

  MKLDNNMemoryFormat GetDstFormat() const {
    return paddle::platform::GetMKLDNNFormat(conv_pd_->dst_desc());
  }

  size_t GetDiffWeightsMemorySize() const {
    return conv_bwd_weights_pd_->diff_weights_desc().get_size();
  }

  size_t GetDiffSourceMemorySize() const {
    return conv_bwd_data_pd_->diff_src_desc().get_size();
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromWeightsPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto src_pd = conv_bwd_weights_pd_->src_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(src_pd, user_pd, user_memory_p,
                               "@weights-src_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemoryFromWeightsPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto diff_dst_pd = conv_bwd_weights_pd_->diff_dst_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p,
                               "@weights-diff_dst_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffWeightsMemoryFromWeightsPrimitive(
      void* ptr) {
    return this->AcquireMemoryFromPrimitive(
        conv_bwd_weights_pd_->diff_weights_desc(), ptr, "@diff_weights_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemoryFromDataPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto diff_dst_pd = conv_bwd_data_pd_->diff_dst_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p,
                               "@data-diff_dst_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryFromDataPrimitive(
      const std::shared_ptr<mkldnn::memory> user_weights_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto weights_pd = conv_bwd_data_pd_->weights_desc();
    auto user_pd = user_weights_memory_p->get_desc();
    return this->AcquireMemory(weights_pd, user_pd, user_weights_memory_p,
                               "@data-weights_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireResidualDataMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_residual_data_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemoryFromResidualDataMemory(
      const std::shared_ptr<mkldnn::memory>& user_residual_memory_p,
      void* dst_ptr,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    return this->AcquireMemory(user_residual_memory_p,
                               this->AcquireDstMemoryFromPrimitive(dst_ptr),
                               "@residual_data_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemoryFromDataPrimitive(
      void* ptr) {
    return this->AcquireMemoryFromPrimitive(conv_bwd_data_pd_->diff_src_desc(),
                                            ptr, "@diff_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemoryFromPrimitive(void* ptr) {
    return this->AcquireMemoryFromPrimitive(conv_pd_->dst_desc(), ptr,
                                            "@dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto src_pd = conv_pd_->src_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(src_pd, user_pd, user_memory_p, "@src_mem_p",
                               pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemory(
      const mkldnn::memory::desc& md, void* ptr,
      user_function custom_func = {}) {
    return this->AcquireMemory(md, ptr, "@user_weights_mem_p", custom_func);
  }

  std::shared_ptr<mkldnn::memory> AcquireBiasMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_bias_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_weights_memory_p,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f}, int mask = 0) {
    auto user_weights_pd = user_weights_memory_p->get_desc();
    auto weights_pd = conv_pd_->weights_desc();
    return this->AcquireMemory(
        weights_pd, user_weights_pd, user_weights_memory_p, "@weights_mem_p",
        pipeline, is_persistent, is_INT8, scale_data, mask);
  }

  std::shared_ptr<mkldnn::memory> AcquireBiasMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_bias_memory_p,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f},
      int mask = 0) {  // NOLINT
    auto user_bias_pd = user_bias_memory_p->get_desc();
    auto bias_pd = conv_pd_->bias_desc();
    return this->AcquireMemory(bias_pd, user_bias_pd, user_bias_memory_p,
                               "@bias_mem_p", pipeline, is_persistent, is_INT8,
                               scale_data, mask);
  }

  mkldnn::primitive_attr CreatePostOps(
      std::string fuse_activation, float fuse_alpha, float fuse_beta,
      bool fuse_residual_conn, const std::vector<float> output_shift_scale = {},
      float sum_scale = 1.0f) const {
    mkldnn::primitive_attr conv_attr;
    mkldnn::post_ops post_operations;
    if (output_shift_scale.size() > 0) {
      int mask = output_shift_scale.size() > 1 ? 1 << 1 : 0;
      conv_attr.set_output_scales(mask, output_shift_scale);
    }
    // Fusion with Elementwise layer relies on adding a sum post-operation with
    // the scale parameter. It is assumed that when fuse_residual_connection is
    // true, the output tensor contains the data coming from residual
    // connection. The result of this post_op is:
    // Output = scale * Output + Conv_Out.
    if (fuse_residual_conn) {
      post_operations.append_sum(sum_scale);
    }
    // Fusion with ReLU layer is executed through the PostOps feature. Create a
    // PostOps object and configure it to execute an eltwise relu operation.
    if (fuse_activation == "relu" || fuse_activation == "leaky_relu") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_relu,
                                     fuse_alpha, fuse_beta);
    } else if (fuse_activation == "relu6") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale,
                                     mkldnn::algorithm::eltwise_bounded_relu,
                                     fuse_alpha, fuse_beta);
    } else if (fuse_activation == "swish") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_swish,
                                     fuse_alpha, fuse_beta);
    }
    conv_attr.set_post_ops(post_operations);
    return conv_attr;
  }

  std::shared_ptr<typename forward_t::primitive_desc>
  AcquireConvolutionPrimitiveDescriptor(
      const mkldnn::memory::desc& src, const mkldnn::memory::desc& weights,
      boost::optional<const mkldnn::memory::desc&> bias,
      const mkldnn::memory::desc& dst, const std::vector<int64_t>& strides,
      const std::vector<int64_t>& paddings, const mkldnn::engine& engine,
      const std::string& fuse_activation, float fuse_alpha, float fuse_beta,
      const bool fuse_residual_conn, mkldnn::prop_kind fwd_prop_kind,
      const std::vector<float> output_shift_scale = {},
      const float sum_scale = 1.0f) {
    // Conv PD has to be passed to Grad op that
    // may be executed by a different thread, hence
    // for that one we use a key that does not contain TID
    const std::string key_conv_pd = key_common_ + "@conv_pd";

    conv_pd_ = std::static_pointer_cast<typename forward_t::primitive_desc>(
        dev_ctx_.GetBlob(key_conv_pd));

    if (conv_pd_ == nullptr) {
      static std::mutex acquire_barrier;
      std::lock_guard<std::mutex> block_threads_until_finish_this_job(
          acquire_barrier);

      conv_pd_ = std::static_pointer_cast<typename forward_t::primitive_desc>(
          dev_ctx_.GetBlob(key_conv_pd));
      if (conv_pd_ == nullptr) {
        mkldnn::memory::dims stride_dims = strides;

        auto mkldnn_paddings = ToMkldnnPadding(paddings);

        auto conv_desc =
            bias ? typename forward_t::desc(
                       fwd_prop_kind, convolutional_algorithm<forward_t>::T,
                       src, weights, *bias, dst, stride_dims,
                       mkldnn_paddings[0], mkldnn_paddings[1])
                 : typename forward_t::desc(
                       fwd_prop_kind, convolutional_algorithm<forward_t>::T,
                       src, weights, dst, stride_dims, mkldnn_paddings[0],
                       mkldnn_paddings[1]);

        mkldnn::primitive_attr conv_attr =
            CreatePostOps(fuse_activation, fuse_alpha, fuse_beta,
                          fuse_residual_conn, output_shift_scale, sum_scale);

        conv_pd_.reset(new typename forward_t::primitive_desc(
            conv_desc, conv_attr, engine));
        // Save conv_pd/src_memory/weights_memory for backward pass
        dev_ctx_.SetBlob(key_conv_pd, conv_pd_);
      }
    }

    return conv_pd_;
  }

  std::shared_ptr<forward_t> AcquireConvolution() {
    auto prim_key = key_ + "@conv_p";
    auto conv_p =
        std::static_pointer_cast<forward_t>(dev_ctx_.GetBlob(prim_key));
    if (conv_p == nullptr) {
      conv_p = std::make_shared<forward_t>(*conv_pd_);

      dev_ctx_.SetBlob(prim_key, conv_p);
    }
    return conv_p;
  }

  std::shared_ptr<backward_weights_t> AcquireConvolutionBackwardWeights() {
    auto prim_key = key_ + "@conv_bwd_weights_p";
    auto conv_bwd_weights_p = std::static_pointer_cast<backward_weights_t>(
        dev_ctx_.GetBlob(prim_key));
    if (conv_bwd_weights_p == nullptr) {
      // create backward conv primitive for weights
      conv_bwd_weights_p =
          std::make_shared<backward_weights_t>(*conv_bwd_weights_pd_);
      dev_ctx_.SetBlob(prim_key, conv_bwd_weights_p);
    }
    return conv_bwd_weights_p;
  }

  std::shared_ptr<backward_data_t> AcquireConvolutionBackwardData() {
    auto prim_key = key_ + "@conv_bwd_data_p";
    auto conv_bwd_data_p =
        std::static_pointer_cast<backward_data_t>(dev_ctx_.GetBlob(prim_key));
    if (conv_bwd_data_p == nullptr) {
      conv_bwd_data_p = std::make_shared<backward_data_t>(*conv_bwd_data_pd_);
      dev_ctx_.SetBlob(prim_key, conv_bwd_data_p);
    }
    return conv_bwd_data_p;
  }

 private:
  std::shared_ptr<typename forward_t::primitive_desc> conv_pd_;
  std::shared_ptr<typename backward_weights_t::primitive_desc>
      conv_bwd_weights_pd_;
  std::shared_ptr<typename backward_data_t::primitive_desc> conv_bwd_data_pd_;
};

using ConvMKLDNNHandler =
    ConvMKLDNNTemplateHandler<mkldnn::convolution_forward,
                              mkldnn::convolution_backward_data,
                              mkldnn::convolution_backward_weights>;

using ConvTransposeMKLDNNHandler =
    ConvMKLDNNTemplateHandler<mkldnn::deconvolution_forward,
                              mkldnn::deconvolution_backward_data,
                              mkldnn::deconvolution_backward_weights>;
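// Hedged forward-path sketch for ConvMKLDNNHandler (simplified; the kernel
// wiring, variable names and omitted INT8/residual arguments are assumptions):
//
//   ConvMKLDNNHandler handler(dev_ctx, mkldnn_engine, key);
//   auto conv_pd = handler.AcquireConvolutionPrimitiveDescriptor(
//       src_md, weights_md, boost::none, dst_md, strides, paddings,
//       mkldnn_engine, fuse_activation, fuse_alpha, fuse_beta,
//       fuse_residual_conn, mkldnn::prop_kind::forward_inference);
//   auto src_memory_p =
//       handler.AcquireSrcMemoryFromPrimitive(user_src_memory_p, pipeline);
//   auto weights_memory_p = handler.AcquireWeightsMemoryFromPrimitive(
//       user_weights_memory_p, pipeline, is_test);
//   auto dst_memory_p =
//       handler.AcquireDstMemoryFromPrimitive(to_void_cast<T>(output_data));
//   auto conv_p = handler.AcquireConvolution();
//   mkldnn::stream astream(mkldnn_engine);
//   conv_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory_p},
//                             {MKLDNN_ARG_WEIGHTS, *weights_memory_p},
//                             {MKLDNN_ARG_DST, *dst_memory_p}});
//   astream.wait();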

template <typename T>
static std::shared_ptr<mkldnn::memory> SetDstMemory(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const std::shared_ptr<ConvMKLDNNHandler>& handler) {
  T* output_data =
      output->mutable_data<T>(ctx.GetPlace(), handler->GetDstMemorySize());
  std::shared_ptr<mkldnn::memory> dst_memory_p =
      handler->AcquireDstMemoryFromPrimitive(to_void_cast<T>(output_data));
  return dst_memory_p;
}

template <typename T>
static std::shared_ptr<mkldnn::memory> SetDstMemory(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const framework::Tensor* residual_param,
    const mkldnn::memory::desc& user_residual_md,
    const std::shared_ptr<ConvMKLDNNHandler>& handler,
    std::vector<mkldnn::primitive>* pipeline) {
  const T* residual_param_data = residual_param->data<T>();
  PADDLE_ENFORCE(residual_param_data != nullptr,
                 "Provide data if you want MKLDNN conv+elementwise_add fusion");
  std::shared_ptr<mkldnn::memory> user_residual_memory_p =
      handler->AcquireResidualDataMemory(user_residual_md,
                                         to_void_cast<T>(residual_param_data));
  T* output_data = output->mutable_data<T>(ctx.GetPlace());
  std::shared_ptr<mkldnn::memory> dst_memory_p =
      handler->AcquireDstMemoryFromResidualDataMemory(
          user_residual_memory_p, to_void_cast<T>(output_data), *pipeline);
  return dst_memory_p;
}

template <typename T>
static void SetDstMemoryHandler(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const std::shared_ptr<ConvMKLDNNHandler>& handler,
    std::shared_ptr<mkldnn::memory> dst_memory_p) {
  T* output_data =
      output->mutable_data<T>(ctx.GetPlace(), handler->GetDstMemorySize());
  dst_memory_p->set_data_handle(to_void_cast<T>(output_data));
}

template <typename T>
static void SetDstMemoryQuantized(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    std::vector<int64_t> dst_tz, const mkldnn::engine& engine,
    std::shared_ptr<mkldnn::memory::desc>& dst_md,  // NOLINT
    std::shared_ptr<mkldnn::memory>& dst_memory,    // NOLINT
    MKLDNNMemoryFormat output_format) {
  T* output_data = output->mutable_data<T>(ctx.GetPlace());
  const size_t dst_dims = dst_tz.size();
  MKLDNNMemoryFormat dst_fmt;
  PADDLE_ENFORCE_LE(dst_dims, 5,
                    "Dst memory for quantization can not have dims > 5");
  dst_fmt = platform::MKLDNNFormatForSize(dst_dims, output_format);

  auto tmp_dst_md = platform::MKLDNNMemDesc(
      {dst_tz}, paddle::framework::ToMKLDNNDataType(
                    framework::DataTypeTrait<T>::DataType()),
      dst_fmt);
  dst_md.reset(new mkldnn::memory::desc(tmp_dst_md));
  dst_memory.reset(
      new mkldnn::memory(*dst_md, engine, to_void_cast<T>(output_data)));
}

}  // namespace platform
}  // namespace paddle