/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "boost/optional.hpp"
#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/pool_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace platform {

using framework::DataLayout;
using framework::Tensor;
using user_function = std::function<std::shared_ptr<float>(const float*)>;
using memory = mkldnn::memory;

template <typename T, typename TForward,
          typename TBackward = mkldnn_dummy_primitive>
class MKLDNNHandlerT {
 public:
  MKLDNNHandlerT(const MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
                 platform::Place cpu_place, const std::string& base_key)
      : dev_ctx_(dev_ctx),
        engine_(engine),
        place_(cpu_place),
        key_common_(base_key),
        fwd_pd_(nullptr),
        bwd_pd_(nullptr) {
    if (platform::MKLDNNDeviceContext::tls().get_cur_mkldnn_session_id() !=
        platform::MKLDNNDeviceContextThreadLocals::kMKLDNNSessionID_Default) {
      key_ = key_common_;
    } else {
      key_ = key_common_ + "-t:" + ThreadIDasStr();
    }
  }

  std::shared_ptr<TForward> AcquireForwardPrimitive() {
    const std::string key_p = key_ + "@forward_p";
    auto forward_p =
        std::static_pointer_cast<TForward>(dev_ctx_.GetBlob(key_p));
    if (forward_p == nullptr) {
      forward_p = std::make_shared<TForward>(*fwd_pd_);
      dev_ctx_.SetBlob(key_p, forward_p);
    }
    return forward_p;
  }

  std::shared_ptr<TBackward> AcquireBackwardPrimitive() {
    const std::string key_p = key_ + "@backward_p";
    auto backward_p =
        std::static_pointer_cast<TBackward>(dev_ctx_.GetBlob(key_p));
    if (backward_p == nullptr) {
      backward_p = std::make_shared<TBackward>(*bwd_pd_);
      dev_ctx_.SetBlob(key_p, backward_p);
    }
    return backward_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(
        fwd_pd_->src_desc(), to_void_cast<T>(input_data), "@src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output) {
    T* ptr = output->mutable_data<T>(place_, fwd_pd_->dst_desc().get_size());
    return this->AcquireMemoryFromPrimitive(fwd_pd_->dst_desc(), ptr,
                                            "@dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      const framework::Tensor* output) {
    const T* output_data = output->data<T>();
    return this->AcquireMemoryFromPrimitive(
        bwd_pd_->dst_desc(), to_void_cast<T>(output_data), "@bwd-dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemory(
      const framework::Tensor* diffdst) {
    const T* ptr = diffdst->data<T>();
    return this->AcquireMemoryFromPrimitive(
        bwd_pd_->diff_dst_desc(), to_void_cast<T>(ptr), "@diff_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemory(
      framework::Tensor* diffsrc) {
    T* ptr =
        diffsrc->mutable_data<T>(place_, bwd_pd_->diff_src_desc().get_size());
    return this->AcquireMemoryFromPrimitive(bwd_pd_->diff_src_desc(), ptr,
                                            "@diff_src_mem_p");
  }

 protected:
  bool isCached() {
    const std::string key_pd = key_common_ + "@forward_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));

    const std::string key_p = key_ + "@forward_p";
    return (dev_ctx_.GetBlob(key_p) != nullptr);
  }

  template <typename... Args>
  void AcquireForwardPrimitiveDescriptor(Args&&... args) {
    // Forward PD has to be passed to Grad op that
    // may be executed by a different thread, hence
    // for that one we use key that does not contain TID
    const std::string key_pd = key_common_ + "@forward_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (fwd_pd_ == nullptr) {
      static std::mutex acquire_barrier;
      std::lock_guard<std::mutex> block_threads_until_finish_this_job(
          acquire_barrier);
      fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
          dev_ctx_.GetBlob(key_pd));
      if (fwd_pd_ == nullptr) {
        auto fwd_desc = typename TForward::desc(std::forward<Args>(args)...);
        fwd_pd_ = std::make_shared<typename TForward::primitive_desc>(fwd_desc,
                                                                      engine_);
        dev_ctx_.SetBlob(key_pd, fwd_pd_);
      }
    }
  }

  template <typename... Args>
  void AcquireBackwardPrimitiveDescriptor(Args&&... args) {
    const std::string key_fwd_pd = key_common_ + "@forward_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_fwd_pd));
    PADDLE_ENFORCE_NOT_NULL(fwd_pd_);
    const std::string key_pd = key_ + "@backward_pd";
    bwd_pd_ = std::static_pointer_cast<typename TBackward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (bwd_pd_ == nullptr) {
      auto bwd_desc = typename TBackward::desc(std::forward<Args>(args)...);
      bwd_pd_ = std::make_shared<typename TBackward::primitive_desc>(
          bwd_desc, engine_, *fwd_pd_);
      dev_ctx_.SetBlob(key_pd, bwd_pd_);
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, void* ptr, const std::string& suffix) {
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  const MKLDNNDeviceContext& dev_ctx_;
  mkldnn::engine engine_;
  platform::Place place_;
  std::string key_;
  std::string key_common_;
  std::shared_ptr<typename TForward::primitive_desc> fwd_pd_;
  std::shared_ptr<typename TBackward::primitive_desc> bwd_pd_;
};
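
// Usage sketch (illustrative only, not part of this header): an op kernel
// built on a MKLDNNHandlerT specialization, e.g. the ActivationMKLDNNHandler
// defined further below, typically acquires its memories and primitive from
// the handler and executes them on a stream. The kernel-side variables
// (dev_ctx, ctx, x, out) are assumptions made for this example.
//
//   ActivationMKLDNNHandler<float> handler(
//       framework::vectorize(x->dims()), mkldnn::algorithm::eltwise_relu,
//       0.0f, 0.0f, x->format(), dev_ctx, ctx.GetPlace(), ctx.InputName("X"));
//   auto src_memory_p = handler.AcquireSrcMemory(x);
//   auto dst_memory_p = handler.AcquireDstMemory(out);
//   auto activation_p = handler.AcquireForwardPrimitive();
//   mkldnn::stream astream(dev_ctx.GetEngine());
//   activation_p->execute(astream, {{MKLDNN_ARG_FROM, *src_memory_p},
//                                   {MKLDNN_ARG_TO, *dst_memory_p}});
//   astream.wait();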

// TODO(grygielski) this class will be deleted later.
class MKLDNNHandler {
 public:
  MKLDNNHandler(const MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
                const std::string& base_key)
      : dev_ctx_(dev_ctx), engine_(engine), key_common_(base_key) {
    if (platform::MKLDNNDeviceContext::tls().get_cur_mkldnn_session_id() !=
        platform::MKLDNNDeviceContextThreadLocals::kMKLDNNSessionID_Default) {
      key_ = key_common_;
    } else {
      key_ = key_common_ + "-t:" + ThreadIDasStr();
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_diff_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_diff_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, void* ptr, const std::string& suffix) {
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  // This incarnation of AcquireMemory can call a user function, e.g. a custom
  // reorder or preprocessing routine, if needed.
  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const mkldnn::memory::desc& md, void* ptr, const std::string& suffix,
      user_function custom_func = {}) {
    /*Generate key*/
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      // Call custom reorder/preprocessing func if available
      if (custom_func) {
        auto reordered_data = custom_func(reinterpret_cast<const float*>(ptr));
        dev_ctx_.SetBlob(local_key + "-custom_reorder", reordered_data);
        ptr = reinterpret_cast<void*>(reordered_data.get());
      }

      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const std::vector<int64_t>& dims, const mkldnn::memory::data_type dtype,
      const MKLDNNMemoryFormat& fmt, void* ptr, const std::string& suffix) {
    /*Generate key*/
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto md = mkldnn::memory::desc(dims, dtype, fmt);

      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const std::shared_ptr<mkldnn::memory>& user_memory_p,
      const std::shared_ptr<mkldnn::memory>& target_memory_p,
      const std::string& suffix,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto local_key = key_ + suffix;
    auto key_reorder_p = key_ + suffix + "reorder_p";

    auto stored_reorder_p = std::static_pointer_cast<mkldnn::reorder>(
        dev_ctx_.GetBlob(key_reorder_p));

    if (stored_reorder_p) {
      pipeline.push_back(*stored_reorder_p);
    } else {
      auto reorder_p =
          std::make_shared<mkldnn::reorder>(*user_memory_p, *target_memory_p);
      dev_ctx_.SetBlob(key_reorder_p, reorder_p);
      mkldnn::stream astream(engine_);
      reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                   {MKLDNN_ARG_TO, *target_memory_p}});
      astream.wait();
    }

    return target_memory_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      mkldnn::memory::desc& md,       // NOLINT
      mkldnn::memory::desc& user_md,  // NOLINT
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      const std::string& suffix,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f}, int mask = 0) {
    // create reorder primitive if the input format is not the preferred one
    auto local_key = key_ + suffix;
    auto key_reorder_p = key_ + suffix + "reorder_p";

    auto target_memory_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));

    mkldnn::stream astream(engine_);

    if (target_memory_p == nullptr) {
      target_memory_p = user_memory_p;
      if (md != user_md) {
        target_memory_p = std::make_shared<mkldnn::memory>(md, engine_);
        std::shared_ptr<mkldnn::reorder::primitive_desc> reorder_pd;
        if (is_INT8) {
          mkldnn::primitive_attr
              attri;  // attribute for int8 weights and bias data reorder.
          attri.set_output_scales(mask, scale_data);

          reorder_pd = std::shared_ptr<mkldnn::reorder::primitive_desc>(
              new mkldnn::reorder::primitive_desc(*user_memory_p,
                                                  *target_memory_p, attri));
        } else {
          reorder_pd = std::shared_ptr<mkldnn::reorder::primitive_desc>(
              new mkldnn::reorder::primitive_desc(*user_memory_p,
                                                  *target_memory_p));
        }
        auto reorder_p =
            std::shared_ptr<mkldnn::reorder>(new mkldnn::reorder(*reorder_pd));
        dev_ctx_.SetBlob(key_reorder_p, reorder_p);

        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
      dev_ctx_.SetBlob(local_key, target_memory_p);
    } else if (!is_persistent) {
      // Make reorder if needed
      auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
          dev_ctx_.GetBlob(key_reorder_p));
      if (reorder_p != nullptr) {
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
    }
    return target_memory_p;
  }

 protected:
  const MKLDNNDeviceContext& dev_ctx_;
  mkldnn::engine engine_;
  std::string key_;
  std::string key_common_;
};

template <typename T>
class BinaryMKLDNNHandler : public platform::MKLDNNHandlerT<T, dnnl::binary> {
 public:
  BinaryMKLDNNHandler(const MKLDNNDeviceContext& dev_ctx,
                      const mkldnn::engine engine, platform::Place cpu_place,
                      const Tensor* x, const Tensor* y, Tensor* z,
                      const std::string& uniq_name)
      : platform::MKLDNNHandlerT<T, dnnl::binary>(
            dev_ctx, engine, cpu_place,
            platform::CreateKey(framework::vectorize(x->dims()), uniq_name)) {
    // broadcasting combined with in-place may require a longer key
    auto rankdiff = x->dims().size() - y->dims().size();
    if (rankdiff > 0) {
      this->key_ += std::to_string(rankdiff);
      this->key_common_ += std::to_string(rankdiff);
    }

    if (!this->isCached()) {
      PADDLE_ENFORCE_EQ(
          x->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for X tensor"));
      PADDLE_ENFORCE_NE(
          x->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for X tensor"));

      PADDLE_ENFORCE_EQ(
          y->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for Y tensor"));
      PADDLE_ENFORCE_NE(
          y->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for Y tensor"));

      const auto src_x_tz = framework::vectorize(x->dims());
      const auto src_y_tz = framework::vectorize(y->dims());
      const auto dst_tz = framework::vectorize(z->dims());

      const auto src0_md = dnnl::memory::desc(
          src_x_tz, platform::MKLDNNGetDataType<T>(), x->format());
      auto src1_md = dnnl::memory::desc(
          src_y_tz, platform::MKLDNNGetDataType<T>(), y->format());
      if (rankdiff > 0) {
        std::vector<int64_t> ones(rankdiff, 1);
        std::vector<int64_t> dims1_ex(src_y_tz);
        dims1_ex.insert(dims1_ex.begin(), ones.begin(), ones.end());
        src1_md = src1_md.reshape(dims1_ex);
      }
      const auto dst_md = memory::desc(dst_tz, platform::MKLDNNGetDataType<T>(),
                                       MKLDNNMemoryFormat::any);

      this->AcquireForwardPrimitiveDescriptor(dnnl::algorithm::binary_add,
                                              src0_md, src1_md, dst_md);
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireSecondSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(
        this->fwd_pd_->src1_desc(), to_void_cast<T>(input_data), "@src1_mem_p");
  }
};
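
// Usage sketch (illustrative only): an elementwise add kernel driving
// BinaryMKLDNNHandler would look roughly like the lines below; the kernel
// variables (dev_ctx, mkldnn_engine, ctx, x, y, z) are assumptions made for
// this example, and the argument indices follow the dnnl::binary convention.
//
//   BinaryMKLDNNHandler<T> handler(dev_ctx, mkldnn_engine, ctx.GetPlace(),
//                                  x, y, z, ctx.OutputName("Out"));
//   auto src_x_memory_p = handler.AcquireSrcMemory(x);
//   auto src_y_memory_p = handler.AcquireSecondSrcMemory(y);
//   auto dst_memory_p = handler.AcquireDstMemory(z);
//   auto binary_p = handler.AcquireForwardPrimitive();
//   mkldnn::stream astream(mkldnn_engine);
//   binary_p->execute(astream, {{DNNL_ARG_SRC_0, *src_x_memory_p},
//                               {DNNL_ARG_SRC_1, *src_y_memory_p},
//                               {DNNL_ARG_DST, *dst_memory_p}});
//   astream.wait();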

class SumMKLDNNHandler : public MKLDNNHandler {
 public:
  SumMKLDNNHandler(const platform::MKLDNNDeviceContext& dev_ctx,
                   mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key) {}

  std::shared_ptr<mkldnn::sum::primitive_desc> AcquireSumPrimitiveDescriptor(
      const std::vector<std::shared_ptr<mkldnn::memory>>& src_mems,
      const std::vector<float>& scales, const mkldnn::memory::desc& dst_md) {
    const std::string key_sum_pd = key_ + "@sum_pd";

    sum_pd_ = std::static_pointer_cast<mkldnn::sum::primitive_desc>(
        dev_ctx_.GetBlob(key_sum_pd));
    if (sum_pd_ == nullptr) {
      // Get vector of inputs primitive descriptors
      std::vector<mkldnn::memory::desc> src_ds;
      for (auto& input_mem : src_mems) {
        src_ds.push_back(input_mem->get_desc());
      }

      sum_pd_.reset(
          new mkldnn::sum::primitive_desc(dst_md, scales, src_ds, engine_));
      dev_ctx_.SetBlob(key_sum_pd, sum_pd_);
    }

    return sum_pd_;
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemoryFromPrimitive(void* ptr) {
    return this->AcquireMemoryFromPrimitive(sum_pd_->dst_desc(), ptr,
                                            "@dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireSecondSrcMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_src2_mem_p");
  }

  std::shared_ptr<mkldnn::sum> AcquireSum() {
    auto prim_key = key_ + "@sum_p";
    auto sum_p =
        std::static_pointer_cast<mkldnn::sum>(dev_ctx_.GetBlob(prim_key));
    if (sum_p == nullptr) {
      sum_p = std::make_shared<mkldnn::sum>(*sum_pd_);
      dev_ctx_.SetBlob(prim_key, sum_p);
    }
    return sum_p;
  }

 private:
  std::shared_ptr<mkldnn::sum::primitive_desc> sum_pd_;
};

template <typename T>
class ActivationMKLDNNHandler
    : public MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                            mkldnn::eltwise_backward> {
 public:
  ActivationMKLDNNHandler(const std::vector<int64_t>& dims,
                          mkldnn::algorithm algorithm, float alpha, float beta,
                          const MKLDNNMemoryFormat fmt,
                          const platform::MKLDNNDeviceContext& dev_ctx,
                          platform::Place cpu_place,
                          const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                                 mkldnn::eltwise_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dims, "a", algorithm, unique_name)) {
    auto md = mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);

    this->AcquireForwardPrimitiveDescriptor(mkldnn::prop_kind::forward_training,
                                            algorithm, md, alpha, beta);
  }

  ActivationMKLDNNHandler(const std::vector<int64_t>& dims,
                          mkldnn::algorithm algorithm, float alpha, float beta,
                          const MKLDNNMemoryFormat fmt,
                          const MKLDNNMemoryFormat diff_fmt,
                          const platform::MKLDNNDeviceContext& dev_ctx,
                          platform::Place cpu_place,
                          const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                                 mkldnn::eltwise_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dims, "a", algorithm, unique_name)) {
    auto diff_dst_md = platform::MKLDNNMemDesc(
        dims, platform::MKLDNNGetDataType<T>(), diff_fmt);
    auto src_md =
        platform::MKLDNNMemDesc(dims, platform::MKLDNNGetDataType<T>(), fmt);

    this->AcquireBackwardPrimitiveDescriptor(algorithm, diff_dst_md, src_md,
                                             alpha, beta);
  }

  std::shared_ptr<mkldnn::memory> AcquireBackwardSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(this->bwd_pd_->src_desc(),
                                            to_void_cast<T>(input_data),
                                            "@bwd-src_mem_p");
  }
};

template <typename T>
class LRNMKLDNNHandler
    : public MKLDNNHandlerT<T, mkldnn::lrn_forward, mkldnn::lrn_backward> {
 public:
  LRNMKLDNNHandler(const std::vector<int64_t>& dims, const int n,
                   const float alpha, const float beta, const float k,
                   const MKLDNNMemoryFormat fmt, bool is_test,
                   const platform::MKLDNNDeviceContext& dev_ctx,
                   platform::Place cpu_place, const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::lrn_forward, mkldnn::lrn_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dims, unique_name)) {
    auto src_md =
        mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
    this->AcquireForwardPrimitiveDescriptor(
        is_test ? mkldnn::prop_kind::forward_inference
                : mkldnn::prop_kind::forward_training,
        mkldnn::algorithm::lrn_across_channels, src_md, n, alpha, beta, k);
  }

  LRNMKLDNNHandler(const std::vector<int64_t>& dims, const int n,
                   const float alpha, const float beta, const float k,
                   const MKLDNNMemoryFormat fmt,
                   const MKLDNNMemoryFormat diff_fmt,
                   const platform::MKLDNNDeviceContext& dev_ctx,
                   platform::Place cpu_place, const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::lrn_forward, mkldnn::lrn_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dims, unique_name)) {
    auto src_md =
        mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
    auto diff_md =
        mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), diff_fmt);

    this->AcquireBackwardPrimitiveDescriptor(
        mkldnn::algorithm::lrn_across_channels, src_md, diff_md, n, alpha, beta,
        k);
  }

  std::shared_ptr<mkldnn::memory> AcquireWorkspaceMemory(
      framework::Tensor* workspace) {
    T* ptr = workspace->mutable_data<T>(
        this->place_, this->fwd_pd_->workspace_desc().get_size());
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->workspace_desc(),
                                            ptr, "@wrk_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireBackwardWorkspaceMemory(
      const framework::Tensor* workspace) {
    const T* workspace_data = workspace->data<T>();
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->workspace_desc(),
                                            to_void_cast<T>(workspace_data),
                                            "@bwd-wrk_mem_p");
  }
};

template <typename T>
class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
                                                   mkldnn::pooling_backward> {
 public:
  PoolingMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                       const MKLDNNDeviceContext& dev_ctx,
                       const mkldnn::engine mkldnn_engine,
                       platform::Place cpu_place, const Tensor* input,
                       Tensor* output, const std::string& unique_name)
      : platform::MKLDNNHandlerT<T, mkldnn::pooling_forward,
                                 mkldnn::pooling_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(framework::vectorize(input->dims()),
                                framework::ToMKLDNNDataType(input->type()),
                                unique_name)) {
    if (!this->isCached()) {
      PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
                        platform::errors::InvalidArgument(
                            "Wrong layout set for Input tensor"));
      PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
                        platform::errors::InvalidArgument(
                            "Wrong format set for Input tensor"));

      const std::string pooling_type = ctx.Attr<std::string>("pooling_type");

      std::vector<int> ksize_temp = ctx.Attr<std::vector<int>>("ksize");
      std::vector<int64_t> ksize(begin(ksize_temp), end(ksize_temp));

      std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
      std::vector<int64_t> strides(begin(strides_temp), end(strides_temp));

      std::vector<int> paddings_temp = ctx.Attr<std::vector<int>>("paddings");
      std::vector<int64_t> paddings(begin(paddings_temp), end(paddings_temp));

      const bool global_pooling = ctx.Attr<bool>("global_pooling");
      const std::string padding_algorithm =
          ctx.Attr<std::string>("padding_algorithm");

      // Only 2D pooling is supported now
      PADDLE_ENFORCE_EQ(ksize.size(), 2,
                        platform::errors::InvalidArgument(
                            "ksize must be 2D, i.e. 2D pooling"));
      PADDLE_ENFORCE_EQ(pooling_type == "max" || pooling_type == "avg", true,
                        platform::errors::InvalidArgument(
                            "pooling_type must be 'max' or 'avg'"));
      PADDLE_ENFORCE_EQ(input->dims().size(), 4,
                        platform::errors::InvalidArgument(
                            "Input dim must be with 4, i.e. NCHW"));

      const auto input_dims = input->dims();
      framework::DDim data_dims =
          framework::slice_ddim(input_dims, 2, input_dims.size());

      if (global_pooling) {
        operators::UpdateKsize(&ksize, data_dims);
      }

      operators::UpdatePadding(&paddings, global_pooling, 0, padding_algorithm,
                               data_dims, strides, ksize);

      const auto src_tz = paddle::framework::vectorize(input->dims());
      const auto dst_tz = paddle::framework::vectorize(output->dims());

      const auto is_test = ctx.Attr<bool>("is_test");

      const auto dt = framework::ToMKLDNNDataType(input->type());
      const auto fmt = input->format();

      const auto exclude_padding = ctx.Attr<bool>("exclusive");

      const auto src_md = mkldnn::memory::desc(src_tz, dt, fmt);
      /* create memory descriptor for pooling without specified format
       * ('any') which lets a primitive (pooling in this case) choose
       * the memory format preferred for best performance
       */

      const auto dst_md =
          platform::MKLDNNMemDesc(dst_tz, dt, MKLDNNMemoryFormat::any);

      auto mkldnn_paddings = ToMkldnnPadding(paddings);

      const bool ceil_mode = ctx.Attr<bool>("ceil_mode");

      if (ceil_mode) {
        CorrectOutputSize(src_tz, dst_tz, ksize, paddings, strides,
                          mkldnn_paddings[1]);
      }
      this->AcquireForwardPrimitiveDescriptor(
          is_test ? mkldnn::prop_kind::forward_inference
                  : mkldnn::prop_kind::forward_training,
          pooling_type == "max"
              ? mkldnn::algorithm::pooling_max
              : (exclude_padding
                     ? mkldnn::algorithm::pooling_avg_exclude_padding
                     : mkldnn::algorithm::pooling_avg_include_padding),
          src_md, dst_md, strides, ksize, mkldnn_paddings[0],
          mkldnn_paddings[1]);
    }
  }

  PoolingMKLDNNHandler(
      const std::vector<int64_t>& diff_dst_dims,
      const std::vector<int64_t>& diff_src_dims,
      const std::vector<int64_t>& ksize, const std::vector<int64_t>& strides,
      const std::vector<int64_t>& paddings, const std::string& pooling_type,
      bool ceil_mode, const MKLDNNMemoryFormat fmt,
      const MKLDNNMemoryFormat diff_dst_fmt, mkldnn::memory::data_type dt,
      const platform::MKLDNNDeviceContext& dev_ctx, platform::Place cpu_place,
      const std::string& unique_name, bool exclude_padding)
      : platform::MKLDNNHandlerT<T, mkldnn::pooling_forward,
                                 mkldnn::pooling_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(diff_src_dims, dt, unique_name)) {
    auto diff_dst_md = mkldnn::memory::desc(
        diff_dst_dims, platform::MKLDNNGetDataType<T>(), diff_dst_fmt);
    auto diff_src_md =
        mkldnn::memory::desc(diff_src_dims, platform::MKLDNNGetDataType<T>(),
                             MKLDNNMemoryFormat::any);

    auto mkldnn_paddings = ToMkldnnPadding(paddings);

    this->AcquireBackwardPrimitiveDescriptor(
        pooling_type == "max"
            ? mkldnn::algorithm::pooling_max
            : (exclude_padding
                   ? mkldnn::algorithm::pooling_avg_exclude_padding
                   : mkldnn::algorithm::pooling_avg_include_padding),
        diff_src_md, diff_dst_md, strides, ksize, mkldnn_paddings[0],
        mkldnn_paddings[1]);
  }

  std::shared_ptr<mkldnn::memory> AcquireWorkspaceMemory(void) {
    mkldnn::memory::desc workspace_md = this->fwd_pd_->workspace_desc();
    // Pooling PD has to be passed to Grad op that
    // may be executed by a different thread, hence
    // for that one we use key that does not contain TID
    auto local_key = this->key_common_ + "@workspace";
    auto mem_p = std::static_pointer_cast<mkldnn::memory>(
        this->dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      static std::mutex acquire_barrier;
      std::lock_guard<std::mutex> block_threads_until_finish_this_job(
          acquire_barrier);
      mem_p = std::static_pointer_cast<mkldnn::memory>(
          this->dev_ctx_.GetBlob(local_key));
      if (mem_p == nullptr) {
        mem_p = std::make_shared<mkldnn::memory>(workspace_md, this->engine_);
        this->dev_ctx_.SetBlob(local_key, mem_p);
      }
    }
    return mem_p;
  }

 private:
  static inline int ComputeCeiledOutput(int input_size, int kernel_size,
                                        int padding, int stride) {
    return (input_size - kernel_size + 2 * padding) / stride + 1;
  }

  static inline void CorrectOutputSize(
      const std::vector<int64_t>& src_tz, const std::vector<int64_t>& dst_tz,
      const std::vector<int64_t>& kernel_size,
      const std::vector<int64_t>& paddings, const std::vector<int64_t>& strides,
      std::vector<int64_t>& right_bot_padding) {  // NOLINT
    for (size_t i = 0; i < right_bot_padding.size(); i++) {
      int desired_size = ComputeCeiledOutput(src_tz[i + 2], kernel_size[i],
                                             paddings[i], strides[i]);
      if (desired_size != dst_tz[i + 2]) {
        right_bot_padding[i] += strides[i] - 1;
      }
    }
  }
};

template <typename T>
class TransposeMKLDNNHandler : public MKLDNNHandler {
 public:
  TransposeMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                         std::vector<int>& axis,      // NOLINT
                         const platform::MKLDNNDeviceContext& dev_ctx,
                         mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        dims_(dims),
        axis_(axis),
        logical_axis_(dims.size(), 0) {}

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const MKLDNNMemoryFormat& fmt, void* ptr) {
    auto local_key = key_ + "@user_src_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      // Make memory descriptor using input format, unless it
      // cannot be trusted (nchw) then make up memory fmt manually
      for (size_t i = 0; i < logical_axis_.size(); ++i) {
        logical_axis_[i] = i;
      }

      auto src_md = fmt != MKLDNNMemoryFormat::nchw
                        ? platform::MKLDNNMemDesc(
                              dims_, platform::MKLDNNGetDataType<T>(), fmt)
                        : Axis2MemoryDesc(dims_, logical_axis_);
      mem_p = std::make_shared<mkldnn::memory>(src_md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output,
                                                   platform::Place place) {
    auto local_key = key_ + "@user_dst_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto dst_md = Axis2MemoryDesc(dims_, axis_);

      auto dst_data = output->mutable_data<T>(place, dst_md.get_size());

      mem_p = std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      auto dst_data = output->mutable_data<T>(place);
      mem_p->set_data_handle(dst_data);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::reorder> AcquireTranspose(
      std::shared_ptr<mkldnn::memory> dst_memory_p,
      std::shared_ptr<mkldnn::memory> src_memory_p) {
    auto prim_key = key_ + "@transpose_p";
    auto transpose_p =
        std::static_pointer_cast<mkldnn::reorder>(dev_ctx_.GetBlob(prim_key));
    if (transpose_p == nullptr) {
      transpose_p =
          std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
      dev_ctx_.SetBlob(prim_key, transpose_p);
    }
    return transpose_p;
  }

 protected:
  mkldnn::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz,  // NOLINT
                                       std::vector<int>& axis          // NOLINT
                                       ) {
    size_t ndims = axis.size();

    std::vector<int64_t> strides(ndims);
    unsigned int total_stride = 1;
    for (int i = ndims - 1; i >= 0; --i) {
      strides[axis[i]] = total_stride;
      total_stride *= nchw_tz[axis[i]];
    }
    mkldnn::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
                               strides);

    return mem_d;
  }
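
  // Worked example (illustration only): for nchw_tz = {2, 3, 4, 5} and
  // axis = {0, 2, 3, 1} (NCHW -> NHWC), the loop in Axis2MemoryDesc assigns
  //   strides[1] = 1, strides[3] = 3, strides[2] = 15, strides[0] = 60,
  // i.e. the descriptor keeps the logical NCHW dims but lays the data out
  // physically in NHWC order, so a plain reorder into such a memory performs
  // the transpose.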

 private:
  std::vector<int64_t> dims_;
  std::vector<int> axis_;
  std::vector<int> logical_axis_;
};

class ReorderMKLDNNHandler : public MKLDNNHandler {
 public:
  ReorderMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                       framework::proto::VarType::Type vtype,
                       mkldnn::memory::data_type dtype,
                       const platform::MKLDNNDeviceContext& dev_ctx,
                       mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        dims_(dims),
        vtype_(vtype),
        dtype_(dtype) {}

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const MKLDNNMemoryFormat& fmt, void* ptr) {
    return this->AcquireMemory(dims_, dtype_, fmt, ptr, "@user_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      framework::Tensor* output, const MKLDNNMemoryFormat& fmt,
      platform::Place place) {
    auto local_key = key_ + "@user_dst_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto dst_md = platform::MKLDNNMemDesc(dims_, dtype_, fmt);

      auto dst_data = output->mutable_data(place, vtype_);

      mem_p = std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      auto dst_data = output->mutable_data(place, vtype_);
      mem_p->set_data_handle(dst_data);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::reorder> AcquireReorder(
      std::shared_ptr<mkldnn::memory> dst_memory_p,
      std::shared_ptr<mkldnn::memory> src_memory_p) {
    auto prim_key = key_ + "@reorder_p";
    auto reorder_p =
        std::static_pointer_cast<mkldnn::reorder>(dev_ctx_.GetBlob(prim_key));
    if (reorder_p == nullptr) {
      reorder_p =
          std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
      dev_ctx_.SetBlob(prim_key, reorder_p);
    }
    return reorder_p;
  }

 private:
  std::vector<int64_t> dims_;
  framework::proto::VarType::Type vtype_;
  mkldnn::memory::data_type dtype_;
};
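
// Usage sketch (illustrative only): a kernel that only changes the memory
// format of a tensor could drive ReorderMKLDNNHandler as below; the kernel
// variables (dims, in, out, in_fmt, out_fmt, dev_ctx, ctx, key) and the exact
// data-type conversion call are assumptions made for this example.
//
//   ReorderMKLDNNHandler handler(dims, in->type(),
//                                framework::ToMKLDNNDataType(in->type()),
//                                dev_ctx, dev_ctx.GetEngine(), key);
//   auto src_memory_p =
//       handler.AcquireSrcMemory(in_fmt, to_void_cast<T>(in->data<T>()));
//   auto dst_memory_p = handler.AcquireDstMemory(out, out_fmt, ctx.GetPlace());
//   auto reorder_p = handler.AcquireReorder(dst_memory_p, src_memory_p);
//   mkldnn::stream astream(dev_ctx.GetEngine());
//   reorder_p->execute(astream, *src_memory_p, *dst_memory_p);
//   astream.wait();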

template <typename T>
struct convolutional_algorithm;

template <>
struct convolutional_algorithm<mkldnn::convolution_forward> {
  static constexpr mkldnn::algorithm T = mkldnn::algorithm::convolution_direct;
};

template <>
struct convolutional_algorithm<mkldnn::deconvolution_forward> {
  static constexpr mkldnn::algorithm T =
      mkldnn::algorithm::deconvolution_direct;
};

template <class forward_t, class backward_data_t, class backward_weights_t>
class ConvMKLDNNTemplateHandler : public MKLDNNHandler {
 public:
  ConvMKLDNNTemplateHandler(const platform::MKLDNNDeviceContext& dev_ctx,
                            mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key) {}

  // TODO(jczaja): remove after conv int8 is adapted
  ConvMKLDNNTemplateHandler(
      std::shared_ptr<typename forward_t::primitive_desc> conv_pd,
      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
      const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key) {
    conv_pd_ = conv_pd;
  }

  ConvMKLDNNTemplateHandler(
      std::shared_ptr<typename forward_t::primitive_desc> conv_pd,
      std::shared_ptr<typename backward_data_t::primitive_desc>
          conv_bwd_data_pd,
      std::shared_ptr<typename backward_weights_t::primitive_desc>
          conv_bwd_weights_pd,
      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
      const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        conv_pd_(conv_pd),
        conv_bwd_weights_pd_(conv_bwd_weights_pd),
        conv_bwd_data_pd_(conv_bwd_data_pd) {
    // If we are in a Grad operator then update the key with a BWD suffix to
    // distinguish from FWD memory primitives
    key_ += "-BWD";
  }

  size_t GetDstMemorySize() const { return conv_pd_->dst_desc().get_size(); }

  MKLDNNMemoryFormat GetDstFormat() const {
    return paddle::platform::GetMKLDNNFormat(conv_pd_->dst_desc());
  }

  size_t GetDiffWeightsMemorySize() const {
    return conv_bwd_weights_pd_->diff_weights_desc().get_size();
  }

  size_t GetDiffSourceMemorySize() const {
    return conv_bwd_data_pd_->diff_src_desc().get_size();
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromWeightsPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto src_pd = conv_bwd_weights_pd_->src_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(src_pd, user_pd, user_memory_p,
                               "@weights-src_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemoryFromWeightsPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto diff_dst_pd = conv_bwd_weights_pd_->diff_dst_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p,
                               "@weights-diff_dst_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffWeightsMemoryFromWeightsPrimitive(
      void* ptr) {
    return this->AcquireMemoryFromPrimitive(
        conv_bwd_weights_pd_->diff_weights_desc(), ptr, "@diff_weights_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemoryFromDataPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto diff_dst_pd = conv_bwd_data_pd_->diff_dst_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p,
                               "@data-diff_dst_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryFromDataPrimitive(
      const std::shared_ptr<mkldnn::memory> user_weights_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto weights_pd = conv_bwd_data_pd_->weights_desc();
    auto user_pd = user_weights_memory_p->get_desc();
    return this->AcquireMemory(weights_pd, user_pd, user_weights_memory_p,
                               "@data-weights_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireResidualDataMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_residual_data_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemoryFromResidualDataMemory(
      const std::shared_ptr<mkldnn::memory>& user_residual_memory_p,
      void* dst_ptr,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    return this->AcquireMemory(user_residual_memory_p,
                               this->AcquireDstMemoryFromPrimitive(dst_ptr),
                               "@residual_data_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemoryFromDataPrimitive(
      void* ptr) {
    return this->AcquireMemoryFromPrimitive(conv_bwd_data_pd_->diff_src_desc(),
                                            ptr, "@diff_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemoryFromPrimitive(void* ptr) {
    return this->AcquireMemoryFromPrimitive(conv_pd_->dst_desc(), ptr,
                                            "@dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto src_pd = conv_pd_->src_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(src_pd, user_pd, user_memory_p, "@src_mem_p",
                               pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemory(
      const mkldnn::memory::desc& md, void* ptr,
      user_function custom_func = {}) {
    return this->AcquireMemory(md, ptr, "@user_weights_mem_p", custom_func);
  }

  std::shared_ptr<mkldnn::memory> AcquireBiasMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_bias_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_weights_memory_p,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f}, int mask = 0) {
    auto user_weights_pd = user_weights_memory_p->get_desc();
    auto weights_pd = conv_pd_->weights_desc();
    return this->AcquireMemory(
        weights_pd, user_weights_pd, user_weights_memory_p, "@weights_mem_p",
        pipeline, is_persistent, is_INT8, scale_data, mask);
  }

  std::shared_ptr<mkldnn::memory> AcquireBiasMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_bias_memory_p,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f},
      int mask = 0) {  // NOLINT
    auto user_bias_pd = user_bias_memory_p->get_desc();
    auto bias_pd = conv_pd_->bias_desc();
    return this->AcquireMemory(bias_pd, user_bias_pd, user_bias_memory_p,
                               "@bias_mem_p", pipeline, is_persistent, is_INT8,
                               scale_data, mask);
  }

  mkldnn::primitive_attr CreatePostOps(
      std::string fuse_activation, float fuse_alpha, float fuse_beta,
      bool fuse_residual_conn, const std::vector<float> output_shift_scale = {},
      float sum_scale = 1.0f) const {
    mkldnn::primitive_attr conv_attr;
    mkldnn::post_ops post_operations;
    if (output_shift_scale.size() > 0) {
      int mask = output_shift_scale.size() > 1 ? 1 << 1 : 0;
      conv_attr.set_output_scales(mask, output_shift_scale);
    }
    // Fusion with Elementwise layer relies on adding a sum post-operation with
    // the scale parameter. It is assumed that when fuse_residual_connection is
    // true, the output tensor contains the data coming from residual
    // connection. The result of this post_op is:
    // Output = scale * Output + Conv_Out.
    if (fuse_residual_conn) {
      post_operations.append_sum(sum_scale);
    }
    // Fusion with ReLU layer is executed through the PostOps feature. Create a
    // PostOps object and configure it to execute an eltwise relu operation.
    if (fuse_activation == "relu" || fuse_activation == "leaky_relu") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_relu,
                                     fuse_alpha, fuse_beta);
    } else if (fuse_activation == "relu6") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale,
                                     mkldnn::algorithm::eltwise_bounded_relu,
                                     fuse_alpha, fuse_beta);
    } else if (fuse_activation == "swish") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_swish,
                                     fuse_alpha, fuse_beta);
    }
    conv_attr.set_post_ops(post_operations);
    return conv_attr;
  }
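
  // For illustration: with fuse_residual_conn == true and
  // fuse_activation == "relu", the attribute returned by CreatePostOps above
  // is equivalent to
  //
  //   mkldnn::post_ops ops;
  //   ops.append_sum(sum_scale);
  //   ops.append_eltwise(1.0f, mkldnn::algorithm::eltwise_relu, fuse_alpha,
  //                      fuse_beta);
  //   conv_attr.set_post_ops(ops);
  //
  // so the fused convolution computes Output = sum_scale * Output + Conv_Out
  // and then applies ReLU in a single primitive.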

  std::shared_ptr<typename forward_t::primitive_desc>
  AcquireConvolutionPrimitiveDescriptor(
      const mkldnn::memory::desc& src, const mkldnn::memory::desc& weights,
      boost::optional<const mkldnn::memory::desc&> bias,
      const mkldnn::memory::desc& dst, const std::vector<int64_t>& strides,
      const std::vector<int64_t>& paddings, const mkldnn::engine& engine,
      const std::string& fuse_activation, float fuse_alpha, float fuse_beta,
      const bool fuse_residual_conn, mkldnn::prop_kind fwd_prop_kind,
      const std::vector<float> output_shift_scale = {},
      const float sum_scale = 1.0f) {
    // Conv PD has to be passed to Grad op that
    // may be executed by a different thread, hence
    // for that one we use key that does not contain TID
    const std::string key_conv_pd = key_common_ + "@conv_pd";

    conv_pd_ = std::static_pointer_cast<typename forward_t::primitive_desc>(
        dev_ctx_.GetBlob(key_conv_pd));

    if (conv_pd_ == nullptr) {
      static std::mutex acquire_barrier;
      std::lock_guard<std::mutex> block_threads_until_finish_this_job(
          acquire_barrier);

      conv_pd_ = std::static_pointer_cast<typename forward_t::primitive_desc>(
          dev_ctx_.GetBlob(key_conv_pd));
      if (conv_pd_ == nullptr) {
        mkldnn::memory::dims stride_dims = strides;

        auto mkldnn_paddings = ToMkldnnPadding(paddings);

        auto conv_desc =
            bias ? typename forward_t::desc(
                       fwd_prop_kind, convolutional_algorithm<forward_t>::T,
                       src, weights, *bias, dst, stride_dims,
                       mkldnn_paddings[0], mkldnn_paddings[1])
                 : typename forward_t::desc(
                       fwd_prop_kind, convolutional_algorithm<forward_t>::T,
                       src, weights, dst, stride_dims, mkldnn_paddings[0],
                       mkldnn_paddings[1]);

        mkldnn::primitive_attr conv_attr =
            CreatePostOps(fuse_activation, fuse_alpha, fuse_beta,
                          fuse_residual_conn, output_shift_scale, sum_scale);

        conv_pd_.reset(new typename forward_t::primitive_desc(
            conv_desc, conv_attr, engine));
        // Save conv_pd/src_memory/weights_memory for backward pass
        dev_ctx_.SetBlob(key_conv_pd, conv_pd_);
      }
    }

    return conv_pd_;
  }

  std::shared_ptr<forward_t> AcquireConvolution() {
    auto prim_key = key_ + "@conv_p";
    auto conv_p =
        std::static_pointer_cast<forward_t>(dev_ctx_.GetBlob(prim_key));
    if (conv_p == nullptr) {
      conv_p = std::make_shared<forward_t>(*conv_pd_);

      dev_ctx_.SetBlob(prim_key, conv_p);
    }
    return conv_p;
  }

  std::shared_ptr<backward_weights_t> AcquireConvolutionBackwardWeights() {
    auto prim_key = key_ + "@conv_bwd_weights_p";
    auto conv_bwd_weights_p = std::static_pointer_cast<backward_weights_t>(
        dev_ctx_.GetBlob(prim_key));
    if (conv_bwd_weights_p == nullptr) {
      // create backward conv primitive for weights
      conv_bwd_weights_p =
          std::make_shared<backward_weights_t>(*conv_bwd_weights_pd_);
      dev_ctx_.SetBlob(prim_key, conv_bwd_weights_p);
    }
    return conv_bwd_weights_p;
  }

  std::shared_ptr<backward_data_t> AcquireConvolutionBackwardData() {
    auto prim_key = key_ + "@conv_bwd_data_p";
    auto conv_bwd_data_p =
        std::static_pointer_cast<backward_data_t>(dev_ctx_.GetBlob(prim_key));
    if (conv_bwd_data_p == nullptr) {
      conv_bwd_data_p = std::make_shared<backward_data_t>(*conv_bwd_data_pd_);
      dev_ctx_.SetBlob(prim_key, conv_bwd_data_p);
    }
    return conv_bwd_data_p;
  }

 private:
  std::shared_ptr<typename forward_t::primitive_desc> conv_pd_;
  std::shared_ptr<typename backward_weights_t::primitive_desc>
      conv_bwd_weights_pd_;
  std::shared_ptr<typename backward_data_t::primitive_desc> conv_bwd_data_pd_;
};

using ConvMKLDNNHandler =
    ConvMKLDNNTemplateHandler<mkldnn::convolution_forward,
                              mkldnn::convolution_backward_data,
                              mkldnn::convolution_backward_weights>;

using ConvTransposeMKLDNNHandler =
    ConvMKLDNNTemplateHandler<mkldnn::deconvolution_forward,
                              mkldnn::deconvolution_backward_data,
                              mkldnn::deconvolution_backward_weights>;
template <typename T>
static std::shared_ptr<mkldnn::memory> SetDstMemory(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const std::shared_ptr<ConvMKLDNNHandler>& handler) {
  T* output_data =
      output->mutable_data<T>(ctx.GetPlace(), handler->GetDstMemorySize());
  std::shared_ptr<mkldnn::memory> dst_memory_p =
      handler->AcquireDstMemoryFromPrimitive(to_void_cast<T>(output_data));
  return dst_memory_p;
}

template <typename T>
static std::shared_ptr<mkldnn::memory> SetDstMemory(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const framework::Tensor* residual_param,
    const mkldnn::memory::desc& user_residual_md,
    const std::shared_ptr<ConvMKLDNNHandler>& handler,
    std::vector<mkldnn::primitive>* pipeline) {
  const T* residual_param_data = residual_param->data<T>();
  PADDLE_ENFORCE_NOT_NULL(
      residual_param_data,
      platform::errors::PreconditionNotMet("Residual parameter is required for "
                                           "the DNNL conv+elementwise_add "
                                           "fusion, but now it is missing"));
  std::shared_ptr<mkldnn::memory> user_residual_memory_p =
      handler->AcquireResidualDataMemory(user_residual_md,
                                         to_void_cast<T>(residual_param_data));
  T* output_data = output->mutable_data<T>(ctx.GetPlace());
  std::shared_ptr<mkldnn::memory> dst_memory_p =
      handler->AcquireDstMemoryFromResidualDataMemory(
          user_residual_memory_p, to_void_cast<T>(output_data), *pipeline);
  return dst_memory_p;
}

template <typename T>
static void SetDstMemoryHandler(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const std::shared_ptr<ConvMKLDNNHandler>& handler,
    std::shared_ptr<mkldnn::memory> dst_memory_p) {
  T* output_data =
      output->mutable_data<T>(ctx.GetPlace(), handler->GetDstMemorySize());
  dst_memory_p->set_data_handle(to_void_cast<T>(output_data));
}

template <typename T>
static void SetDstMemoryQuantized(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    std::vector<int64_t> dst_tz, const mkldnn::engine& engine,
    std::shared_ptr<mkldnn::memory::desc>& dst_md,  // NOLINT
    std::shared_ptr<mkldnn::memory>& dst_memory,    // NOLINT
    MKLDNNMemoryFormat output_format) {
  T* output_data = output->mutable_data<T>(ctx.GetPlace());
  const size_t dst_dims = dst_tz.size();
  MKLDNNMemoryFormat dst_fmt;
  PADDLE_ENFORCE_LE(dst_dims, 5,
                    "Dst memory for quantization can not have dims > 5");
  dst_fmt = platform::MKLDNNFormatForSize(dst_dims, output_format);

  auto tmp_dst_md = platform::MKLDNNMemDesc(
      {dst_tz}, paddle::framework::ToMKLDNNDataType(
                    framework::DataTypeTrait<T>::DataType()),
      dst_fmt);
  dst_md.reset(new mkldnn::memory::desc(tmp_dst_md));
  dst_memory.reset(
      new mkldnn::memory(*dst_md, engine, to_void_cast<T>(output_data)));
}

}  // namespace platform
}  // namespace paddle