/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "boost/optional.hpp"
#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/pool_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace platform {

using framework::DataLayout;
using framework::Tensor;
using user_function = std::function<std::shared_ptr<float>(const float*)>;
using memory = mkldnn::memory;
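
// The MKLDNNHandlerT template below caches primitives, primitive descriptors
// and memory objects in the device context, keyed by strings. A typical
// derived handler (see e.g. ActivationMKLDNNHandler further down) is used
// roughly like this (illustrative sketch only, not a complete operator
// kernel):
//
//   SomeHandler<T> handler(dims, ..., dev_ctx, cpu_place, uniq_name);
//   auto src_mem_p = handler.AcquireSrcMemory(input);
//   auto dst_mem_p = handler.AcquireDstMemory(output);
//   auto fwd_p = handler.AcquireForwardPrimitive();
//   auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
//   fwd_p->execute(astream, {{MKLDNN_ARG_SRC, *src_mem_p},
//                            {MKLDNN_ARG_DST, *dst_mem_p}});
//   astream.wait();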

template <typename T, typename TForward,
          typename TBackward = mkldnn_dummy_primitive>
class MKLDNNHandlerT {
 public:
  MKLDNNHandlerT(const MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
                 platform::Place cpu_place, const std::string& base_key)
      : dev_ctx_(dev_ctx),
        engine_(engine),
        place_(cpu_place),
        key_common_(base_key),
        key_(platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, base_key)),
        fwd_pd_(nullptr),
        bwd_pd_(nullptr) {
    platform::MKLDNNDeviceContext::tls().log_lib_version();
  }

  std::shared_ptr<TForward> AcquireForwardPrimitive() {
    const std::string key_p = key_ + "@fwd_p";
    auto forward_p =
        std::static_pointer_cast<TForward>(dev_ctx_.GetBlob(key_p));
    if (forward_p == nullptr) {
      forward_p = std::make_shared<TForward>(*fwd_pd_);
      dev_ctx_.SetBlob(key_p, forward_p);
    }
    return forward_p;
  }

  std::shared_ptr<TBackward> AcquireBackwardPrimitive() {
    const std::string key_p = key_ + "@bwd_p";
    auto backward_p =
        std::static_pointer_cast<TBackward>(dev_ctx_.GetBlob(key_p));
    if (backward_p == nullptr) {
      backward_p = std::make_shared<TBackward>(*bwd_pd_);
      dev_ctx_.SetBlob(key_p, backward_p);
    }
    return backward_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(
        fwd_pd_->src_desc(), to_void_cast<T>(input_data), "@src_mem_p");
  }

  template <typename T_out = T>
  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output) {
    T_out* ptr =
        output->mutable_data<T_out>(place_, fwd_pd_->dst_desc().get_size());
    return this->AcquireMemoryFromPrimitive(fwd_pd_->dst_desc(), ptr,
                                            "@dst_mem_p");
  }

  template <typename T_out = T>
  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      const framework::Tensor* output) {
    const T_out* output_data = output->data<T_out>();
    return this->AcquireMemoryFromPrimitive(bwd_pd_->dst_desc(),
                                            to_void_cast<T_out>(output_data),
                                            "@bwd-dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemory(
      const framework::Tensor* diffdst) {
    const T* ptr = diffdst->data<T>();
    return this->AcquireMemoryFromPrimitive(
        bwd_pd_->diff_dst_desc(), to_void_cast<T>(ptr), "@diff_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemory(
      framework::Tensor* diffsrc) {
    T* ptr =
        diffsrc->mutable_data<T>(place_, bwd_pd_->diff_src_desc().get_size());
    return this->AcquireMemoryFromPrimitive(bwd_pd_->diff_src_desc(), ptr,
                                            "@diff_src_mem_p");
  }

 protected:
  bool isCached() {
    const std::string key_pd = key_common_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));

    const std::string key_p = key_ + "@fwd_p";
    return (dev_ctx_.GetBlob(key_p) != nullptr);
  }

  bool isBwdCached() {
    const std::string key_pd = key_common_ + "@bwd_pd";
    bwd_pd_ = std::static_pointer_cast<typename TBackward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));

    const std::string key_p = key_ + "@bwd_p";
    return (dev_ctx_.GetBlob(key_p) != nullptr);
  }

  // If your primitive descriptor requires attributes, pass them as the
  // first argument and the parameters for the descriptor constructor in the
  // following arguments. Otherwise, all arguments will be forwarded to the
  // descriptor constructor, including the first one.
  template <typename Arg, typename... Args>
  void AcquireForwardPrimitiveDescriptor(Arg&& first_arg, Args&&... args) {
    // Forward PD has to be passed to the Grad op that
    // may be executed by a different thread, hence
    // for that one we use a key that does not contain the TID
    const std::string key_pd = key_common_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (fwd_pd_ == nullptr) {
      static std::mutex acquire_barrier;
      std::lock_guard<std::mutex> block_threads_until_finish_this_job(
          acquire_barrier);
      fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
          dev_ctx_.GetBlob(key_pd));
      if (fwd_pd_ == nullptr) {
        CreateForwardPrimitiveDescriptor(first_arg,
                                         std::forward<Args>(args)...);
        dev_ctx_.SetBlob(key_pd, fwd_pd_);
      }
    }
  }
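
  // For example (both forms appear later in this file): BinaryMKLDNNHandler
  // passes attributes first,
  //   this->AcquireForwardPrimitiveDescriptor(attributes, algo, src0_md,
  //                                           src1_md, dst_md);
  // while ActivationMKLDNNHandler forwards everything to the descriptor:
  //   this->AcquireForwardPrimitiveDescriptor(
  //       mkldnn::prop_kind::forward_training, algorithm, md, alpha, beta);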

  // Using SFINAE to specialise the variadic function. Workaround for not
  // having if constexpr in C++11.
  template <class First, class... Args>
  typename std::enable_if<std::is_same<typename std::decay<First>::type,
                                       dnnl::primitive_attr>::value>::type
  CreateForwardPrimitiveDescriptor(First&& first, Args&&... args) {
    auto fwd_desc = typename TForward::desc(std::forward<Args>(args)...);
    fwd_pd_ = std::make_shared<typename TForward::primitive_desc>(
        fwd_desc, first, engine_);
  }

  template <class First, class... Args>
  typename std::enable_if<!std::is_same<typename std::decay<First>::type,
                                        dnnl::primitive_attr>::value>::type
  CreateForwardPrimitiveDescriptor(First&& first, Args&&... args) {
    auto fwd_desc = typename TForward::desc(std::forward<First>(first),
                                            std::forward<Args>(args)...);
    fwd_pd_ =
        std::make_shared<typename TForward::primitive_desc>(fwd_desc, engine_);
  }
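
  // With C++17 the pair above could collapse into one function using
  //   if constexpr (std::is_same_v<std::decay_t<First>,
  //                                dnnl::primitive_attr>) { ... }
  // (sketch only; this code targets C++11, hence the SFINAE split).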

  template <typename... Args>
  void AcquireBackwardPrimitiveDescriptor(Args&&... args) {
    const std::string key_fwd_pd = key_common_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_fwd_pd));
    PADDLE_ENFORCE_NOT_NULL(
        fwd_pd_, platform::errors::Unavailable(
                     "Get MKLDNN Forward primitive %s failed.", key_fwd_pd));
    const std::string key_pd = key_ + "@bwd_pd";
    bwd_pd_ = std::static_pointer_cast<typename TBackward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (bwd_pd_ == nullptr) {
      auto bwd_desc = typename TBackward::desc(std::forward<Args>(args)...);
      bwd_pd_ = std::make_shared<typename TBackward::primitive_desc>(
          bwd_desc, engine_, *fwd_pd_);
      dev_ctx_.SetBlob(key_pd, bwd_pd_);
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      const std::string& suffix) {
    return std::static_pointer_cast<mkldnn::memory>(
        dev_ctx_.GetBlob(key_ + suffix));
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, void* ptr, const std::string& suffix) {
    const auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, const std::string& suffix) {
    const auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_);
      dev_ctx_.SetBlob(local_key, mem_p);
    }
    return mem_p;
  }

  void AcquireReorder(const std::shared_ptr<mkldnn::memory>& user_memory_p,
                      const std::shared_ptr<mkldnn::memory>& target_memory_p,
                      const std::string& suffix) {
    const auto key_reorder_p = key_ + suffix + "reorder_p";

    auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
        dev_ctx_.GetBlob(key_reorder_p));

    if (reorder_p == nullptr) {
      reorder_p =
          std::make_shared<mkldnn::reorder>(*user_memory_p, *target_memory_p);
      dev_ctx_.SetBlob(key_reorder_p, reorder_p);
    }

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

    platform::RecordEvent record_reorder("int_reorder",
                                         platform::EventRole::kUniqueOp);
    reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                 {MKLDNN_ARG_TO, *target_memory_p}});
    astream.wait();
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryWithReorder(
      const mkldnn::memory::desc& user_md,
      const mkldnn::memory::desc& target_md, void* ptr,
      const std::string& suffix, bool is_persistent = false) {
    const auto target_key = key_ + suffix + "_target";
    const auto key_reorder_p = key_ + suffix + "reorder_p";
    const auto user_key = key_ + suffix + "_user";

    auto target_memory_p =
        std::static_pointer_cast<dnnl::memory>(dev_ctx_.GetBlob(target_key));

    if (target_memory_p == nullptr) {
      auto user_memory_p =
          std::make_shared<dnnl::memory>(user_md, engine_, ptr);
      if (user_md != target_md) {
        target_memory_p = std::make_shared<mkldnn::memory>(target_md, engine_);
        auto reorder_p =
            std::make_shared<dnnl::reorder>(*user_memory_p, *target_memory_p);
        dev_ctx_.SetBlob(key_reorder_p, reorder_p);

        auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      } else {
        target_memory_p = user_memory_p;
      }
      dev_ctx_.SetBlob(user_key, user_memory_p);
      dev_ctx_.SetBlob(target_key, target_memory_p);
    } else if (!is_persistent) {
      auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

      auto user_memory_p =
          std::static_pointer_cast<dnnl::memory>(dev_ctx_.GetBlob(user_key));
      user_memory_p->set_data_handle(ptr);

      auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
          dev_ctx_.GetBlob(key_reorder_p));
      if (reorder_p != nullptr) {
        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
    }
    return target_memory_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(const std::string& suffix) {
    const auto local_key = key_ + suffix;
    return std::static_pointer_cast<mkldnn::memory>(
        dev_ctx_.GetBlob(local_key));
  }

  const MKLDNNDeviceContext& dev_ctx_;
  mkldnn::engine engine_;
  platform::Place place_;
  std::string key_common_;
  std::string key_;
  std::shared_ptr<typename TForward::primitive_desc> fwd_pd_;
  std::shared_ptr<typename TBackward::primitive_desc> bwd_pd_;
};

// TODO(grygielski) this class will be deleted later.
class MKLDNNHandler {
 public:
  MKLDNNHandler(const MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
                const std::string& base_key)
      : dev_ctx_(dev_ctx),
        engine_(engine),
        key_common_(base_key),
        key_(platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, base_key)) {
    platform::MKLDNNDeviceContext::tls().log_lib_version();
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_diff_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_diff_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, void* ptr, const std::string& suffix) {
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, const std::string& suffix) {
    const auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_);
      dev_ctx_.SetBlob(local_key, mem_p);
    }
    return mem_p;
  }

  // This incarnation of AcquireMemory can call a user function, e.g. a custom
  // reorder or preprocessing routine, if needed
  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const mkldnn::memory::desc& md, void* ptr, const std::string& suffix,
      user_function custom_func = {}) {
    /*Generate key*/
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      // Call custom reorder/preprocessing func if available
      if (custom_func) {
        auto reordered_data = custom_func(reinterpret_cast<const float*>(ptr));
        dev_ctx_.SetBlob(local_key + "-custom_reorder", reordered_data);
        ptr = reinterpret_cast<void*>(reordered_data.get());
      }

      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const std::vector<int64_t>& dims, const mkldnn::memory::data_type dtype,
      const MKLDNNMemoryFormat& fmt, void* ptr, const std::string& suffix) {
    /*Generate key*/
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto md = mkldnn::memory::desc(dims, dtype, fmt);

      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const std::shared_ptr<mkldnn::memory>& user_memory_p,
      const std::shared_ptr<mkldnn::memory>& target_memory_p,
      const std::string& suffix,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto local_key = key_ + suffix;
    auto key_reorder_p = key_ + suffix + "reorder_p";

    auto stored_reorder_p = std::static_pointer_cast<mkldnn::reorder>(
        dev_ctx_.GetBlob(key_reorder_p));

    if (stored_reorder_p) {
      pipeline.push_back(*stored_reorder_p);
    } else {
      auto reorder_p =
          std::make_shared<mkldnn::reorder>(*user_memory_p, *target_memory_p);
      dev_ctx_.SetBlob(key_reorder_p, reorder_p);
      auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
      platform::RecordEvent record_reorder("int_reorder",
                                           platform::EventRole::kUniqueOp);
      reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                   {MKLDNN_ARG_TO, *target_memory_p}});
      astream.wait();
    }

    return target_memory_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      mkldnn::memory::desc& md,       // NOLINT
      mkldnn::memory::desc& user_md,  // NOLINT
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      const std::string& suffix,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f}, int mask = 0) {
    // create reorder primitive if the input format is not the preferred one
    auto local_key = key_ + suffix;
    auto key_reorder_p = key_ + suffix + "reorder_p";

    auto target_memory_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

    if (target_memory_p == nullptr) {
      target_memory_p = user_memory_p;
      if (md != user_md) {
        target_memory_p = std::make_shared<mkldnn::memory>(md, engine_);
        std::shared_ptr<mkldnn::reorder::primitive_desc> reorder_pd;
        if (is_INT8) {
          mkldnn::primitive_attr
              attri;  // attribute for int8 weights and bias data reorder.
          attri.set_output_scales(mask, scale_data);

          reorder_pd = std::shared_ptr<mkldnn::reorder::primitive_desc>(
              new mkldnn::reorder::primitive_desc(*user_memory_p,
                                                  *target_memory_p, attri));
        } else {
          reorder_pd = std::shared_ptr<mkldnn::reorder::primitive_desc>(
              new mkldnn::reorder::primitive_desc(*user_memory_p,
                                                  *target_memory_p));
        }
        auto reorder_p =
            std::shared_ptr<mkldnn::reorder>(new mkldnn::reorder(*reorder_pd));
        dev_ctx_.SetBlob(key_reorder_p, reorder_p);

        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
      dev_ctx_.SetBlob(local_key, target_memory_p);
    } else if (!is_persistent) {
      // Make reorder if needed
      auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
          dev_ctx_.GetBlob(key_reorder_p));
      if (reorder_p != nullptr) {
        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
    }
    return target_memory_p;
  }

 protected:
  const MKLDNNDeviceContext& dev_ctx_;
  mkldnn::engine engine_;
  std::string key_common_;
  std::string key_;
};

template <typename T>
class BinaryMKLDNNHandler : public platform::MKLDNNHandlerT<T, dnnl::binary> {
 public:
  BinaryMKLDNNHandler(const dnnl::algorithm algo, const int axis,
                      const MKLDNNDeviceContext& dev_ctx,
                      const mkldnn::engine engine, platform::Place cpu_place,
                      const Tensor* x, const Tensor* y, Tensor* z,
                      float scale_x, float scale_y, float scale_z,
                      const std::string& uniq_name)
      : platform::MKLDNNHandlerT<T, dnnl::binary>(
            dev_ctx, engine, cpu_place,
            platform::CreateKey(
                dev_ctx, framework::vectorize(x->dims()), uniq_name,
                (algo == dnnl::algorithm::binary_mul ? "M" : ""))) {
    // Broadcasting combined with in-place may require a distinct key,
    // hence extend the key with the rank difference
    auto rankdiff = x->dims().size() - y->dims().size();
    if (rankdiff > 0) {
      auto suffix = std::to_string(rankdiff);
      this->key_ += suffix;
      this->key_common_ += suffix;
    }

    if (!this->isCached()) {
      PADDLE_ENFORCE_EQ(
          x->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for X tensor."));
      PADDLE_ENFORCE_NE(
          x->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for X tensor."));

      PADDLE_ENFORCE_EQ(
          y->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for Y tensor."));
      PADDLE_ENFORCE_NE(
          y->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for Y tensor."));

      const auto src_x_tz = framework::vectorize(x->dims());
      const auto src_y_tz = framework::vectorize(y->dims());
      const auto dst_tz = framework::vectorize(z->dims());

      const auto src0_md = dnnl::memory::desc(
          src_x_tz, platform::MKLDNNGetDataType<T>(), x->format());
      auto src1_md = dnnl::memory::desc(
          src_y_tz, platform::MKLDNNGetDataType<T>(), y->format());
      if (rankdiff > 0) {
        std::vector<int64_t> dims1_ex(rankdiff, 1);
        dims1_ex.insert(next(dims1_ex.begin(), (axis == -1 ? rankdiff : axis)),
                        src_y_tz.begin(), src_y_tz.end());
        src1_md = src1_md.reshape(dims1_ex);
      }
      const auto dst_md = memory::desc(dst_tz, platform::MKLDNNGetDataType<T>(),
                                       MKLDNNMemoryFormat::any);

      auto attributes = CreateAttributes(algo, scale_x, scale_y, scale_z);
      this->AcquireForwardPrimitiveDescriptor(attributes, algo, src0_md,
                                              src1_md, dst_md);
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireSecondSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(
        this->fwd_pd_->src1_desc(), to_void_cast<T>(input_data), "@src1_mem_p");
  }

 private:
  static inline dnnl::primitive_attr CreateAttributes(dnnl::algorithm op,
                                                      float scale_x,
                                                      float scale_y,
                                                      float scale_z) {
    // Scales set in attributes for inputs contribute to the output equation
    // in the following way (assuming no broadcasting takes place):
    // output_i = scale_0 * x_i <+ or *> scale_1 * y_i;
    // Hence we have to create scales that will:
    // 1. Dequantize both values, by multiplying with (1.0 / scale_x_or_y)
    // 2. Quantize their result to output scale range, by multiplying with
    // (scale_z)
    // If we combine these two, we end up with the following equation:
    // output = scale_out * (1/scale_x * x <* or +> 1/scale_y * y)
    // Hence, to mimic such behaviour using the provided interface,
    // for the add operation the equation is equal to:
    // output = (scale_out / scale_x) * x + (scale_out / scale_y) * y
    //                <scale_0>                  <scale_1>
    // For the mul operation, on the other hand:
    // output = (scale_out / scale_x) * x * (1.0 / scale_y) * y
    //                <scale_0>                 <scale_1>
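    // Worked example (illustrative): with scale_x = 2, scale_y = 4 and
    // scale_z = 8, binary_add yields scale_0 = 8/2 = 4 and scale_1 = 8/4 = 2,
    // while binary_mul yields scale_0 = 4 and scale_1 = 1/4 = 0.25.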
    float scale_0 = scale_z / scale_x;
    float scale_1 =
        op == dnnl::algorithm::binary_add ? scale_z / scale_y : 1.0 / scale_y;
    dnnl::primitive_attr attributes;
    attributes.set_scales(/* input_x_id = */ DNNL_ARG_SRC_0, /* mask = */ 0,
                          {scale_0});
    attributes.set_scales(/* input_y_id = */ DNNL_ARG_SRC_1, /* mask = */ 0,
                          {scale_1});
    return attributes;
  }
};

template <typename T>
class ActivationMKLDNNHandler
    : public MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                            mkldnn::eltwise_backward> {
 public:
  ActivationMKLDNNHandler(const std::vector<int64_t>& dims,
                          mkldnn::algorithm algorithm, float alpha, float beta,
                          const MKLDNNMemoryFormat fmt,
                          const platform::MKLDNNDeviceContext& dev_ctx,
                          platform::Place cpu_place,
                          const std::string& unique_name, bool is_inplaced)

      : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                                 mkldnn::eltwise_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            is_inplaced
                ? platform::CreateKey(dev_ctx, dims, "a", algorithm,
                                      unique_name)
                : platform::CreateKey(dev_ctx, dims, "a", unique_name)) {
    auto md = mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);

    this->AcquireForwardPrimitiveDescriptor(mkldnn::prop_kind::forward_training,
                                            algorithm, md, alpha, beta);
  }

  ActivationMKLDNNHandler(const std::vector<int64_t>& dims,
                          mkldnn::algorithm algorithm, float alpha, float beta,
                          const MKLDNNMemoryFormat fmt,
                          const MKLDNNMemoryFormat diff_fmt,
                          const platform::MKLDNNDeviceContext& dev_ctx,
                          platform::Place cpu_place,
                          const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                                 mkldnn::eltwise_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dev_ctx, dims, "a", unique_name)) {
    auto diff_dst_md = platform::MKLDNNMemDesc(
        dims, platform::MKLDNNGetDataType<T>(), diff_fmt);
    auto src_md =
        platform::MKLDNNMemDesc(dims, platform::MKLDNNGetDataType<T>(), fmt);

    this->AcquireBackwardPrimitiveDescriptor(algorithm, diff_dst_md, src_md,
                                             alpha, beta);
  }

  std::shared_ptr<mkldnn::memory> AcquireBackwardSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(this->bwd_pd_->src_desc(),
                                            to_void_cast<T>(input_data),
                                            "@bwd-src_mem_p");
  }
};

template <typename T>
class LRNMKLDNNHandler
    : public MKLDNNHandlerT<T, mkldnn::lrn_forward, mkldnn::lrn_backward> {
 public:
  LRNMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                   const platform::MKLDNNDeviceContext& dev_ctx,
                   const mkldnn::engine mkldnn_engine,
                   platform::Place cpu_place, const Tensor* input,
                   const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::lrn_forward, mkldnn::lrn_backward>(
            dev_ctx, mkldnn_engine, cpu_place,
            platform::CreateKey(dev_ctx, framework::vectorize(input->dims()),
                                unique_name)) {
    if (!this->isCached()) {
      const int n = ctx.Attr<int>("n");
      // MKL-DNN implements LRN in a caffe way:
      // http://caffe.berkeleyvision.org/tutorial/layers/lrn.html
      // Where sum of squares is divided by size of normalization window
      // this is not the case for PaddlePaddle LRN.
      // Hence we need to compensate for this difference by
      // multiplying alpha by the size of the window (n)
      const float alpha = ctx.Attr<float>("alpha") * static_cast<float>(n);
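      // e.g. (illustrative): with n = 5 and a PaddlePaddle alpha of 1e-4,
      // the value handed to MKL-DNN becomes 5e-4.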
      const float beta = ctx.Attr<float>("beta");
      const float k = ctx.Attr<float>("k");
      bool is_test = ctx.Attr<bool>("is_test");

      auto dims = paddle::framework::vectorize(input->dims());

      auto src_md = mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(),
                                         input->format());

      this->AcquireForwardPrimitiveDescriptor(
          is_test ? mkldnn::prop_kind::forward_inference
                  : mkldnn::prop_kind::forward_training,
          mkldnn::algorithm::lrn_across_channels, src_md, n, alpha, beta, k);
    }
  }

  LRNMKLDNNHandler(const std::vector<int64_t>& dims, const int n,
                   const float alpha, const float beta, const float k,
                   const MKLDNNMemoryFormat fmt,
                   const MKLDNNMemoryFormat diff_fmt,
                   const platform::MKLDNNDeviceContext& dev_ctx,
                   platform::Place cpu_place, const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::lrn_forward, mkldnn::lrn_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dev_ctx, dims, unique_name)) {
    auto src_md =
        mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
    auto diff_md =
        mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), diff_fmt);

    this->AcquireBackwardPrimitiveDescriptor(
        mkldnn::algorithm::lrn_across_channels, src_md, diff_md, n, alpha, beta,
        k);
  }

  std::shared_ptr<mkldnn::memory> AcquireWorkspaceMemory(
      framework::Tensor* workspace) {
    T* ptr = workspace->mutable_data<T>(
        this->place_, this->fwd_pd_->workspace_desc().get_size());
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->workspace_desc(),
                                            ptr, "@wrk_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireBackwardWorkspaceMemory(
      const framework::Tensor* workspace) {
    const T* workspace_data = workspace->data<T>();
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->workspace_desc(),
                                            to_void_cast<T>(workspace_data),
                                            "@bwd-wrk_mem_p");
  }
};

template <typename T>
class TransposeMKLDNNHandler : public MKLDNNHandler {
 public:
  TransposeMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                         std::vector<int>& axis,      // NOLINT
                         const platform::MKLDNNDeviceContext& dev_ctx,
                         mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        dims_(dims),
        axis_(axis),
        logical_axis_(dims.size(), 0) {}

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const MKLDNNMemoryFormat& fmt, void* ptr) {
    auto local_key = key_ + "@user_src_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      // Make memory descriptor using input format, unless it
      // cannot be trusted (nchw) then make up memory fmt manually
      for (size_t i = 0; i < logical_axis_.size(); ++i) {
        logical_axis_[i] = i;
      }

      auto src_md = fmt != MKLDNNMemoryFormat::nchw
                        ? platform::MKLDNNMemDesc(
                              dims_, platform::MKLDNNGetDataType<T>(), fmt)
                        : Axis2MemoryDesc(dims_, logical_axis_);
      mem_p = std::make_shared<mkldnn::memory>(src_md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output,
                                                   platform::Place place) {
    auto local_key = key_ + "@user_dst_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto dst_md = Axis2MemoryDesc(dims_, axis_);

      auto dst_data = output->mutable_data<T>(place, dst_md.get_size());

      mem_p = std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      auto dst_data = output->mutable_data<T>(place);
      mem_p->set_data_handle(dst_data);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::reorder> AcquireTranspose(
      std::shared_ptr<mkldnn::memory> dst_memory_p,
      std::shared_ptr<mkldnn::memory> src_memory_p) {
    auto prim_key = key_ + "@transpose_p";
    auto transpose_p =
        std::static_pointer_cast<mkldnn::reorder>(dev_ctx_.GetBlob(prim_key));
    if (transpose_p == nullptr) {
      transpose_p =
          std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
      dev_ctx_.SetBlob(prim_key, transpose_p);
    }
    return transpose_p;
  }

 protected:
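  // Illustrative example for Axis2MemoryDesc below: for nchw_tz = {2, 3, 4, 5}
  // (NCHW) and axis = {0, 2, 3, 1} (NCHW -> NHWC) the computed strides are
  // {60, 1, 15, 3}, i.e. the same dims described with an NHWC physical layout.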
  mkldnn::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz,  // NOLINT
                                       std::vector<int>& axis          // NOLINT
                                       ) {
    size_t ndims = axis.size();

    std::vector<int64_t> strides(ndims);
    unsigned int total_stride = 1;
    for (int i = ndims - 1; i >= 0; --i) {
      strides[axis[i]] = total_stride;
      total_stride *= nchw_tz[axis[i]];
    }
    mkldnn::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
                               strides);

    return mem_d;
  }

 private:
  std::vector<int64_t> dims_;
  std::vector<int> axis_;
  std::vector<int> logical_axis_;
};

class ReorderMKLDNNHandler : public MKLDNNHandler {
 public:
  ReorderMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                       framework::proto::VarType::Type vtype,
                       mkldnn::memory::data_type dtype,
                       const platform::MKLDNNDeviceContext& dev_ctx,
                       mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        dims_(dims),
        vtype_(vtype),
        dtype_(dtype) {}

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const MKLDNNMemoryFormat& fmt, void* ptr) {
    return this->AcquireMemory(dims_, dtype_, fmt, ptr, "@user_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      framework::Tensor* output, const MKLDNNMemoryFormat& fmt,
      platform::Place place) {
    auto local_key = key_ + "@user_dst_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto dst_md = platform::MKLDNNMemDesc(dims_, dtype_, fmt);
      auto dst_data = output->mutable_data(place, vtype_, dst_md.get_size());

      mem_p = std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      // Even if the memory object exists, we may be using it for a different
      // tensor
      auto dst_data =
          output->mutable_data(place, vtype_, mem_p->get_desc().get_size());
      mem_p->set_data_handle(dst_data);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::reorder> AcquireReorder(
      std::shared_ptr<mkldnn::memory> dst_memory_p,
      std::shared_ptr<mkldnn::memory> src_memory_p) {
    auto prim_key = key_ + "@reorder_p";
    auto reorder_p =
        std::static_pointer_cast<mkldnn::reorder>(dev_ctx_.GetBlob(prim_key));
    if (reorder_p == nullptr) {
      reorder_p =
          std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
      dev_ctx_.SetBlob(prim_key, reorder_p);
    }
    return reorder_p;
  }

 private:
  std::vector<int64_t> dims_;
  framework::proto::VarType::Type vtype_;
  mkldnn::memory::data_type dtype_;
};

template <typename T>
struct convolutional_algorithm;

template <>
struct convolutional_algorithm<mkldnn::convolution_forward> {
  static constexpr mkldnn::algorithm T = mkldnn::algorithm::convolution_direct;
};

template <>
struct convolutional_algorithm<mkldnn::deconvolution_forward> {
  static constexpr mkldnn::algorithm T =
      mkldnn::algorithm::deconvolution_direct;
};
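
// Used below (illustrative note) as convolutional_algorithm<forward_t>::T
// inside ConvMKLDNNTemplateHandler to select the direct algorithm matching
// the forward primitive type.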

template <class forward_t, class backward_data_t, class backward_weights_t>
class ConvMKLDNNTemplateHandler : public MKLDNNHandler {
 public:
  ConvMKLDNNTemplateHandler(const platform::MKLDNNDeviceContext& dev_ctx,
                            mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key) {}

  // TODO(jczaja): remove after conv int8 is adapted
  ConvMKLDNNTemplateHandler(
      std::shared_ptr<typename forward_t::primitive_desc> conv_pd,
      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
      const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key) {
    conv_pd_ = conv_pd;
  }

  ConvMKLDNNTemplateHandler(
      std::shared_ptr<typename forward_t::primitive_desc> conv_pd,
      std::shared_ptr<typename backward_data_t::primitive_desc>
          conv_bwd_data_pd,
      std::shared_ptr<typename backward_weights_t::primitive_desc>
          conv_bwd_weights_pd,
      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
      const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        conv_pd_(conv_pd),
        conv_bwd_weights_pd_(conv_bwd_weights_pd),
        conv_bwd_data_pd_(conv_bwd_data_pd) {
    // If we are in a Grad operator then update the key with a BWD suffix to
    // distinguish it from FWD memory primitives
    key_ += "-BWD";
  }

  size_t GetDstMemorySize() const { return conv_pd_->dst_desc().get_size(); }

  MKLDNNMemoryFormat GetDstFormat() const {
    return paddle::platform::GetMKLDNNFormat(conv_pd_->dst_desc());
  }

  size_t GetDiffWeightsMemorySize() const {
    return conv_bwd_weights_pd_->diff_weights_desc().get_size();
  }

  size_t GetDiffSourceMemorySize() const {
    return conv_bwd_data_pd_->diff_src_desc().get_size();
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromWeightsPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto src_pd = conv_bwd_weights_pd_->src_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(src_pd, user_pd, user_memory_p,
                               "@weights-src_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemoryFromWeightsPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto diff_dst_pd = conv_bwd_weights_pd_->diff_dst_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p,
                               "@weights-diff_dst_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffWeightsMemoryFromWeightsPrimitive(
      void* ptr) {
    return this->AcquireMemoryFromPrimitive(
        conv_bwd_weights_pd_->diff_weights_desc(), ptr, "@diff_weights_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffWeightsMemoryFromWeightsPrimitive(
      void) {
    return this->AcquireMemoryFromPrimitive(
        conv_bwd_weights_pd_->diff_weights_desc(), "@diff_weights_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemoryFromDataPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto diff_dst_pd = conv_bwd_data_pd_->diff_dst_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p,
                               "@data-diff_dst_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryFromDataPrimitive(
      const std::shared_ptr<mkldnn::memory> user_weights_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto weights_pd = conv_bwd_data_pd_->weights_desc();
    auto user_pd = user_weights_memory_p->get_desc();
    return this->AcquireMemory(weights_pd, user_pd, user_weights_memory_p,
                               "@data-weights_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireResidualDataMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_residual_data_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemoryFromResidualDataMemory(
      const std::shared_ptr<mkldnn::memory>& user_residual_memory_p,
      void* dst_ptr,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    return this->AcquireMemory(user_residual_memory_p,
                               this->AcquireDstMemoryFromPrimitive(dst_ptr),
                               "@residual_data_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemoryFromDataPrimitive(
      void* ptr) {
    return this->AcquireMemoryFromPrimitive(conv_bwd_data_pd_->diff_src_desc(),
                                            ptr, "@diff_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemoryFromPrimitive(void* ptr) {
    return this->AcquireMemoryFromPrimitive(conv_pd_->dst_desc(), ptr,
                                            "@dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto src_pd = conv_pd_->src_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(src_pd, user_pd, user_memory_p, "@src_mem_p",
                               pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemory(
      const mkldnn::memory::desc& md, void* ptr,
      user_function custom_func = {}) {
    return this->AcquireMemory(md, ptr, "@user_weights_mem_p", custom_func);
  }

  std::shared_ptr<mkldnn::memory> AcquireBiasMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_bias_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_weights_memory_p,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f}, int mask = 0) {
    auto user_weights_pd = user_weights_memory_p->get_desc();
    auto weights_pd = conv_pd_->weights_desc();
    return this->AcquireMemory(
        weights_pd, user_weights_pd, user_weights_memory_p, "@weights_mem_p",
        pipeline, is_persistent, is_INT8, scale_data, mask);
  }

  std::shared_ptr<mkldnn::memory> AcquireBiasMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_bias_memory_p,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f},
      int mask = 0) {  // NOLINT
    auto user_bias_pd = user_bias_memory_p->get_desc();
    auto bias_pd = conv_pd_->bias_desc();
    return this->AcquireMemory(bias_pd, user_bias_pd, user_bias_memory_p,
                               "@bias_mem_p", pipeline, is_persistent, is_INT8,
                               scale_data, mask);
  }

  mkldnn::primitive_attr CreatePostOps(
      std::string fuse_activation, float fuse_alpha, float fuse_beta,
      bool fuse_residual_conn, const std::vector<float> output_shift_scale = {},
      float sum_scale = 1.0f) const {
    mkldnn::primitive_attr conv_attr;
    mkldnn::post_ops post_operations;
    if (output_shift_scale.size() > 0) {
      int mask = output_shift_scale.size() > 1 ? 1 << 1 : 0;
      conv_attr.set_output_scales(mask, output_shift_scale);
    }
    // Fusion with Elementwise layer relies on adding a sum post-operation with
    // the scale parameter. It is assumed that when fuse_residual_connection is
    // true, the output tensor contains the data coming from the residual
    // connection. The result of this post_op is:
    // Output = scale * Output + Conv_Out.
    if (fuse_residual_conn) {
      post_operations.append_sum(sum_scale);
    }
    // Fusion with ReLU layer is executed through the PostOps feature. Create a
    // PostOps object and configure it to execute an eltwise relu operation.
    if (fuse_activation == "relu" || fuse_activation == "leaky_relu") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_relu,
                                     fuse_alpha, fuse_beta);
    } else if (fuse_activation == "relu6") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale,
                                     mkldnn::algorithm::eltwise_bounded_relu,
                                     fuse_alpha, fuse_beta);
    } else if (fuse_activation == "swish") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_swish,
                                     fuse_alpha, fuse_beta);
    }
    conv_attr.set_post_ops(post_operations);
    return conv_attr;
  }
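
  // e.g. (illustrative): CreatePostOps("relu", 0.0f, 0.0f, true) returns
  // attributes whose post-ops first accumulate the residual tensor
  // (Output = 1.0 * Output + Conv_Out) and then apply ReLU to the result.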

  std::shared_ptr<typename forward_t::primitive_desc>
  AcquireConvolutionPrimitiveDescriptor(
      const mkldnn::memory::desc& src, const mkldnn::memory::desc& weights,
      boost::optional<const mkldnn::memory::desc&> bias,
      const mkldnn::memory::desc& dst, const std::vector<int64_t>& strides,
      const std::vector<int64_t>& dilations,
      const std::vector<int64_t>& paddings, const mkldnn::engine& engine,
      const std::string& fuse_activation, float fuse_alpha, float fuse_beta,
      const bool fuse_residual_conn, mkldnn::prop_kind fwd_prop_kind,
      const std::vector<float> output_shift_scale = {},
      const float sum_scale = 1.0f) {
    // Conv PD has to be passed to the Grad op that
    // may be executed by a different thread, hence
    // for that one we use a key that does not contain the TID
    const std::string key_conv_pd = key_common_ + "@conv_pd";

    conv_pd_ = std::static_pointer_cast<typename forward_t::primitive_desc>(
        dev_ctx_.GetBlob(key_conv_pd));

    if (conv_pd_ == nullptr) {
      static std::mutex acquire_barrier;
      std::lock_guard<std::mutex> block_threads_until_finish_this_job(
          acquire_barrier);

      conv_pd_ = std::static_pointer_cast<typename forward_t::primitive_desc>(
          dev_ctx_.GetBlob(key_conv_pd));
      if (conv_pd_ == nullptr) {
        mkldnn::memory::dims stride_dims = strides;
        mkldnn::memory::dims dilations_dims = dilations;
        auto mkldnn_paddings = ToMkldnnPadding(paddings);

        auto conv_desc =
            bias ? typename forward_t::desc(
                       fwd_prop_kind, convolutional_algorithm<forward_t>::T,
                       src, weights, *bias, dst, stride_dims, dilations_dims,
                       mkldnn_paddings[0], mkldnn_paddings[1])
                 : typename forward_t::desc(
                       fwd_prop_kind, convolutional_algorithm<forward_t>::T,
                       src, weights, dst, stride_dims, dilations_dims,
                       mkldnn_paddings[0], mkldnn_paddings[1]);

        mkldnn::primitive_attr conv_attr =
            CreatePostOps(fuse_activation, fuse_alpha, fuse_beta,
                          fuse_residual_conn, output_shift_scale, sum_scale);

        conv_pd_.reset(new typename forward_t::primitive_desc(
            conv_desc, conv_attr, engine));
        // Save conv_pd/src_memory/weights_memory for backward pass
        dev_ctx_.SetBlob(key_conv_pd, conv_pd_);
      }
    }

    return conv_pd_;
  }

  std::shared_ptr<forward_t> AcquireConvolution() {
    auto prim_key = key_ + "@conv_p";
    auto conv_p =
        std::static_pointer_cast<forward_t>(dev_ctx_.GetBlob(prim_key));
    if (conv_p == nullptr) {
      conv_p = std::make_shared<forward_t>(*conv_pd_);

      dev_ctx_.SetBlob(prim_key, conv_p);
    }
    return conv_p;
  }

  std::shared_ptr<backward_weights_t> AcquireConvolutionBackwardWeights() {
    auto prim_key = key_ + "@conv_bwd_weights_p";
    auto conv_bwd_weights_p = std::static_pointer_cast<backward_weights_t>(
        dev_ctx_.GetBlob(prim_key));
    if (conv_bwd_weights_p == nullptr) {
      // create backward conv primitive for weights
      conv_bwd_weights_p =
          std::make_shared<backward_weights_t>(*conv_bwd_weights_pd_);
      dev_ctx_.SetBlob(prim_key, conv_bwd_weights_p);
    }
    return conv_bwd_weights_p;
  }

  std::shared_ptr<backward_data_t> AcquireConvolutionBackwardData() {
    auto prim_key = key_ + "@conv_bwd_data_p";
    auto conv_bwd_data_p =
        std::static_pointer_cast<backward_data_t>(dev_ctx_.GetBlob(prim_key));
    if (conv_bwd_data_p == nullptr) {
      conv_bwd_data_p = std::make_shared<backward_data_t>(*conv_bwd_data_pd_);
      dev_ctx_.SetBlob(prim_key, conv_bwd_data_p);
    }
    return conv_bwd_data_p;
  }

 private:
  std::shared_ptr<typename forward_t::primitive_desc> conv_pd_;
  std::shared_ptr<typename backward_weights_t::primitive_desc>
      conv_bwd_weights_pd_;
  std::shared_ptr<typename backward_data_t::primitive_desc> conv_bwd_data_pd_;
};

using ConvMKLDNNHandler =
    ConvMKLDNNTemplateHandler<mkldnn::convolution_forward,
                              mkldnn::convolution_backward_data,
                              mkldnn::convolution_backward_weights>;

using ConvTransposeMKLDNNHandler =
    ConvMKLDNNTemplateHandler<mkldnn::deconvolution_forward,
                              mkldnn::deconvolution_backward_data,
                              mkldnn::deconvolution_backward_weights>;
template <typename T>
static std::shared_ptr<mkldnn::memory> SetDstMemory(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const std::shared_ptr<ConvMKLDNNHandler>& handler) {
  T* output_data =
      output->mutable_data<T>(ctx.GetPlace(), handler->GetDstMemorySize());
  std::shared_ptr<mkldnn::memory> dst_memory_p =
      handler->AcquireDstMemoryFromPrimitive(to_void_cast<T>(output_data));
  return dst_memory_p;
}

template <typename T>
static std::shared_ptr<mkldnn::memory> SetDstMemory(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const framework::Tensor* residual_param,
    const mkldnn::memory::desc& user_residual_md,
    const std::shared_ptr<ConvMKLDNNHandler>& handler,
    std::vector<mkldnn::primitive>* pipeline) {
  const T* residual_param_data = residual_param->data<T>();
  PADDLE_ENFORCE_NOT_NULL(
      residual_param_data,
      platform::errors::PreconditionNotMet("Residual parameter is required for "
                                           "the DNNL conv+elementwise_add "
                                           "fusion, but now it is missing."));
  std::shared_ptr<mkldnn::memory> user_residual_memory_p =
      handler->AcquireResidualDataMemory(user_residual_md,
                                         to_void_cast<T>(residual_param_data));
  T* output_data = output->mutable_data<T>(ctx.GetPlace());
  std::shared_ptr<mkldnn::memory> dst_memory_p =
      handler->AcquireDstMemoryFromResidualDataMemory(
          user_residual_memory_p, to_void_cast<T>(output_data), *pipeline);
  return dst_memory_p;
}

template <typename T>
static void SetDstMemoryHandler(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const std::shared_ptr<ConvMKLDNNHandler>& handler,
    std::shared_ptr<mkldnn::memory> dst_memory_p) {
  T* output_data =
      output->mutable_data<T>(ctx.GetPlace(), handler->GetDstMemorySize());
  dst_memory_p->set_data_handle(to_void_cast<T>(output_data));
}

template <typename T>
static void SetDstMemoryQuantized(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    std::vector<int64_t> dst_tz, const mkldnn::engine& engine,
    std::shared_ptr<mkldnn::memory::desc>& dst_md,  // NOLINT
    std::shared_ptr<mkldnn::memory>& dst_memory,    // NOLINT
    MKLDNNMemoryFormat output_format) {
  T* output_data = output->mutable_data<T>(ctx.GetPlace());
  const size_t dst_dims = dst_tz.size();
  MKLDNNMemoryFormat dst_fmt;
  PADDLE_ENFORCE_LE(dst_dims, 5, platform::errors::InvalidArgument(
                                     "Dst memory for quantization can not have "
                                     "dims > 5. But received dst_dims is %d.",
                                     dst_dims));
  dst_fmt = platform::MKLDNNFormatForSize(dst_dims, output_format);

  auto tmp_dst_md = platform::MKLDNNMemDesc(
      {dst_tz}, paddle::framework::ToMKLDNNDataType(
                    framework::DataTypeTrait<T>::DataType()),
      dst_fmt);
  dst_md.reset(new mkldnn::memory::desc(tmp_dst_md));
  dst_memory.reset(
      new mkldnn::memory(*dst_md, engine, to_void_cast<T>(output_data)));
}

}  // namespace platform
}  // namespace paddle