/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <algorithm>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "boost/optional.hpp"
#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/pool_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace platform {

using framework::DataLayout;
using framework::Tensor;
using user_function = std::function<std::shared_ptr<float>(const float*)>;
using memory = mkldnn::memory;

template <typename T, typename TForward,
          typename TBackward = mkldnn_dummy_primitive,
          typename TBackward_params = mkldnn_dummy_primitive>
class MKLDNNHandlerT {
 public:
  MKLDNNHandlerT(const MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
                 platform::Place cpu_place, const std::string& base_key)
      : dev_ctx_(dev_ctx),
        engine_(engine),
        place_(cpu_place),
        key_common_(base_key),
        key_(platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, base_key)),
        fwd_pd_(nullptr),
        bwd_pd_(nullptr) {
    platform::MKLDNNDeviceContext::tls().log_lib_version();
  }

  std::shared_ptr<TForward> AcquireForwardPrimitive() {
    const std::string key_p = key_ + "@fwd_p";
    auto forward_p =
        std::static_pointer_cast<TForward>(dev_ctx_.GetBlob(key_p));
    if (forward_p == nullptr) {
      forward_p = std::make_shared<TForward>(*fwd_pd_);
      dev_ctx_.SetBlob(key_p, forward_p);
    }
    return forward_p;
  }

  std::shared_ptr<TBackward> AcquireBackwardPrimitive() {
    const std::string key_p = key_ + "@bwd_p";
    auto backward_p =
        std::static_pointer_cast<TBackward>(dev_ctx_.GetBlob(key_p));
    if (backward_p == nullptr) {
      backward_p = std::make_shared<TBackward>(*bwd_pd_);
      dev_ctx_.SetBlob(key_p, backward_p);
    }
    return backward_p;
  }

  std::shared_ptr<TBackward_params> AcquireBackwardWeightsPrimitive() {
    const std::string key_p = key_ + "@bwd_w_p";
    auto backward_p =
        std::static_pointer_cast<TBackward_params>(dev_ctx_.GetBlob(key_p));
    if (backward_p == nullptr) {
      PADDLE_ENFORCE_NOT_NULL(bwd_w_pd_, platform::errors::Unavailable(
                                             "Error: BWD_PD should be set when "
                                             "getting BWD prim witk key: %s .",
                                             key_p));
      backward_p = std::make_shared<TBackward_params>(*bwd_w_pd_);
      dev_ctx_.SetBlob(key_p, backward_p);
    }
    return backward_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(
        fwd_pd_->src_desc(), to_void_cast<T>(input_data), "@src_mem_p");
  }

  template <typename T_out = T>
  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output) {
    T_out* ptr =
        output->mutable_data<T_out>(place_, fwd_pd_->dst_desc().get_size());
    return this->AcquireMemoryFromPrimitive(fwd_pd_->dst_desc(), ptr,
                                            "@dst_mem_p");
  }

  template <typename T_out = T>
  std::shared_ptr<mkldnn::memory> AcquireDstMemory(void) {
    return this->AcquireMemoryFromPrimitive(fwd_pd_->dst_desc(), "@dstt_mem_p");
  }

  template <typename T_out = T>
  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      const framework::Tensor* output) {
    const T_out* output_data = output->data<T_out>();
    return this->AcquireMemoryFromPrimitive(bwd_pd_->dst_desc(),
                                            to_void_cast<T_out>(output_data),
                                            "@bwd-dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemory(
      const framework::Tensor* diffdst) {
    const T* ptr = diffdst->data<T>();
    return this->AcquireMemoryFromPrimitive(
        bwd_pd_->diff_dst_desc(), to_void_cast<T>(ptr), "@diff_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemory(
      framework::Tensor* diffsrc) {
    T* ptr =
        diffsrc->mutable_data<T>(place_, bwd_pd_->diff_src_desc().get_size());
    return this->AcquireMemoryFromPrimitive(bwd_pd_->diff_src_desc(), ptr,
                                            "@diff_src_mem_p");
  }

  // Buffer of given Tensor is used for oneDNN computation
  std::shared_ptr<mkldnn::memory> AcquireDiffWeightsMemory(
      framework::Tensor* diff_weights) {
    PADDLE_ENFORCE_NOT_NULL(
        bwd_w_pd_,
        platform::errors::Unavailable(
            "Error: BWD_W_PD should be set when getting BWD grad of weights."));
    T* ptr = diff_weights->mutable_data<T>(
        place_, bwd_w_pd_->diff_weights_desc().get_size());
    return this->AcquireMemoryFromPrimitive(bwd_w_pd_->diff_weights_desc(), ptr,
                                            "@diff_wei_mem_p");
  }

  // Buffer is allocated by oneDNN to store computation results
  std::shared_ptr<mkldnn::memory> AcquireDiffWeightsMemory(void) {
    PADDLE_ENFORCE_NOT_NULL(
        bwd_w_pd_,
        platform::errors::Unavailable(
            "Error: BWD_W_PD should be set when getting BWD grad of weights."));
    return this->AcquireMemoryFromPrimitive(bwd_w_pd_->diff_weights_desc(),
                                            "@diff_wei_mem_p");
  }

 protected:
  bool isCached() {
    const std::string key_pd = key_common_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));

    const std::string key_p = key_ + "@fwd_p";
    return (dev_ctx_.GetBlob(key_p) != nullptr);
  }

  bool isCachedNonBlocking() {
    const std::string key_pd = key_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));

    return (fwd_pd_ != nullptr);
  }

  bool isBwdCached() {
    const std::string key_pd = key_ + "@bwd_pd";
    bwd_pd_ = std::static_pointer_cast<typename TBackward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));

    return (bwd_pd_ != nullptr);
  }

  // If your primitive descriptor requires attributes, pass them as the
  // first argument and the parameters for the descriptor constructor in the
  // following arguments. Otherwise, all arguments will be forwarded to the
  // descriptor constructor, including the first one.
  template <typename Arg, typename... Args>
  void AcquireForwardPrimitiveDescriptor(Arg&& first_arg, Args&&... args) {
    // Forward PD has to be passed to the Grad op that
    // may be executed by a different thread, hence
    // for that one we use a key that does not contain TID
    const std::string key_pd = key_common_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (fwd_pd_ == nullptr) {
      static std::mutex acquire_barrier;
      std::lock_guard<std::mutex> block_threads_until_finish_this_job(
          acquire_barrier);
      fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
          dev_ctx_.GetBlob(key_pd));
      if (fwd_pd_ == nullptr) {
        CreateForwardPrimitiveDescriptor(first_arg,
                                         std::forward<Args>(args)...);
        dev_ctx_.SetBlob(key_pd, fwd_pd_);
      }
    }
  }
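
  // Example call shapes for AcquireForwardPrimitiveDescriptor (illustrative
  // only; the exact arguments depend on the concrete oneDNN descriptor):
  //   this->AcquireForwardPrimitiveDescriptor(attrs, algo, src_md, dst_md);
  //     - primitive attributes first, remaining args go to TForward::desc
  //   this->AcquireForwardPrimitiveDescriptor(algo, src_md, dst_md);
  //     - no attributes, all args are forwarded to TForward::desc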

  template <typename Arg, typename... Args>
  void AcquireForwardPrimitiveDescriptorNonBlocking(Arg&& first_arg,
                                                    Args&&... args) {
    // This is used when we can recreate FWD PD in BWD so
    // we do not need to pass FWD to BWD
    const std::string key_pd = key_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (fwd_pd_ == nullptr) {
      CreateForwardPrimitiveDescriptor(first_arg, std::forward<Args>(args)...);
      dev_ctx_.SetBlob(key_pd, fwd_pd_);
    }
  }

  // Using SFINAE to specialize the variadic function. Workaround for not
  // having if constexpr in C++11.
  template <class First, class... Args>
  typename std::enable_if<std::is_same<typename std::decay<First>::type,
                                       dnnl::primitive_attr>::value>::type
  CreateForwardPrimitiveDescriptor(First&& first, Args&&... args) {
    auto fwd_desc = typename TForward::desc(std::forward<Args>(args)...);
    fwd_pd_ = std::make_shared<typename TForward::primitive_desc>(
        fwd_desc, first, engine_);
  }

  template <class First, class... Args>
  typename std::enable_if<!std::is_same<typename std::decay<First>::type,
                                        dnnl::primitive_attr>::value>::type
  CreateForwardPrimitiveDescriptor(First&& first, Args&&... args) {
    auto fwd_desc = typename TForward::desc(std::forward<First>(first),
                                            std::forward<Args>(args)...);
    fwd_pd_ =
        std::make_shared<typename TForward::primitive_desc>(fwd_desc, engine_);
  }

  // TODO(jczaja): After/if all ops can use the xxxNonBlocking version
  // then remove this one
  template <typename... Args>
  void AcquireBackwardPrimitiveDescriptor(Args&&... args) {
    const std::string key_fwd_pd = key_common_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_fwd_pd));
    PADDLE_ENFORCE_NOT_NULL(
        fwd_pd_, platform::errors::Unavailable(
                     "Get MKLDNN Forward primitive %s failed.", key_fwd_pd));
    const std::string key_pd = key_ + "@bwd_pd";
    bwd_pd_ = std::static_pointer_cast<typename TBackward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (bwd_pd_ == nullptr) {
      auto bwd_desc = typename TBackward::desc(std::forward<Args>(args)...);
      bwd_pd_ = std::make_shared<typename TBackward::primitive_desc>(
          bwd_desc, engine_, *fwd_pd_);
      dev_ctx_.SetBlob(key_pd, bwd_pd_);
    }
  }

  template <typename... Args>
  void AcquireBackwardPrimitiveDescriptorNonBlocking(Args&&... args) {
    // fwd_pd_ is set during grad by calling
    // AcquireForwardPrimitiveDescriptorNonBlocking
    PADDLE_ENFORCE_NOT_NULL(
        fwd_pd_,
        platform::errors::Unavailable("Get MKLDNN Forward primitive %s failed.",
                                      key_ + "@fwd_pd"));
    const std::string key_pd = key_ + "@bwd_pd";
    bwd_pd_ = std::static_pointer_cast<typename TBackward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (bwd_pd_ == nullptr) {
      auto bwd_desc = typename TBackward::desc(std::forward<Args>(args)...);
      bwd_pd_ = std::make_shared<typename TBackward::primitive_desc>(
          bwd_desc, engine_, *fwd_pd_);
      dev_ctx_.SetBlob(key_pd, bwd_pd_);
    }
  }

  template <typename... Args>
  void AcquireBackwardWeightsPrimitiveDescriptorNonBlocking(Args&&... args) {
    // fwd_pd_ is set during grad by calling
    // AcquireForwardPrimitiveDescriptorNonBlocking
    PADDLE_ENFORCE_NOT_NULL(
        fwd_pd_,
        platform::errors::Unavailable("Get MKLDNN Forward primitive %s failed.",
                                      key_ + "@fwd_pd"));
    const std::string key_pd = key_ + "@bwd_w_pd";
    bwd_w_pd_ =
        std::static_pointer_cast<typename TBackward_params::primitive_desc>(
            dev_ctx_.GetBlob(key_pd));
    if (bwd_w_pd_ == nullptr) {
      auto bwd_desc =
          typename TBackward_params::desc(std::forward<Args>(args)...);
      bwd_w_pd_ = std::make_shared<typename TBackward_params::primitive_desc>(
          bwd_desc, engine_, *fwd_pd_);
      dev_ctx_.SetBlob(key_pd, bwd_w_pd_);
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      const std::string& suffix) {
    return std::static_pointer_cast<mkldnn::memory>(
        dev_ctx_.GetBlob(key_ + suffix));
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, void* ptr, const std::string& suffix) {
    const auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, const std::string& suffix) {
    const auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_);
      dev_ctx_.SetBlob(local_key, mem_p);
    }
    return mem_p;
  }

  void AcquireReorder(const std::shared_ptr<mkldnn::memory>& user_memory_p,
                      const std::shared_ptr<mkldnn::memory>& target_memory_p,
                      const std::string& suffix) {
    const auto key_reorder_p = key_ + suffix + "reorder_p";

    auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
        dev_ctx_.GetBlob(key_reorder_p));

    if (reorder_p == nullptr) {
      reorder_p =
          std::make_shared<mkldnn::reorder>(*user_memory_p, *target_memory_p);
      dev_ctx_.SetBlob(key_reorder_p, reorder_p);
    }

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

    platform::RecordEvent record_reorder("int_reorder",
                                         platform::EventRole::kUniqueOp);
    reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                 {MKLDNN_ARG_TO, *target_memory_p}});
    astream.wait();
  }

  template <typename F = T>
  std::shared_ptr<mkldnn::memory> AcquireMemoryWithReorder(
      const mkldnn::memory::desc& user_md,
      const mkldnn::memory::desc& target_md, void* ptr,
      const std::string& suffix, bool is_persistent = false,
      std::function<std::shared_ptr<F>(const F*)> custom_reorder_func = {}) {
    const auto target_key = key_ + suffix + "_target";
    const auto key_reorder_p = key_ + suffix + "reorder_p";
    const auto user_key = key_ + suffix + "_user";

    auto target_memory_p =
        std::static_pointer_cast<dnnl::memory>(dev_ctx_.GetBlob(target_key));

    if (target_memory_p == nullptr) {
      if (custom_reorder_func) {
        auto reordered_data =
            custom_reorder_func(reinterpret_cast<const F*>(ptr));
        dev_ctx_.SetBlob(key_reorder_p + "-custom_reorder", reordered_data);
        ptr = reinterpret_cast<void*>(reordered_data.get());
      }
      auto user_memory_p =
          std::make_shared<dnnl::memory>(user_md, engine_, ptr);
      if (user_md != target_md) {
        target_memory_p = std::make_shared<mkldnn::memory>(target_md, engine_);
        auto reorder_p =
            std::make_shared<dnnl::reorder>(*user_memory_p, *target_memory_p);
        dev_ctx_.SetBlob(key_reorder_p, reorder_p);

        auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      } else {
        target_memory_p = user_memory_p;
      }
      dev_ctx_.SetBlob(user_key, user_memory_p);
      dev_ctx_.SetBlob(target_key, target_memory_p);
    } else if (!is_persistent) {
      auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

      auto user_memory_p =
          std::static_pointer_cast<dnnl::memory>(dev_ctx_.GetBlob(user_key));
      user_memory_p->set_data_handle(ptr);

      auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
          dev_ctx_.GetBlob(key_reorder_p));
      if (reorder_p != nullptr) {
        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
412 413 414 415 416 417 418 419
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
    }
    return target_memory_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(const std::string& suffix) {
    const auto local_key = key_ + suffix;
    return std::static_pointer_cast<mkldnn::memory>(
        dev_ctx_.GetBlob(local_key));
  }

  const MKLDNNDeviceContext& dev_ctx_;
  mkldnn::engine engine_;
  platform::Place place_;
  std::string key_common_;
  std::string key_;
  std::shared_ptr<typename TForward::primitive_desc> fwd_pd_;
  std::shared_ptr<typename TBackward::primitive_desc> bwd_pd_;
  std::shared_ptr<typename TBackward_params::primitive_desc> bwd_w_pd_;
};
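
// Illustrative usage sketch (an assumption for documentation purposes, not a
// prescribed recipe): an op kernel built on an MKLDNNHandlerT-derived handler
// typically acquires the cached memory objects and the primitive, then runs
// it on the thread-local stream:
//
//   SomeOpMKLDNNHandler<T> handler(/*...*/);        // hypothetical subclass
//   auto src_mem_p = handler.AcquireSrcMemory(x);
//   auto dst_mem_p = handler.AcquireDstMemory(out);
//   auto prim_p = handler.AcquireForwardPrimitive();
//   auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
//   prim_p->execute(astream, {{MKLDNN_ARG_SRC, *src_mem_p},
//                             {MKLDNN_ARG_DST, *dst_mem_p}});
//   astream.wait();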

// TODO(grygielski) this class will be deleted later.
class MKLDNNHandler {
 public:
  MKLDNNHandler(const MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
                const std::string& base_key)
      : dev_ctx_(dev_ctx),
        engine_(engine),
        key_common_(base_key),
        key_(platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, base_key)) {
    platform::MKLDNNDeviceContext::tls().log_lib_version();
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_diff_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_diff_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, void* ptr, const std::string& suffix) {
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, const std::string& suffix) {
    const auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_);
      dev_ctx_.SetBlob(local_key, mem_p);
    }
    return mem_p;
  }

  // This incarnation of AcquireMemory can call a user function, e.g. a custom
  // reorder or preprocessing routine, if needed
  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const mkldnn::memory::desc& md, void* ptr, const std::string& suffix,
      user_function custom_func = {}) {
    /*Generate key*/
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      // Call custom reorder/preprocessing func if available
      if (custom_func) {
        auto reordered_data = custom_func(reinterpret_cast<const float*>(ptr));
        dev_ctx_.SetBlob(local_key + "-custom_reorder", reordered_data);
        ptr = reinterpret_cast<void*>(reordered_data.get());
      }

      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const std::vector<int64_t>& dims, const mkldnn::memory::data_type dtype,
      const MKLDNNMemoryFormat& fmt, void* ptr, const std::string& suffix) {
    /*Generate key*/
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto md = mkldnn::memory::desc(dims, dtype, fmt);

      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const std::shared_ptr<mkldnn::memory>& user_memory_p,
      const std::shared_ptr<mkldnn::memory>& target_memory_p,
      const std::string& suffix,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto local_key = key_ + suffix;
    auto key_reorder_p = key_ + suffix + "reorder_p";

    auto stored_reorder_p = std::static_pointer_cast<mkldnn::reorder>(
        dev_ctx_.GetBlob(key_reorder_p));

    if (stored_reorder_p) {
      pipeline.push_back(*stored_reorder_p);
    } else {
      auto reorder_p =
          std::make_shared<mkldnn::reorder>(*user_memory_p, *target_memory_p);
      dev_ctx_.SetBlob(key_reorder_p, reorder_p);
      auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
      platform::RecordEvent record_reorder("int_reorder",
                                           platform::EventRole::kUniqueOp);
      reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                   {MKLDNN_ARG_TO, *target_memory_p}});
      astream.wait();
    }

    return target_memory_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      mkldnn::memory::desc& md,       // NOLINT
      mkldnn::memory::desc& user_md,  // NOLINT
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      const std::string& suffix,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f}, int mask = 0) {
    // create reorder primitive if the input format is not the preferred one
    auto local_key = key_ + suffix;
    auto key_reorder_p = key_ + suffix + "reorder_p";

    auto target_memory_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

    if (target_memory_p == nullptr) {
      target_memory_p = user_memory_p;
      if (md != user_md) {
        target_memory_p = std::make_shared<mkldnn::memory>(md, engine_);
        std::shared_ptr<mkldnn::reorder::primitive_desc> reorder_pd;
        if (is_INT8) {
          mkldnn::primitive_attr
              attri;  // attribute for int8 weights and bias data reorder.
          attri.set_output_scales(mask, scale_data);

          reorder_pd = std::shared_ptr<mkldnn::reorder::primitive_desc>(
              new mkldnn::reorder::primitive_desc(*user_memory_p,
                                                  *target_memory_p, attri));
        } else {
          reorder_pd = std::shared_ptr<mkldnn::reorder::primitive_desc>(
              new mkldnn::reorder::primitive_desc(*user_memory_p,
                                                  *target_memory_p));
        }
        auto reorder_p =
            std::shared_ptr<mkldnn::reorder>(new mkldnn::reorder(*reorder_pd));
        dev_ctx_.SetBlob(key_reorder_p, reorder_p);

        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
      dev_ctx_.SetBlob(local_key, target_memory_p);
    } else if (!is_persistent) {
      // Make reorder if needed
      auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
          dev_ctx_.GetBlob(key_reorder_p));
      if (reorder_p != nullptr) {
        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
    }
    return target_memory_p;
  }

 protected:
  const MKLDNNDeviceContext& dev_ctx_;
  mkldnn::engine engine_;
  std::string key_common_;
  std::string key_;
};

template <typename T>
class BinaryMKLDNNHandler : public platform::MKLDNNHandlerT<T, dnnl::binary> {
 public:
  BinaryMKLDNNHandler(const dnnl::algorithm algo, const int axis,
                      const MKLDNNDeviceContext& dev_ctx,
                      const mkldnn::engine engine, platform::Place cpu_place,
                      const Tensor* x, const Tensor* y, Tensor* z,
                      float scale_x, float scale_y, float scale_z,
                      const std::string& uniq_name)
      : platform::MKLDNNHandlerT<T, dnnl::binary>(
            dev_ctx, engine, cpu_place,
            platform::CreateKey(
                dev_ctx, framework::vectorize(x->dims()), uniq_name,
                (algo == dnnl::algorithm::binary_mul ? "M" : ""))) {
    // Broadcasting combined with in-place may require a distinct cache key
    auto rankdiff = x->dims().size() - y->dims().size();
    if (rankdiff > 0) {
      auto suffix = std::to_string(rankdiff);
      this->key_ += suffix;
      this->key_common_ += suffix;
    }

    if (!this->isCached()) {
      PADDLE_ENFORCE_EQ(
          x->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for X tensor."));
      PADDLE_ENFORCE_NE(
          x->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for X tensor."));

      PADDLE_ENFORCE_EQ(
          y->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for Y tensor."));
      PADDLE_ENFORCE_NE(
          y->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for Y tensor."));

      const auto src_x_tz = framework::vectorize(x->dims());
      const auto src_y_tz = framework::vectorize(y->dims());
      // If the output tensor (z) is nullptr then we are computing into an
      // oneDNN-managed buffer
      const auto dst_tz =
          (z == nullptr) ? src_x_tz : framework::vectorize(z->dims());

      const auto src0_md = dnnl::memory::desc(
          src_x_tz, platform::MKLDNNGetDataType<T>(), x->format());
      auto src1_md = dnnl::memory::desc(
          src_y_tz, platform::MKLDNNGetDataType<T>(), y->format());
      if (rankdiff > 0) {
        std::vector<int64_t> dims1_ex(rankdiff, 1);
        dims1_ex.insert(next(dims1_ex.begin(), (axis == -1 ? rankdiff : axis)),
                        src_y_tz.begin(), src_y_tz.end());
        src1_md = src1_md.reshape(dims1_ex);
      }
      const auto dst_md = memory::desc(dst_tz, platform::MKLDNNGetDataType<T>(),
                                       MKLDNNMemoryFormat::any);

      auto attributes = CreateAttributes(algo, scale_x, scale_y, scale_z);
      this->AcquireForwardPrimitiveDescriptor(attributes, algo, src0_md,
                                              src1_md, dst_md);
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireSecondSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(
        this->fwd_pd_->src1_desc(), to_void_cast<T>(input_data), "@src1_mem_p");
  }

 private:
  static inline dnnl::primitive_attr CreateAttributes(dnnl::algorithm op,
                                                      float scale_x,
                                                      float scale_y,
                                                      float scale_z) {
    // Scales set in attributes for inputs contribute to the output equation
    // in the following way (assuming no broadcasting takes place):
    // output_i = scale_0 * x_i <+ or *> scale_1 * y_i;
    // Hence we have to create scales that will:
    // 1. Dequantize both values, by multiplying with (1.0 / scale_x_or_y)
    // 2. Quantize their result to output scale range, by multiplying with
    // (scale_z)
    // If we combine these two, we end up with following equation
    // output = scale_out * (1/scale_x * x <* or +> 1/scale_y * y)
    // Hence, to mimic such behaviour using provided interface,
    // For add operation the equation is equal to:
    // output = (scale_out / scale_x) * x + (scale_out / scale_y) * y
    //                <scale_0>                  <scale_1>
    // For mul operation on the other hand
    // output = (scale_out / scale_x) * x * (1.0 / scale_y) * y
    //                <scale_0>                 <scale_1>
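    // Illustrative numbers (assumed, for documentation only): with scale_x = 2,
    // scale_y = 4, scale_z = 8 and binary_add, scale_0 = 8 / 2 = 4 and
    // scale_1 = 8 / 4 = 2, i.e. output = 4 * x + 2 * y in the quantized domain.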
    float scale_0 = scale_z / scale_x;
    float scale_1 =
        op == dnnl::algorithm::binary_add ? scale_z / scale_y : 1.0 / scale_y;
    dnnl::primitive_attr attributes;
    attributes.set_scales(/* input_x_id = */ DNNL_ARG_SRC_0, /* mask = */ 0,
                          {scale_0});
    attributes.set_scales(/* input_y_id = */ DNNL_ARG_SRC_1, /* mask = */ 0,
                          {scale_1});
    return attributes;
  }
};
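
// Illustrative usage sketch (assumed wiring, not a prescribed recipe): an
// elementwise kernel would typically drive BinaryMKLDNNHandler like this:
//
//   BinaryMKLDNNHandler<T> handler(dnnl::algorithm::binary_add, axis, dev_ctx,
//                                  engine, ctx.GetPlace(), x, y, z, scale_x,
//                                  scale_y, scale_out, ctx.OutputName("Out"));
//   auto src_x_mem = handler.AcquireSrcMemory(x);
//   auto src_y_mem = handler.AcquireSecondSrcMemory(y);
//   auto dst_mem = handler.AcquireDstMemory(z);
//   auto binary_p = handler.AcquireForwardPrimitive();
//   auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
//   binary_p->execute(astream, {{DNNL_ARG_SRC_0, *src_x_mem},
//                               {DNNL_ARG_SRC_1, *src_y_mem},
//                               {DNNL_ARG_DST, *dst_mem}});
//   astream.wait();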

template <typename T>
class BroadcastDataMKLDNNHandler
    : public platform::MKLDNNHandlerT<T, dnnl::binary> {
 public:
  BroadcastDataMKLDNNHandler(const dnnl::algorithm algo,
                             const MKLDNNDeviceContext& dev_ctx,
                             const mkldnn::engine engine,
                             platform::Place cpu_place, const Tensor* x,
                             const Tensor* y, float scale_x, float scale_y,
                             const std::string& uniq_name,
                             const std::vector<int64_t>& input_dims)
      : platform::MKLDNNHandlerT<T, dnnl::binary>(
            dev_ctx, engine, cpu_place,
            platform::CreateKey(dev_ctx, framework::vectorize(x->dims()),
                                uniq_name)) {
    if (!this->isCached()) {
      PADDLE_ENFORCE_EQ(
          x->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for X tensor."));
      PADDLE_ENFORCE_NE(
          x->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for X tensor."));

      PADDLE_ENFORCE_EQ(
          y->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for Y tensor."));
      PADDLE_ENFORCE_NE(
          y->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for Y tensor."));

      const auto src0_tz = framework::vectorize(x->dims());

      const auto src0_md = dnnl::memory::desc(
          src0_tz, platform::MKLDNNGetDataType<T>(), x->format());
      const auto src1_md = dnnl::memory::desc(
          input_dims, platform::MKLDNNGetDataType<T>(), x->format());

      dnnl::primitive_attr attributes;
      attributes.set_scales(DNNL_ARG_SRC_0, 0, {scale_x});
      attributes.set_scales(DNNL_ARG_SRC_1, 0, {scale_y});

      this->AcquireForwardPrimitiveDescriptor(attributes, algo, src0_md,
                                              src1_md, src0_md);
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(framework::Tensor* input) {
    T* input_data = input->data<T>();
    memset(input_data, 0, this->fwd_pd_->src_desc().get_size());
    return this->AcquireMemoryFromPrimitive(
        this->fwd_pd_->src_desc(), to_void_cast<T>(input_data), "@src0_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireSecondSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(
        this->fwd_pd_->src1_desc(), to_void_cast<T>(input_data), "@src1_mem_p");
  }
};

template <typename T>
class ReductionMKLDNNHandler
    : public platform::MKLDNNHandlerT<T, dnnl::reduction> {
 public:
  ReductionMKLDNNHandler(const dnnl::algorithm algo, const float p,
                         const float eps, const MKLDNNDeviceContext& dev_ctx,
                         const mkldnn::engine engine, platform::Place cpu_place,
                         const Tensor* x, const Tensor* y,
                         const std::string& uniq_name,
                         std::vector<int64_t> y_tz)
      : platform::MKLDNNHandlerT<T, dnnl::reduction>(
            dev_ctx, engine, cpu_place,
            platform::CreateKey(dev_ctx, framework::vectorize(x->dims()),
                                uniq_name,
                                (std::to_string(static_cast<int>(algo))))) {
    if (!this->isCached()) {
      PADDLE_ENFORCE_EQ(
          x->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for X tensor."));
      PADDLE_ENFORCE_NE(
          x->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for X tensor."));

      const auto x_tz = framework::vectorize(x->dims());

      const auto x_md = dnnl::memory::desc(
          x_tz, platform::MKLDNNGetDataType<T>(), x->format());
      const auto y_md =
          memory::desc(y_tz, platform::MKLDNNGetDataType<T>(), x->format());

      this->AcquireForwardPrimitiveDescriptor(algo, x_md, y_md, p, eps);
    }
  }
};

template <typename T>
class ActivationMKLDNNHandler
    : public MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                            mkldnn::eltwise_backward> {
 public:
  ActivationMKLDNNHandler(const std::vector<int64_t>& dims,
                          mkldnn::algorithm algorithm, float alpha, float beta,
                          const MKLDNNMemoryFormat fmt,
                          const platform::MKLDNNDeviceContext& dev_ctx,
                          platform::Place cpu_place,
                          const std::string& unique_name, bool is_inplaced)

      : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                                 mkldnn::eltwise_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            is_inplaced
                ? platform::CreateKey(dev_ctx, dims, "a", algorithm,
                                      unique_name)
                : platform::CreateKey(dev_ctx, dims, "a", unique_name)) {
    auto md = mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);

    this->AcquireForwardPrimitiveDescriptor(mkldnn::prop_kind::forward_training,
                                            algorithm, md, alpha, beta);
  }

  ActivationMKLDNNHandler(const std::vector<int64_t>& dims,
                          mkldnn::algorithm algorithm, float alpha, float beta,
                          const MKLDNNMemoryFormat fmt,
                          const MKLDNNMemoryFormat diff_fmt,
                          const platform::MKLDNNDeviceContext& dev_ctx,
                          platform::Place cpu_place,
                          const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                                 mkldnn::eltwise_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dev_ctx, dims, "a", unique_name)) {
    auto diff_dst_md = platform::MKLDNNMemDesc(
        dims, platform::MKLDNNGetDataType<T>(), diff_fmt);
    auto src_md =
        platform::MKLDNNMemDesc(dims, platform::MKLDNNGetDataType<T>(), fmt);

    this->AcquireBackwardPrimitiveDescriptor(algorithm, diff_dst_md, src_md,
                                             alpha, beta);
  }

  std::shared_ptr<mkldnn::memory> AcquireBackwardSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(this->bwd_pd_->src_desc(),
                                            to_void_cast<T>(input_data),
                                            "@bwd-src_mem_p");
  }
};

template <typename T>
class TransposeMKLDNNHandler : public MKLDNNHandler {
 public:
  TransposeMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                         std::vector<int>& axis,      // NOLINT
                         const platform::MKLDNNDeviceContext& dev_ctx,
                         mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        dims_(dims),
        axis_(axis),
        logical_axis_(dims.size(), 0) {}

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const MKLDNNMemoryFormat& fmt, void* ptr) {
    auto local_key = key_ + "@user_src_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      // Make the memory descriptor using the input format, unless it
      // cannot be trusted (nchw); in that case build the memory format manually
      for (size_t i = 0; i < logical_axis_.size(); ++i) {
        logical_axis_[i] = i;
      }

      auto src_md = fmt != MKLDNNMemoryFormat::nchw
                        ? platform::MKLDNNMemDesc(
                              dims_, platform::MKLDNNGetDataType<T>(), fmt)
                        : Axis2MemoryDesc(dims_, logical_axis_);
      mem_p = std::make_shared<mkldnn::memory>(src_md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output,
                                                   platform::Place place) {
    auto local_key = key_ + "@user_dst_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto dst_md = Axis2MemoryDesc(dims_, axis_);

      auto dst_data = output->mutable_data<T>(place, dst_md.get_size());

      mem_p = std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      auto dst_data = output->mutable_data<T>(place);
      mem_p->set_data_handle(dst_data);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::reorder> AcquireTranspose(
      std::shared_ptr<mkldnn::memory> dst_memory_p,
      std::shared_ptr<mkldnn::memory> src_memory_p) {
    auto prim_key = key_ + "@transpose_p";
    auto transpose_p =
        std::static_pointer_cast<mkldnn::reorder>(dev_ctx_.GetBlob(prim_key));
    if (transpose_p == nullptr) {
      transpose_p =
          std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
      dev_ctx_.SetBlob(prim_key, transpose_p);
    }
    return transpose_p;
  }

 protected:
  mkldnn::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz,  // NOLINT
                                       std::vector<int>& axis          // NOLINT
                                       ) {
    size_t ndims = axis.size();

    std::vector<int64_t> strides(ndims);
    unsigned int total_stride = 1;
    for (int i = ndims - 1; i >= 0; --i) {
      strides[axis[i]] = total_stride;
      total_stride *= nchw_tz[axis[i]];
    }
    mkldnn::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
                               strides);

    return mem_d;
  }
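
  // Worked example (illustrative only): for nchw_tz = {2, 3, 4, 5} and
  // axis = {0, 2, 3, 1} the loop above yields strides = {60, 1, 15, 3}, so the
  // described memory lays the data out in the permuted (transposed) order.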

 private:
  std::vector<int64_t> dims_;
  std::vector<int> axis_;
  std::vector<int> logical_axis_;
};

class ReorderMKLDNNHandler : public MKLDNNHandler {
 public:
  ReorderMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                       framework::proto::VarType::Type vtype,
                       mkldnn::memory::data_type dtype,
                       const platform::MKLDNNDeviceContext& dev_ctx,
                       mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        dims_(dims),
        vtype_(vtype),
        vtype_dst_(vtype),
        dtype_(dtype),
        dtype_dst_(dtype) {}

  ReorderMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                       framework::proto::VarType::Type vtype,
                       mkldnn::memory::data_type dtype,
                       framework::proto::VarType::Type vtype_dst,
                       mkldnn::memory::data_type dtype_dst,
                       const platform::MKLDNNDeviceContext& dev_ctx,
                       mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        dims_(dims),
        vtype_(vtype),
        vtype_dst_(vtype_dst),
        dtype_(dtype),
        dtype_dst_(dtype_dst) {}

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const MKLDNNMemoryFormat& fmt, void* ptr) {
    return this->AcquireMemory(dims_, dtype_, fmt, ptr, "@user_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      framework::Tensor* output, const MKLDNNMemoryFormat& fmt,
      platform::Place place) {
    auto local_key = key_ + "@user_dst_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto dst_md = platform::MKLDNNMemDesc(dims_, dtype_dst_, fmt);
      auto dst_data =
          output->mutable_data(place, vtype_dst_, dst_md.get_size());

      mem_p = std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      // Even if the memory object exists, we may be using it for a different
      // tensor
      auto dst_data =
          output->mutable_data(place, vtype_dst_, mem_p->get_desc().get_size());
      mem_p->set_data_handle(dst_data);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::reorder> AcquireReorder(
      std::shared_ptr<mkldnn::memory> dst_memory_p,
      std::shared_ptr<mkldnn::memory> src_memory_p) {
    auto prim_key = key_ + "@reorder_p";
    auto reorder_p =
        std::static_pointer_cast<mkldnn::reorder>(dev_ctx_.GetBlob(prim_key));
    if (reorder_p == nullptr) {
      reorder_p =
          std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
      dev_ctx_.SetBlob(prim_key, reorder_p);
    }
    return reorder_p;
  }

 private:
  std::vector<int64_t> dims_;
  framework::proto::VarType::Type vtype_, vtype_dst_;
  mkldnn::memory::data_type dtype_, dtype_dst_;
};
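
// Illustrative usage sketch (assumed wiring, for documentation only): a kernel
// that only needs a layout conversion would typically do
//
//   ReorderMKLDNNHandler handler(dims, vtype, dtype, dev_ctx, engine, key);
//   auto src_mem = handler.AcquireSrcMemory(in_fmt, to_void_cast(in_data));
//   auto dst_mem = handler.AcquireDstMemory(output, out_fmt, place);
//   auto reorder_p = handler.AcquireReorder(dst_mem, src_mem);
//   auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
//   reorder_p->execute(astream, *src_mem, *dst_mem);
//   astream.wait();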

template <typename T>
struct convolutional_algorithm;

template <>
struct convolutional_algorithm<mkldnn::convolution_forward> {
  static constexpr mkldnn::algorithm T = mkldnn::algorithm::convolution_direct;
};

template <>
struct convolutional_algorithm<mkldnn::deconvolution_forward> {
  static constexpr mkldnn::algorithm T =
      mkldnn::algorithm::deconvolution_direct;
};

template <class forward_t, class backward_data_t, class backward_weights_t>
class ConvMKLDNNTemplateHandler : public MKLDNNHandler {
 public:
  ConvMKLDNNTemplateHandler(const platform::MKLDNNDeviceContext& dev_ctx,
                            mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key) {}

  // TODO(jczaja): remove after conv int8 is adapted
  ConvMKLDNNTemplateHandler(
      std::shared_ptr<typename forward_t::primitive_desc> conv_pd,
      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
      const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key) {
    conv_pd_ = conv_pd;
  }

  ConvMKLDNNTemplateHandler(
      std::shared_ptr<typename forward_t::primitive_desc> conv_pd,
      std::shared_ptr<typename backward_data_t::primitive_desc>
          conv_bwd_data_pd,
      std::shared_ptr<typename backward_weights_t::primitive_desc>
          conv_bwd_weights_pd,
      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
      const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        conv_pd_(conv_pd),
        conv_bwd_weights_pd_(conv_bwd_weights_pd),
        conv_bwd_data_pd_(conv_bwd_data_pd) {
    // If we are in a Grad operator then update the key with a BWD suffix to
    // distinguish it from FWD memory primitives
    key_ += "-BWD";
  }

  size_t GetDstMemorySize() const { return conv_pd_->dst_desc().get_size(); }

  MKLDNNMemoryFormat GetDstFormat() const {
    return paddle::platform::GetMKLDNNFormat(conv_pd_->dst_desc());
  }

  size_t GetDiffWeightsMemorySize() const {
    return conv_bwd_weights_pd_->diff_weights_desc().get_size();
  }

  size_t GetDiffSourceMemorySize() const {
    return conv_bwd_data_pd_->diff_src_desc().get_size();
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromWeightsPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto src_pd = conv_bwd_weights_pd_->src_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(src_pd, user_pd, user_memory_p,
                               "@weights-src_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemoryFromWeightsPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto diff_dst_pd = conv_bwd_weights_pd_->diff_dst_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p,
                               "@weights-diff_dst_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffWeightsMemoryFromWeightsPrimitive(
      void* ptr) {
    return this->AcquireMemoryFromPrimitive(
        conv_bwd_weights_pd_->diff_weights_desc(), ptr, "@diff_weights_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffWeightsMemoryFromWeightsPrimitive(
      void) {
    return this->AcquireMemoryFromPrimitive(
        conv_bwd_weights_pd_->diff_weights_desc(), "@diff_weights_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemoryFromDataPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto diff_dst_pd = conv_bwd_data_pd_->diff_dst_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p,
                               "@data-diff_dst_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryFromDataPrimitive(
      const std::shared_ptr<mkldnn::memory> user_weights_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto weights_pd = conv_bwd_data_pd_->weights_desc();
    auto user_pd = user_weights_memory_p->get_desc();
    return this->AcquireMemory(weights_pd, user_pd, user_weights_memory_p,
                               "@data-weights_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireResidualDataMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_residual_data_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemoryFromResidualDataMemory(
      const std::shared_ptr<mkldnn::memory>& user_residual_memory_p,
      void* dst_ptr,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    return this->AcquireMemory(user_residual_memory_p,
                               this->AcquireDstMemoryFromPrimitive(dst_ptr),
                               "@residual_data_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemoryFromDataPrimitive(
      void* ptr) {
    return this->AcquireMemoryFromPrimitive(conv_bwd_data_pd_->diff_src_desc(),
                                            ptr, "@diff_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemoryFromPrimitive(void* ptr) {
    return this->AcquireMemoryFromPrimitive(conv_pd_->dst_desc(), ptr,
                                            "@dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto src_pd = conv_pd_->src_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(src_pd, user_pd, user_memory_p, "@src_mem_p",
                               pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemory(
      const mkldnn::memory::desc& md, void* ptr,
      user_function custom_func = {}) {
    return this->AcquireMemory(md, ptr, "@user_weights_mem_p", custom_func);
  }

  std::shared_ptr<mkldnn::memory> AcquireBiasMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_bias_mem_p");
  }

J
Jacek Czaja 已提交
1209 1210 1211
  std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_weights_memory_p,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
1212 1213
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f}, int mask = 0) {
A
Adam 已提交
1214 1215
    auto user_weights_pd = user_weights_memory_p->get_desc();
    auto weights_pd = conv_pd_->weights_desc();
1216 1217 1218
    return this->AcquireMemory(
        weights_pd, user_weights_pd, user_weights_memory_p, "@weights_mem_p",
        pipeline, is_persistent, is_INT8, scale_data, mask);
J
Jacek Czaja 已提交
1219 1220 1221 1222
  }

  std::shared_ptr<mkldnn::memory> AcquireBiasMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_bias_memory_p,
1223 1224 1225 1226
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f},
      int mask = 0) {  // NOLINT
A
Adam 已提交
1227 1228
    auto user_bias_pd = user_bias_memory_p->get_desc();
    auto bias_pd = conv_pd_->bias_desc();
J
Jacek Czaja 已提交
1229
    return this->AcquireMemory(bias_pd, user_bias_pd, user_bias_memory_p,
1230 1231
                               "@bias_mem_p", pipeline, is_persistent, is_INT8,
                               scale_data, mask);
  }

  mkldnn::primitive_attr CreatePostOps(
      std::string fuse_activation, float fuse_alpha, float fuse_beta,
      bool fuse_residual_conn, const std::vector<float> output_shift_scale = {},
      float sum_scale = 1.0f) const {
    mkldnn::primitive_attr conv_attr;
    mkldnn::post_ops post_operations;
    if (output_shift_scale.size() > 0) {
      int mask = output_shift_scale.size() > 1 ? 1 << 1 : 0;
      conv_attr.set_output_scales(mask, output_shift_scale);
    }
    // Fusion with an elementwise layer relies on adding a sum post-operation
    // with the scale parameter. It is assumed that when fuse_residual_conn is
    // true, the output tensor already contains the data coming from the
    // residual connection, so the result of this post_op is:
    // Output = scale * Output + Conv_Out.
    if (fuse_residual_conn) {
      post_operations.append_sum(sum_scale);
    }
    // Fusion with ReLU layer is executed through the PostOps feature. Create a
    // PostOps object and configure it to execute an eltwise relu operation.
    if (fuse_activation == "relu" || fuse_activation == "leaky_relu") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_relu,
                                     fuse_alpha, fuse_beta);
    } else if (fuse_activation == "relu6") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale,
                                     mkldnn::algorithm::eltwise_bounded_relu,
                                     fuse_alpha, fuse_beta);
    } else if (fuse_activation == "swish") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_swish,
                                     fuse_alpha, fuse_beta);
    }
    conv_attr.set_post_ops(post_operations);
    return conv_attr;
  }
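
  // Usage sketch (illustrative only; argument values are examples): build a
  // primitive_attr that fuses a ReLU activation and a residual sum into the
  // convolution. AcquireConvolutionPrimitiveDescriptor() below calls this
  // helper and attaches the resulting attr to the primitive descriptor.
  //
  //   mkldnn::primitive_attr attr = handler.CreatePostOps(
  //       "relu", /*fuse_alpha=*/0.0f, /*fuse_beta=*/0.0f,
  //       /*fuse_residual_conn=*/true, /*output_shift_scale=*/{},
  //       /*sum_scale=*/1.0f);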

  std::shared_ptr<typename forward_t::primitive_desc>
  AcquireConvolutionPrimitiveDescriptor(
      const mkldnn::memory::desc& src, const mkldnn::memory::desc& weights,
      boost::optional<const mkldnn::memory::desc&> bias,
      const mkldnn::memory::desc& dst, const std::vector<int64_t>& strides,
      const std::vector<int64_t>& dilations,
      const std::vector<int64_t>& paddings, const mkldnn::engine& engine,
      const std::string& fuse_activation, float fuse_alpha, float fuse_beta,
      const bool fuse_residual_conn, mkldnn::prop_kind fwd_prop_kind,
      const std::vector<float> output_shift_scale = {},
      const float sum_scale = 1.0f) {
    // The conv primitive descriptor has to be passed to the grad op, which
    // may be executed by a different thread, hence for that blob we use a
    // key that does not contain the thread id.
    const std::string key_conv_pd = key_common_ + "@conv_pd";

    conv_pd_ = std::static_pointer_cast<typename forward_t::primitive_desc>(
        dev_ctx_.GetBlob(key_conv_pd));

    if (conv_pd_ == nullptr) {
      static std::mutex acquire_barrier;
      std::lock_guard<std::mutex> block_threads_until_finish_this_job(
          acquire_barrier);

      conv_pd_ = std::static_pointer_cast<typename forward_t::primitive_desc>(
          dev_ctx_.GetBlob(key_conv_pd));
      if (conv_pd_ == nullptr) {
        mkldnn::memory::dims stride_dims = strides;
        mkldnn::memory::dims dilations_dims = dilations;
        auto mkldnn_paddings = ToMkldnnPadding(paddings);

        auto conv_desc =
            bias ? typename forward_t::desc(
                       fwd_prop_kind, convolutional_algorithm<forward_t>::T,
                       src, weights, *bias, dst, stride_dims, dilations_dims,
                       mkldnn_paddings[0], mkldnn_paddings[1])
                 : typename forward_t::desc(
                       fwd_prop_kind, convolutional_algorithm<forward_t>::T,
                       src, weights, dst, stride_dims, dilations_dims,
                       mkldnn_paddings[0], mkldnn_paddings[1]);

        mkldnn::primitive_attr conv_attr =
            CreatePostOps(fuse_activation, fuse_alpha, fuse_beta,
                          fuse_residual_conn, output_shift_scale, sum_scale);

        conv_pd_.reset(new typename forward_t::primitive_desc(
            conv_desc, conv_attr, engine));
        // Save conv_pd/src_memory/weights_memory for backward pass
        dev_ctx_.SetBlob(key_conv_pd, conv_pd_);
      }
    }

    return conv_pd_;
  }
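
  // Usage sketch (illustrative only; src_md/weights_md/dst_md and the shape
  // vectors are assumed to be built by the conv kernel): acquire the cached
  // or newly created forward primitive descriptor for an FP32 conv fused
  // with ReLU and no bias.
  //
  //   auto conv_pd = handler.AcquireConvolutionPrimitiveDescriptor(
  //       src_md, weights_md, boost::none, dst_md, strides, dilations,
  //       paddings, mkldnn_engine, "relu", /*fuse_alpha=*/0.0f,
  //       /*fuse_beta=*/0.0f, /*fuse_residual_conn=*/false,
  //       mkldnn::prop_kind::forward_inference);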

  std::shared_ptr<forward_t> AcquireConvolution() {
    auto prim_key = key_ + "@conv_p";
    auto conv_p =
        std::static_pointer_cast<forward_t>(dev_ctx_.GetBlob(prim_key));
    if (conv_p == nullptr) {
      conv_p = std::make_shared<forward_t>(*conv_pd_);

      dev_ctx_.SetBlob(prim_key, conv_p);
    }
    return conv_p;
  }

  std::shared_ptr<backward_weights_t> AcquireConvolutionBackwardWeights() {
    auto prim_key = key_ + "@conv_bwd_weights_p";
    auto conv_bwd_weights_p = std::static_pointer_cast<backward_weights_t>(
        dev_ctx_.GetBlob(prim_key));
    if (conv_bwd_weights_p == nullptr) {
      // create backward conv primitive for weights
      conv_bwd_weights_p =
          std::make_shared<backward_weights_t>(*conv_bwd_weights_pd_);
      dev_ctx_.SetBlob(prim_key, conv_bwd_weights_p);
    }
    return conv_bwd_weights_p;
  }

  std::shared_ptr<backward_data_t> AcquireConvolutionBackwardData() {
    auto prim_key = key_ + "@conv_bwd_data_p";
    auto conv_bwd_data_p =
        std::static_pointer_cast<backward_data_t>(dev_ctx_.GetBlob(prim_key));
    if (conv_bwd_data_p == nullptr) {
      conv_bwd_data_p = std::make_shared<backward_data_t>(*conv_bwd_data_pd_);
      dev_ctx_.SetBlob(prim_key, conv_bwd_data_p);
    }
    return conv_bwd_data_p;
  }

 private:
  std::shared_ptr<typename forward_t::primitive_desc> conv_pd_;
  std::shared_ptr<typename backward_weights_t::primitive_desc>
      conv_bwd_weights_pd_;
  std::shared_ptr<typename backward_data_t::primitive_desc> conv_bwd_data_pd_;
};

using ConvMKLDNNHandler =
    ConvMKLDNNTemplateHandler<mkldnn::convolution_forward,
                              mkldnn::convolution_backward_data,
                              mkldnn::convolution_backward_weights>;

using ConvTransposeMKLDNNHandler =
    ConvMKLDNNTemplateHandler<mkldnn::deconvolution_forward,
                              mkldnn::deconvolution_backward_data,
                              mkldnn::deconvolution_backward_weights>;
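
// Typical forward-pass flow with ConvMKLDNNHandler (a sketch; `handler`, the
// user memories, `pipeline` and the execution stream `astream` are assumed
// to be created by the conv kernel, and all names are illustrative):
//
//   auto conv_pd = handler.AcquireConvolutionPrimitiveDescriptor(/*...*/);
//   auto src_mem = handler.AcquireSrcMemoryFromPrimitive(user_src_mem,
//                                                        pipeline);
//   auto weights_mem =
//       handler.AcquireWeightsMemoryFromPrimitive(user_weights_mem, pipeline);
//   auto dst_mem = handler.AcquireDstMemoryFromPrimitive(output_ptr);
//   auto conv_p = handler.AcquireConvolution();
//   conv_p->execute(astream, {{MKLDNN_ARG_SRC, *src_mem},
//                             {MKLDNN_ARG_WEIGHTS, *weights_mem},
//                             {MKLDNN_ARG_DST, *dst_mem}});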

template <typename T>
static std::shared_ptr<mkldnn::memory> SetDstMemory(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const std::shared_ptr<ConvMKLDNNHandler>& handler) {
  T* output_data =
      output->mutable_data<T>(ctx.GetPlace(), handler->GetDstMemorySize());
  std::shared_ptr<mkldnn::memory> dst_memory_p =
      handler->AcquireDstMemoryFromPrimitive(to_void_cast<T>(output_data));
  return dst_memory_p;
}

template <typename T>
static std::shared_ptr<mkldnn::memory> SetDstMemory(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const framework::Tensor* residual_param,
    const mkldnn::memory::desc& user_residual_md,
    const std::shared_ptr<ConvMKLDNNHandler>& handler,
    std::vector<mkldnn::primitive>* pipeline) {
  const T* residual_param_data = residual_param->data<T>();
  PADDLE_ENFORCE_NOT_NULL(
      residual_param_data,
      platform::errors::PreconditionNotMet("Residual parameter is required for "
                                           "the DNNL conv+elementwise_add "
                                           "fusion, but now it is missing."));
  std::shared_ptr<mkldnn::memory> user_residual_memory_p =
      handler->AcquireResidualDataMemory(user_residual_md,
                                         to_void_cast<T>(residual_param_data));
  T* output_data = output->mutable_data<T>(ctx.GetPlace());
  std::shared_ptr<mkldnn::memory> dst_memory_p =
      handler->AcquireDstMemoryFromResidualDataMemory(
          user_residual_memory_p, to_void_cast<T>(output_data), *pipeline);
  return dst_memory_p;
}

template <typename T>
static void SetDstMemoryHandler(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const std::shared_ptr<ConvMKLDNNHandler>& handler,
    std::shared_ptr<mkldnn::memory> dst_memory_p) {
  T* output_data =
      output->mutable_data<T>(ctx.GetPlace(), handler->GetDstMemorySize());
  dst_memory_p->set_data_handle(to_void_cast<T>(output_data));
}
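
// Usage sketch (illustrative only): the helpers above are meant to be called
// from conv kernels that use ConvMKLDNNHandler, e.g.
//
//   auto dst_memory_p = platform::SetDstMemory<float>(ctx, output, handler);
//
// or, when the conv output is fused with a residual tensor,
//
//   auto dst_memory_p = platform::SetDstMemory<float>(
//       ctx, output, residual_param, user_residual_md, handler, &pipeline);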

template <typename T>
static void SetDstMemoryQuantized(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    std::vector<int64_t> dst_tz, const mkldnn::engine& engine,
    std::shared_ptr<mkldnn::memory::desc>& dst_md,  // NOLINT
    std::shared_ptr<mkldnn::memory>& dst_memory,    // NOLINT
    MKLDNNMemoryFormat output_format) {
  T* output_data = output->mutable_data<T>(ctx.GetPlace());
  const size_t dst_dims = dst_tz.size();
  MKLDNNMemoryFormat dst_fmt;
  PADDLE_ENFORCE_LE(dst_dims, 5, platform::errors::InvalidArgument(
                                     "Dst memory for quantization cannot have "
                                     "more than 5 dims. Received dst_dims: %d.",
                                     dst_dims));
  dst_fmt = platform::MKLDNNFormatForSize(dst_dims, output_format);

  auto tmp_dst_md = platform::MKLDNNMemDesc(
      {dst_tz}, paddle::framework::ToMKLDNNDataType(
                    framework::DataTypeTrait<T>::DataType()),
      dst_fmt);
  dst_md.reset(new mkldnn::memory::desc(tmp_dst_md));
  dst_memory.reset(
      new mkldnn::memory(*dst_md, engine, to_void_cast<T>(output_data)));
}
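
// Usage sketch (illustrative only; the format_tag is an example and
// dst_md/dst_memory are out-parameters later passed to the primitive):
//
//   std::shared_ptr<mkldnn::memory::desc> dst_md;
//   std::shared_ptr<mkldnn::memory> dst_memory;
//   platform::SetDstMemoryQuantized<int8_t>(ctx, output, dst_tz,
//                                           mkldnn_engine, dst_md, dst_memory,
//                                           MKLDNNMemoryFormat::nhwc);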
}  // namespace platform
}  // namespace paddle